xref: /openbmc/linux/drivers/nvme/host/core.c (revision ff148d8a)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * NVM Express device driver
4  * Copyright (c) 2011-2014, Intel Corporation.
5  */
6 
7 #include <linux/blkdev.h>
8 #include <linux/blk-mq.h>
9 #include <linux/delay.h>
10 #include <linux/errno.h>
11 #include <linux/hdreg.h>
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/list_sort.h>
15 #include <linux/slab.h>
16 #include <linux/types.h>
17 #include <linux/pr.h>
18 #include <linux/ptrace.h>
19 #include <linux/nvme_ioctl.h>
20 #include <linux/t10-pi.h>
21 #include <linux/pm_qos.h>
22 #include <asm/unaligned.h>
23 
24 #define CREATE_TRACE_POINTS
25 #include "trace.h"
26 
27 #include "nvme.h"
28 #include "fabrics.h"
29 
30 #define NVME_MINORS		(1U << MINORBITS)
31 
32 unsigned int admin_timeout = 60;
33 module_param(admin_timeout, uint, 0644);
34 MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
35 EXPORT_SYMBOL_GPL(admin_timeout);
36 
37 unsigned int nvme_io_timeout = 30;
38 module_param_named(io_timeout, nvme_io_timeout, uint, 0644);
39 MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
40 EXPORT_SYMBOL_GPL(nvme_io_timeout);
41 
42 static unsigned char shutdown_timeout = 5;
43 module_param(shutdown_timeout, byte, 0644);
44 MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
45 
46 static u8 nvme_max_retries = 5;
47 module_param_named(max_retries, nvme_max_retries, byte, 0644);
48 MODULE_PARM_DESC(max_retries, "max number of retries a command may have");
49 
50 static unsigned long default_ps_max_latency_us = 100000;
51 module_param(default_ps_max_latency_us, ulong, 0644);
52 MODULE_PARM_DESC(default_ps_max_latency_us,
53 		 "max power saving latency for new devices; use PM QOS to change per device");
54 
55 static bool force_apst;
56 module_param(force_apst, bool, 0644);
57 MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");
58 
59 static bool streams;
60 module_param(streams, bool, 0644);
61 MODULE_PARM_DESC(streams, "turn on support for Streams write directives");
62 
63 /*
64  * nvme_wq - hosts nvme related works that are not reset or delete
65  * nvme_reset_wq - hosts nvme reset works
66  * nvme_delete_wq - hosts nvme delete works
67  *
68  * nvme_wq will host works such as scan, aen handling, fw activation,
69  * keep-alive error recovery, periodic reconnects etc. nvme_reset_wq
70  * runs reset works which also flush works hosted on nvme_wq for
71  * serialization purposes. nvme_delete_wq hosts controller deletion
72  * works which flush reset works for serialization.
73  */
74 struct workqueue_struct *nvme_wq;
75 EXPORT_SYMBOL_GPL(nvme_wq);
76 
77 struct workqueue_struct *nvme_reset_wq;
78 EXPORT_SYMBOL_GPL(nvme_reset_wq);
79 
80 struct workqueue_struct *nvme_delete_wq;
81 EXPORT_SYMBOL_GPL(nvme_delete_wq);
82 
83 static DEFINE_IDA(nvme_subsystems_ida);
84 static LIST_HEAD(nvme_subsystems);
85 static DEFINE_MUTEX(nvme_subsystems_lock);
86 
87 static DEFINE_IDA(nvme_instance_ida);
88 static dev_t nvme_chr_devt;
89 static struct class *nvme_class;
90 static struct class *nvme_subsys_class;
91 
92 static int nvme_revalidate_disk(struct gendisk *disk);
93 static void nvme_put_subsystem(struct nvme_subsystem *subsys);
94 static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
95 					   unsigned nsid);
96 
97 static void nvme_set_queue_dying(struct nvme_ns *ns)
98 {
99 	/*
100 	 * Revalidating a dead namespace sets capacity to 0. This will prevent
101 	 * buffered writers from dirtying pages that can't be synced.
102 	 */
103 	if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
104 		return;
105 	revalidate_disk(ns->disk);
106 	blk_set_queue_dying(ns->queue);
107 	/* Forcibly unquiesce queues to avoid blocking dispatch */
108 	blk_mq_unquiesce_queue(ns->queue);
109 }
110 
111 static void nvme_queue_scan(struct nvme_ctrl *ctrl)
112 {
113 	/*
114 	 * Only queue new scan work when admin and IO queues are both alive
115 	 */
116 	if (ctrl->state == NVME_CTRL_LIVE)
117 		queue_work(nvme_wq, &ctrl->scan_work);
118 }
119 
120 int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
121 {
122 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
123 		return -EBUSY;
124 	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
125 		return -EBUSY;
126 	return 0;
127 }
128 EXPORT_SYMBOL_GPL(nvme_reset_ctrl);
129 
130 int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
131 {
132 	int ret;
133 
134 	ret = nvme_reset_ctrl(ctrl);
135 	if (!ret) {
136 		flush_work(&ctrl->reset_work);
137 		if (ctrl->state != NVME_CTRL_LIVE &&
138 		    ctrl->state != NVME_CTRL_ADMIN_ONLY)
139 			ret = -ENETRESET;
140 	}
141 
142 	return ret;
143 }
144 EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);
145 
146 static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
147 {
148 	dev_info(ctrl->device,
149 		 "Removing ctrl: NQN \"%s\"\n", ctrl->opts->subsysnqn);
150 
151 	flush_work(&ctrl->reset_work);
152 	nvme_stop_ctrl(ctrl);
153 	nvme_remove_namespaces(ctrl);
154 	ctrl->ops->delete_ctrl(ctrl);
155 	nvme_uninit_ctrl(ctrl);
156 	nvme_put_ctrl(ctrl);
157 }
158 
159 static void nvme_delete_ctrl_work(struct work_struct *work)
160 {
161 	struct nvme_ctrl *ctrl =
162 		container_of(work, struct nvme_ctrl, delete_work);
163 
164 	nvme_do_delete_ctrl(ctrl);
165 }
166 
167 int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
168 {
169 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
170 		return -EBUSY;
171 	if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
172 		return -EBUSY;
173 	return 0;
174 }
175 EXPORT_SYMBOL_GPL(nvme_delete_ctrl);
176 
177 static int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
178 {
179 	int ret = 0;
180 
181 	/*
182 	 * Keep a reference until nvme_do_delete_ctrl() completes,
183 	 * since ->delete_ctrl can free the controller.
184 	 */
185 	nvme_get_ctrl(ctrl);
186 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
187 		ret = -EBUSY;
188 	if (!ret)
189 		nvme_do_delete_ctrl(ctrl);
190 	nvme_put_ctrl(ctrl);
191 	return ret;
192 }
193 
194 static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
195 {
196 	return ns->pi_type && ns->ms == sizeof(struct t10_pi_tuple);
197 }
198 
199 static blk_status_t nvme_error_status(struct request *req)
200 {
201 	switch (nvme_req(req)->status & 0x7ff) {
202 	case NVME_SC_SUCCESS:
203 		return BLK_STS_OK;
204 	case NVME_SC_CAP_EXCEEDED:
205 		return BLK_STS_NOSPC;
206 	case NVME_SC_LBA_RANGE:
207 		return BLK_STS_TARGET;
208 	case NVME_SC_BAD_ATTRIBUTES:
209 	case NVME_SC_ONCS_NOT_SUPPORTED:
210 	case NVME_SC_INVALID_OPCODE:
211 	case NVME_SC_INVALID_FIELD:
212 	case NVME_SC_INVALID_NS:
213 		return BLK_STS_NOTSUPP;
214 	case NVME_SC_WRITE_FAULT:
215 	case NVME_SC_READ_ERROR:
216 	case NVME_SC_UNWRITTEN_BLOCK:
217 	case NVME_SC_ACCESS_DENIED:
218 	case NVME_SC_READ_ONLY:
219 	case NVME_SC_COMPARE_FAILED:
220 		return BLK_STS_MEDIUM;
221 	case NVME_SC_GUARD_CHECK:
222 	case NVME_SC_APPTAG_CHECK:
223 	case NVME_SC_REFTAG_CHECK:
224 	case NVME_SC_INVALID_PI:
225 		return BLK_STS_PROTECTION;
226 	case NVME_SC_RESERVATION_CONFLICT:
227 		return BLK_STS_NEXUS;
228 	default:
229 		return BLK_STS_IOERR;
230 	}
231 }
232 
233 static inline bool nvme_req_needs_retry(struct request *req)
234 {
235 	if (blk_noretry_request(req))
236 		return false;
237 	if (nvme_req(req)->status & NVME_SC_DNR)
238 		return false;
239 	if (nvme_req(req)->retries >= nvme_max_retries)
240 		return false;
241 	return true;
242 }
243 
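/*
 * Requeue a failed command for another attempt.  If the controller reported
 * a Command Retry Delay (CRD), honour it by delaying the requeue kick; the
 * crdt[] values are in units of 100 milliseconds.
 */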
244 static void nvme_retry_req(struct request *req)
245 {
246 	struct nvme_ns *ns = req->q->queuedata;
247 	unsigned long delay = 0;
248 	u16 crd;
249 
250 	/* The mask and shift result must be <= 3 */
251 	crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
252 	if (ns && crd)
253 		delay = ns->ctrl->crdt[crd - 1] * 100;
254 
255 	nvme_req(req)->retries++;
256 	blk_mq_requeue_request(req, false);
257 	blk_mq_delay_kick_requeue_list(req->q, delay);
258 }
259 
260 void nvme_complete_rq(struct request *req)
261 {
262 	blk_status_t status = nvme_error_status(req);
263 
264 	trace_nvme_complete_rq(req);
265 
266 	if (nvme_req(req)->ctrl->kas)
267 		nvme_req(req)->ctrl->comp_seen = true;
268 
269 	if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) {
270 		if ((req->cmd_flags & REQ_NVME_MPATH) &&
271 		    blk_path_error(status)) {
272 			nvme_failover_req(req);
273 			return;
274 		}
275 
276 		if (!blk_queue_dying(req->q)) {
277 			nvme_retry_req(req);
278 			return;
279 		}
280 	}
281 	blk_mq_end_request(req, status);
282 }
283 EXPORT_SYMBOL_GPL(nvme_complete_rq);
284 
285 bool nvme_cancel_request(struct request *req, void *data, bool reserved)
286 {
287 	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
288 				"Cancelling I/O %d", req->tag);
289 
290 	nvme_req(req)->status = NVME_SC_ABORT_REQ;
291 	blk_mq_complete_request_sync(req);
292 	return true;
293 }
294 EXPORT_SYMBOL_GPL(nvme_cancel_request);
295 
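/*
 * Controller state machine: atomically move the controller to @new_state if
 * the transition from the current state is valid.  Returns true when the
 * state was changed, false when the transition is not allowed.
 */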
296 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
297 		enum nvme_ctrl_state new_state)
298 {
299 	enum nvme_ctrl_state old_state;
300 	unsigned long flags;
301 	bool changed = false;
302 
303 	spin_lock_irqsave(&ctrl->lock, flags);
304 
305 	old_state = ctrl->state;
306 	switch (new_state) {
307 	case NVME_CTRL_ADMIN_ONLY:
308 		switch (old_state) {
309 		case NVME_CTRL_CONNECTING:
310 			changed = true;
311 			/* FALLTHRU */
312 		default:
313 			break;
314 		}
315 		break;
316 	case NVME_CTRL_LIVE:
317 		switch (old_state) {
318 		case NVME_CTRL_NEW:
319 		case NVME_CTRL_RESETTING:
320 		case NVME_CTRL_CONNECTING:
321 			changed = true;
322 			/* FALLTHRU */
323 		default:
324 			break;
325 		}
326 		break;
327 	case NVME_CTRL_RESETTING:
328 		switch (old_state) {
329 		case NVME_CTRL_NEW:
330 		case NVME_CTRL_LIVE:
331 		case NVME_CTRL_ADMIN_ONLY:
332 			changed = true;
333 			/* FALLTHRU */
334 		default:
335 			break;
336 		}
337 		break;
338 	case NVME_CTRL_CONNECTING:
339 		switch (old_state) {
340 		case NVME_CTRL_NEW:
341 		case NVME_CTRL_RESETTING:
342 			changed = true;
343 			/* FALLTHRU */
344 		default:
345 			break;
346 		}
347 		break;
348 	case NVME_CTRL_DELETING:
349 		switch (old_state) {
350 		case NVME_CTRL_LIVE:
351 		case NVME_CTRL_ADMIN_ONLY:
352 		case NVME_CTRL_RESETTING:
353 		case NVME_CTRL_CONNECTING:
354 			changed = true;
355 			/* FALLTHRU */
356 		default:
357 			break;
358 		}
359 		break;
360 	case NVME_CTRL_DEAD:
361 		switch (old_state) {
362 		case NVME_CTRL_DELETING:
363 			changed = true;
364 			/* FALLTHRU */
365 		default:
366 			break;
367 		}
368 		break;
369 	default:
370 		break;
371 	}
372 
373 	if (changed)
374 		ctrl->state = new_state;
375 
376 	spin_unlock_irqrestore(&ctrl->lock, flags);
377 	if (changed && ctrl->state == NVME_CTRL_LIVE)
378 		nvme_kick_requeue_lists(ctrl);
379 	return changed;
380 }
381 EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
382 
383 static void nvme_free_ns_head(struct kref *ref)
384 {
385 	struct nvme_ns_head *head =
386 		container_of(ref, struct nvme_ns_head, ref);
387 
388 	nvme_mpath_remove_disk(head);
389 	ida_simple_remove(&head->subsys->ns_ida, head->instance);
390 	list_del_init(&head->entry);
391 	cleanup_srcu_struct(&head->srcu);
392 	nvme_put_subsystem(head->subsys);
393 	kfree(head);
394 }
395 
396 static void nvme_put_ns_head(struct nvme_ns_head *head)
397 {
398 	kref_put(&head->ref, nvme_free_ns_head);
399 }
400 
401 static void nvme_free_ns(struct kref *kref)
402 {
403 	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);
404 
405 	if (ns->ndev)
406 		nvme_nvm_unregister(ns);
407 
408 	put_disk(ns->disk);
409 	nvme_put_ns_head(ns->head);
410 	nvme_put_ctrl(ns->ctrl);
411 	kfree(ns);
412 }
413 
414 static void nvme_put_ns(struct nvme_ns *ns)
415 {
416 	kref_put(&ns->kref, nvme_free_ns);
417 }
418 
419 static inline void nvme_clear_nvme_request(struct request *req)
420 {
421 	if (!(req->rq_flags & RQF_DONTPREP)) {
422 		nvme_req(req)->retries = 0;
423 		nvme_req(req)->flags = 0;
424 		req->rq_flags |= RQF_DONTPREP;
425 	}
426 }
427 
428 struct request *nvme_alloc_request(struct request_queue *q,
429 		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
430 {
431 	unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
432 	struct request *req;
433 
434 	if (qid == NVME_QID_ANY) {
435 		req = blk_mq_alloc_request(q, op, flags);
436 	} else {
437 		req = blk_mq_alloc_request_hctx(q, op, flags,
438 				qid ? qid - 1 : 0);
439 	}
440 	if (IS_ERR(req))
441 		return req;
442 
443 	req->cmd_flags |= REQ_FAILFAST_DRIVER;
444 	nvme_clear_nvme_request(req);
445 	nvme_req(req)->cmd = cmd;
446 
447 	return req;
448 }
449 EXPORT_SYMBOL_GPL(nvme_alloc_request);
450 
451 static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
452 {
453 	struct nvme_command c;
454 
455 	memset(&c, 0, sizeof(c));
456 
457 	c.directive.opcode = nvme_admin_directive_send;
458 	c.directive.nsid = cpu_to_le32(NVME_NSID_ALL);
459 	c.directive.doper = NVME_DIR_SND_ID_OP_ENABLE;
460 	c.directive.dtype = NVME_DIR_IDENTIFY;
461 	c.directive.tdtype = NVME_DIR_STREAMS;
462 	c.directive.endir = enable ? NVME_DIR_ENDIR : 0;
463 
464 	return nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, 0);
465 }
466 
467 static int nvme_disable_streams(struct nvme_ctrl *ctrl)
468 {
469 	return nvme_toggle_streams(ctrl, false);
470 }
471 
472 static int nvme_enable_streams(struct nvme_ctrl *ctrl)
473 {
474 	return nvme_toggle_streams(ctrl, true);
475 }
476 
477 static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
478 				  struct streams_directive_params *s, u32 nsid)
479 {
480 	struct nvme_command c;
481 
482 	memset(&c, 0, sizeof(c));
483 	memset(s, 0, sizeof(*s));
484 
485 	c.directive.opcode = nvme_admin_directive_recv;
486 	c.directive.nsid = cpu_to_le32(nsid);
487 	c.directive.numd = cpu_to_le32((sizeof(*s) >> 2) - 1);
488 	c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
489 	c.directive.dtype = NVME_DIR_STREAMS;
490 
491 	return nvme_submit_sync_cmd(ctrl->admin_q, &c, s, sizeof(*s));
492 }
493 
494 static int nvme_configure_directives(struct nvme_ctrl *ctrl)
495 {
496 	struct streams_directive_params s;
497 	int ret;
498 
499 	if (!(ctrl->oacs & NVME_CTRL_OACS_DIRECTIVES))
500 		return 0;
501 	if (!streams)
502 		return 0;
503 
504 	ret = nvme_enable_streams(ctrl);
505 	if (ret)
506 		return ret;
507 
508 	ret = nvme_get_stream_params(ctrl, &s, NVME_NSID_ALL);
509 	if (ret)
510 		return ret;
511 
512 	ctrl->nssa = le16_to_cpu(s.nssa);
513 	if (ctrl->nssa < BLK_MAX_WRITE_HINTS - 1) {
514 		dev_info(ctrl->device, "too few streams (%u) available\n",
515 					ctrl->nssa);
516 		nvme_disable_streams(ctrl);
517 		return 0;
518 	}
519 
520 	ctrl->nr_streams = min_t(unsigned, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1);
521 	dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams);
522 	return 0;
523 }
524 
525 /*
526  * Check if 'req' has a write hint associated with it. If it does, assign
527  * a valid namespace stream to the write.
528  */
529 static void nvme_assign_write_stream(struct nvme_ctrl *ctrl,
530 				     struct request *req, u16 *control,
531 				     u32 *dsmgmt)
532 {
533 	enum rw_hint streamid = req->write_hint;
534 
535 	if (streamid == WRITE_LIFE_NOT_SET || streamid == WRITE_LIFE_NONE)
536 		streamid = 0;
537 	else {
538 		streamid--;
539 		if (WARN_ON_ONCE(streamid > ctrl->nr_streams))
540 			return;
541 
542 		*control |= NVME_RW_DTYPE_STREAMS;
543 		*dsmgmt |= streamid << 16;
544 	}
545 
546 	if (streamid < ARRAY_SIZE(req->q->write_hints))
547 		req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9;
548 }
549 
550 static inline void nvme_setup_flush(struct nvme_ns *ns,
551 		struct nvme_command *cmnd)
552 {
553 	cmnd->common.opcode = nvme_cmd_flush;
554 	cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
555 }
556 
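/*
 * Translate a discard request into an NVMe Dataset Management (deallocate)
 * command, building one DSM range descriptor per bio in the request.
 */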
557 static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
558 		struct nvme_command *cmnd)
559 {
560 	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
561 	struct nvme_dsm_range *range;
562 	struct bio *bio;
563 
564 	range = kmalloc_array(segments, sizeof(*range),
565 				GFP_ATOMIC | __GFP_NOWARN);
566 	if (!range) {
567 		 * If we fail to allocate our range, fall back to the controller
568 		 * If we fail allocation our range, fallback to the controller
569 		 * discard page. If that's also busy, it's safe to return
570 		 * busy, as we know we can make progress once that's freed.
571 		 */
572 		if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy))
573 			return BLK_STS_RESOURCE;
574 
575 		range = page_address(ns->ctrl->discard_page);
576 	}
577 
578 	__rq_for_each_bio(bio, req) {
579 		u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
580 		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
581 
582 		if (n < segments) {
583 			range[n].cattr = cpu_to_le32(0);
584 			range[n].nlb = cpu_to_le32(nlb);
585 			range[n].slba = cpu_to_le64(slba);
586 		}
587 		n++;
588 	}
589 
590 	if (WARN_ON_ONCE(n != segments)) {
591 		if (virt_to_page(range) == ns->ctrl->discard_page)
592 			clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
593 		else
594 			kfree(range);
595 		return BLK_STS_IOERR;
596 	}
597 
598 	cmnd->dsm.opcode = nvme_cmd_dsm;
599 	cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
600 	cmnd->dsm.nr = cpu_to_le32(segments - 1);
601 	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
602 
603 	req->special_vec.bv_page = virt_to_page(range);
604 	req->special_vec.bv_offset = offset_in_page(range);
605 	req->special_vec.bv_len = sizeof(*range) * segments;
606 	req->rq_flags |= RQF_SPECIAL_PAYLOAD;
607 
608 	return BLK_STS_OK;
609 }
610 
611 static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
612 		struct request *req, struct nvme_command *cmnd)
613 {
614 	if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
615 		return nvme_setup_discard(ns, req, cmnd);
616 
617 	cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes;
618 	cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
619 	cmnd->write_zeroes.slba =
620 		cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
621 	cmnd->write_zeroes.length =
622 		cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
623 	cmnd->write_zeroes.control = 0;
624 	return BLK_STS_OK;
625 }
626 
627 static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
628 		struct request *req, struct nvme_command *cmnd)
629 {
630 	struct nvme_ctrl *ctrl = ns->ctrl;
631 	u16 control = 0;
632 	u32 dsmgmt = 0;
633 
634 	if (req->cmd_flags & REQ_FUA)
635 		control |= NVME_RW_FUA;
636 	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
637 		control |= NVME_RW_LR;
638 
639 	if (req->cmd_flags & REQ_RAHEAD)
640 		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
641 
642 	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
643 	cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
644 	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
645 	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
646 
647 	if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams)
648 		nvme_assign_write_stream(ctrl, req, &control, &dsmgmt);
649 
650 	if (ns->ms) {
651 		/*
652 		 * If formatted with metadata, the block layer always provides a
653 		 * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled.  Else
654 		 * we enable the PRACT bit for protection information or set the
655 		 * namespace capacity to zero to prevent any I/O.
656 		 */
657 		if (!blk_integrity_rq(req)) {
658 			if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
659 				return BLK_STS_NOTSUPP;
660 			control |= NVME_RW_PRINFO_PRACT;
661 		} else if (req_op(req) == REQ_OP_WRITE) {
662 			t10_pi_prepare(req, ns->pi_type);
663 		}
664 
665 		switch (ns->pi_type) {
666 		case NVME_NS_DPS_PI_TYPE3:
667 			control |= NVME_RW_PRINFO_PRCHK_GUARD;
668 			break;
669 		case NVME_NS_DPS_PI_TYPE1:
670 		case NVME_NS_DPS_PI_TYPE2:
671 			control |= NVME_RW_PRINFO_PRCHK_GUARD |
672 					NVME_RW_PRINFO_PRCHK_REF;
673 			cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
674 			break;
675 		}
676 	}
677 
678 	cmnd->rw.control = cpu_to_le16(control);
679 	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
680 	return BLK_STS_OK;
681 }
682 
683 void nvme_cleanup_cmd(struct request *req)
684 {
685 	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
686 	    nvme_req(req)->status == 0) {
687 		struct nvme_ns *ns = req->rq_disk->private_data;
688 
689 		t10_pi_complete(req, ns->pi_type,
690 				blk_rq_bytes(req) >> ns->lba_shift);
691 	}
692 	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
693 		struct nvme_ns *ns = req->rq_disk->private_data;
694 		struct page *page = req->special_vec.bv_page;
695 
696 		if (page == ns->ctrl->discard_page)
697 			clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
698 		else
699 			kfree(page_address(page) + req->special_vec.bv_offset);
700 	}
701 }
702 EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
703 
704 blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
705 		struct nvme_command *cmd)
706 {
707 	blk_status_t ret = BLK_STS_OK;
708 
709 	nvme_clear_nvme_request(req);
710 
711 	memset(cmd, 0, sizeof(*cmd));
712 	switch (req_op(req)) {
713 	case REQ_OP_DRV_IN:
714 	case REQ_OP_DRV_OUT:
715 		memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
716 		break;
717 	case REQ_OP_FLUSH:
718 		nvme_setup_flush(ns, cmd);
719 		break;
720 	case REQ_OP_WRITE_ZEROES:
721 		ret = nvme_setup_write_zeroes(ns, req, cmd);
722 		break;
723 	case REQ_OP_DISCARD:
724 		ret = nvme_setup_discard(ns, req, cmd);
725 		break;
726 	case REQ_OP_READ:
727 	case REQ_OP_WRITE:
728 		ret = nvme_setup_rw(ns, req, cmd);
729 		break;
730 	default:
731 		WARN_ON_ONCE(1);
732 		return BLK_STS_IOERR;
733 	}
734 
735 	cmd->common.command_id = req->tag;
736 	trace_nvme_setup_cmd(req, cmd);
737 	return ret;
738 }
739 EXPORT_SYMBOL_GPL(nvme_setup_cmd);
740 
741 static void nvme_end_sync_rq(struct request *rq, blk_status_t error)
742 {
743 	struct completion *waiting = rq->end_io_data;
744 
745 	rq->end_io_data = NULL;
746 	complete(waiting);
747 }
748 
749 static void nvme_execute_rq_polled(struct request_queue *q,
750 		struct gendisk *bd_disk, struct request *rq, int at_head)
751 {
752 	DECLARE_COMPLETION_ONSTACK(wait);
753 
754 	WARN_ON_ONCE(!test_bit(QUEUE_FLAG_POLL, &q->queue_flags));
755 
756 	rq->cmd_flags |= REQ_HIPRI;
757 	rq->end_io_data = &wait;
758 	blk_execute_rq_nowait(q, bd_disk, rq, at_head, nvme_end_sync_rq);
759 
760 	while (!completion_done(&wait)) {
761 		blk_poll(q, request_to_qc_t(rq->mq_hctx, rq), true);
762 		cond_resched();
763 	}
764 }
765 
766 /*
767  * Returns 0 on success.  If the result is negative, it's a Linux error code;
768  * if the result is positive, it's an NVM Express status code
769  */
770 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
771 		union nvme_result *result, void *buffer, unsigned bufflen,
772 		unsigned timeout, int qid, int at_head,
773 		blk_mq_req_flags_t flags, bool poll)
774 {
775 	struct request *req;
776 	int ret;
777 
778 	req = nvme_alloc_request(q, cmd, flags, qid);
779 	if (IS_ERR(req))
780 		return PTR_ERR(req);
781 
782 	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
783 
784 	if (buffer && bufflen) {
785 		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
786 		if (ret)
787 			goto out;
788 	}
789 
790 	if (poll)
791 		nvme_execute_rq_polled(req->q, NULL, req, at_head);
792 	else
793 		blk_execute_rq(req->q, NULL, req, at_head);
794 	if (result)
795 		*result = nvme_req(req)->result;
796 	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
797 		ret = -EINTR;
798 	else
799 		ret = nvme_req(req)->status;
800  out:
801 	blk_mq_free_request(req);
802 	return ret;
803 }
804 EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
805 
806 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
807 		void *buffer, unsigned bufflen)
808 {
809 	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
810 			NVME_QID_ANY, 0, 0, false);
811 }
812 EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
813 
814 static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
815 		unsigned len, u32 seed, bool write)
816 {
817 	struct bio_integrity_payload *bip;
818 	int ret = -ENOMEM;
819 	void *buf;
820 
821 	buf = kmalloc(len, GFP_KERNEL);
822 	if (!buf)
823 		goto out;
824 
825 	ret = -EFAULT;
826 	if (write && copy_from_user(buf, ubuf, len))
827 		goto out_free_meta;
828 
829 	bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
830 	if (IS_ERR(bip)) {
831 		ret = PTR_ERR(bip);
832 		goto out_free_meta;
833 	}
834 
835 	bip->bip_iter.bi_size = len;
836 	bip->bip_iter.bi_sector = seed;
837 	ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
838 			offset_in_page(buf));
839 	if (ret == len)
840 		return buf;
841 	ret = -ENOMEM;
842 out_free_meta:
843 	kfree(buf);
844 out:
845 	return ERR_PTR(ret);
846 }
847 
848 static int nvme_submit_user_cmd(struct request_queue *q,
849 		struct nvme_command *cmd, void __user *ubuffer,
850 		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
851 		u32 meta_seed, u32 *result, unsigned timeout)
852 {
853 	bool write = nvme_is_write(cmd);
854 	struct nvme_ns *ns = q->queuedata;
855 	struct gendisk *disk = ns ? ns->disk : NULL;
856 	struct request *req;
857 	struct bio *bio = NULL;
858 	void *meta = NULL;
859 	int ret;
860 
861 	req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);
862 	if (IS_ERR(req))
863 		return PTR_ERR(req);
864 
865 	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
866 	nvme_req(req)->flags |= NVME_REQ_USERCMD;
867 
868 	if (ubuffer && bufflen) {
869 		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
870 				GFP_KERNEL);
871 		if (ret)
872 			goto out;
873 		bio = req->bio;
874 		bio->bi_disk = disk;
875 		if (disk && meta_buffer && meta_len) {
876 			meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
877 					meta_seed, write);
878 			if (IS_ERR(meta)) {
879 				ret = PTR_ERR(meta);
880 				goto out_unmap;
881 			}
882 			req->cmd_flags |= REQ_INTEGRITY;
883 		}
884 	}
885 
886 	blk_execute_rq(req->q, disk, req, 0);
887 	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
888 		ret = -EINTR;
889 	else
890 		ret = nvme_req(req)->status;
891 	if (result)
892 		*result = le32_to_cpu(nvme_req(req)->result.u32);
893 	if (meta && !ret && !write) {
894 		if (copy_to_user(meta_buffer, meta, meta_len))
895 			ret = -EFAULT;
896 	}
897 	kfree(meta);
898  out_unmap:
899 	if (bio)
900 		blk_rq_unmap_user(bio);
901  out:
902 	blk_mq_free_request(req);
903 	return ret;
904 }
905 
906 static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
907 {
908 	struct nvme_ctrl *ctrl = rq->end_io_data;
909 	unsigned long flags;
910 	bool startka = false;
911 
912 	blk_mq_free_request(rq);
913 
914 	if (status) {
915 		dev_err(ctrl->device,
916 			"failed nvme_keep_alive_end_io error=%d\n",
917 				status);
918 		return;
919 	}
920 
921 	ctrl->comp_seen = false;
922 	spin_lock_irqsave(&ctrl->lock, flags);
923 	if (ctrl->state == NVME_CTRL_LIVE ||
924 	    ctrl->state == NVME_CTRL_CONNECTING)
925 		startka = true;
926 	spin_unlock_irqrestore(&ctrl->lock, flags);
927 	if (startka)
928 		schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
929 }
930 
931 static int nvme_keep_alive(struct nvme_ctrl *ctrl)
932 {
933 	struct request *rq;
934 
935 	rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, BLK_MQ_REQ_RESERVED,
936 			NVME_QID_ANY);
937 	if (IS_ERR(rq))
938 		return PTR_ERR(rq);
939 
940 	rq->timeout = ctrl->kato * HZ;
941 	rq->end_io_data = ctrl;
942 
943 	blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io);
944 
945 	return 0;
946 }
947 
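/*
 * Periodic keep-alive work.  With Traffic Based Keep Alive (TBKAS) enabled,
 * seeing command completions since the last run counts as keep-alive traffic,
 * so the timer is simply rearmed instead of sending a Keep Alive command.
 */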
948 static void nvme_keep_alive_work(struct work_struct *work)
949 {
950 	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
951 			struct nvme_ctrl, ka_work);
952 	bool comp_seen = ctrl->comp_seen;
953 
954 	if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
955 		dev_dbg(ctrl->device,
956 			"reschedule traffic based keep-alive timer\n");
957 		ctrl->comp_seen = false;
958 		schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
959 		return;
960 	}
961 
962 	if (nvme_keep_alive(ctrl)) {
963 		/* allocation failure, reset the controller */
964 		dev_err(ctrl->device, "keep-alive failed\n");
965 		nvme_reset_ctrl(ctrl);
966 		return;
967 	}
968 }
969 
970 static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
971 {
972 	if (unlikely(ctrl->kato == 0))
973 		return;
974 
975 	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
976 }
977 
978 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
979 {
980 	if (unlikely(ctrl->kato == 0))
981 		return;
982 
983 	cancel_delayed_work_sync(&ctrl->ka_work);
984 }
985 EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);
986 
987 static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
988 {
989 	struct nvme_command c = { };
990 	int error;
991 
992 	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
993 	c.identify.opcode = nvme_admin_identify;
994 	c.identify.cns = NVME_ID_CNS_CTRL;
995 
996 	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
997 	if (!*id)
998 		return -ENOMEM;
999 
1000 	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
1001 			sizeof(struct nvme_id_ctrl));
1002 	if (error)
1003 		kfree(*id);
1004 	return error;
1005 }
1006 
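/*
 * Issue Identify with the Namespace Identification Descriptor list CNS value
 * and copy any EUI-64, NGUID and UUID descriptors found into @ids.
 */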
1007 static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
1008 		struct nvme_ns_ids *ids)
1009 {
1010 	struct nvme_command c = { };
1011 	int status;
1012 	void *data;
1013 	int pos;
1014 	int len;
1015 
1016 	c.identify.opcode = nvme_admin_identify;
1017 	c.identify.nsid = cpu_to_le32(nsid);
1018 	c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;
1019 
1020 	data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
1021 	if (!data)
1022 		return -ENOMEM;
1023 
1024 	status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
1025 				      NVME_IDENTIFY_DATA_SIZE);
1026 	if (status)
1027 		goto free_data;
1028 
1029 	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
1030 		struct nvme_ns_id_desc *cur = data + pos;
1031 
1032 		if (cur->nidl == 0)
1033 			break;
1034 
1035 		switch (cur->nidt) {
1036 		case NVME_NIDT_EUI64:
1037 			if (cur->nidl != NVME_NIDT_EUI64_LEN) {
1038 				dev_warn(ctrl->device,
1039 					 "ctrl returned bogus length: %d for NVME_NIDT_EUI64\n",
1040 					 cur->nidl);
1041 				goto free_data;
1042 			}
1043 			len = NVME_NIDT_EUI64_LEN;
1044 			memcpy(ids->eui64, data + pos + sizeof(*cur), len);
1045 			break;
1046 		case NVME_NIDT_NGUID:
1047 			if (cur->nidl != NVME_NIDT_NGUID_LEN) {
1048 				dev_warn(ctrl->device,
1049 					 "ctrl returned bogus length: %d for NVME_NIDT_NGUID\n",
1050 					 cur->nidl);
1051 				goto free_data;
1052 			}
1053 			len = NVME_NIDT_NGUID_LEN;
1054 			memcpy(ids->nguid, data + pos + sizeof(*cur), len);
1055 			break;
1056 		case NVME_NIDT_UUID:
1057 			if (cur->nidl != NVME_NIDT_UUID_LEN) {
1058 				dev_warn(ctrl->device,
1059 					 "ctrl returned bogus length: %d for NVME_NIDT_UUID\n",
1060 					 cur->nidl);
1061 				goto free_data;
1062 			}
1063 			len = NVME_NIDT_UUID_LEN;
1064 			uuid_copy(&ids->uuid, data + pos + sizeof(*cur));
1065 			break;
1066 		default:
1067 			/* Skip unknown types */
1068 			len = cur->nidl;
1069 			break;
1070 		}
1071 
1072 		len += sizeof(*cur);
1073 	}
1074 free_data:
1075 	kfree(data);
1076 	return status;
1077 }
1078 
1079 static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
1080 {
1081 	struct nvme_command c = { };
1082 
1083 	c.identify.opcode = nvme_admin_identify;
1084 	c.identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST;
1085 	c.identify.nsid = cpu_to_le32(nsid);
1086 	return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list,
1087 				    NVME_IDENTIFY_DATA_SIZE);
1088 }
1089 
1090 static struct nvme_id_ns *nvme_identify_ns(struct nvme_ctrl *ctrl,
1091 		unsigned nsid)
1092 {
1093 	struct nvme_id_ns *id;
1094 	struct nvme_command c = { };
1095 	int error;
1096 
1097 	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
1098 	c.identify.opcode = nvme_admin_identify;
1099 	c.identify.nsid = cpu_to_le32(nsid);
1100 	c.identify.cns = NVME_ID_CNS_NS;
1101 
1102 	id = kmalloc(sizeof(*id), GFP_KERNEL);
1103 	if (!id)
1104 		return NULL;
1105 
1106 	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
1107 	if (error) {
1108 		dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
1109 		kfree(id);
1110 		return NULL;
1111 	}
1112 
1113 	return id;
1114 }
1115 
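/*
 * Issue a Set Features admin command; on success the completion's dword 0
 * value is returned through @result when the caller asks for it.
 */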
1116 static int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
1117 		      void *buffer, size_t buflen, u32 *result)
1118 {
1119 	struct nvme_command c;
1120 	union nvme_result res;
1121 	int ret;
1122 
1123 	memset(&c, 0, sizeof(c));
1124 	c.features.opcode = nvme_admin_set_features;
1125 	c.features.fid = cpu_to_le32(fid);
1126 	c.features.dword11 = cpu_to_le32(dword11);
1127 
1128 	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
1129 			buffer, buflen, 0, NVME_QID_ANY, 0, 0, false);
1130 	if (ret >= 0 && result)
1131 		*result = le32_to_cpu(res.u32);
1132 	return ret;
1133 }
1134 
1135 int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
1136 {
1137 	u32 q_count = (*count - 1) | ((*count - 1) << 16);
1138 	u32 result;
1139 	int status, nr_io_queues;
1140 
1141 	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
1142 			&result);
1143 	if (status < 0)
1144 		return status;
1145 
1146 	/*
1147 	 * Degraded controllers might return an error when setting the queue
1148 	 * count.  We still want to be able to bring them online and offer
1149 	 * access to the admin queue, as that might be the only way to fix them up.
1150 	 */
1151 	if (status > 0) {
1152 		dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
1153 		*count = 0;
1154 	} else {
1155 		nr_io_queues = min(result & 0xffff, result >> 16) + 1;
1156 		*count = min(*count, nr_io_queues);
1157 	}
1158 
1159 	return 0;
1160 }
1161 EXPORT_SYMBOL_GPL(nvme_set_queue_count);
1162 
1163 #define NVME_AEN_SUPPORTED \
1164 	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | NVME_AEN_CFG_ANA_CHANGE)
1165 
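/*
 * Enable the asynchronous event notifications that both the driver and the
 * controller support (the intersection of OAES and NVME_AEN_SUPPORTED).
 */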
1166 static void nvme_enable_aen(struct nvme_ctrl *ctrl)
1167 {
1168 	u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
1169 	int status;
1170 
1171 	if (!supported_aens)
1172 		return;
1173 
1174 	status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
1175 			NULL, 0, &result);
1176 	if (status)
1177 		dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
1178 			 supported_aens);
1179 }
1180 
1181 static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
1182 {
1183 	struct nvme_user_io io;
1184 	struct nvme_command c;
1185 	unsigned length, meta_len;
1186 	void __user *metadata;
1187 
1188 	if (copy_from_user(&io, uio, sizeof(io)))
1189 		return -EFAULT;
1190 	if (io.flags)
1191 		return -EINVAL;
1192 
1193 	switch (io.opcode) {
1194 	case nvme_cmd_write:
1195 	case nvme_cmd_read:
1196 	case nvme_cmd_compare:
1197 		break;
1198 	default:
1199 		return -EINVAL;
1200 	}
1201 
1202 	length = (io.nblocks + 1) << ns->lba_shift;
1203 	meta_len = (io.nblocks + 1) * ns->ms;
1204 	metadata = (void __user *)(uintptr_t)io.metadata;
1205 
1206 	if (ns->ext) {
1207 		length += meta_len;
1208 		meta_len = 0;
1209 	} else if (meta_len) {
1210 		if ((io.metadata & 3) || !io.metadata)
1211 			return -EINVAL;
1212 	}
1213 
1214 	memset(&c, 0, sizeof(c));
1215 	c.rw.opcode = io.opcode;
1216 	c.rw.flags = io.flags;
1217 	c.rw.nsid = cpu_to_le32(ns->head->ns_id);
1218 	c.rw.slba = cpu_to_le64(io.slba);
1219 	c.rw.length = cpu_to_le16(io.nblocks);
1220 	c.rw.control = cpu_to_le16(io.control);
1221 	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
1222 	c.rw.reftag = cpu_to_le32(io.reftag);
1223 	c.rw.apptag = cpu_to_le16(io.apptag);
1224 	c.rw.appmask = cpu_to_le16(io.appmask);
1225 
1226 	return nvme_submit_user_cmd(ns->queue, &c,
1227 			(void __user *)(uintptr_t)io.addr, length,
1228 			metadata, meta_len, lower_32_bits(io.slba), NULL, 0);
1229 }
1230 
1231 static u32 nvme_known_admin_effects(u8 opcode)
1232 {
1233 	switch (opcode) {
1234 	case nvme_admin_format_nvm:
1235 		return NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
1236 					NVME_CMD_EFFECTS_CSE_MASK;
1237 	case nvme_admin_sanitize_nvm:
1238 		return NVME_CMD_EFFECTS_CSE_MASK;
1239 	default:
1240 		break;
1241 	}
1242 	return 0;
1243 }
1244 
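/*
 * Look up the command effects for a passthrough command.  For admin commands
 * whose effects indicate LBA content or namespace capability changes, freeze
 * all I/O queues before the command is issued; nvme_passthru_end() undoes
 * this once the command completes.
 */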
1245 static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
1246 								u8 opcode)
1247 {
1248 	u32 effects = 0;
1249 
1250 	if (ns) {
1251 		if (ctrl->effects)
1252 			effects = le32_to_cpu(ctrl->effects->iocs[opcode]);
1253 		if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
1254 			dev_warn(ctrl->device,
1255 				 "IO command:%02x has unhandled effects:%08x\n",
1256 				 opcode, effects);
1257 		return 0;
1258 	}
1259 
1260 	effects |= nvme_known_admin_effects(opcode);
1261 	if (ctrl->effects)
1262 		effects = le32_to_cpu(ctrl->effects->acs[opcode]);
1263 
1264 	/*
1265 	 * For simplicity, IO to all namespaces is quiesced even if the command
1266 	 * effects say only one namespace is affected.
1267 	 */
1268 	if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
1269 		mutex_lock(&ctrl->scan_lock);
1270 		nvme_start_freeze(ctrl);
1271 		nvme_wait_freeze(ctrl);
1272 	}
1273 	return effects;
1274 }
1275 
1276 static void nvme_update_formats(struct nvme_ctrl *ctrl)
1277 {
1278 	struct nvme_ns *ns;
1279 
1280 	down_read(&ctrl->namespaces_rwsem);
1281 	list_for_each_entry(ns, &ctrl->namespaces, list)
1282 		if (ns->disk && nvme_revalidate_disk(ns->disk))
1283 			nvme_set_queue_dying(ns);
1284 	up_read(&ctrl->namespaces_rwsem);
1285 
1286 	nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
1287 }
1288 
1289 static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
1290 {
1291 	/*
1292 	 * Revalidate LBA changes prior to unfreezing. This is necessary to
1293 	 * prevent memory corruption if a logical block size was changed by
1294 	 * this command.
1295 	 */
1296 	if (effects & NVME_CMD_EFFECTS_LBCC)
1297 		nvme_update_formats(ctrl);
1298 	if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
1299 		nvme_unfreeze(ctrl);
1300 		mutex_unlock(&ctrl->scan_lock);
1301 	}
1302 	if (effects & NVME_CMD_EFFECTS_CCC)
1303 		nvme_init_identify(ctrl);
1304 	if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC))
1305 		nvme_queue_scan(ctrl);
1306 }
1307 
1308 static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
1309 			struct nvme_passthru_cmd __user *ucmd)
1310 {
1311 	struct nvme_passthru_cmd cmd;
1312 	struct nvme_command c;
1313 	unsigned timeout = 0;
1314 	u32 effects;
1315 	int status;
1316 
1317 	if (!capable(CAP_SYS_ADMIN))
1318 		return -EACCES;
1319 	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
1320 		return -EFAULT;
1321 	if (cmd.flags)
1322 		return -EINVAL;
1323 
1324 	memset(&c, 0, sizeof(c));
1325 	c.common.opcode = cmd.opcode;
1326 	c.common.flags = cmd.flags;
1327 	c.common.nsid = cpu_to_le32(cmd.nsid);
1328 	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
1329 	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
1330 	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
1331 	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
1332 	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
1333 	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
1334 	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
1335 	c.common.cdw15 = cpu_to_le32(cmd.cdw15);
1336 
1337 	if (cmd.timeout_ms)
1338 		timeout = msecs_to_jiffies(cmd.timeout_ms);
1339 
1340 	effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
1341 	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
1342 			(void __user *)(uintptr_t)cmd.addr, cmd.data_len,
1343 			(void __user *)(uintptr_t)cmd.metadata, cmd.metadata_len,
1344 			0, &cmd.result, timeout);
1345 	nvme_passthru_end(ctrl, effects);
1346 
1347 	if (status >= 0) {
1348 		if (put_user(cmd.result, &ucmd->result))
1349 			return -EFAULT;
1350 	}
1351 
1352 	return status;
1353 }
1354 
1355 /*
1356  * Issue ioctl requests on the first available path.  Note that unlike normal
1357  * block layer requests we will not retry a failed request on another controller.
1358  */
1359 static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
1360 		struct nvme_ns_head **head, int *srcu_idx)
1361 {
1362 #ifdef CONFIG_NVME_MULTIPATH
1363 	if (disk->fops == &nvme_ns_head_ops) {
1364 		*head = disk->private_data;
1365 		*srcu_idx = srcu_read_lock(&(*head)->srcu);
1366 		return nvme_find_path(*head);
1367 	}
1368 #endif
1369 	*head = NULL;
1370 	*srcu_idx = -1;
1371 	return disk->private_data;
1372 }
1373 
1374 static void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx)
1375 {
1376 	if (head)
1377 		srcu_read_unlock(&head->srcu, idx);
1378 }
1379 
1380 static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned cmd, unsigned long arg)
1381 {
1382 	switch (cmd) {
1383 	case NVME_IOCTL_ID:
1384 		force_successful_syscall_return();
1385 		return ns->head->ns_id;
1386 	case NVME_IOCTL_ADMIN_CMD:
1387 		return nvme_user_cmd(ns->ctrl, NULL, (void __user *)arg);
1388 	case NVME_IOCTL_IO_CMD:
1389 		return nvme_user_cmd(ns->ctrl, ns, (void __user *)arg);
1390 	case NVME_IOCTL_SUBMIT_IO:
1391 		return nvme_submit_io(ns, (void __user *)arg);
1392 	default:
1393 #ifdef CONFIG_NVM
1394 		if (ns->ndev)
1395 			return nvme_nvm_ioctl(ns, cmd, arg);
1396 #endif
1397 		if (is_sed_ioctl(cmd))
1398 			return sed_ioctl(ns->ctrl->opal_dev, cmd,
1399 					 (void __user *) arg);
1400 		return -ENOTTY;
1401 	}
1402 }
1403 
1404 static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
1405 		unsigned int cmd, unsigned long arg)
1406 {
1407 	struct nvme_ns_head *head = NULL;
1408 	struct nvme_ns *ns;
1409 	int srcu_idx, ret;
1410 
1411 	ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
1412 	if (unlikely(!ns))
1413 		ret = -EWOULDBLOCK;
1414 	else
1415 		ret = nvme_ns_ioctl(ns, cmd, arg);
1416 	nvme_put_ns_from_disk(head, srcu_idx);
1417 	return ret;
1418 }
1419 
1420 static int nvme_open(struct block_device *bdev, fmode_t mode)
1421 {
1422 	struct nvme_ns *ns = bdev->bd_disk->private_data;
1423 
1424 #ifdef CONFIG_NVME_MULTIPATH
1425 	/* should never be called due to GENHD_FL_HIDDEN */
1426 	if (WARN_ON_ONCE(ns->head->disk))
1427 		goto fail;
1428 #endif
1429 	if (!kref_get_unless_zero(&ns->kref))
1430 		goto fail;
1431 	if (!try_module_get(ns->ctrl->ops->module))
1432 		goto fail_put_ns;
1433 
1434 	return 0;
1435 
1436 fail_put_ns:
1437 	nvme_put_ns(ns);
1438 fail:
1439 	return -ENXIO;
1440 }
1441 
1442 static void nvme_release(struct gendisk *disk, fmode_t mode)
1443 {
1444 	struct nvme_ns *ns = disk->private_data;
1445 
1446 	module_put(ns->ctrl->ops->module);
1447 	nvme_put_ns(ns);
1448 }
1449 
1450 static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1451 {
1452 	/* some standard values */
1453 	geo->heads = 1 << 6;
1454 	geo->sectors = 1 << 5;
1455 	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
1456 	return 0;
1457 }
1458 
1459 #ifdef CONFIG_BLK_DEV_INTEGRITY
1460 static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type)
1461 {
1462 	struct blk_integrity integrity;
1463 
1464 	memset(&integrity, 0, sizeof(integrity));
1465 	switch (pi_type) {
1466 	case NVME_NS_DPS_PI_TYPE3:
1467 		integrity.profile = &t10_pi_type3_crc;
1468 		integrity.tag_size = sizeof(u16) + sizeof(u32);
1469 		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
1470 		break;
1471 	case NVME_NS_DPS_PI_TYPE1:
1472 	case NVME_NS_DPS_PI_TYPE2:
1473 		integrity.profile = &t10_pi_type1_crc;
1474 		integrity.tag_size = sizeof(u16);
1475 		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
1476 		break;
1477 	default:
1478 		integrity.profile = NULL;
1479 		break;
1480 	}
1481 	integrity.tuple_size = ms;
1482 	blk_integrity_register(disk, &integrity);
1483 	blk_queue_max_integrity_segments(disk->queue, 1);
1484 }
1485 #else
1486 static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type)
1487 {
1488 }
1489 #endif /* CONFIG_BLK_DEV_INTEGRITY */
1490 
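/*
 * The namespace reports its optimal I/O boundary (NOIOB) in logical blocks;
 * convert it to 512-byte sectors and expose it as the queue chunk size so
 * that I/O is split on that boundary.
 */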
1491 static void nvme_set_chunk_size(struct nvme_ns *ns)
1492 {
1493 	u32 chunk_size = (((u32)ns->noiob) << (ns->lba_shift - 9));
1494 	blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(chunk_size));
1495 }
1496 
1497 static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
1498 {
1499 	struct nvme_ctrl *ctrl = ns->ctrl;
1500 	struct request_queue *queue = disk->queue;
1501 	u32 size = queue_logical_block_size(queue);
1502 
1503 	if (!(ctrl->oncs & NVME_CTRL_ONCS_DSM)) {
1504 		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, queue);
1505 		return;
1506 	}
1507 
1508 	if (ctrl->nr_streams && ns->sws && ns->sgs)
1509 		size *= ns->sws * ns->sgs;
1510 
1511 	BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
1512 			NVME_DSM_MAX_RANGES);
1513 
1514 	queue->limits.discard_alignment = 0;
1515 	queue->limits.discard_granularity = size;
1516 
1517 	/* If discard is already enabled, don't reset queue limits */
1518 	if (blk_queue_flag_test_and_set(QUEUE_FLAG_DISCARD, queue))
1519 		return;
1520 
1521 	blk_queue_max_discard_sectors(queue, UINT_MAX);
1522 	blk_queue_max_discard_segments(queue, NVME_DSM_MAX_RANGES);
1523 
1524 	if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
1525 		blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
1526 }
1527 
1528 static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)
1529 {
1530 	u32 max_sectors;
1531 	unsigned short bs = 1 << ns->lba_shift;
1532 
1533 	if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) ||
1534 	    (ns->ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
1535 		return;
1536 	/*
1537 	 * Even though the NVMe spec explicitly states that MDTS is not
1538 	 * applicable to Write Zeroes ("The restriction does not apply to
1539 	 * commands that do not transfer data between the host and the
1540 	 * controller (e.g., Write Uncorrectable or Write Zeroes command)."),
1541 	 * be cautious and use the controller's max_hw_sectors value to
1542 	 * configure the maximum sectors for Write Zeroes.  That value is
1543 	 * derived from the controller's MDTS field in nvme_init_identify(),
1544 	 * if available.
1545 	 */
1546 	if (ns->ctrl->max_hw_sectors == UINT_MAX)
1547 		max_sectors = ((u32)(USHRT_MAX + 1) * bs) >> 9;
1548 	else
1549 		max_sectors = ((u32)(ns->ctrl->max_hw_sectors + 1) * bs) >> 9;
1550 
1551 	blk_queue_max_write_zeroes_sectors(disk->queue, max_sectors);
1552 }
1553 
1554 static void nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
1555 		struct nvme_id_ns *id, struct nvme_ns_ids *ids)
1556 {
1557 	memset(ids, 0, sizeof(*ids));
1558 
1559 	if (ctrl->vs >= NVME_VS(1, 1, 0))
1560 		memcpy(ids->eui64, id->eui64, sizeof(id->eui64));
1561 	if (ctrl->vs >= NVME_VS(1, 2, 0))
1562 		memcpy(ids->nguid, id->nguid, sizeof(id->nguid));
1563 	if (ctrl->vs >= NVME_VS(1, 3, 0)) {
1564 		 /* Don't treat an error as fatal, since we potentially
1565 		  * already have an NGUID or EUI-64
1566 		  */
1567 		if (nvme_identify_ns_descs(ctrl, nsid, ids))
1568 			dev_warn(ctrl->device,
1569 				 "%s: Identify Descriptors failed\n", __func__);
1570 	}
1571 }
1572 
1573 static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
1574 {
1575 	return !uuid_is_null(&ids->uuid) ||
1576 		memchr_inv(ids->nguid, 0, sizeof(ids->nguid)) ||
1577 		memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
1578 }
1579 
1580 static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
1581 {
1582 	return uuid_equal(&a->uuid, &b->uuid) &&
1583 		memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
1584 		memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0;
1585 }
1586 
1587 static void nvme_update_disk_info(struct gendisk *disk,
1588 		struct nvme_ns *ns, struct nvme_id_ns *id)
1589 {
1590 	sector_t capacity = le64_to_cpu(id->nsze) << (ns->lba_shift - 9);
1591 	unsigned short bs = 1 << ns->lba_shift;
1592 
1593 	if (ns->lba_shift > PAGE_SHIFT) {
1594 		/* unsupported block size, set capacity to 0 later */
1595 		bs = (1 << 9);
1596 	}
1597 	blk_mq_freeze_queue(disk->queue);
1598 	blk_integrity_unregister(disk);
1599 
1600 	blk_queue_logical_block_size(disk->queue, bs);
1601 	blk_queue_physical_block_size(disk->queue, bs);
1602 	blk_queue_io_min(disk->queue, bs);
1603 
1604 	if (ns->ms && !ns->ext &&
1605 	    (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
1606 		nvme_init_integrity(disk, ns->ms, ns->pi_type);
1607 	if ((ns->ms && !nvme_ns_has_pi(ns) && !blk_get_integrity(disk)) ||
1608 	    ns->lba_shift > PAGE_SHIFT)
1609 		capacity = 0;
1610 
1611 	set_capacity(disk, capacity);
1612 
1613 	nvme_config_discard(disk, ns);
1614 	nvme_config_write_zeroes(disk, ns);
1615 
1616 	if (id->nsattr & (1 << 0))
1617 		set_disk_ro(disk, true);
1618 	else
1619 		set_disk_ro(disk, false);
1620 
1621 	blk_mq_unfreeze_queue(disk->queue);
1622 }
1623 
1624 static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
1625 {
1626 	struct nvme_ns *ns = disk->private_data;
1627 
1628 	/*
1629 	 * If Identify Namespace failed, use the default 512 byte block size so
1630 	 * the block layer can use it before failing read/write for 0 capacity.
1631 	 */
1632 	ns->lba_shift = id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ds;
1633 	if (ns->lba_shift == 0)
1634 		ns->lba_shift = 9;
1635 	ns->noiob = le16_to_cpu(id->noiob);
1636 	ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
1637 	ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
1638 	/* the PI implementation requires metadata equal to the t10 pi tuple size */
1639 	if (ns->ms == sizeof(struct t10_pi_tuple))
1640 		ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
1641 	else
1642 		ns->pi_type = 0;
1643 
1644 	if (ns->noiob)
1645 		nvme_set_chunk_size(ns);
1646 	nvme_update_disk_info(disk, ns, id);
1647 #ifdef CONFIG_NVME_MULTIPATH
1648 	if (ns->head->disk) {
1649 		nvme_update_disk_info(ns->head->disk, ns, id);
1650 		blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
1651 	}
1652 #endif
1653 }
1654 
1655 static int nvme_revalidate_disk(struct gendisk *disk)
1656 {
1657 	struct nvme_ns *ns = disk->private_data;
1658 	struct nvme_ctrl *ctrl = ns->ctrl;
1659 	struct nvme_id_ns *id;
1660 	struct nvme_ns_ids ids;
1661 	int ret = 0;
1662 
1663 	if (test_bit(NVME_NS_DEAD, &ns->flags)) {
1664 		set_capacity(disk, 0);
1665 		return -ENODEV;
1666 	}
1667 
1668 	id = nvme_identify_ns(ctrl, ns->head->ns_id);
1669 	if (!id)
1670 		return -ENODEV;
1671 
1672 	if (id->ncap == 0) {
1673 		ret = -ENODEV;
1674 		goto out;
1675 	}
1676 
1677 	__nvme_revalidate_disk(disk, id);
1678 	nvme_report_ns_ids(ctrl, ns->head->ns_id, id, &ids);
1679 	if (!nvme_ns_ids_equal(&ns->head->ids, &ids)) {
1680 		dev_err(ctrl->device,
1681 			"identifiers changed for nsid %d\n", ns->head->ns_id);
1682 		ret = -ENODEV;
1683 	}
1684 
1685 out:
1686 	kfree(id);
1687 	return ret;
1688 }
1689 
1690 static char nvme_pr_type(enum pr_type type)
1691 {
1692 	switch (type) {
1693 	case PR_WRITE_EXCLUSIVE:
1694 		return 1;
1695 	case PR_EXCLUSIVE_ACCESS:
1696 		return 2;
1697 	case PR_WRITE_EXCLUSIVE_REG_ONLY:
1698 		return 3;
1699 	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
1700 		return 4;
1701 	case PR_WRITE_EXCLUSIVE_ALL_REGS:
1702 		return 5;
1703 	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
1704 		return 6;
1705 	default:
1706 		return 0;
1707 	}
1708 }
1709 
1710 static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
1711 				u64 key, u64 sa_key, u8 op)
1712 {
1713 	struct nvme_ns_head *head = NULL;
1714 	struct nvme_ns *ns;
1715 	struct nvme_command c;
1716 	int srcu_idx, ret;
1717 	u8 data[16] = { 0, };
1718 
1719 	ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
1720 	if (unlikely(!ns))
1721 		return -EWOULDBLOCK;
1722 
1723 	put_unaligned_le64(key, &data[0]);
1724 	put_unaligned_le64(sa_key, &data[8]);
1725 
1726 	memset(&c, 0, sizeof(c));
1727 	c.common.opcode = op;
1728 	c.common.nsid = cpu_to_le32(ns->head->ns_id);
1729 	c.common.cdw10 = cpu_to_le32(cdw10);
1730 
1731 	ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
1732 	nvme_put_ns_from_disk(head, srcu_idx);
1733 	return ret;
1734 }
1735 
1736 static int nvme_pr_register(struct block_device *bdev, u64 old,
1737 		u64 new, unsigned flags)
1738 {
1739 	u32 cdw10;
1740 
1741 	if (flags & ~PR_FL_IGNORE_KEY)
1742 		return -EOPNOTSUPP;
1743 
1744 	cdw10 = old ? 2 : 0;
1745 	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
1746 	cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
1747 	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
1748 }
1749 
1750 static int nvme_pr_reserve(struct block_device *bdev, u64 key,
1751 		enum pr_type type, unsigned flags)
1752 {
1753 	u32 cdw10;
1754 
1755 	if (flags & ~PR_FL_IGNORE_KEY)
1756 		return -EOPNOTSUPP;
1757 
1758 	cdw10 = nvme_pr_type(type) << 8;
1759 	cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
1760 	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
1761 }
1762 
1763 static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
1764 		enum pr_type type, bool abort)
1765 {
1766 	u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);
1767 	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
1768 }
1769 
1770 static int nvme_pr_clear(struct block_device *bdev, u64 key)
1771 {
1772 	u32 cdw10 = 1 | (key ? 1 << 3 : 0);
1773 	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
1774 }
1775 
1776 static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
1777 {
1778 	u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);
1779 	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
1780 }
1781 
1782 static const struct pr_ops nvme_pr_ops = {
1783 	.pr_register	= nvme_pr_register,
1784 	.pr_reserve	= nvme_pr_reserve,
1785 	.pr_release	= nvme_pr_release,
1786 	.pr_preempt	= nvme_pr_preempt,
1787 	.pr_clear	= nvme_pr_clear,
1788 };
1789 
1790 #ifdef CONFIG_BLK_SED_OPAL
1791 int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
1792 		bool send)
1793 {
1794 	struct nvme_ctrl *ctrl = data;
1795 	struct nvme_command cmd;
1796 
1797 	memset(&cmd, 0, sizeof(cmd));
1798 	if (send)
1799 		cmd.common.opcode = nvme_admin_security_send;
1800 	else
1801 		cmd.common.opcode = nvme_admin_security_recv;
1802 	cmd.common.nsid = 0;
1803 	cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
1804 	cmd.common.cdw11 = cpu_to_le32(len);
1805 
1806 	return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
1807 				      ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0, false);
1808 }
1809 EXPORT_SYMBOL_GPL(nvme_sec_submit);
1810 #endif /* CONFIG_BLK_SED_OPAL */
1811 
1812 static const struct block_device_operations nvme_fops = {
1813 	.owner		= THIS_MODULE,
1814 	.ioctl		= nvme_ioctl,
1815 	.compat_ioctl	= nvme_ioctl,
1816 	.open		= nvme_open,
1817 	.release	= nvme_release,
1818 	.getgeo		= nvme_getgeo,
1819 	.revalidate_disk= nvme_revalidate_disk,
1820 	.pr_ops		= &nvme_pr_ops,
1821 };
1822 
1823 #ifdef CONFIG_NVME_MULTIPATH
1824 static int nvme_ns_head_open(struct block_device *bdev, fmode_t mode)
1825 {
1826 	struct nvme_ns_head *head = bdev->bd_disk->private_data;
1827 
1828 	if (!kref_get_unless_zero(&head->ref))
1829 		return -ENXIO;
1830 	return 0;
1831 }
1832 
1833 static void nvme_ns_head_release(struct gendisk *disk, fmode_t mode)
1834 {
1835 	nvme_put_ns_head(disk->private_data);
1836 }
1837 
1838 const struct block_device_operations nvme_ns_head_ops = {
1839 	.owner		= THIS_MODULE,
1840 	.open		= nvme_ns_head_open,
1841 	.release	= nvme_ns_head_release,
1842 	.ioctl		= nvme_ioctl,
1843 	.compat_ioctl	= nvme_ioctl,
1844 	.getgeo		= nvme_getgeo,
1845 	.pr_ops		= &nvme_pr_ops,
1846 };
1847 #endif /* CONFIG_NVME_MULTIPATH */
1848 
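/*
 * Poll CSTS.RDY until it reaches the expected value, giving up after the
 * worst-case timeout advertised in CAP.TO (reported in 500 millisecond units).
 */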
1849 static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
1850 {
1851 	unsigned long timeout =
1852 		((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
1853 	u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
1854 	int ret;
1855 
1856 	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
1857 		if (csts == ~0)
1858 			return -ENODEV;
1859 		if ((csts & NVME_CSTS_RDY) == bit)
1860 			break;
1861 
1862 		msleep(100);
1863 		if (fatal_signal_pending(current))
1864 			return -EINTR;
1865 		if (time_after(jiffies, timeout)) {
1866 			dev_err(ctrl->device,
1867 				"Device not ready; aborting %s\n", enabled ?
1868 						"initialisation" : "reset");
1869 			return -ENODEV;
1870 		}
1871 	}
1872 
1873 	return ret;
1874 }
1875 
1876 /*
1877  * If the device has been passed off to us in an enabled state, just clear
1878  * the enabled bit.  The spec says we should set the 'shutdown notification
1879  * bits', but doing so may cause the device to complete commands to the
1880  * admin queue ... and we don't know what memory that might be pointing at!
1881  */
1882 int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
1883 {
1884 	int ret;
1885 
1886 	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
1887 	ctrl->ctrl_config &= ~NVME_CC_ENABLE;
1888 
1889 	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
1890 	if (ret)
1891 		return ret;
1892 
1893 	if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
1894 		msleep(NVME_QUIRK_DELAY_AMOUNT);
1895 
1896 	return nvme_wait_ready(ctrl, cap, false);
1897 }
1898 EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
1899 
1900 int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
1901 {
1902 	/*
1903 	 * Default to a 4K page size, with the intention to update this
1904 	 * path in the future to accommodate architectures with differing
1905 	 * kernel and IO page sizes.
1906 	 */
1907 	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12, page_shift = 12;
1908 	int ret;
1909 
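	/*
	 * CAP.MPSMIN is the controller's minimum memory page size, encoded as
	 * a power-of-two offset from 4K, i.e. 2^(12 + MPSMIN) bytes.
	 */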
1910 	if (page_shift < dev_page_min) {
1911 		dev_err(ctrl->device,
1912 			"Minimum device page size %u too large for host (%u)\n",
1913 			1 << dev_page_min, 1 << page_shift);
1914 		return -ENODEV;
1915 	}
1916 
1917 	ctrl->page_size = 1 << page_shift;
1918 
1919 	ctrl->ctrl_config = NVME_CC_CSS_NVM;
1920 	ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
1921 	ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
1922 	ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
1923 	ctrl->ctrl_config |= NVME_CC_ENABLE;
1924 
1925 	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
1926 	if (ret)
1927 		return ret;
1928 	return nvme_wait_ready(ctrl, cap, true);
1929 }
1930 EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
1931 
1932 int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
1933 {
1934 	unsigned long timeout = jiffies + (ctrl->shutdown_timeout * HZ);
1935 	u32 csts;
1936 	int ret;
1937 
1938 	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
1939 	ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;
1940 
1941 	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
1942 	if (ret)
1943 		return ret;
1944 
1945 	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
1946 		if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT)
1947 			break;
1948 
1949 		msleep(100);
1950 		if (fatal_signal_pending(current))
1951 			return -EINTR;
1952 		if (time_after(jiffies, timeout)) {
1953 			dev_err(ctrl->device,
1954 				"Device shutdown incomplete; abort shutdown\n");
1955 			return -ENODEV;
1956 		}
1957 	}
1958 
1959 	return ret;
1960 }
1961 EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);
1962 
1963 static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
1964 		struct request_queue *q)
1965 {
1966 	bool vwc = false;
1967 
1968 	if (ctrl->max_hw_sectors) {
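		/*
		 * max_hw_sectors is in 512-byte units; convert it to device
		 * pages and allow one extra segment for a transfer that does
		 * not start on a page boundary.
		 */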
1969 		u32 max_segments =
1970 			(ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;
1971 
1972 		max_segments = min_not_zero(max_segments, ctrl->max_segments);
1973 		blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
1974 		blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
1975 	}
1976 	if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
1977 	    is_power_of_2(ctrl->max_hw_sectors))
1978 		blk_queue_chunk_sectors(q, ctrl->max_hw_sectors);
1979 	blk_queue_virt_boundary(q, ctrl->page_size - 1);
1980 	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
1981 		vwc = true;
1982 	blk_queue_write_cache(q, vwc, vwc);
1983 }
1984 
1985 static int nvme_configure_timestamp(struct nvme_ctrl *ctrl)
1986 {
1987 	__le64 ts;
1988 	int ret;
1989 
1990 	if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP))
1991 		return 0;
1992 
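	/*
	 * Program the Timestamp feature with wall-clock time, expressed as
	 * milliseconds since the Unix epoch.
	 */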
1993 	ts = cpu_to_le64(ktime_to_ms(ktime_get_real()));
1994 	ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts),
1995 			NULL);
1996 	if (ret)
1997 		dev_warn_once(ctrl->device,
1998 			"could not set timestamp (%d)\n", ret);
1999 	return ret;
2000 }
2001 
2002 static int nvme_configure_acre(struct nvme_ctrl *ctrl)
2003 {
2004 	struct nvme_feat_host_behavior *host;
2005 	int ret;
2006 
2007 	/* Don't bother enabling the feature if retry delay is not reported */
2008 	if (!ctrl->crdt[0])
2009 		return 0;
2010 
2011 	host = kzalloc(sizeof(*host), GFP_KERNEL);
2012 	if (!host)
2013 		return 0;
2014 
2015 	host->acre = NVME_ENABLE_ACRE;
2016 	ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
2017 				host, sizeof(*host), NULL);
2018 	kfree(host);
2019 	return ret;
2020 }
2021 
2022 static int nvme_configure_apst(struct nvme_ctrl *ctrl)
2023 {
2024 	/*
2025 	 * APST (Autonomous Power State Transition) lets us program a
2026 	 * table of power state transitions that the controller will
2027 	 * perform automatically.  We configure it with a simple
2028 	 * heuristic: we are willing to spend at most 2% of the time
2029 	 * transitioning between power states.  Therefore, when running
2030 	 * in any given state, we will enter the next lower-power
2031 	 * non-operational state after waiting 50 * (enlat + exlat)
2032 	 * microseconds, as long as that state's exit latency is under
2033 	 * the requested maximum latency.
2034 	 *
2035 	 * We will not autonomously enter any non-operational state for
2036 	 * which the total latency exceeds ps_max_latency_us.  Users
2037 	 * can set ps_max_latency_us to zero to turn off APST.
2038 	 */
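	/*
	 * For example, a non-operational state with enlat = 5,000 us and
	 * exlat = 5,000 us (10 ms total) is programmed with an idle timeout
	 * of 50 * 10 ms = 500 ms, provided its exit latency does not exceed
	 * ps_max_latency_us.
	 */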
2039 
2040 	unsigned apste;
2041 	struct nvme_feat_auto_pst *table;
2042 	u64 max_lat_us = 0;
2043 	int max_ps = -1;
2044 	int ret;
2045 
2046 	/*
2047 	 * If APST isn't supported or if we haven't been initialized yet,
2048 	 * then don't do anything.
2049 	 */
2050 	if (!ctrl->apsta)
2051 		return 0;
2052 
2053 	if (ctrl->npss > 31) {
2054 		dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
2055 		return 0;
2056 	}
2057 
2058 	table = kzalloc(sizeof(*table), GFP_KERNEL);
2059 	if (!table)
2060 		return 0;
2061 
2062 	if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
2063 		/* Turn off APST. */
2064 		apste = 0;
2065 		dev_dbg(ctrl->device, "APST disabled\n");
2066 	} else {
2067 		__le64 target = cpu_to_le64(0);
2068 		int state;
2069 
2070 		/*
2071 		 * Walk through all states from lowest- to highest-power.
2072 		 * According to the spec, lower-numbered states use more
2073 		 * power.  NPSS, despite the name, is the index of the
2074 		 * lowest-power state, not the number of states.
2075 		 */
2076 		for (state = (int)ctrl->npss; state >= 0; state--) {
2077 			u64 total_latency_us, exit_latency_us, transition_ms;
2078 
2079 			if (target)
2080 				table->entries[state] = target;
2081 
2082 			/*
2083 			 * Don't allow transitions to the deepest state
2084 			 * if it's quirked off.
2085 			 */
2086 			if (state == ctrl->npss &&
2087 			    (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
2088 				continue;
2089 
2090 			/*
2091 			 * Is this state a useful non-operational state for
2092 			 * higher-power states to autonomously transition to?
2093 			 */
2094 			if (!(ctrl->psd[state].flags &
2095 			      NVME_PS_FLAGS_NON_OP_STATE))
2096 				continue;
2097 
2098 			exit_latency_us =
2099 				(u64)le32_to_cpu(ctrl->psd[state].exit_lat);
2100 			if (exit_latency_us > ctrl->ps_max_latency_us)
2101 				continue;
2102 
2103 			total_latency_us =
2104 				exit_latency_us +
2105 				le32_to_cpu(ctrl->psd[state].entry_lat);
2106 
2107 			/*
2108 			 * This state is good.  Use it as the APST idle
2109 			 * target for higher power states.
2110 			 */
2111 			transition_ms = total_latency_us + 19;
2112 			do_div(transition_ms, 20);
2113 			if (transition_ms > (1 << 24) - 1)
2114 				transition_ms = (1 << 24) - 1;
2115 
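			/*
			 * APST table entry format: Idle Transition Power
			 * State in bits 07:03, Idle Time Prior to Transition
			 * (in milliseconds) in bits 31:08.
			 */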
2116 			target = cpu_to_le64((state << 3) |
2117 					     (transition_ms << 8));
2118 
2119 			if (max_ps == -1)
2120 				max_ps = state;
2121 
2122 			if (total_latency_us > max_lat_us)
2123 				max_lat_us = total_latency_us;
2124 		}
2125 
2126 		apste = 1;
2127 
2128 		if (max_ps == -1) {
2129 			dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n");
2130 		} else {
2131 			dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
2132 				max_ps, max_lat_us, (int)sizeof(*table), table);
2133 		}
2134 	}
2135 
2136 	ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste,
2137 				table, sizeof(*table), NULL);
2138 	if (ret)
2139 		dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);
2140 
2141 	kfree(table);
2142 	return ret;
2143 }
2144 
2145 static void nvme_set_latency_tolerance(struct device *dev, s32 val)
2146 {
2147 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
2148 	u64 latency;
2149 
2150 	switch (val) {
2151 	case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT:
2152 	case PM_QOS_LATENCY_ANY:
2153 		latency = U64_MAX;
2154 		break;
2155 
2156 	default:
2157 		latency = val;
2158 	}
2159 
2160 	if (ctrl->ps_max_latency_us != latency) {
2161 		ctrl->ps_max_latency_us = latency;
2162 		nvme_configure_apst(ctrl);
2163 	}
2164 }
2165 
2166 struct nvme_core_quirk_entry {
2167 	/*
2168 	 * NVMe model and firmware strings are padded with spaces.  For
2169 	 * simplicity, strings in the quirk table are padded with NULLs
2170 	 * instead.
2171 	 */
2172 	u16 vid;
2173 	const char *mn;
2174 	const char *fr;
2175 	unsigned long quirks;
2176 };
2177 
2178 static const struct nvme_core_quirk_entry core_quirks[] = {
2179 	{
2180 		/*
2181 		 * This Toshiba device seems to die using any APST states.  See:
2182 		 * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11
2183 		 */
2184 		.vid = 0x1179,
2185 		.mn = "THNSF5256GPUK TOSHIBA",
2186 		.quirks = NVME_QUIRK_NO_APST,
2187 	}
2188 };
2189 
2190 /* match is null-terminated but idstr is space-padded. */
2191 static bool string_matches(const char *idstr, const char *match, size_t len)
2192 {
2193 	size_t matchlen;
2194 
2195 	if (!match)
2196 		return true;
2197 
2198 	matchlen = strlen(match);
2199 	WARN_ON_ONCE(matchlen > len);
2200 
2201 	if (memcmp(idstr, match, matchlen))
2202 		return false;
2203 
2204 	for (; matchlen < len; matchlen++)
2205 		if (idstr[matchlen] != ' ')
2206 			return false;
2207 
2208 	return true;
2209 }
2210 
2211 static bool quirk_matches(const struct nvme_id_ctrl *id,
2212 			  const struct nvme_core_quirk_entry *q)
2213 {
2214 	return q->vid == le16_to_cpu(id->vid) &&
2215 		string_matches(id->mn, q->mn, sizeof(id->mn)) &&
2216 		string_matches(id->fr, q->fr, sizeof(id->fr));
2217 }
2218 
2219 static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl,
2220 		struct nvme_id_ctrl *id)
2221 {
2222 	size_t nqnlen;
2223 	int off;
2224 
2225 	if (!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) {
2226 		nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
2227 		if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
2228 			strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
2229 			return;
2230 		}
2231 
2232 		if (ctrl->vs >= NVME_VS(1, 2, 1))
2233 			dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
2234 	}
2235 
2236 	/* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */
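	/*
	 * The result is "nqn.2014.08.org.nvmexpress:" followed by the vendor
	 * and subsystem vendor IDs in hex, then the space-padded serial number
	 * and model string copied verbatim from Identify Controller.
	 */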
2237 	off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
2238 			"nqn.2014.08.org.nvmexpress:%04x%04x",
2239 			le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
2240 	memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
2241 	off += sizeof(id->sn);
2242 	memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn));
2243 	off += sizeof(id->mn);
2244 	memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off);
2245 }
2246 
2247 static void __nvme_release_subsystem(struct nvme_subsystem *subsys)
2248 {
2249 	ida_simple_remove(&nvme_subsystems_ida, subsys->instance);
2250 	kfree(subsys);
2251 }
2252 
2253 static void nvme_release_subsystem(struct device *dev)
2254 {
2255 	__nvme_release_subsystem(container_of(dev, struct nvme_subsystem, dev));
2256 }
2257 
2258 static void nvme_destroy_subsystem(struct kref *ref)
2259 {
2260 	struct nvme_subsystem *subsys =
2261 			container_of(ref, struct nvme_subsystem, ref);
2262 
2263 	mutex_lock(&nvme_subsystems_lock);
2264 	list_del(&subsys->entry);
2265 	mutex_unlock(&nvme_subsystems_lock);
2266 
2267 	ida_destroy(&subsys->ns_ida);
2268 	device_del(&subsys->dev);
2269 	put_device(&subsys->dev);
2270 }
2271 
2272 static void nvme_put_subsystem(struct nvme_subsystem *subsys)
2273 {
2274 	kref_put(&subsys->ref, nvme_destroy_subsystem);
2275 }
2276 
2277 static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn)
2278 {
2279 	struct nvme_subsystem *subsys;
2280 
2281 	lockdep_assert_held(&nvme_subsystems_lock);
2282 
2283 	list_for_each_entry(subsys, &nvme_subsystems, entry) {
2284 		if (strcmp(subsys->subnqn, subsysnqn))
2285 			continue;
2286 		if (!kref_get_unless_zero(&subsys->ref))
2287 			continue;
2288 		return subsys;
2289 	}
2290 
2291 	return NULL;
2292 }
2293 
2294 #define SUBSYS_ATTR_RO(_name, _mode, _show)			\
2295 	struct device_attribute subsys_attr_##_name = \
2296 		__ATTR(_name, _mode, _show, NULL)
2297 
2298 static ssize_t nvme_subsys_show_nqn(struct device *dev,
2299 				    struct device_attribute *attr,
2300 				    char *buf)
2301 {
2302 	struct nvme_subsystem *subsys =
2303 		container_of(dev, struct nvme_subsystem, dev);
2304 
2305 	return snprintf(buf, PAGE_SIZE, "%s\n", subsys->subnqn);
2306 }
2307 static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);
2308 
2309 #define nvme_subsys_show_str_function(field)				\
2310 static ssize_t subsys_##field##_show(struct device *dev,		\
2311 			    struct device_attribute *attr, char *buf)	\
2312 {									\
2313 	struct nvme_subsystem *subsys =					\
2314 		container_of(dev, struct nvme_subsystem, dev);		\
2315 	return sprintf(buf, "%.*s\n",					\
2316 		       (int)sizeof(subsys->field), subsys->field);	\
2317 }									\
2318 static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show);
2319 
2320 nvme_subsys_show_str_function(model);
2321 nvme_subsys_show_str_function(serial);
2322 nvme_subsys_show_str_function(firmware_rev);
2323 
2324 static struct attribute *nvme_subsys_attrs[] = {
2325 	&subsys_attr_model.attr,
2326 	&subsys_attr_serial.attr,
2327 	&subsys_attr_firmware_rev.attr,
2328 	&subsys_attr_subsysnqn.attr,
2329 #ifdef CONFIG_NVME_MULTIPATH
2330 	&subsys_attr_iopolicy.attr,
2331 #endif
2332 	NULL,
2333 };
2334 
2335 static struct attribute_group nvme_subsys_attrs_group = {
2336 	.attrs = nvme_subsys_attrs,
2337 };
2338 
2339 static const struct attribute_group *nvme_subsys_attrs_groups[] = {
2340 	&nvme_subsys_attrs_group,
2341 	NULL,
2342 };
2343 
2344 static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
2345 		struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
2346 {
2347 	struct nvme_ctrl *tmp;
2348 
2349 	lockdep_assert_held(&nvme_subsystems_lock);
2350 
2351 	list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) {
2352 		if (tmp->state == NVME_CTRL_DELETING ||
2353 		    tmp->state == NVME_CTRL_DEAD)
2354 			continue;
2355 
2356 		if (tmp->cntlid == ctrl->cntlid) {
2357 			dev_err(ctrl->device,
2358 				"Duplicate cntlid %u with %s, rejecting\n",
2359 				ctrl->cntlid, dev_name(tmp->device));
2360 			return false;
2361 		}
2362 
2363 		if ((id->cmic & (1 << 1)) ||
2364 		    (ctrl->opts && ctrl->opts->discovery_nqn))
2365 			continue;
2366 
2367 		dev_err(ctrl->device,
2368 			"Subsystem does not support multiple controllers\n");
2369 		return false;
2370 	}
2371 
2372 	return true;
2373 }
2374 
2375 static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
2376 {
2377 	struct nvme_subsystem *subsys, *found;
2378 	int ret;
2379 
2380 	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
2381 	if (!subsys)
2382 		return -ENOMEM;
2383 	ret = ida_simple_get(&nvme_subsystems_ida, 0, 0, GFP_KERNEL);
2384 	if (ret < 0) {
2385 		kfree(subsys);
2386 		return ret;
2387 	}
2388 	subsys->instance = ret;
2389 	mutex_init(&subsys->lock);
2390 	kref_init(&subsys->ref);
2391 	INIT_LIST_HEAD(&subsys->ctrls);
2392 	INIT_LIST_HEAD(&subsys->nsheads);
2393 	nvme_init_subnqn(subsys, ctrl, id);
2394 	memcpy(subsys->serial, id->sn, sizeof(subsys->serial));
2395 	memcpy(subsys->model, id->mn, sizeof(subsys->model));
2396 	memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev));
2397 	subsys->vendor_id = le16_to_cpu(id->vid);
2398 	subsys->cmic = id->cmic;
2399 #ifdef CONFIG_NVME_MULTIPATH
2400 	subsys->iopolicy = NVME_IOPOLICY_NUMA;
2401 #endif
2402 
2403 	subsys->dev.class = nvme_subsys_class;
2404 	subsys->dev.release = nvme_release_subsystem;
2405 	subsys->dev.groups = nvme_subsys_attrs_groups;
2406 	dev_set_name(&subsys->dev, "nvme-subsys%d", subsys->instance);
2407 	device_initialize(&subsys->dev);
2408 
2409 	mutex_lock(&nvme_subsystems_lock);
2410 	found = __nvme_find_get_subsystem(subsys->subnqn);
2411 	if (found) {
2412 		__nvme_release_subsystem(subsys);
2413 		subsys = found;
2414 
2415 		if (!nvme_validate_cntlid(subsys, ctrl, id)) {
2416 			ret = -EINVAL;
2417 			goto out_put_subsystem;
2418 		}
2419 	} else {
2420 		ret = device_add(&subsys->dev);
2421 		if (ret) {
2422 			dev_err(ctrl->device,
2423 				"failed to register subsystem device.\n");
2424 			goto out_unlock;
2425 		}
2426 		ida_init(&subsys->ns_ida);
2427 		list_add_tail(&subsys->entry, &nvme_subsystems);
2428 	}
2429 
2430 	if (sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
2431 			dev_name(ctrl->device))) {
2432 		dev_err(ctrl->device,
2433 			"failed to create sysfs link from subsystem.\n");
2434 		goto out_put_subsystem;
2435 	}
2436 
2437 	ctrl->subsys = subsys;
2438 	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
2439 	mutex_unlock(&nvme_subsystems_lock);
2440 	return 0;
2441 
2442 out_put_subsystem:
2443 	nvme_put_subsystem(subsys);
2444 out_unlock:
2445 	mutex_unlock(&nvme_subsystems_lock);
2446 	put_device(&subsys->dev);
2447 	return ret;
2448 }
2449 
2450 int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp,
2451 		void *log, size_t size, u64 offset)
2452 {
2453 	struct nvme_command c = { };
2454 	unsigned long dwlen = size / 4 - 1;
2455 
2456 	c.get_log_page.opcode = nvme_admin_get_log_page;
2457 	c.get_log_page.nsid = cpu_to_le32(nsid);
2458 	c.get_log_page.lid = log_page;
2459 	c.get_log_page.lsp = lsp;
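	/*
	 * The 0's based dword count is split across NUMDL (lower 16 bits) and
	 * NUMDU (upper 16 bits).
	 */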
2460 	c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1));
2461 	c.get_log_page.numdu = cpu_to_le16(dwlen >> 16);
2462 	c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset));
2463 	c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset));
2464 
2465 	return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
2466 }
2467 
2468 static int nvme_get_effects_log(struct nvme_ctrl *ctrl)
2469 {
2470 	int ret;
2471 
2472 	if (!ctrl->effects)
2473 		ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL);
2474 
2475 	if (!ctrl->effects)
2476 		return 0;
2477 
2478 	ret = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CMD_EFFECTS, 0,
2479 			ctrl->effects, sizeof(*ctrl->effects), 0);
2480 	if (ret) {
2481 		kfree(ctrl->effects);
2482 		ctrl->effects = NULL;
2483 	}
2484 	return ret;
2485 }
2486 
2487 /*
2488  * Initialize the cached copies of the Identify data and various controller
2489  * registers in our nvme_ctrl structure.  This should be called as soon as
2490  * the admin queue is fully up and running.
2491  */
2492 int nvme_init_identify(struct nvme_ctrl *ctrl)
2493 {
2494 	struct nvme_id_ctrl *id;
2495 	u64 cap;
2496 	int ret, page_shift;
2497 	u32 max_hw_sectors;
2498 	bool prev_apst_enabled;
2499 
2500 	ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
2501 	if (ret) {
2502 		dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
2503 		return ret;
2504 	}
2505 
2506 	ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &cap);
2507 	if (ret) {
2508 		dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
2509 		return ret;
2510 	}
2511 	page_shift = NVME_CAP_MPSMIN(cap) + 12;
2512 
2513 	if (ctrl->vs >= NVME_VS(1, 1, 0))
2514 		ctrl->subsystem = NVME_CAP_NSSRC(cap);
2515 
2516 	ret = nvme_identify_ctrl(ctrl, &id);
2517 	if (ret) {
2518 		dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
2519 		return -EIO;
2520 	}
2521 
2522 	if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) {
2523 		ret = nvme_get_effects_log(ctrl);
2524 		if (ret < 0)
2525 			goto out_free;
2526 	}
2527 
2528 	if (!ctrl->identified) {
2529 		int i;
2530 
2531 		ret = nvme_init_subsystem(ctrl, id);
2532 		if (ret)
2533 			goto out_free;
2534 
2535 		/*
2536 		 * Check for quirks.  Quirks can depend on firmware version,
2537 		 * so, in principle, the set of quirks present can change
2538 		 * across a reset.  As a possible future enhancement, we
2539 		 * could re-scan for quirks every time we reinitialize
2540 		 * the device, but we'd have to make sure that the driver
2541 		 * behaves intelligently if the quirks change.
2542 		 */
2543 		for (i = 0; i < ARRAY_SIZE(core_quirks); i++) {
2544 			if (quirk_matches(id, &core_quirks[i]))
2545 				ctrl->quirks |= core_quirks[i].quirks;
2546 		}
2547 	}
2548 
2549 	if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
2550 		dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
2551 		ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS;
2552 	}
2553 
2554 	ctrl->crdt[0] = le16_to_cpu(id->crdt1);
2555 	ctrl->crdt[1] = le16_to_cpu(id->crdt2);
2556 	ctrl->crdt[2] = le16_to_cpu(id->crdt3);
2557 
2558 	ctrl->oacs = le16_to_cpu(id->oacs);
2559 	ctrl->oncs = le16_to_cpu(id->oncs);
2560 	ctrl->oaes = le32_to_cpu(id->oaes);
2561 	atomic_set(&ctrl->abort_limit, id->acl + 1);
2562 	ctrl->vwc = id->vwc;
2563 	if (id->mdts)
2564 		max_hw_sectors = 1 << (id->mdts + page_shift - 9);
2565 	else
2566 		max_hw_sectors = UINT_MAX;
2567 	ctrl->max_hw_sectors =
2568 		min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);
2569 
2570 	nvme_set_queue_limits(ctrl, ctrl->admin_q);
2571 	ctrl->sgls = le32_to_cpu(id->sgls);
2572 	ctrl->kas = le16_to_cpu(id->kas);
2573 	ctrl->max_namespaces = le32_to_cpu(id->mnan);
2574 	ctrl->ctratt = le32_to_cpu(id->ctratt);
2575 
2576 	if (id->rtd3e) {
2577 		/* us -> s */
2578 		u32 transition_time = le32_to_cpu(id->rtd3e) / 1000000;
2579 
2580 		ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time,
2581 						 shutdown_timeout, 60);
2582 
2583 		if (ctrl->shutdown_timeout != shutdown_timeout)
2584 			dev_info(ctrl->device,
2585 				 "Shutdown timeout set to %u seconds\n",
2586 				 ctrl->shutdown_timeout);
2587 	} else
2588 		ctrl->shutdown_timeout = shutdown_timeout;
2589 
2590 	ctrl->npss = id->npss;
2591 	ctrl->apsta = id->apsta;
2592 	prev_apst_enabled = ctrl->apst_enabled;
2593 	if (ctrl->quirks & NVME_QUIRK_NO_APST) {
2594 		if (force_apst && id->apsta) {
2595 			dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
2596 			ctrl->apst_enabled = true;
2597 		} else {
2598 			ctrl->apst_enabled = false;
2599 		}
2600 	} else {
2601 		ctrl->apst_enabled = id->apsta;
2602 	}
2603 	memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd));
2604 
2605 	if (ctrl->ops->flags & NVME_F_FABRICS) {
2606 		ctrl->icdoff = le16_to_cpu(id->icdoff);
2607 		ctrl->ioccsz = le32_to_cpu(id->ioccsz);
2608 		ctrl->iorcsz = le32_to_cpu(id->iorcsz);
2609 		ctrl->maxcmd = le16_to_cpu(id->maxcmd);
2610 
2611 		/*
2612 		 * In fabrics we need to verify the cntlid matches the
2613 		 * admin connect
2614 		 */
2615 		if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
2616 			ret = -EINVAL;
2617 			goto out_free;
2618 		}
2619 
2620 		if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
2621 			dev_err(ctrl->device,
2622 				"keep-alive support is mandatory for fabrics\n");
2623 			ret = -EINVAL;
2624 			goto out_free;
2625 		}
2626 	} else {
2627 		ctrl->cntlid = le16_to_cpu(id->cntlid);
2628 		ctrl->hmpre = le32_to_cpu(id->hmpre);
2629 		ctrl->hmmin = le32_to_cpu(id->hmmin);
2630 		ctrl->hmminds = le32_to_cpu(id->hmminds);
2631 		ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
2632 	}
2633 
2634 	ret = nvme_mpath_init(ctrl, id);
2635 	kfree(id);
2636 
2637 	if (ret < 0)
2638 		return ret;
2639 
2640 	if (ctrl->apst_enabled && !prev_apst_enabled)
2641 		dev_pm_qos_expose_latency_tolerance(ctrl->device);
2642 	else if (!ctrl->apst_enabled && prev_apst_enabled)
2643 		dev_pm_qos_hide_latency_tolerance(ctrl->device);
2644 
2645 	ret = nvme_configure_apst(ctrl);
2646 	if (ret < 0)
2647 		return ret;
2648 
2649 	ret = nvme_configure_timestamp(ctrl);
2650 	if (ret < 0)
2651 		return ret;
2652 
2653 	ret = nvme_configure_directives(ctrl);
2654 	if (ret < 0)
2655 		return ret;
2656 
2657 	ret = nvme_configure_acre(ctrl);
2658 	if (ret < 0)
2659 		return ret;
2660 
2661 	ctrl->identified = true;
2662 
2663 	return 0;
2664 
2665 out_free:
2666 	kfree(id);
2667 	return ret;
2668 }
2669 EXPORT_SYMBOL_GPL(nvme_init_identify);
2670 
2671 static int nvme_dev_open(struct inode *inode, struct file *file)
2672 {
2673 	struct nvme_ctrl *ctrl =
2674 		container_of(inode->i_cdev, struct nvme_ctrl, cdev);
2675 
2676 	switch (ctrl->state) {
2677 	case NVME_CTRL_LIVE:
2678 	case NVME_CTRL_ADMIN_ONLY:
2679 		break;
2680 	default:
2681 		return -EWOULDBLOCK;
2682 	}
2683 
2684 	file->private_data = ctrl;
2685 	return 0;
2686 }
2687 
2688 static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
2689 {
2690 	struct nvme_ns *ns;
2691 	int ret;
2692 
2693 	down_read(&ctrl->namespaces_rwsem);
2694 	if (list_empty(&ctrl->namespaces)) {
2695 		ret = -ENOTTY;
2696 		goto out_unlock;
2697 	}
2698 
2699 	ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
2700 	if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
2701 		dev_warn(ctrl->device,
2702 			"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
2703 		ret = -EINVAL;
2704 		goto out_unlock;
2705 	}
2706 
2707 	dev_warn(ctrl->device,
2708 		"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
2709 	kref_get(&ns->kref);
2710 	up_read(&ctrl->namespaces_rwsem);
2711 
2712 	ret = nvme_user_cmd(ctrl, ns, argp);
2713 	nvme_put_ns(ns);
2714 	return ret;
2715 
2716 out_unlock:
2717 	up_read(&ctrl->namespaces_rwsem);
2718 	return ret;
2719 }
2720 
2721 static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
2722 		unsigned long arg)
2723 {
2724 	struct nvme_ctrl *ctrl = file->private_data;
2725 	void __user *argp = (void __user *)arg;
2726 
2727 	switch (cmd) {
2728 	case NVME_IOCTL_ADMIN_CMD:
2729 		return nvme_user_cmd(ctrl, NULL, argp);
2730 	case NVME_IOCTL_IO_CMD:
2731 		return nvme_dev_user_cmd(ctrl, argp);
2732 	case NVME_IOCTL_RESET:
2733 		dev_warn(ctrl->device, "resetting controller\n");
2734 		return nvme_reset_ctrl_sync(ctrl);
2735 	case NVME_IOCTL_SUBSYS_RESET:
2736 		return nvme_reset_subsystem(ctrl);
2737 	case NVME_IOCTL_RESCAN:
2738 		nvme_queue_scan(ctrl);
2739 		return 0;
2740 	default:
2741 		return -ENOTTY;
2742 	}
2743 }
2744 
2745 static const struct file_operations nvme_dev_fops = {
2746 	.owner		= THIS_MODULE,
2747 	.open		= nvme_dev_open,
2748 	.unlocked_ioctl	= nvme_dev_ioctl,
2749 	.compat_ioctl	= nvme_dev_ioctl,
2750 };
2751 
2752 static ssize_t nvme_sysfs_reset(struct device *dev,
2753 				struct device_attribute *attr, const char *buf,
2754 				size_t count)
2755 {
2756 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
2757 	int ret;
2758 
2759 	ret = nvme_reset_ctrl_sync(ctrl);
2760 	if (ret < 0)
2761 		return ret;
2762 	return count;
2763 }
2764 static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
2765 
2766 static ssize_t nvme_sysfs_rescan(struct device *dev,
2767 				struct device_attribute *attr, const char *buf,
2768 				size_t count)
2769 {
2770 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
2771 
2772 	nvme_queue_scan(ctrl);
2773 	return count;
2774 }
2775 static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);
2776 
2777 static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
2778 {
2779 	struct gendisk *disk = dev_to_disk(dev);
2780 
2781 	if (disk->fops == &nvme_fops)
2782 		return nvme_get_ns_from_dev(dev)->head;
2783 	else
2784 		return disk->private_data;
2785 }
2786 
2787 static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
2788 		char *buf)
2789 {
2790 	struct nvme_ns_head *head = dev_to_ns_head(dev);
2791 	struct nvme_ns_ids *ids = &head->ids;
2792 	struct nvme_subsystem *subsys = head->subsys;
2793 	int serial_len = sizeof(subsys->serial);
2794 	int model_len = sizeof(subsys->model);
2795 
2796 	if (!uuid_is_null(&ids->uuid))
2797 		return sprintf(buf, "uuid.%pU\n", &ids->uuid);
2798 
2799 	if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
2800 		return sprintf(buf, "eui.%16phN\n", ids->nguid);
2801 
2802 	if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
2803 		return sprintf(buf, "eui.%8phN\n", ids->eui64);
2804 
2805 	while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' ||
2806 				  subsys->serial[serial_len - 1] == '\0'))
2807 		serial_len--;
2808 	while (model_len > 0 && (subsys->model[model_len - 1] == ' ' ||
2809 				 subsys->model[model_len - 1] == '\0'))
2810 		model_len--;
2811 
2812 	return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
2813 		serial_len, subsys->serial, model_len, subsys->model,
2814 		head->ns_id);
2815 }
2816 static DEVICE_ATTR_RO(wwid);
2817 
2818 static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
2819 		char *buf)
2820 {
2821 	return sprintf(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
2822 }
2823 static DEVICE_ATTR_RO(nguid);
2824 
2825 static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
2826 		char *buf)
2827 {
2828 	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
2829 
2830 	/* For backward compatibility expose the NGUID to userspace if
2831 	 * we have no UUID set
2832 	 */
2833 	if (uuid_is_null(&ids->uuid)) {
2834 		printk_ratelimited(KERN_WARNING
2835 				   "No UUID available, providing old NGUID\n");
2836 		return sprintf(buf, "%pU\n", ids->nguid);
2837 	}
2838 	return sprintf(buf, "%pU\n", &ids->uuid);
2839 }
2840 static DEVICE_ATTR_RO(uuid);
2841 
2842 static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
2843 		char *buf)
2844 {
2845 	return sprintf(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
2846 }
2847 static DEVICE_ATTR_RO(eui);
2848 
2849 static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
2850 		char *buf)
2851 {
2852 	return sprintf(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
2853 }
2854 static DEVICE_ATTR_RO(nsid);
2855 
2856 static struct attribute *nvme_ns_id_attrs[] = {
2857 	&dev_attr_wwid.attr,
2858 	&dev_attr_uuid.attr,
2859 	&dev_attr_nguid.attr,
2860 	&dev_attr_eui.attr,
2861 	&dev_attr_nsid.attr,
2862 #ifdef CONFIG_NVME_MULTIPATH
2863 	&dev_attr_ana_grpid.attr,
2864 	&dev_attr_ana_state.attr,
2865 #endif
2866 	NULL,
2867 };
2868 
2869 static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj,
2870 		struct attribute *a, int n)
2871 {
2872 	struct device *dev = container_of(kobj, struct device, kobj);
2873 	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
2874 
2875 	if (a == &dev_attr_uuid.attr) {
2876 		if (uuid_is_null(&ids->uuid) &&
2877 		    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
2878 			return 0;
2879 	}
2880 	if (a == &dev_attr_nguid.attr) {
2881 		if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
2882 			return 0;
2883 	}
2884 	if (a == &dev_attr_eui.attr) {
2885 		if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
2886 			return 0;
2887 	}
2888 #ifdef CONFIG_NVME_MULTIPATH
2889 	if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
2890 		if (dev_to_disk(dev)->fops != &nvme_fops) /* per-path attr */
2891 			return 0;
2892 		if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
2893 			return 0;
2894 	}
2895 #endif
2896 	return a->mode;
2897 }
2898 
2899 static const struct attribute_group nvme_ns_id_attr_group = {
2900 	.attrs		= nvme_ns_id_attrs,
2901 	.is_visible	= nvme_ns_id_attrs_are_visible,
2902 };
2903 
2904 const struct attribute_group *nvme_ns_id_attr_groups[] = {
2905 	&nvme_ns_id_attr_group,
2906 #ifdef CONFIG_NVM
2907 	&nvme_nvm_attr_group,
2908 #endif
2909 	NULL,
2910 };
2911 
2912 #define nvme_show_str_function(field)						\
2913 static ssize_t  field##_show(struct device *dev,				\
2914 			    struct device_attribute *attr, char *buf)		\
2915 {										\
2916         struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
2917         return sprintf(buf, "%.*s\n",						\
2918 		(int)sizeof(ctrl->subsys->field), ctrl->subsys->field);		\
2919 }										\
2920 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
2921 
2922 nvme_show_str_function(model);
2923 nvme_show_str_function(serial);
2924 nvme_show_str_function(firmware_rev);
2925 
2926 #define nvme_show_int_function(field)						\
2927 static ssize_t  field##_show(struct device *dev,				\
2928 			    struct device_attribute *attr, char *buf)		\
2929 {										\
2930         struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
2931         return sprintf(buf, "%d\n", ctrl->field);	\
2932 }										\
2933 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
2934 
2935 nvme_show_int_function(cntlid);
2936 nvme_show_int_function(numa_node);
2937 
2938 static ssize_t nvme_sysfs_delete(struct device *dev,
2939 				struct device_attribute *attr, const char *buf,
2940 				size_t count)
2941 {
2942 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
2943 
2944 	if (device_remove_file_self(dev, attr))
2945 		nvme_delete_ctrl_sync(ctrl);
2946 	return count;
2947 }
2948 static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);
2949 
2950 static ssize_t nvme_sysfs_show_transport(struct device *dev,
2951 					 struct device_attribute *attr,
2952 					 char *buf)
2953 {
2954 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
2955 
2956 	return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name);
2957 }
2958 static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);
2959 
2960 static ssize_t nvme_sysfs_show_state(struct device *dev,
2961 				     struct device_attribute *attr,
2962 				     char *buf)
2963 {
2964 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
2965 	static const char *const state_name[] = {
2966 		[NVME_CTRL_NEW]		= "new",
2967 		[NVME_CTRL_LIVE]	= "live",
2968 		[NVME_CTRL_ADMIN_ONLY]	= "only-admin",
2969 		[NVME_CTRL_RESETTING]	= "resetting",
2970 		[NVME_CTRL_CONNECTING]	= "connecting",
2971 		[NVME_CTRL_DELETING]	= "deleting",
2972 		[NVME_CTRL_DEAD]	= "dead",
2973 	};
2974 
2975 	if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
2976 	    state_name[ctrl->state])
2977 		return sprintf(buf, "%s\n", state_name[ctrl->state]);
2978 
2979 	return sprintf(buf, "unknown state\n");
2980 }
2981 
2982 static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);
2983 
2984 static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
2985 					 struct device_attribute *attr,
2986 					 char *buf)
2987 {
2988 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
2989 
2990 	return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->subsys->subnqn);
2991 }
2992 static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);
2993 
2994 static ssize_t nvme_sysfs_show_address(struct device *dev,
2995 					 struct device_attribute *attr,
2996 					 char *buf)
2997 {
2998 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
2999 
3000 	return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
3001 }
3002 static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);
3003 
3004 static struct attribute *nvme_dev_attrs[] = {
3005 	&dev_attr_reset_controller.attr,
3006 	&dev_attr_rescan_controller.attr,
3007 	&dev_attr_model.attr,
3008 	&dev_attr_serial.attr,
3009 	&dev_attr_firmware_rev.attr,
3010 	&dev_attr_cntlid.attr,
3011 	&dev_attr_delete_controller.attr,
3012 	&dev_attr_transport.attr,
3013 	&dev_attr_subsysnqn.attr,
3014 	&dev_attr_address.attr,
3015 	&dev_attr_state.attr,
3016 	&dev_attr_numa_node.attr,
3017 	NULL
3018 };
3019 
3020 static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
3021 		struct attribute *a, int n)
3022 {
3023 	struct device *dev = container_of(kobj, struct device, kobj);
3024 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3025 
3026 	if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl)
3027 		return 0;
3028 	if (a == &dev_attr_address.attr && !ctrl->ops->get_address)
3029 		return 0;
3030 
3031 	return a->mode;
3032 }
3033 
3034 static struct attribute_group nvme_dev_attrs_group = {
3035 	.attrs		= nvme_dev_attrs,
3036 	.is_visible	= nvme_dev_attrs_are_visible,
3037 };
3038 
3039 static const struct attribute_group *nvme_dev_attr_groups[] = {
3040 	&nvme_dev_attrs_group,
3041 	NULL,
3042 };
3043 
3044 static struct nvme_ns_head *__nvme_find_ns_head(struct nvme_subsystem *subsys,
3045 		unsigned nsid)
3046 {
3047 	struct nvme_ns_head *h;
3048 
3049 	lockdep_assert_held(&subsys->lock);
3050 
3051 	list_for_each_entry(h, &subsys->nsheads, entry) {
3052 		if (h->ns_id == nsid && kref_get_unless_zero(&h->ref))
3053 			return h;
3054 	}
3055 
3056 	return NULL;
3057 }
3058 
3059 static int __nvme_check_ids(struct nvme_subsystem *subsys,
3060 		struct nvme_ns_head *new)
3061 {
3062 	struct nvme_ns_head *h;
3063 
3064 	lockdep_assert_held(&subsys->lock);
3065 
3066 	list_for_each_entry(h, &subsys->nsheads, entry) {
3067 		if (nvme_ns_ids_valid(&new->ids) &&
3068 		    !list_empty(&h->list) &&
3069 		    nvme_ns_ids_equal(&new->ids, &h->ids))
3070 			return -EINVAL;
3071 	}
3072 
3073 	return 0;
3074 }
3075 
3076 static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
3077 		unsigned nsid, struct nvme_id_ns *id)
3078 {
3079 	struct nvme_ns_head *head;
3080 	size_t size = sizeof(*head);
3081 	int ret = -ENOMEM;
3082 
3083 #ifdef CONFIG_NVME_MULTIPATH
3084 	size += num_possible_nodes() * sizeof(struct nvme_ns *);
3085 #endif
3086 
3087 	head = kzalloc(size, GFP_KERNEL);
3088 	if (!head)
3089 		goto out;
3090 	ret = ida_simple_get(&ctrl->subsys->ns_ida, 1, 0, GFP_KERNEL);
3091 	if (ret < 0)
3092 		goto out_free_head;
3093 	head->instance = ret;
3094 	INIT_LIST_HEAD(&head->list);
3095 	ret = init_srcu_struct(&head->srcu);
3096 	if (ret)
3097 		goto out_ida_remove;
3098 	head->subsys = ctrl->subsys;
3099 	head->ns_id = nsid;
3100 	kref_init(&head->ref);
3101 
3102 	nvme_report_ns_ids(ctrl, nsid, id, &head->ids);
3103 
3104 	ret = __nvme_check_ids(ctrl->subsys, head);
3105 	if (ret) {
3106 		dev_err(ctrl->device,
3107 			"duplicate IDs for nsid %d\n", nsid);
3108 		goto out_cleanup_srcu;
3109 	}
3110 
3111 	ret = nvme_mpath_alloc_disk(ctrl, head);
3112 	if (ret)
3113 		goto out_cleanup_srcu;
3114 
3115 	list_add_tail(&head->entry, &ctrl->subsys->nsheads);
3116 
3117 	kref_get(&ctrl->subsys->ref);
3118 
3119 	return head;
3120 out_cleanup_srcu:
3121 	cleanup_srcu_struct(&head->srcu);
3122 out_ida_remove:
3123 	ida_simple_remove(&ctrl->subsys->ns_ida, head->instance);
3124 out_free_head:
3125 	kfree(head);
3126 out:
3127 	return ERR_PTR(ret);
3128 }
3129 
3130 static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
3131 		struct nvme_id_ns *id)
3132 {
3133 	struct nvme_ctrl *ctrl = ns->ctrl;
3134 	bool is_shared = id->nmic & (1 << 0);
3135 	struct nvme_ns_head *head = NULL;
3136 	int ret = 0;
3137 
3138 	mutex_lock(&ctrl->subsys->lock);
3139 	if (is_shared)
3140 		head = __nvme_find_ns_head(ctrl->subsys, nsid);
3141 	if (!head) {
3142 		head = nvme_alloc_ns_head(ctrl, nsid, id);
3143 		if (IS_ERR(head)) {
3144 			ret = PTR_ERR(head);
3145 			goto out_unlock;
3146 		}
3147 	} else {
3148 		struct nvme_ns_ids ids;
3149 
3150 		nvme_report_ns_ids(ctrl, nsid, id, &ids);
3151 		if (!nvme_ns_ids_equal(&head->ids, &ids)) {
3152 			dev_err(ctrl->device,
3153 				"IDs don't match for shared namespace %d\n",
3154 					nsid);
3155 			ret = -EINVAL;
3156 			goto out_unlock;
3157 		}
3158 	}
3159 
3160 	list_add_tail(&ns->siblings, &head->list);
3161 	ns->head = head;
3162 
3163 out_unlock:
3164 	mutex_unlock(&ctrl->subsys->lock);
3165 	return ret;
3166 }
3167 
3168 static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
3169 {
3170 	struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
3171 	struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);
3172 
3173 	return nsa->head->ns_id - nsb->head->ns_id;
3174 }
3175 
3176 static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
3177 {
3178 	struct nvme_ns *ns, *ret = NULL;
3179 
3180 	down_read(&ctrl->namespaces_rwsem);
3181 	list_for_each_entry(ns, &ctrl->namespaces, list) {
3182 		if (ns->head->ns_id == nsid) {
3183 			if (!kref_get_unless_zero(&ns->kref))
3184 				continue;
3185 			ret = ns;
3186 			break;
3187 		}
3188 		if (ns->head->ns_id > nsid)
3189 			break;
3190 	}
3191 	up_read(&ctrl->namespaces_rwsem);
3192 	return ret;
3193 }
3194 
3195 static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns)
3196 {
3197 	struct streams_directive_params s;
3198 	int ret;
3199 
3200 	if (!ctrl->nr_streams)
3201 		return 0;
3202 
3203 	ret = nvme_get_stream_params(ctrl, &s, ns->head->ns_id);
3204 	if (ret)
3205 		return ret;
3206 
3207 	ns->sws = le32_to_cpu(s.sws);
3208 	ns->sgs = le16_to_cpu(s.sgs);
3209 
3210 	if (ns->sws) {
3211 		unsigned int bs = 1 << ns->lba_shift;
3212 
3213 		blk_queue_io_min(ns->queue, bs * ns->sws);
3214 		if (ns->sgs)
3215 			blk_queue_io_opt(ns->queue, bs * ns->sws * ns->sgs);
3216 	}
3217 
3218 	return 0;
3219 }
3220 
3221 static int nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
3222 {
3223 	struct nvme_ns *ns;
3224 	struct gendisk *disk;
3225 	struct nvme_id_ns *id;
3226 	char disk_name[DISK_NAME_LEN];
3227 	int node = ctrl->numa_node, flags = GENHD_FL_EXT_DEVT, ret;
3228 
3229 	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
3230 	if (!ns)
3231 		return -ENOMEM;
3232 
3233 	ns->queue = blk_mq_init_queue(ctrl->tagset);
3234 	if (IS_ERR(ns->queue)) {
3235 		ret = PTR_ERR(ns->queue);
3236 		goto out_free_ns;
3237 	}
3238 
3239 	blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
3240 	if (ctrl->ops->flags & NVME_F_PCI_P2PDMA)
3241 		blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);
3242 
3243 	ns->queue->queuedata = ns;
3244 	ns->ctrl = ctrl;
3245 
3246 	kref_init(&ns->kref);
3247 	ns->lba_shift = 9; /* set to a default value of 512 until the disk is validated */
3248 
3249 	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
3250 	nvme_set_queue_limits(ctrl, ns->queue);
3251 
3252 	id = nvme_identify_ns(ctrl, nsid);
3253 	if (!id) {
3254 		ret = -EIO;
3255 		goto out_free_queue;
3256 	}
3257 
3258 	if (id->ncap == 0) {
3259 		ret = -EINVAL;
3260 		goto out_free_id;
3261 	}
3262 
3263 	ret = nvme_init_ns_head(ns, nsid, id);
3264 	if (ret)
3265 		goto out_free_id;
3266 	nvme_setup_streams_ns(ctrl, ns);
3267 	nvme_set_disk_name(disk_name, ns, ctrl, &flags);
3268 
3269 	disk = alloc_disk_node(0, node);
3270 	if (!disk) {
3271 		ret = -ENOMEM;
3272 		goto out_unlink_ns;
3273 	}
3274 
3275 	disk->fops = &nvme_fops;
3276 	disk->private_data = ns;
3277 	disk->queue = ns->queue;
3278 	disk->flags = flags;
3279 	memcpy(disk->disk_name, disk_name, DISK_NAME_LEN);
3280 	ns->disk = disk;
3281 
3282 	__nvme_revalidate_disk(disk, id);
3283 
3284 	if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
3285 		ret = nvme_nvm_register(ns, disk_name, node);
3286 		if (ret) {
3287 			dev_warn(ctrl->device, "LightNVM init failure\n");
3288 			goto out_put_disk;
3289 		}
3290 	}
3291 
3292 	down_write(&ctrl->namespaces_rwsem);
3293 	list_add_tail(&ns->list, &ctrl->namespaces);
3294 	up_write(&ctrl->namespaces_rwsem);
3295 
3296 	nvme_get_ctrl(ctrl);
3297 
3298 	device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups);
3299 
3300 	nvme_mpath_add_disk(ns, id);
3301 	nvme_fault_inject_init(ns);
3302 	kfree(id);
3303 
3304 	return 0;
3305  out_put_disk:
3306 	put_disk(ns->disk);
3307  out_unlink_ns:
3308 	mutex_lock(&ctrl->subsys->lock);
3309 	list_del_rcu(&ns->siblings);
3310 	mutex_unlock(&ctrl->subsys->lock);
3311 	nvme_put_ns_head(ns->head);
3312  out_free_id:
3313 	kfree(id);
3314  out_free_queue:
3315 	blk_cleanup_queue(ns->queue);
3316  out_free_ns:
3317 	kfree(ns);
3318 	return ret;
3319 }
3320 
3321 static void nvme_ns_remove(struct nvme_ns *ns)
3322 {
3323 	if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
3324 		return;
3325 
3326 	nvme_fault_inject_fini(ns);
3327 	if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
3328 		del_gendisk(ns->disk);
3329 		blk_cleanup_queue(ns->queue);
3330 		if (blk_get_integrity(ns->disk))
3331 			blk_integrity_unregister(ns->disk);
3332 	}
3333 
3334 	mutex_lock(&ns->ctrl->subsys->lock);
3335 	list_del_rcu(&ns->siblings);
3336 	nvme_mpath_clear_current_path(ns);
3337 	mutex_unlock(&ns->ctrl->subsys->lock);
3338 
3339 	down_write(&ns->ctrl->namespaces_rwsem);
3340 	list_del_init(&ns->list);
3341 	up_write(&ns->ctrl->namespaces_rwsem);
3342 
3343 	synchronize_srcu(&ns->head->srcu);
3344 	nvme_mpath_check_last_path(ns);
3345 	nvme_put_ns(ns);
3346 }
3347 
3348 static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
3349 {
3350 	struct nvme_ns *ns;
3351 
3352 	ns = nvme_find_get_ns(ctrl, nsid);
3353 	if (ns) {
3354 		if (ns->disk && revalidate_disk(ns->disk))
3355 			nvme_ns_remove(ns);
3356 		nvme_put_ns(ns);
3357 	} else
3358 		nvme_alloc_ns(ctrl, nsid);
3359 }
3360 
3361 static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
3362 					unsigned nsid)
3363 {
3364 	struct nvme_ns *ns, *next;
3365 	LIST_HEAD(rm_list);
3366 
3367 	down_write(&ctrl->namespaces_rwsem);
3368 	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
3369 		if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags))
3370 			list_move_tail(&ns->list, &rm_list);
3371 	}
3372 	up_write(&ctrl->namespaces_rwsem);
3373 
3374 	list_for_each_entry_safe(ns, next, &rm_list, list)
3375 		nvme_ns_remove(ns);
3376 
3377 }
3378 
3379 static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
3380 {
3381 	struct nvme_ns *ns;
3382 	__le32 *ns_list;
3383 	unsigned i, j, nsid, prev = 0, num_lists = DIV_ROUND_UP(nn, 1024);
3384 	int ret = 0;
3385 
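	/*
	 * Each Identify Namespace List command returns up to 1024 active
	 * NSIDs (a 4K buffer of __le32 entries), hence the number of lists.
	 */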
3386 	ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
3387 	if (!ns_list)
3388 		return -ENOMEM;
3389 
3390 	for (i = 0; i < num_lists; i++) {
3391 		ret = nvme_identify_ns_list(ctrl, prev, ns_list);
3392 		if (ret)
3393 			goto free;
3394 
3395 		for (j = 0; j < min(nn, 1024U); j++) {
3396 			nsid = le32_to_cpu(ns_list[j]);
3397 			if (!nsid)
3398 				goto out;
3399 
3400 			nvme_validate_ns(ctrl, nsid);
3401 
3402 			while (++prev < nsid) {
3403 				ns = nvme_find_get_ns(ctrl, prev);
3404 				if (ns) {
3405 					nvme_ns_remove(ns);
3406 					nvme_put_ns(ns);
3407 				}
3408 			}
3409 		}
3410 		nn -= j;
3411 	}
3412  out:
3413 	nvme_remove_invalid_namespaces(ctrl, prev);
3414  free:
3415 	kfree(ns_list);
3416 	return ret;
3417 }
3418 
3419 static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
3420 {
3421 	unsigned i;
3422 
3423 	for (i = 1; i <= nn; i++)
3424 		nvme_validate_ns(ctrl, i);
3425 
3426 	nvme_remove_invalid_namespaces(ctrl, nn);
3427 }
3428 
3429 static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl)
3430 {
3431 	size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32);
3432 	__le32 *log;
3433 	int error;
3434 
3435 	log = kzalloc(log_size, GFP_KERNEL);
3436 	if (!log)
3437 		return;
3438 
3439 	/*
3440 	 * We need to read the log to clear the AEN, but we don't want to rely
3441 	 * on it for the changed namespace information as userspace could have
3442 	 * raced with us in reading the log page, which could cause us to miss
3443 	 * updates.
3444 	 */
3445 	error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0, log,
3446 			log_size, 0);
3447 	if (error)
3448 		dev_warn(ctrl->device,
3449 			"reading changed ns log failed: %d\n", error);
3450 
3451 	kfree(log);
3452 }
3453 
3454 static void nvme_scan_work(struct work_struct *work)
3455 {
3456 	struct nvme_ctrl *ctrl =
3457 		container_of(work, struct nvme_ctrl, scan_work);
3458 	struct nvme_id_ctrl *id;
3459 	unsigned nn;
3460 
3461 	if (ctrl->state != NVME_CTRL_LIVE)
3462 		return;
3463 
3464 	WARN_ON_ONCE(!ctrl->tagset);
3465 
3466 	if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
3467 		dev_info(ctrl->device, "rescanning namespaces.\n");
3468 		nvme_clear_changed_ns_log(ctrl);
3469 	}
3470 
3471 	if (nvme_identify_ctrl(ctrl, &id))
3472 		return;
3473 
3474 	mutex_lock(&ctrl->scan_lock);
3475 	nn = le32_to_cpu(id->nn);
3476 	if (ctrl->vs >= NVME_VS(1, 1, 0) &&
3477 	    !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
3478 		if (!nvme_scan_ns_list(ctrl, nn))
3479 			goto out_free_id;
3480 	}
3481 	nvme_scan_ns_sequential(ctrl, nn);
3482 out_free_id:
3483 	mutex_unlock(&ctrl->scan_lock);
3484 	kfree(id);
3485 	down_write(&ctrl->namespaces_rwsem);
3486 	list_sort(NULL, &ctrl->namespaces, ns_cmp);
3487 	up_write(&ctrl->namespaces_rwsem);
3488 }
3489 
3490 /*
3491  * This function iterates the namespace list unlocked to allow recovery from
3492  * controller failure. It is up to the caller to ensure the namespace list is
3493  * not modified by scan work while this function is executing.
3494  */
3495 void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
3496 {
3497 	struct nvme_ns *ns, *next;
3498 	LIST_HEAD(ns_list);
3499 
3500 	/* prevent racing with ns scanning */
3501 	flush_work(&ctrl->scan_work);
3502 
3503 	/*
3504 	 * The dead state indicates the controller was not gracefully
3505 	 * disconnected. In that case, we won't be able to flush any data while
3506 	 * removing the namespaces' disks; fail all the queues now to avoid
3507 	 * potentially having to clean up the failed sync later.
3508 	 */
3509 	if (ctrl->state == NVME_CTRL_DEAD)
3510 		nvme_kill_queues(ctrl);
3511 
3512 	down_write(&ctrl->namespaces_rwsem);
3513 	list_splice_init(&ctrl->namespaces, &ns_list);
3514 	up_write(&ctrl->namespaces_rwsem);
3515 
3516 	list_for_each_entry_safe(ns, next, &ns_list, list)
3517 		nvme_ns_remove(ns);
3518 }
3519 EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
3520 
3521 static void nvme_aen_uevent(struct nvme_ctrl *ctrl)
3522 {
3523 	char *envp[2] = { NULL, NULL };
3524 	u32 aen_result = ctrl->aen_result;
3525 
3526 	ctrl->aen_result = 0;
3527 	if (!aen_result)
3528 		return;
3529 
3530 	envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result);
3531 	if (!envp[0])
3532 		return;
3533 	kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
3534 	kfree(envp[0]);
3535 }
3536 
3537 static void nvme_async_event_work(struct work_struct *work)
3538 {
3539 	struct nvme_ctrl *ctrl =
3540 		container_of(work, struct nvme_ctrl, async_event_work);
3541 
3542 	nvme_aen_uevent(ctrl);
3543 	ctrl->ops->submit_async_event(ctrl);
3544 }
3545 
3546 static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
3547 {
3548 
3549 	u32 csts;
3550 
3551 	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts))
3552 		return false;
3553 
3554 	if (csts == ~0)
3555 		return false;
3556 
3557 	return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP));
3558 }
3559 
3560 static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
3561 {
3562 	struct nvme_fw_slot_info_log *log;
3563 
3564 	log = kmalloc(sizeof(*log), GFP_KERNEL);
3565 	if (!log)
3566 		return;
3567 
3568 	if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, log,
3569 			sizeof(*log), 0))
3570 		dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
3571 	kfree(log);
3572 }
3573 
3574 static void nvme_fw_act_work(struct work_struct *work)
3575 {
3576 	struct nvme_ctrl *ctrl = container_of(work,
3577 				struct nvme_ctrl, fw_act_work);
3578 	unsigned long fw_act_timeout;
3579 
3580 	if (ctrl->mtfa)
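	/*
	 * MTFA is reported in 100 millisecond units; if the controller does
	 * not report it, fall back to the admin command timeout.
	 */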
3581 		fw_act_timeout = jiffies +
3582 				msecs_to_jiffies(ctrl->mtfa * 100);
3583 	else
3584 		fw_act_timeout = jiffies +
3585 				msecs_to_jiffies(admin_timeout * 1000);
3586 
3587 	nvme_stop_queues(ctrl);
3588 	while (nvme_ctrl_pp_status(ctrl)) {
3589 		if (time_after(jiffies, fw_act_timeout)) {
3590 			dev_warn(ctrl->device,
3591 				"Fw activation timeout, reset controller\n");
3592 			nvme_reset_ctrl(ctrl);
3593 			break;
3594 		}
3595 		msleep(100);
3596 	}
3597 
3598 	if (ctrl->state != NVME_CTRL_LIVE)
3599 		return;
3600 
3601 	nvme_start_queues(ctrl);
3602 	/* read FW slot information to clear the AER */
3603 	nvme_get_fw_slot_info(ctrl);
3604 }
3605 
3606 static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
3607 {
3608 	u32 aer_notice_type = (result & 0xff00) >> 8;
3609 
3610 	trace_nvme_async_event(ctrl, aer_notice_type);
3611 
3612 	switch (aer_notice_type) {
3613 	case NVME_AER_NOTICE_NS_CHANGED:
3614 		set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
3615 		nvme_queue_scan(ctrl);
3616 		break;
3617 	case NVME_AER_NOTICE_FW_ACT_STARTING:
3618 		queue_work(nvme_wq, &ctrl->fw_act_work);
3619 		break;
3620 #ifdef CONFIG_NVME_MULTIPATH
3621 	case NVME_AER_NOTICE_ANA:
3622 		if (!ctrl->ana_log_buf)
3623 			break;
3624 		queue_work(nvme_wq, &ctrl->ana_work);
3625 		break;
3626 #endif
3627 	default:
3628 		dev_warn(ctrl->device, "async event result %08x\n", result);
3629 	}
3630 }
3631 
3632 void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
3633 		volatile union nvme_result *res)
3634 {
3635 	u32 result = le32_to_cpu(res->u32);
3636 	u32 aer_type = result & 0x07;
3637 
3638 	if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
3639 		return;
3640 
3641 	switch (aer_type) {
3642 	case NVME_AER_NOTICE:
3643 		nvme_handle_aen_notice(ctrl, result);
3644 		break;
3645 	case NVME_AER_ERROR:
3646 	case NVME_AER_SMART:
3647 	case NVME_AER_CSS:
3648 	case NVME_AER_VS:
3649 		trace_nvme_async_event(ctrl, aer_type);
3650 		ctrl->aen_result = result;
3651 		break;
3652 	default:
3653 		break;
3654 	}
3655 	queue_work(nvme_wq, &ctrl->async_event_work);
3656 }
3657 EXPORT_SYMBOL_GPL(nvme_complete_async_event);
3658 
3659 void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
3660 {
3661 	nvme_mpath_stop(ctrl);
3662 	nvme_stop_keep_alive(ctrl);
3663 	flush_work(&ctrl->async_event_work);
3664 	cancel_work_sync(&ctrl->fw_act_work);
3665 }
3666 EXPORT_SYMBOL_GPL(nvme_stop_ctrl);
3667 
3668 void nvme_start_ctrl(struct nvme_ctrl *ctrl)
3669 {
3670 	if (ctrl->kato)
3671 		nvme_start_keep_alive(ctrl);
3672 
3673 	if (ctrl->queue_count > 1) {
3674 		nvme_queue_scan(ctrl);
3675 		nvme_enable_aen(ctrl);
3676 		queue_work(nvme_wq, &ctrl->async_event_work);
3677 		nvme_start_queues(ctrl);
3678 	}
3679 }
3680 EXPORT_SYMBOL_GPL(nvme_start_ctrl);
3681 
3682 void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
3683 {
3684 	cdev_device_del(&ctrl->cdev, ctrl->device);
3685 }
3686 EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);
3687 
3688 static void nvme_free_ctrl(struct device *dev)
3689 {
3690 	struct nvme_ctrl *ctrl =
3691 		container_of(dev, struct nvme_ctrl, ctrl_device);
3692 	struct nvme_subsystem *subsys = ctrl->subsys;
3693 
3694 	ida_simple_remove(&nvme_instance_ida, ctrl->instance);
3695 	kfree(ctrl->effects);
3696 	nvme_mpath_uninit(ctrl);
3697 	__free_page(ctrl->discard_page);
3698 
3699 	if (subsys) {
3700 		mutex_lock(&nvme_subsystems_lock);
3701 		list_del(&ctrl->subsys_entry);
3702 		sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device));
3703 		mutex_unlock(&nvme_subsystems_lock);
3704 	}
3705 
3706 	ctrl->ops->free_ctrl(ctrl);
3707 
3708 	if (subsys)
3709 		nvme_put_subsystem(subsys);
3710 }
3711 
3712 /*
3713  * Initialize an NVMe controller structure.  This needs to be called during
3714  * the earliest initialization so that we have the initialized structure
3715  * around during probing.
3716  */
3717 int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
3718 		const struct nvme_ctrl_ops *ops, unsigned long quirks)
3719 {
3720 	int ret;
3721 
3722 	ctrl->state = NVME_CTRL_NEW;
3723 	spin_lock_init(&ctrl->lock);
3724 	mutex_init(&ctrl->scan_lock);
3725 	INIT_LIST_HEAD(&ctrl->namespaces);
3726 	init_rwsem(&ctrl->namespaces_rwsem);
3727 	ctrl->dev = dev;
3728 	ctrl->ops = ops;
3729 	ctrl->quirks = quirks;
3730 	INIT_WORK(&ctrl->scan_work, nvme_scan_work);
3731 	INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
3732 	INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
3733 	INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);
3734 
3735 	INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
3736 	memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
3737 	ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
3738 
3739 	BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) >
3740 			PAGE_SIZE);
3741 	ctrl->discard_page = alloc_page(GFP_KERNEL);
3742 	if (!ctrl->discard_page) {
3743 		ret = -ENOMEM;
3744 		goto out;
3745 	}
3746 
3747 	ret = ida_simple_get(&nvme_instance_ida, 0, 0, GFP_KERNEL);
3748 	if (ret < 0)
3749 		goto out;
3750 	ctrl->instance = ret;
3751 
3752 	device_initialize(&ctrl->ctrl_device);
3753 	ctrl->device = &ctrl->ctrl_device;
3754 	ctrl->device->devt = MKDEV(MAJOR(nvme_chr_devt), ctrl->instance);
3755 	ctrl->device->class = nvme_class;
3756 	ctrl->device->parent = ctrl->dev;
3757 	ctrl->device->groups = nvme_dev_attr_groups;
3758 	ctrl->device->release = nvme_free_ctrl;
3759 	dev_set_drvdata(ctrl->device, ctrl);
3760 	ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance);
3761 	if (ret)
3762 		goto out_release_instance;
3763 
3764 	cdev_init(&ctrl->cdev, &nvme_dev_fops);
3765 	ctrl->cdev.owner = ops->module;
3766 	ret = cdev_device_add(&ctrl->cdev, ctrl->device);
3767 	if (ret)
3768 		goto out_free_name;
3769 
3770 	/*
3771 	 * Initialize latency tolerance controls.  The sysfs files won't
3772 	 * be visible to userspace unless the device actually supports APST.
3773 	 */
3774 	ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance;
3775 	dev_pm_qos_update_user_latency_tolerance(ctrl->device,
3776 		min(default_ps_max_latency_us, (unsigned long)S32_MAX));
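	/*
	 * Once exposed, userspace can override this default per controller
	 * through the PM QoS latency tolerance sysfs attribute, e.g.
	 * (device name shown for illustration only):
	 *   /sys/class/nvme/nvme0/power/pm_qos_latency_tolerance_us
	 */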
3777 
3778 	return 0;
3779 out_free_name:
3780 	kfree_const(ctrl->device->kobj.name);
3781 out_release_instance:
3782 	ida_simple_remove(&nvme_instance_ida, ctrl->instance);
3783 out:
3784 	if (ctrl->discard_page)
3785 		__free_page(ctrl->discard_page);
3786 	return ret;
3787 }
3788 EXPORT_SYMBOL_GPL(nvme_init_ctrl);
3789 
3790 /**
3791  * nvme_kill_queues - Ends all namespace queues
3792  * @ctrl: the dead controller whose namespace queues need to be ended
3793  *
3794  * Call this function when the driver determines it is unable to get the
3795  * controller in a state capable of servicing IO.
3796  */
3797 void nvme_kill_queues(struct nvme_ctrl *ctrl)
3798 {
3799 	struct nvme_ns *ns;
3800 
3801 	down_read(&ctrl->namespaces_rwsem);
3802 
3803 	/* Forcibly unquiesce queues to avoid blocking dispatch */
3804 	if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q))
3805 		blk_mq_unquiesce_queue(ctrl->admin_q);
3806 
3807 	list_for_each_entry(ns, &ctrl->namespaces, list)
3808 		nvme_set_queue_dying(ns);
3809 
3810 	up_read(&ctrl->namespaces_rwsem);
3811 }
3812 EXPORT_SYMBOL_GPL(nvme_kill_queues);
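/*
 * Illustrative only: a transport that has given up on a controller might use
 * this from its teardown path roughly as follows (my_remove_dead_ctrl is a
 * hypothetical helper, not part of this file):
 *
 *	static void my_remove_dead_ctrl(struct my_ctrl *mc)
 *	{
 *		nvme_kill_queues(&mc->ctrl);
 *		nvme_remove_namespaces(&mc->ctrl);
 *	}
 */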
3813 
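/*
 * The helpers below let a transport freeze, wait on, and unfreeze all
 * namespace request queues (typically bracketing an operation that must not
 * race with I/O, such as a reset that changes the queue count), and quiesce
 * or unquiesce dispatch without draining outstanding requests.
 */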
3814 void nvme_unfreeze(struct nvme_ctrl *ctrl)
3815 {
3816 	struct nvme_ns *ns;
3817 
3818 	down_read(&ctrl->namespaces_rwsem);
3819 	list_for_each_entry(ns, &ctrl->namespaces, list)
3820 		blk_mq_unfreeze_queue(ns->queue);
3821 	up_read(&ctrl->namespaces_rwsem);
3822 }
3823 EXPORT_SYMBOL_GPL(nvme_unfreeze);
3824 
3825 void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
3826 {
3827 	struct nvme_ns *ns;
3828 
3829 	down_read(&ctrl->namespaces_rwsem);
3830 	list_for_each_entry(ns, &ctrl->namespaces, list) {
3831 		timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
3832 		if (timeout <= 0)
3833 			break;
3834 	}
3835 	up_read(&ctrl->namespaces_rwsem);
3836 }
3837 EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);
3838 
3839 void nvme_wait_freeze(struct nvme_ctrl *ctrl)
3840 {
3841 	struct nvme_ns *ns;
3842 
3843 	down_read(&ctrl->namespaces_rwsem);
3844 	list_for_each_entry(ns, &ctrl->namespaces, list)
3845 		blk_mq_freeze_queue_wait(ns->queue);
3846 	up_read(&ctrl->namespaces_rwsem);
3847 }
3848 EXPORT_SYMBOL_GPL(nvme_wait_freeze);
3849 
3850 void nvme_start_freeze(struct nvme_ctrl *ctrl)
3851 {
3852 	struct nvme_ns *ns;
3853 
3854 	down_read(&ctrl->namespaces_rwsem);
3855 	list_for_each_entry(ns, &ctrl->namespaces, list)
3856 		blk_freeze_queue_start(ns->queue);
3857 	up_read(&ctrl->namespaces_rwsem);
3858 }
3859 EXPORT_SYMBOL_GPL(nvme_start_freeze);
3860 
3861 void nvme_stop_queues(struct nvme_ctrl *ctrl)
3862 {
3863 	struct nvme_ns *ns;
3864 
3865 	down_read(&ctrl->namespaces_rwsem);
3866 	list_for_each_entry(ns, &ctrl->namespaces, list)
3867 		blk_mq_quiesce_queue(ns->queue);
3868 	up_read(&ctrl->namespaces_rwsem);
3869 }
3870 EXPORT_SYMBOL_GPL(nvme_stop_queues);
3871 
3872 void nvme_start_queues(struct nvme_ctrl *ctrl)
3873 {
3874 	struct nvme_ns *ns;
3875 
3876 	down_read(&ctrl->namespaces_rwsem);
3877 	list_for_each_entry(ns, &ctrl->namespaces, list)
3878 		blk_mq_unquiesce_queue(ns->queue);
3879 	up_read(&ctrl->namespaces_rwsem);
3880 }
3881 EXPORT_SYMBOL_GPL(nvme_start_queues);
3882 
3883 /*
3884  * Check we didn't inadvertently grow the command structure sizes:
3885  */
3886 static inline void _nvme_check_size(void)
3887 {
3888 	BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64);
3889 	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
3890 	BUILD_BUG_ON(sizeof(struct nvme_identify) != 64);
3891 	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
3892 	BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64);
3893 	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
3894 	BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64);
3895 	BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64);
3896 	BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
3897 	BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64);
3898 	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
3899 	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
3900 	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
3901 	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
3902 	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
3903 	BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
3904 	BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64);
3905 }
3906 
3907 
3908 static int __init nvme_core_init(void)
3909 {
3910 	int result = -ENOMEM;
3911 
3912 	_nvme_check_size();
3913 
3914 	nvme_wq = alloc_workqueue("nvme-wq",
3915 			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
3916 	if (!nvme_wq)
3917 		goto out;
3918 
3919 	nvme_reset_wq = alloc_workqueue("nvme-reset-wq",
3920 			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
3921 	if (!nvme_reset_wq)
3922 		goto destroy_wq;
3923 
3924 	nvme_delete_wq = alloc_workqueue("nvme-delete-wq",
3925 			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
3926 	if (!nvme_delete_wq)
3927 		goto destroy_reset_wq;
3928 
3929 	result = alloc_chrdev_region(&nvme_chr_devt, 0, NVME_MINORS, "nvme");
3930 	if (result < 0)
3931 		goto destroy_delete_wq;
3932 
3933 	nvme_class = class_create(THIS_MODULE, "nvme");
3934 	if (IS_ERR(nvme_class)) {
3935 		result = PTR_ERR(nvme_class);
3936 		goto unregister_chrdev;
3937 	}
3938 
3939 	nvme_subsys_class = class_create(THIS_MODULE, "nvme-subsystem");
3940 	if (IS_ERR(nvme_subsys_class)) {
3941 		result = PTR_ERR(nvme_subsys_class);
3942 		goto destroy_class;
3943 	}
3944 	return 0;
3945 
3946 destroy_class:
3947 	class_destroy(nvme_class);
3948 unregister_chrdev:
3949 	unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
3950 destroy_delete_wq:
3951 	destroy_workqueue(nvme_delete_wq);
3952 destroy_reset_wq:
3953 	destroy_workqueue(nvme_reset_wq);
3954 destroy_wq:
3955 	destroy_workqueue(nvme_wq);
3956 out:
3957 	return result;
3958 }
3959 
3960 static void __exit nvme_core_exit(void)
3961 {
3962 	ida_destroy(&nvme_subsystems_ida);
3963 	class_destroy(nvme_subsys_class);
3964 	class_destroy(nvme_class);
3965 	unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
3966 	destroy_workqueue(nvme_delete_wq);
3967 	destroy_workqueue(nvme_reset_wq);
3968 	destroy_workqueue(nvme_wq);
3969 }
3970 
3971 MODULE_LICENSE("GPL");
3972 MODULE_VERSION("1.0");
3973 module_init(nvme_core_init);
3974 module_exit(nvme_core_exit);
3975