xref: /openbmc/linux/drivers/nvme/target/admin-cmd.c (revision ab5d0b38c0475d6ff59f1a6ccf7c668b9ec2e0a4)
// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/part_stat.h>

#include <generated/utsrelease.h>
#include <asm/unaligned.h>
#include "nvmet.h"

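/*
 * Extract the transfer length of a Get Log Page command from the NUMDU and
 * NUMDL fields.  NUMD is a 0's based dword count, so one is added before
 * converting to bytes.  Illustrative example: NUMDU = 0x0001 and
 * NUMDL = 0x0000 give NUMD = 0x10000, i.e. 0x10001 dwords = 0x40004 bytes.
 */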
u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
	u32 len = le16_to_cpu(cmd->get_log_page.numdu);

	len <<= 16;
	len += le16_to_cpu(cmd->get_log_page.numdl);
	/* NUMD is a 0's based value */
	len += 1;
	len *= sizeof(u32);

	return len;
}

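/*
 * Data transfer length implied by a Get Features command: only the Host
 * Identifier feature carries a payload here (the 16-byte hostid); all other
 * features are returned in the completion dword and transfer no data.
 */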
static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
{
	switch (cdw10 & 0xff) {
	case NVME_FEAT_HOST_ID:
		return sizeof(req->sq->ctrl->hostid);
	default:
		return 0;
	}
}

u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
{
	return le64_to_cpu(cmd->get_log_page.lpo);
}

static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
}

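/*
 * Error Information log page.  The slots array is a ring buffer indexed by
 * err_counter; the walk below starts at the most recent entry and steps
 * backwards with wrap-around, so the newest errors come first.  Illustrative
 * example: with err_counter = 5 and 128 slots, the order is 5, 4, ..., 0,
 * 127, 126, ...
 */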
static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	unsigned long flags;
	off_t offset = 0;
	u64 slot;
	u64 i;

	spin_lock_irqsave(&ctrl->error_lock, flags);
	slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;

	for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
		if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
				sizeof(struct nvme_error_slot)))
			break;

		if (slot == 0)
			slot = NVMET_ERROR_LOG_SLOTS - 1;
		else
			slot--;
		offset += sizeof(struct nvme_error_slot);
	}
	spin_unlock_irqrestore(&ctrl->error_lock, flags);
	nvmet_req_complete(req, 0);
}

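/*
 * Per-namespace SMART / Health data.  Per the NVMe spec, Data Units
 * Read/Written are reported in thousands of 512-byte units, rounded up,
 * which is what DIV_ROUND_UP(sectors, 1000) computes from the block layer's
 * 512-byte sector counters.  Illustrative example: 1500 sectors read
 * reports as 2 data units.
 */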
static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u64 host_reads, host_writes, data_units_read, data_units_written;
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		return status;

	/* we don't have the right data for file-backed namespaces */
	if (!req->ns->bdev)
		return NVME_SC_SUCCESS;

	host_reads = part_stat_read(req->ns->bdev, ios[READ]);
	data_units_read =
		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
	host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
	data_units_written =
		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}

static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u64 host_reads = 0, host_writes = 0;
	u64 data_units_read = 0, data_units_written = 0;
	struct nvmet_ns *ns;
	struct nvmet_ctrl *ctrl;
	unsigned long idx;

	ctrl = req->sq->ctrl;
	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		/* we don't have the right data for file-backed namespaces */
		if (!ns->bdev)
			continue;
		host_reads += part_stat_read(ns->bdev, ios[READ]);
		data_units_read += DIV_ROUND_UP(
			part_stat_read(ns->bdev, sectors[READ]), 1000);
		host_writes += part_stat_read(ns->bdev, ios[WRITE]);
		data_units_written += DIV_ROUND_UP(
			part_stat_read(ns->bdev, sectors[WRITE]), 1000);
	}

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}

static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
{
	struct nvme_smart_log *log;
	u16 status = NVME_SC_INTERNAL;
	unsigned long flags;

	if (req->transfer_len != sizeof(*log))
		goto out;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
		status = nvmet_get_smart_log_all(req, log);
	else
		status = nvmet_get_smart_log_nsid(req, log);
	if (status)
		goto out_free_log;

	spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
	put_unaligned_le64(req->sq->ctrl->err_counter,
			&log->num_err_log_entries);
	spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
out_free_log:
	kfree(log);
out:
	nvmet_req_complete(req, status);
}

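/*
 * Commands Supported and Effects log.  Bit 0 (CSUPP) of an entry merely
 * marks the opcode as supported; no side effects are reported for any
 * command, hence every supported opcode gets cpu_to_le32(1 << 0) below.
 */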
static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log)
{
	log->acs[nvme_admin_get_log_page]	= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_identify]		= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_abort_cmd]		= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_set_features]	= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_get_features]	= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_async_event]	= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_keep_alive]		= cpu_to_le32(1 << 0);

	log->iocs[nvme_cmd_read]		= cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write]		= cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_flush]		= cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_dsm]			= cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write_zeroes]	= cpu_to_le32(1 << 0);
}

static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
{
	struct nvme_effects_log *log;
	u16 status = NVME_SC_SUCCESS;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	switch (req->cmd->get_log_page.csi) {
	case NVME_CSI_NVM:
		nvmet_get_cmd_effects_nvm(log);
		break;
	default:
		status = NVME_SC_INVALID_LOG_PAGE;
		goto free;
	}

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
free:
	kfree(log);
out:
	nvmet_req_complete(req, status);
}

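/*
 * Changed Namespace List log.  The list holds up to
 * NVME_MAX_CHANGED_NAMESPACES entries; nr_changed_ns == U32_MAX serves as
 * an overflow marker, in which case a single entry (NVME_NSID_ALL, as set
 * by the logging side) is returned, matching the spec's overflow behavior.
 * Reading the page clears the list and the pending Namespace Attribute AEN.
 */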
static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = NVME_SC_INTERNAL;
	size_t len;

	if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
		goto out;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_changed_ns == U32_MAX)
		len = sizeof(__le32);
	else
		len = ctrl->nr_changed_ns * sizeof(__le32);
	status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
	if (!status)
		status = nvmet_zero_sgl(req, len, req->transfer_len - len);
	ctrl->nr_changed_ns = 0;
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
	mutex_unlock(&ctrl->lock);
out:
	nvmet_req_complete(req, status);
}

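/*
 * Build one ANA group descriptor.  The descriptor is a 32-byte header
 * followed by one __le32 NSID per member namespace, so the returned length
 * is 32 + 4 * count (e.g. 44 bytes for a group with three namespaces).
 * With the RGO bit set in LSP the NSID list is omitted and count stays 0.
 */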
static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
		struct nvme_ana_group_desc *desc)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	u32 count = 0;

	if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
		xa_for_each(&ctrl->subsys->namespaces, idx, ns)
			if (ns->anagrpid == grpid)
				desc->nsids[count++] = cpu_to_le32(ns->nsid);
	}

	desc->grpid = cpu_to_le32(grpid);
	desc->nnsids = cpu_to_le32(count);
	desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	desc->state = req->port->ana_state[grpid];
	memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
	return sizeof(struct nvme_ana_group_desc) + count * sizeof(__le32);
}

static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
{
	struct nvme_ana_rsp_hdr hdr = { 0, };
	struct nvme_ana_group_desc *desc;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
	size_t len;
	u32 grpid;
	u16 ngrps = 0;
	u16 status;

	status = NVME_SC_INTERNAL;
	desc = kmalloc(sizeof(struct nvme_ana_group_desc) +
			NVMET_MAX_NAMESPACES * sizeof(__le32), GFP_KERNEL);
	if (!desc)
		goto out;

	down_read(&nvmet_ana_sem);
	for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (!nvmet_ana_group_enabled[grpid])
			continue;
		len = nvmet_format_ana_group(req, grpid, desc);
		status = nvmet_copy_to_sgl(req, offset, desc, len);
		if (status)
			break;
		offset += len;
		ngrps++;
	}
	for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (nvmet_ana_group_enabled[grpid])
			ngrps++;
	}

	hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	hdr.ngrps = cpu_to_le16(ngrps);
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
	up_read(&nvmet_ana_sem);

	kfree(desc);

	/* copy the header last once we know the number of groups */
	status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_page(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd)))
		return;

	switch (req->cmd->get_log_page.lid) {
	case NVME_LOG_ERROR:
		return nvmet_execute_get_log_page_error(req);
	case NVME_LOG_SMART:
		return nvmet_execute_get_log_page_smart(req);
	case NVME_LOG_FW_SLOT:
		/*
		 * We only support a single firmware slot which is always
		 * active, so we can zero out the whole firmware slot log and
		 * still claim to fully implement this mandatory log page.
		 */
		return nvmet_execute_get_log_page_noop(req);
	case NVME_LOG_CHANGED_NS:
		return nvmet_execute_get_log_changed_ns(req);
	case NVME_LOG_CMD_EFFECTS:
		return nvmet_execute_get_log_cmd_effects_ns(req);
	case NVME_LOG_ANA:
		return nvmet_execute_get_log_page_ana(req);
	}
	pr_debug("unhandled lid %d on qid %d\n",
	       req->cmd->get_log_page.lid, req->sq->qid);
	req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}

static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_subsys *subsys = ctrl->subsys;
	struct nvme_id_ctrl *id;
	u32 cmd_capsule_size;
	u16 status = 0;

	if (!subsys->subsys_discovered) {
		mutex_lock(&subsys->lock);
		subsys->subsys_discovered = true;
		mutex_unlock(&subsys->lock);
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* XXX: figure out how to assign real vendor IDs. */
	id->vid = 0;
	id->ssvid = 0;

	memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE);
	memcpy_and_pad(id->mn, sizeof(id->mn), subsys->model_number,
		       strlen(subsys->model_number), ' ');
	memcpy_and_pad(id->fr, sizeof(id->fr),
		       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

	id->rab = 6;

	/*
	 * XXX: figure out how we can assign an IEEE OUI, but until then
	 * the safest is to leave it as zeroes.
	 */

	/* we support multiple ports, multiple hosts and ANA: */
	id->cmic = (1 << 0) | (1 << 1) | (1 << 3);

	/* Limit MDTS according to transport capability */
	if (ctrl->ops->get_mdts)
		id->mdts = ctrl->ops->get_mdts(ctrl);
	else
		id->mdts = 0;

	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/* XXX: figure out what to do about RTD3R/RTD3 */
	id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
	id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT |
		NVME_CTRL_ATTR_TBKAS);

	id->oacs = 0;

	/*
	 * We don't really have a practical limit on the number of abort
	 * commands.  But we don't do anything useful for abort either, so
	 * no point in allowing more abort commands than the spec requires.
	 */
	id->acl = 3;

	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* first slot is read-only, only one slot supported */
	id->frmw = (1 << 0) | (1 << 1);
	id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
	id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
	id->npss = 0;

	/* We support keep-alive timeout in granularity of seconds */
	id->kas = cpu_to_le16(NVMET_KAS);

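	/*
	 * SQES/CQES encode the maximum and minimum queue entry sizes as
	 * (max << 4) | min, each a power of two.  0x66 thus means SQEs are
	 * exactly 2^6 = 64 bytes and 0x44 means CQEs are 2^4 = 16 bytes,
	 * matching struct nvme_command and struct nvme_completion.
	 */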
	id->sqes = (0x6 << 4) | 0x6;
	id->cqes = (0x4 << 4) | 0x4;

	/* no enforcement soft-limit for maxcmd - pick an arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
	id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
			NVME_CTRL_ONCS_WRITE_ZEROES);

	/* XXX: don't report vwc if the underlying device is write through */
	id->vwc = NVME_CTRL_VWC_PRESENT;

	/*
	 * We can't support atomic writes bigger than an LBA without support
	 * from the backend device.
	 */
	id->awun = 0;
	id->awupf = 0;

	id->sgls = cpu_to_le32(1 << 0);	/* we always support SGLs */
	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

	/*
	 * Max command capsule size is sqe + in-capsule data size.
	 * Disable in-capsule data for Metadata capable controllers.
	 */
	cmd_capsule_size = sizeof(struct nvme_command);
	if (!ctrl->pi_support)
		cmd_capsule_size += req->port->inline_data_size;
	id->ioccsz = cpu_to_le32(cmd_capsule_size / 16);

	/* Max response capsule size is cqe */
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

	id->msdbd = ctrl->ops->msdbd;

	id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
	id->anatt = 10; /* random value */
	id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
	id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);

	/*
	 * Meh, we don't really support any power state.  Fake up the same
	 * values that qemu does.
	 */
	id->psd[0].max_power = cpu_to_le16(0x9c4);
	id->psd[0].entry_lat = cpu_to_le32(0x10);
	id->psd[0].exit_lat = cpu_to_le32(0x4);

	id->nwpc = 1 << 0; /* write protect and no write protect */

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
	struct nvme_id_ns *id;
	u16 status;

	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* return an all-zeroed buffer if we can't find an active namespace */
	status = nvmet_req_find_ns(req);
	if (status) {
		status = 0;
		goto done;
	}

	nvmet_ns_revalidate(req->ns);

	/*
	 * nuse = ncap = nsze isn't always true, but we have no way to find
	 * that out from the underlying device.
	 */
	id->ncap = id->nsze =
		cpu_to_le64(req->ns->size >> req->ns->blksize_shift);
	switch (req->port->ana_state[req->ns->anagrpid]) {
	case NVME_ANA_INACCESSIBLE:
	case NVME_ANA_PERSISTENT_LOSS:
		break;
	default:
		id->nuse = id->nsze;
		break;
	}

	if (req->ns->bdev)
		nvmet_bdev_set_limits(req->ns->bdev, id);

	/*
	 * We just provide a single LBA format that matches what the
	 * underlying device reports.
	 */
	id->nlbaf = 0;
	id->flbas = 0;

	/*
	 * Our namespace might always be shared.  Not just with other
	 * controllers, but also with any other user of the block device.
	 */
	id->nmic = (1 << 0);
	id->anagrpid = cpu_to_le32(req->ns->anagrpid);

	memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));

	id->lbaf[0].ds = req->ns->blksize_shift;

	if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
		id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
			  NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
			  NVME_NS_DPC_PI_TYPE3;
		id->mc = NVME_MC_EXTENDED_LBA;
		id->dps = req->ns->pi_type;
		id->flbas = NVME_NS_FLBAS_META_EXT;
		id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size);
	}

	if (req->ns->readonly)
		id->nsattr |= (1 << 0);
done:
	if (!status)
		status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

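/*
 * Active Namespace ID list (CNS 02h).  Per the spec the returned NSIDs must
 * be greater than the NSID in CDW1 and in increasing order; the xarray walk
 * below provides the ordering, and the 4096-byte buffer caps the list at
 * 1024 entries.
 */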
static void nvmet_execute_identify_nslist(struct nvmet_req *req)
{
	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
	__le32 *list;
	u16 status = 0;
	int i = 0;

	list = kzalloc(buf_size, GFP_KERNEL);
	if (!list) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		if (ns->nsid <= min_nsid)
			continue;
		list[i++] = cpu_to_le32(ns->nsid);
		if (i == buf_size / sizeof(__le32))
			break;
	}

	status = nvmet_copy_to_sgl(req, 0, list, buf_size);

	kfree(list);
out:
	nvmet_req_complete(req, status);
}

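/*
 * Emit one Namespace Identification Descriptor: a 4-byte header (type,
 * length, two reserved bytes) followed by the identifier itself, e.g.
 * 4 + 16 = 20 bytes for a UUID.  *off is advanced past the descriptor so
 * callers can chain several of them.
 */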
static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
				    void *id, off_t *off)
{
	struct nvme_ns_id_desc desc = {
		.nidt = type,
		.nidl = len,
	};
	u16 status;

	status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
	if (status)
		return status;
	*off += sizeof(desc);

	status = nvmet_copy_to_sgl(req, *off, id, len);
	if (status)
		return status;
	*off += len;

	return 0;
}

static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
	off_t off = 0;
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		goto out;

	if (memchr_inv(&req->ns->uuid, 0, sizeof(req->ns->uuid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
						  NVME_NIDT_UUID_LEN,
						  &req->ns->uuid, &off);
		if (status)
			goto out;
	}
	if (memchr_inv(req->ns->nguid, 0, sizeof(req->ns->nguid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
						  NVME_NIDT_NGUID_LEN,
						  &req->ns->nguid, &off);
		if (status)
			goto out;
	}

	status = nvmet_copy_ns_identifier(req, NVME_NIDT_CSI,
					  NVME_NIDT_CSI_LEN,
					  &req->ns->csi, &off);
	if (status)
		goto out;

	if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
			off) != NVME_IDENTIFY_DATA_SIZE - off)
		status = NVME_SC_INTERNAL | NVME_SC_DNR;

out:
	nvmet_req_complete(req, status);
}

static bool nvmet_handle_identify_desclist(struct nvmet_req *req)
{
	switch (req->cmd->identify.csi) {
	case NVME_CSI_NVM:
		nvmet_execute_identify_desclist(req);
		return true;
	default:
		return false;
	}
}

static void nvmet_execute_identify(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
		return;

	switch (req->cmd->identify.cns) {
	case NVME_ID_CNS_NS:
		switch (req->cmd->identify.csi) {
		case NVME_CSI_NVM:
			return nvmet_execute_identify_ns(req);
		default:
			break;
		}
		break;
	case NVME_ID_CNS_CTRL:
		switch (req->cmd->identify.csi) {
		case NVME_CSI_NVM:
			return nvmet_execute_identify_ctrl(req);
		}
		break;
	case NVME_ID_CNS_NS_ACTIVE_LIST:
		switch (req->cmd->identify.csi) {
		case NVME_CSI_NVM:
			return nvmet_execute_identify_nslist(req);
		default:
			break;
		}
		break;
	case NVME_ID_CNS_NS_DESC_LIST:
		if (nvmet_handle_identify_desclist(req))
			return;
		break;
	}

	nvmet_req_cns_error_complete(req);
}

/*
 * A "minimum viable" abort implementation: the command is mandatory in the
 * spec, but we are not required to do any useful work.  We couldn't really
 * do a useful abort, so we don't even bother waiting for the command to be
 * executed and immediately return, indicating that the command to abort
 * was not found.
 */
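/*
 * Per the spec, completion dword 0 bit 0 set to 1 means "command not
 * aborted", which is exactly what nvmet_set_result(req, 1) reports below.
 */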
static void nvmet_execute_abort(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, 0))
		return;
	nvmet_set_result(req, 1);
	nvmet_req_complete(req, 0);
}

static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
{
	u16 status;

	if (req->ns->file)
		status = nvmet_file_flush(req);
	else
		status = nvmet_bdev_flush(req);

	if (status)
		pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
	return status;
}

static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
	u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		return status;

	mutex_lock(&subsys->lock);
	switch (write_protect) {
	case NVME_NS_WRITE_PROTECT:
		req->ns->readonly = true;
		status = nvmet_write_protect_flush_sync(req);
		if (status)
			req->ns->readonly = false;
		break;
	case NVME_NS_NO_WRITE_PROTECT:
		req->ns->readonly = false;
		status = 0;
		break;
	default:
		break;
	}

	if (!status)
		nvmet_ns_changed(subsys, req->ns->nsid);
	mutex_unlock(&subsys->lock);
	return status;
}

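/*
 * KATO arrives in CDW11 in milliseconds but is stored in seconds, rounded
 * up (e.g. 2500 ms becomes 3 s).  The keep-alive timer is restarted so the
 * new value takes effect immediately, and the granted value is returned in
 * the completion.
 */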
u16 nvmet_set_feat_kato(struct nvmet_req *req)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	nvmet_stop_keep_alive_timer(req->sq->ctrl);
	req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
	nvmet_start_keep_alive_timer(req->sq->ctrl);

	nvmet_set_result(req, req->sq->ctrl->kato);

	return 0;
}

u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	if (val32 & ~mask) {
		req->error_loc = offsetof(struct nvme_common_command, cdw11);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
	nvmet_set_result(req, val32);

	return 0;
}

void nvmet_execute_set_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
	u16 status = 0;
	u16 nsqr;
	u16 ncqr;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_NUM_QUEUES:
		ncqr = (cdw11 >> 16) & 0xffff;
		nsqr = cdw11 & 0xffff;
		if (ncqr == 0xffff || nsqr == 0xffff) {
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}
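		/*
		 * NSQA and NCQA in the completion are 0's based values,
		 * hence max_qid - 1 in both halves of the result dword.
		 */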
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		status = nvmet_set_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
		break;
	case NVME_FEAT_HOST_ID:
		status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_set_feat_write_protect(req);
		break;
	default:
		req->error_loc = offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u32 result;

	result = nvmet_req_find_ns(req);
	if (result)
		return result;

	mutex_lock(&subsys->lock);
	if (req->ns->readonly)
		result = NVME_NS_WRITE_PROTECT;
	else
		result = NVME_NS_NO_WRITE_PROTECT;
	nvmet_set_result(req, result);
	mutex_unlock(&subsys->lock);

	return 0;
}

void nvmet_get_feat_kato(struct nvmet_req *req)
{
	nvmet_set_result(req, req->sq->ctrl->kato * 1000);
}

void nvmet_get_feat_async_event(struct nvmet_req *req)
{
	nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
}

void nvmet_execute_get_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, nvmet_feat_data_len(req, cdw10)))
		return;

	switch (cdw10 & 0xff) {
	/*
	 * These features are mandatory in the spec, but we don't
	 * have a useful way to implement them.  We'll eventually
	 * need to come up with some fake values for these.
	 */
#if 0
	case NVME_FEAT_ARBITRATION:
		break;
	case NVME_FEAT_POWER_MGMT:
		break;
	case NVME_FEAT_TEMP_THRESH:
		break;
	case NVME_FEAT_ERR_RECOVERY:
		break;
	case NVME_FEAT_IRQ_COALESCE:
		break;
	case NVME_FEAT_IRQ_CONFIG:
		break;
	case NVME_FEAT_WRITE_ATOMIC:
		break;
#endif
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_get_feat_async_event(req);
		break;
	case NVME_FEAT_VOLATILE_WC:
		nvmet_set_result(req, 1);
		break;
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid-1) | ((subsys->max_qid-1) << 16));
		break;
	case NVME_FEAT_KATO:
		nvmet_get_feat_kato(req);
		break;
	case NVME_FEAT_HOST_ID:
		/* need 128-bit host identifier flag */
		if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
			req->error_loc =
				offsetof(struct nvme_common_command, cdw11);
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}

		status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
				sizeof(req->sq->ctrl->hostid));
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_get_feat_write_protect(req);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

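/*
 * Async Event Request: the request is parked (no immediate completion)
 * until an event fires.  At most NVMET_ASYNC_EVENTS commands may be
 * outstanding; beyond that the command fails with ASYNC_LIMIT, as the spec
 * requires.
 */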
void nvmet_execute_async_event(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
		return;
	}
	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}

void nvmet_execute_keep_alive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	if (!ctrl->kato) {
		status = NVME_SC_KA_TIMEOUT_INVALID;
		goto out;
	}

	pr_debug("ctrl %d update keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);
	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
out:
	nvmet_req_complete(req, status);
}

u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	if (nvme_is_fabrics(cmd))
		return nvmet_parse_fabrics_cmd(req);
	if (nvmet_req_subsys(req)->type == NVME_NQN_DISC)
		return nvmet_parse_discovery_cmd(req);

	ret = nvmet_check_ctrl_status(req);
	if (unlikely(ret))
		return ret;

	if (nvmet_req_passthru_ctrl(req))
		return nvmet_parse_passthru_admin_cmd(req);

	switch (cmd->common.opcode) {
	case nvme_admin_get_log_page:
		req->execute = nvmet_execute_get_log_page;
		return 0;
	case nvme_admin_identify:
		req->execute = nvmet_execute_identify;
		return 0;
	case nvme_admin_abort_cmd:
		req->execute = nvmet_execute_abort;
		return 0;
	case nvme_admin_set_features:
		req->execute = nvmet_execute_set_features;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_get_features;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		return 0;
	default:
		return nvmet_report_invalid_opcode(req);
	}
}