xref: /openbmc/linux/drivers/nvme/target/admin-cmd.c (revision 6c33a6f4)
// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/rculist.h>

#include <generated/utsrelease.h>
#include <asm/unaligned.h>
#include "nvmet.h"

u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
	u32 len = le16_to_cpu(cmd->get_log_page.numdu);

	len <<= 16;
	len += le16_to_cpu(cmd->get_log_page.numdl);
	/* NUMD is a 0's based value */
	len += 1;
	len *= sizeof(u32);

	return len;
}
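
/*
 * Illustrative sketch (not part of the driver): for a Get Log Page command
 * carrying NUMDU = 0x0000 and NUMDL = 0x03ff, the helper above computes
 *
 *	len = (0x0000 << 16) + 0x03ff;	-> 1023, a 0's based dword count
 *	len = (len + 1) * sizeof(u32);	-> 1024 dwords == 4096 bytes
 *
 * i.e. the byte count of the transfer the host asked for.
 */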

static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
{
	switch (cdw10 & 0xff) {
	case NVME_FEAT_HOST_ID:
		return sizeof(req->sq->ctrl->hostid);
	default:
		return 0;
	}
}
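
/*
 * A hedged sketch of what this means for Get Features (assuming
 * ctrl->hostid keeps its 16-byte uuid_t type): only the Host Identifier
 * feature transfers a data buffer; every other feature returns its value
 * in CQE dword 0:
 *
 *	nvmet_feat_data_len(req, NVME_FEAT_HOST_ID);	-> 16
 *	nvmet_feat_data_len(req, NVME_FEAT_KATO);	-> 0
 */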

u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
{
	return le64_to_cpu(cmd->get_log_page.lpo);
}

static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
}

static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	unsigned long flags;
	off_t offset = 0;
	u64 slot;
	u64 i;

	spin_lock_irqsave(&ctrl->error_lock, flags);
	slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;

	for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
		if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
				sizeof(struct nvme_error_slot)))
			break;

		if (slot == 0)
			slot = NVMET_ERROR_LOG_SLOTS - 1;
		else
			slot--;
		offset += sizeof(struct nvme_error_slot);
	}
	spin_unlock_irqrestore(&ctrl->error_lock, flags);
	nvmet_req_complete(req, 0);
}
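
/*
 * Traversal sketch for the loop above (NVMET_ERROR_LOG_SLOTS is 128 in
 * this target): the slots form a ring stamped by err_counter, and the log
 * is returned newest-first with wraparound.  For err_counter == 130:
 *
 *	slot = 130 % 128;	-> start at slot 2, the most recent entry
 *	copy order: 2, 1, 0, 127, 126, ... until all 128 slots are copied
 */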

static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	struct nvmet_ns *ns;
	u64 host_reads, host_writes, data_units_read, data_units_written;

	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
	if (!ns) {
		pr_err("Could not find namespace id: %d\n",
				le32_to_cpu(req->cmd->get_log_page.nsid));
		req->error_loc = offsetof(struct nvme_rw_command, nsid);
		return NVME_SC_INVALID_NS;
	}

	/* we don't have the right data for file-backed ns */
	if (!ns->bdev)
		goto out;

	host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
	data_units_read = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
		sectors[READ]), 1000);
	host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
	data_units_written = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
		sectors[WRITE]), 1000);

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
out:
	nvmet_put_namespace(ns);

	return NVME_SC_SUCCESS;
}
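
/*
 * Worked example for the conversion above: SMART data units count
 * thousands of 512-byte sectors, so a namespace that has read 2,000,000
 * sectors (~1 GB) reports
 *
 *	data_units_read = DIV_ROUND_UP(2000000, 1000);	-> 2000
 *
 * DIV_ROUND_UP makes a partially filled final unit still count as one.
 */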

static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u64 host_reads = 0, host_writes = 0;
	u64 data_units_read = 0, data_units_written = 0;
	struct nvmet_ns *ns;
	struct nvmet_ctrl *ctrl;

	ctrl = req->sq->ctrl;

	rcu_read_lock();
	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		/* we don't have the right data for file-backed ns */
		if (!ns->bdev)
			continue;
		host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
		data_units_read += DIV_ROUND_UP(
			part_stat_read(ns->bdev->bd_part, sectors[READ]), 1000);
		host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
		data_units_written += DIV_ROUND_UP(
			part_stat_read(ns->bdev->bd_part, sectors[WRITE]), 1000);
	}
	rcu_read_unlock();

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}

static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
{
	struct nvme_smart_log *log;
	u16 status = NVME_SC_INTERNAL;
	unsigned long flags;

	if (req->transfer_len != sizeof(*log))
		goto out;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
		status = nvmet_get_smart_log_all(req, log);
	else
		status = nvmet_get_smart_log_nsid(req, log);
	if (status)
		goto out_free_log;

	spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
	put_unaligned_le64(req->sq->ctrl->err_counter,
			&log->num_err_log_entries);
	spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
out_free_log:
	kfree(log);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
{
	u16 status = NVME_SC_INTERNAL;
	struct nvme_effects_log *log;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	log->acs[nvme_admin_get_log_page]	= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_identify]		= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_abort_cmd]		= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_set_features]	= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_get_features]	= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_async_event]	= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_keep_alive]		= cpu_to_le32(1 << 0);

	log->iocs[nvme_cmd_read]		= cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write]		= cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_flush]		= cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_dsm]			= cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write_zeroes]	= cpu_to_le32(1 << 0);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));

	kfree(log);
out:
	nvmet_req_complete(req, status);
}
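
/*
 * In the effects log built above, bit 0 of each entry is CSUPP ("command
 * supported"); the other effect bits (LBCC, NCC, NIC, CCC, ...) stay
 * clear, so a host reading log page 0x05 sees, e.g.,
 *
 *	log->iocs[nvme_cmd_read] == cpu_to_le32(1 << 0)
 *
 * for every opcode this target implements, and 0 for everything else.
 */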

static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = NVME_SC_INTERNAL;
	size_t len;

	if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
		goto out;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_changed_ns == U32_MAX)
		/* the list overflowed; it holds the single NSID 0xffffffff */
		len = sizeof(__le32);
	else
		len = ctrl->nr_changed_ns * sizeof(__le32);
	status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
	if (!status)
		status = nvmet_zero_sgl(req, len, req->transfer_len - len);
	ctrl->nr_changed_ns = 0;
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
	mutex_unlock(&ctrl->lock);
out:
	nvmet_req_complete(req, status);
}

static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
		struct nvme_ana_group_desc *desc)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	u32 count = 0;

	if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
		rcu_read_lock();
		list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link)
			if (ns->anagrpid == grpid)
				desc->nsids[count++] = cpu_to_le32(ns->nsid);
		rcu_read_unlock();
	}

	desc->grpid = cpu_to_le32(grpid);
	desc->nnsids = cpu_to_le32(count);
	desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	desc->state = req->port->ana_state[grpid];
	memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
	return sizeof(struct nvme_ana_group_desc) + count * sizeof(__le32);
}
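
/*
 * Size sketch for the value returned above: struct nvme_ana_group_desc is
 * a 32-byte fixed part followed by one __le32 per member NSID, so a group
 * with three namespaces contributes
 *
 *	32 + 3 * sizeof(__le32)	-> 44 bytes
 *
 * and with NVME_ANA_LOG_RGO set (groups only) count stays 0, leaving the
 * bare 32-byte descriptor.
 */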

static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
{
	struct nvme_ana_rsp_hdr hdr = { 0, };
	struct nvme_ana_group_desc *desc;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
	size_t len;
	u32 grpid;
	u16 ngrps = 0;
	u16 status;

	status = NVME_SC_INTERNAL;
	desc = kmalloc(sizeof(struct nvme_ana_group_desc) +
			NVMET_MAX_NAMESPACES * sizeof(__le32), GFP_KERNEL);
	if (!desc)
		goto out;

	down_read(&nvmet_ana_sem);
	for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (!nvmet_ana_group_enabled[grpid])
			continue;
		len = nvmet_format_ana_group(req, grpid, desc);
		status = nvmet_copy_to_sgl(req, offset, desc, len);
		if (status)
			break;
		offset += len;
		ngrps++;
	}
	for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (nvmet_ana_group_enabled[grpid])
			ngrps++;
	}

	hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	hdr.ngrps = cpu_to_le16(ngrps);
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
	up_read(&nvmet_ana_sem);

	kfree(desc);

	/* copy the header last once we know the number of groups */
	status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_page(struct nvmet_req *req)
{
	if (!nvmet_check_data_len(req, nvmet_get_log_page_len(req->cmd)))
		return;

	switch (req->cmd->get_log_page.lid) {
	case NVME_LOG_ERROR:
		return nvmet_execute_get_log_page_error(req);
	case NVME_LOG_SMART:
		return nvmet_execute_get_log_page_smart(req);
	case NVME_LOG_FW_SLOT:
		/*
		 * We only support a single firmware slot which is always
		 * active, so we can zero out the whole firmware slot log and
		 * still claim to fully implement this mandatory log page.
		 */
		return nvmet_execute_get_log_page_noop(req);
	case NVME_LOG_CHANGED_NS:
		return nvmet_execute_get_log_changed_ns(req);
	case NVME_LOG_CMD_EFFECTS:
		return nvmet_execute_get_log_cmd_effects_ns(req);
	case NVME_LOG_ANA:
		return nvmet_execute_get_log_page_ana(req);
	}
	pr_err("unhandled lid %d on qid %d\n",
	       req->cmd->get_log_page.lid, req->sq->qid);
	req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}

static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl *id;
	u16 status = 0;
	const char model[] = "Linux";

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* XXX: figure out how to assign real vendor IDs. */
	id->vid = 0;
	id->ssvid = 0;

	memset(id->sn, ' ', sizeof(id->sn));
	bin2hex(id->sn, &ctrl->subsys->serial,
		min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
	memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' ');
	memcpy_and_pad(id->fr, sizeof(id->fr),
		       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

	id->rab = 6;

	/*
	 * XXX: figure out how we can assign an IEEE OUI; until then the
	 * safest is to leave it as zeroes.
	 */

	/* we support multiple ports, multiple hosts and ANA: */
	id->cmic = (1 << 0) | (1 << 1) | (1 << 3);

	/* no limit on data transfer sizes for now */
	id->mdts = 0;
	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/* XXX: figure out what to do about RTD3R/RTD3 */
	id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
	id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT |
		NVME_CTRL_ATTR_TBKAS);

	id->oacs = 0;

	/*
	 * We don't really have a practical limit on the number of abort
	 * commands.  But we don't do anything useful for abort either, so
	 * no point in allowing more abort commands than the spec requires.
	 */
	id->acl = 3;

	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* first slot is read-only, only one slot supported */
	id->frmw = (1 << 0) | (1 << 1);
	id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
	id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
	id->npss = 0;

	/* We support keep-alive timeout in granularity of seconds */
	id->kas = cpu_to_le16(NVMET_KAS);

	/* (max << 4) | min: we only do 64-byte SQEs and 16-byte CQEs */
	id->sqes = (0x6 << 4) | 0x6;
	id->cqes = (0x4 << 4) | 0x4;

	/* no enforcement soft-limit for maxcmd - pick an arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
	id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
			NVME_CTRL_ONCS_WRITE_ZEROES);

	/* XXX: don't report vwc if the underlying device is write through */
	id->vwc = NVME_CTRL_VWC_PRESENT;

	/*
	 * We can't support atomic writes bigger than an LBA without support
	 * from the backend device.
	 */
	id->awun = 0;
	id->awupf = 0;

	id->sgls = cpu_to_le32(1 << 0);	/* we always support SGLs */
	if (ctrl->ops->has_keyed_sgls)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

	/* Max command capsule size is sqe + single page of in-capsule data */
	id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
				  req->port->inline_data_size) / 16);
	/* Max response capsule size is cqe */
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

	id->msdbd = ctrl->ops->msdbd;

	id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
	id->anatt = 10; /* random value */
	id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
	id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);

	/*
	 * Meh, we don't really support any power state.  Fake up the same
	 * values that qemu does.
	 */
	id->psd[0].max_power = cpu_to_le16(0x9c4);
	id->psd[0].entry_lat = cpu_to_le32(0x10);
	id->psd[0].exit_lat = cpu_to_le32(0x4);

	id->nwpc = 1 << 0; /* write protect and no write protect */

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}
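
/*
 * Worked example for the capsule sizing above (units of 16 bytes, per the
 * NVMe over Fabrics spec): with the 64-byte SQE and, say, a port
 * configured for 4096 bytes of in-capsule data,
 *
 *	ioccsz = (64 + 4096) / 16;	-> 260
 *	iorcsz = 16 / 16;		-> 1, the bare 16-byte CQE
 */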

static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
	struct nvmet_ns *ns;
	struct nvme_id_ns *id;
	u16 status = 0;

	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* return an all zeroed buffer if we can't find an active namespace */
	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
	if (!ns)
		goto done;

	/*
	 * nuse = ncap = nsze isn't always true, but we have no way to find
	 * that out from the underlying device.
	 */
	id->ncap = id->nsze = cpu_to_le64(ns->size >> ns->blksize_shift);
	switch (req->port->ana_state[ns->anagrpid]) {
	case NVME_ANA_INACCESSIBLE:
	case NVME_ANA_PERSISTENT_LOSS:
		break;
	default:
		id->nuse = id->nsze;
		break;
	}

	if (ns->bdev)
		nvmet_bdev_set_limits(ns->bdev, id);

	/*
	 * We just provide a single LBA format that matches what the
	 * underlying device reports.
	 */
	id->nlbaf = 0;
	id->flbas = 0;

	/*
	 * Our namespace might always be shared.  Not just with other
	 * controllers, but also with any other user of the block device.
	 */
	id->nmic = (1 << 0);
	id->anagrpid = cpu_to_le32(ns->anagrpid);

	memcpy(&id->nguid, &ns->nguid, sizeof(id->nguid));

	id->lbaf[0].ds = ns->blksize_shift;

	if (ns->readonly)
		id->nsattr |= (1 << 0);
	nvmet_put_namespace(ns);
done:
	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
	kfree(id);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_nslist(struct nvmet_req *req)
{
	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
	__le32 *list;
	u16 status = 0;
	int i = 0;

	list = kzalloc(buf_size, GFP_KERNEL);
	if (!list) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		if (ns->nsid <= min_nsid)
			continue;
		list[i++] = cpu_to_le32(ns->nsid);
		if (i == buf_size / sizeof(__le32))
			break;
	}
	rcu_read_unlock();

	status = nvmet_copy_to_sgl(req, 0, list, buf_size);

	kfree(list);
out:
	nvmet_req_complete(req, status);
}
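
/*
 * Usage sketch (the host's view, with hypothetical NSIDs): the Active
 * Namespace ID list is paginated by the NSID field of the Identify
 * command, each page holding up to 4096 / 4 == 1024 ascending entries:
 *
 *	Identify(CNS 0x02, nsid = 0)	-> NSIDs 1 .. n (n <= 1024)
 *	Identify(CNS 0x02, nsid = n)	-> the NSIDs greater than n
 *
 * A page that is not full is zero-padded, which ends the scan.
 */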

static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
				    void *id, off_t *off)
{
	struct nvme_ns_id_desc desc = {
		.nidt = type,
		.nidl = len,
	};
	u16 status;

	status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
	if (status)
		return status;
	*off += sizeof(desc);

	status = nvmet_copy_to_sgl(req, *off, id, len);
	if (status)
		return status;
	*off += len;

	return 0;
}
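
/*
 * Wire layout produced by the helper above: each descriptor is a 4-byte
 * header (nidt, nidl, two reserved bytes) followed by nidl bytes of
 * identifier.  For a UUID descriptor, for example:
 *
 *	offset 0: nidt = NVME_NIDT_UUID, nidl = 16
 *	offset 4: the 16 UUID bytes		-> 20 bytes in total
 */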

static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
	struct nvmet_ns *ns;
	u16 status = 0;
	off_t off = 0;

	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
	if (!ns) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	if (memchr_inv(&ns->uuid, 0, sizeof(ns->uuid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
						  NVME_NIDT_UUID_LEN,
						  &ns->uuid, &off);
		if (status)
			goto out_put_ns;
	}
	if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
						  NVME_NIDT_NGUID_LEN,
						  &ns->nguid, &off);
		if (status)
			goto out_put_ns;
	}

	if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
			off) != NVME_IDENTIFY_DATA_SIZE - off)
		status = NVME_SC_INTERNAL | NVME_SC_DNR;
out_put_ns:
	nvmet_put_namespace(ns);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify(struct nvmet_req *req)
{
	if (!nvmet_check_data_len(req, NVME_IDENTIFY_DATA_SIZE))
		return;

	switch (req->cmd->identify.cns) {
	case NVME_ID_CNS_NS:
		return nvmet_execute_identify_ns(req);
	case NVME_ID_CNS_CTRL:
		return nvmet_execute_identify_ctrl(req);
	case NVME_ID_CNS_NS_ACTIVE_LIST:
		return nvmet_execute_identify_nslist(req);
	case NVME_ID_CNS_NS_DESC_LIST:
		return nvmet_execute_identify_desclist(req);
	}

	pr_err("unhandled identify cns %d on qid %d\n",
	       req->cmd->identify.cns, req->sq->qid);
	req->error_loc = offsetof(struct nvme_identify, cns);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}

/*
 * A "minimum viable" abort implementation: the command is mandatory in the
 * spec, but we are not required to do any useful work.  We couldn't really
 * do a useful abort, so don't bother even waiting for the command to be
 * executed; return immediately, indicating that the command to abort
 * wasn't found.
 */
static void nvmet_execute_abort(struct nvmet_req *req)
{
	if (!nvmet_check_data_len(req, 0))
		return;
	nvmet_set_result(req, 1);	/* dword0 bit 0 set: command not aborted */
	nvmet_req_complete(req, 0);
}

static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
{
	u16 status;

	if (req->ns->file)
		status = nvmet_file_flush(req);
	else
		status = nvmet_bdev_flush(req);

	if (status)
		pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
	return status;
}

static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
	u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u16 status = NVME_SC_FEATURE_NOT_CHANGEABLE;

	req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid);
	if (unlikely(!req->ns)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return status;
	}

	mutex_lock(&subsys->lock);
	switch (write_protect) {
	case NVME_NS_WRITE_PROTECT:
		req->ns->readonly = true;
		status = nvmet_write_protect_flush_sync(req);
		if (status)
			req->ns->readonly = false;
		break;
	case NVME_NS_NO_WRITE_PROTECT:
		req->ns->readonly = false;
		status = 0;
		break;
	default:
		break;
	}

	if (!status)
		nvmet_ns_changed(subsys, req->ns->nsid);
	mutex_unlock(&subsys->lock);
	return status;
}

u16 nvmet_set_feat_kato(struct nvmet_req *req)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);

	nvmet_set_result(req, req->sq->ctrl->kato);

	return 0;
}
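
/*
 * Rounding example for the helper above: the host programs KATO in
 * milliseconds, while the target tracks it in seconds, rounding up so the
 * effective timeout is never shorter than requested:
 *
 *	DIV_ROUND_UP(15000, 1000);	-> 15 s for a 15000 ms KATO
 *	DIV_ROUND_UP(1, 1000);		-> 1 s, even for a 1 ms request
 */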

u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	if (val32 & ~mask) {
		req->error_loc = offsetof(struct nvme_common_command, cdw11);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
	nvmet_set_result(req, val32);

	return 0;
}

static void nvmet_execute_set_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 status = 0;

	if (!nvmet_check_data_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		status = nvmet_set_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
		break;
	case NVME_FEAT_HOST_ID:
		status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_set_feat_write_protect(req);
		break;
	default:
		req->error_loc = offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 result;

	req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid);
	if (!req->ns) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return NVME_SC_INVALID_NS | NVME_SC_DNR;
	}
	mutex_lock(&subsys->lock);
	if (req->ns->readonly)
		result = NVME_NS_WRITE_PROTECT;
	else
		result = NVME_NS_NO_WRITE_PROTECT;
	nvmet_set_result(req, result);
	mutex_unlock(&subsys->lock);

	return 0;
}

void nvmet_get_feat_kato(struct nvmet_req *req)
{
	nvmet_set_result(req, req->sq->ctrl->kato * 1000);
}

void nvmet_get_feat_async_event(struct nvmet_req *req)
{
	nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
}

static void nvmet_execute_get_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 status = 0;

	if (!nvmet_check_data_len(req, nvmet_feat_data_len(req, cdw10)))
		return;

	switch (cdw10 & 0xff) {
	/*
	 * These features are mandatory in the spec, but we don't
	 * have a useful way to implement them.  We'll eventually
	 * need to come up with some fake values for these.
	 */
#if 0
	case NVME_FEAT_ARBITRATION:
		break;
	case NVME_FEAT_POWER_MGMT:
		break;
	case NVME_FEAT_TEMP_THRESH:
		break;
	case NVME_FEAT_ERR_RECOVERY:
		break;
	case NVME_FEAT_IRQ_COALESCE:
		break;
	case NVME_FEAT_IRQ_CONFIG:
		break;
	case NVME_FEAT_WRITE_ATOMIC:
		break;
#endif
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_get_feat_async_event(req);
		break;
	case NVME_FEAT_VOLATILE_WC:
		nvmet_set_result(req, 1);
		break;
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		nvmet_get_feat_kato(req);
		break;
	case NVME_FEAT_HOST_ID:
		/* need 128-bit host identifier flag */
		if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
			req->error_loc =
				offsetof(struct nvme_common_command, cdw11);
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}

		status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
				sizeof(req->sq->ctrl->hostid));
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_get_feat_write_protect(req);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

void nvmet_execute_async_event(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	if (!nvmet_check_data_len(req, 0))
		return;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
		return;
	}
	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}

void nvmet_execute_keep_alive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	if (!nvmet_check_data_len(req, 0))
		return;

	pr_debug("ctrl %d update keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
	nvmet_req_complete(req, 0);
}

u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	if (nvme_is_fabrics(cmd))
		return nvmet_parse_fabrics_cmd(req);
	if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
		return nvmet_parse_discovery_cmd(req);

	ret = nvmet_check_ctrl_status(req, cmd);
	if (unlikely(ret))
		return ret;

	switch (cmd->common.opcode) {
	case nvme_admin_get_log_page:
		req->execute = nvmet_execute_get_log_page;
		return 0;
	case nvme_admin_identify:
		req->execute = nvmet_execute_identify;
		return 0;
	case nvme_admin_abort_cmd:
		req->execute = nvmet_execute_abort;
		return 0;
	case nvme_admin_set_features:
		req->execute = nvmet_execute_set_features;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_get_features;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		return 0;
	}

	pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
	       req->sq->qid);
	req->error_loc = offsetof(struct nvme_common_command, opcode);
	return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}
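
/*
 * Flow sketch (a rough description of how the nvmet core wires this up,
 * not new behavior defined here): the transport hands each admin capsule
 * to nvmet_req_init(), which calls nvmet_parse_admin_cmd() to validate the
 * opcode and install req->execute; after any data transfer the transport
 * invokes req->execute(req), e.g. nvmet_execute_identify(), and the
 * handler finishes by calling nvmet_req_complete().
 */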