xref: /openbmc/linux/drivers/nvme/target/admin-cmd.c (revision d2574c33)
// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/rculist.h>

#include <generated/utsrelease.h>
#include <asm/unaligned.h>
#include "nvmet.h"

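/*
 * Derive the transfer length of a Get Log Page command from the NUMDU
 * (upper 16 bits) and NUMDL (lower 16 bits) fields.  NUMD is a 0's based
 * dword count, so e.g. numdu == 0 and numdl == 0x3ff describes 0x400
 * dwords, i.e. a 4096 byte log page.
 */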
u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
	u32 len = le16_to_cpu(cmd->get_log_page.numdu);

	len <<= 16;
	len += le16_to_cpu(cmd->get_log_page.numdl);
	/* NUMD is a 0's based value */
	len += 1;
	len *= sizeof(u32);

	return len;
}

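/* Zero-fill the whole payload for log pages we implement as empty. */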
static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len));
}

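/*
 * Return the error log, starting with the most recently used slot and
 * walking backwards through the circular slot array under error_lock.
 */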
static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = NVME_SC_SUCCESS;
	unsigned long flags;
	off_t offset = 0;
	u64 slot;
	u64 i;

	spin_lock_irqsave(&ctrl->error_lock, flags);
	slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;

	for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
		status = nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
				sizeof(struct nvme_error_slot));
		if (status)
			break;

		if (slot == 0)
			slot = NVMET_ERROR_LOG_SLOTS - 1;
		else
			slot--;
		offset += sizeof(struct nvme_error_slot);
	}
	spin_unlock_irqrestore(&ctrl->error_lock, flags);
	nvmet_req_complete(req, status);
}

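/*
 * Fill in the per-namespace I/O counters of the SMART log from the block
 * layer partition statistics of the backing device.
 */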
static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	struct nvmet_ns *ns;
	u64 host_reads, host_writes, data_units_read, data_units_written;

	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
	if (!ns) {
		pr_err("Could not find namespace id: %d\n",
				le32_to_cpu(req->cmd->get_log_page.nsid));
		req->error_loc = offsetof(struct nvme_rw_command, nsid);
		return NVME_SC_INVALID_NS;
	}

	/* we don't have the right data for file backed ns */
	if (!ns->bdev)
		goto out;

	host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
	data_units_read = part_stat_read(ns->bdev->bd_part, sectors[READ]);
	host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
	data_units_written = part_stat_read(ns->bdev->bd_part, sectors[WRITE]);

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
out:
	nvmet_put_namespace(ns);

	return NVME_SC_SUCCESS;
}

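/*
 * Same as above, but for the NVME_NSID_ALL case: sum the counters over
 * all block device backed namespaces of the subsystem under RCU.
 */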
static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u64 host_reads = 0, host_writes = 0;
	u64 data_units_read = 0, data_units_written = 0;
	struct nvmet_ns *ns;
	struct nvmet_ctrl *ctrl;

	ctrl = req->sq->ctrl;

	rcu_read_lock();
	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		/* we don't have the right data for file backed ns */
		if (!ns->bdev)
			continue;
		host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
		data_units_read +=
			part_stat_read(ns->bdev->bd_part, sectors[READ]);
		host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
		data_units_written +=
			part_stat_read(ns->bdev->bd_part, sectors[WRITE]);
	}
	rcu_read_unlock();

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}

static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
{
	struct nvme_smart_log *log;
	u16 status = NVME_SC_INTERNAL;
	unsigned long flags;

	if (req->data_len != sizeof(*log))
		goto out;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
		status = nvmet_get_smart_log_all(req, log);
	else
		status = nvmet_get_smart_log_nsid(req, log);
	if (status)
		goto out_free_log;

	spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
	put_unaligned_le64(req->sq->ctrl->err_counter,
			&log->num_err_log_entries);
	spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
out_free_log:
	kfree(log);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
{
	u16 status = NVME_SC_INTERNAL;
	struct nvme_effects_log *log;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	log->acs[nvme_admin_get_log_page]	= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_identify]		= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_abort_cmd]		= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_set_features]	= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_get_features]	= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_async_event]	= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_keep_alive]		= cpu_to_le32(1 << 0);

	log->iocs[nvme_cmd_read]		= cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write]		= cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_flush]		= cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_dsm]			= cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write_zeroes]	= cpu_to_le32(1 << 0);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));

	kfree(log);
out:
	nvmet_req_complete(req, status);
}

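/*
 * Report the Changed Namespace List and then clear it, together with the
 * namespace attribute change AEN bit, so that new changes rearm the
 * event.  nr_changed_ns == U32_MAX marks an overflowed list, in which
 * case only the first (sentinel) entry is reported.
 */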
static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = NVME_SC_INTERNAL;
	size_t len;

	if (req->data_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
		goto out;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_changed_ns == U32_MAX)
		len = sizeof(__le32);
	else
		len = ctrl->nr_changed_ns * sizeof(__le32);
	status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
	if (!status)
		status = nvmet_zero_sgl(req, len, req->data_len - len);
	ctrl->nr_changed_ns = 0;
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
	mutex_unlock(&ctrl->lock);
out:
	nvmet_req_complete(req, status);
}

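/*
 * Build a single ANA group descriptor.  Unless the host asked for
 * "return groups only" (RGO) we also list the NSIDs that belong to the
 * group.  Returns the number of bytes written to @desc.
 */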
static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
		struct nvme_ana_group_desc *desc)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	u32 count = 0;

	if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
		rcu_read_lock();
		list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link)
			if (ns->anagrpid == grpid)
				desc->nsids[count++] = cpu_to_le32(ns->nsid);
		rcu_read_unlock();
	}

	desc->grpid = cpu_to_le32(grpid);
	desc->nnsids = cpu_to_le32(count);
	desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	desc->state = req->port->ana_state[grpid];
	memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
	return sizeof(struct nvme_ana_group_desc) + count * sizeof(__le32);
}

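/*
 * Emit one descriptor per enabled ANA group until the SGL is exhausted,
 * then just count the remaining groups for the ngrps field of the
 * response header, which is copied last once ngrps is known.
 */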
static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
{
	struct nvme_ana_rsp_hdr hdr = { 0, };
	struct nvme_ana_group_desc *desc;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
	size_t len;
	u32 grpid;
	u16 ngrps = 0;
	u16 status;

	status = NVME_SC_INTERNAL;
	desc = kmalloc(sizeof(struct nvme_ana_group_desc) +
			NVMET_MAX_NAMESPACES * sizeof(__le32), GFP_KERNEL);
	if (!desc)
		goto out;

	down_read(&nvmet_ana_sem);
	for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (!nvmet_ana_group_enabled[grpid])
			continue;
		len = nvmet_format_ana_group(req, grpid, desc);
		status = nvmet_copy_to_sgl(req, offset, desc, len);
		if (status)
			break;
		offset += len;
		ngrps++;
	}
	for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (nvmet_ana_group_enabled[grpid])
			ngrps++;
	}

	hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	hdr.ngrps = cpu_to_le16(ngrps);
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
	up_read(&nvmet_ana_sem);

	kfree(desc);

	/* copy the header last once we know the number of groups */
	status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
out:
	nvmet_req_complete(req, status);
}

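/*
 * Build the Identify Controller data structure.  Most fields are static
 * for a Linux target; the serial number is encoded as hex digits and the
 * model/firmware strings are space padded per the spec's ASCII rules.
 */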
static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl *id;
	u16 status = 0;
	const char model[] = "Linux";

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* XXX: figure out how to assign real vendor IDs. */
	id->vid = 0;
	id->ssvid = 0;

	memset(id->sn, ' ', sizeof(id->sn));
	bin2hex(id->sn, &ctrl->subsys->serial,
		min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
	memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' ');
	memcpy_and_pad(id->fr, sizeof(id->fr),
		       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

	id->rab = 6;

	/*
	 * XXX: figure out how we can assign an IEEE OUI, but until then
	 * the safest is to leave it as zeroes.
	 */

	/* we support multiple ports, multiple hosts and ANA: */
	id->cmic = (1 << 0) | (1 << 1) | (1 << 3);

	/* no limit on data transfer sizes for now */
	id->mdts = 0;
	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/* XXX: figure out what to do about RTD3R/RTD3 */
	id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
	id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT |
		NVME_CTRL_ATTR_TBKAS);

	id->oacs = 0;

	/*
	 * We don't really have a practical limit on the number of abort
	 * commands.  But we don't do anything useful for abort either, so
	 * no point in allowing more abort commands than the spec requires.
	 */
	id->acl = 3;

	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* first slot is read-only, only one slot supported */
	id->frmw = (1 << 0) | (1 << 1);
	id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
	id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
	id->npss = 0;

	/* We support keep-alive timeout with a granularity of seconds */
	id->kas = cpu_to_le16(NVMET_KAS);

	id->sqes = (0x6 << 4) | 0x6;
	id->cqes = (0x4 << 4) | 0x4;

	/* no enforcement soft-limit for maxcmd - pick an arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
	id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
			NVME_CTRL_ONCS_WRITE_ZEROES);

	/* XXX: don't report vwc if the underlying device is write through */
	id->vwc = NVME_CTRL_VWC_PRESENT;

	/*
	 * We can't support atomic writes bigger than an LBA without support
	 * from the backend device.
	 */
	id->awun = 0;
	id->awupf = 0;

	id->sgls = cpu_to_le32(1 << 0);	/* we always support SGLs */
	if (ctrl->ops->has_keyed_sgls)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

	/* Max command capsule size is sqe + single page of in-capsule data */
	id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
				  req->port->inline_data_size) / 16);
	/* Max response capsule size is cqe */
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

	id->msdbd = ctrl->ops->msdbd;

	id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
	id->anatt = 10; /* random value */
	id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
	id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);

	/*
	 * Meh, we don't really support any power state.  Fake up the same
	 * values that qemu does.
	 */
	id->psd[0].max_power = cpu_to_le16(0x9c4);
	id->psd[0].entry_lat = cpu_to_le32(0x10);
	id->psd[0].exit_lat = cpu_to_le32(0x4);

	id->nwpc = 1 << 0; /* write protect and no write protect */

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

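/*
 * Build the Identify Namespace data structure.  An inactive namespace
 * is reported as an all-zeroes buffer rather than an error, and nuse is
 * only reported for ANA states in which the namespace is accessible.
 */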
static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
	struct nvmet_ns *ns;
	struct nvme_id_ns *id;
	u16 status = 0;

	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* return an all zeroed buffer if we can't find an active namespace */
	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
	if (!ns)
		goto done;

	/*
	 * nuse = ncap = nsze isn't always true, but we have no way to find
	 * that out from the underlying device.
	 */
	id->ncap = id->nsze = cpu_to_le64(ns->size >> ns->blksize_shift);
	switch (req->port->ana_state[ns->anagrpid]) {
	case NVME_ANA_INACCESSIBLE:
	case NVME_ANA_PERSISTENT_LOSS:
		break;
	default:
		id->nuse = id->nsze;
		break;
	}

	/*
	 * We just provide a single LBA format that matches what the
	 * underlying device reports.
	 */
	id->nlbaf = 0;
	id->flbas = 0;

	/*
	 * Our namespace might always be shared.  Not just with other
	 * controllers, but also with any other user of the block device.
	 */
	id->nmic = (1 << 0);
	id->anagrpid = cpu_to_le32(ns->anagrpid);

	memcpy(&id->nguid, &ns->nguid, sizeof(id->nguid));

	id->lbaf[0].ds = ns->blksize_shift;

	if (ns->readonly)
		id->nsattr |= (1 << 0);
	nvmet_put_namespace(ns);
done:
	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
	kfree(id);
out:
	nvmet_req_complete(req, status);
}

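/*
 * Return the Active Namespace ID list: all NSIDs greater than the NSID
 * given in the command, zero padded to the full Identify payload.  This
 * relies on the subsystem namespace list being kept in ascending NSID
 * order so the early break on a full buffer is correct.
 */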
static void nvmet_execute_identify_nslist(struct nvmet_req *req)
{
	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
	__le32 *list;
	u16 status = 0;
	int i = 0;

	list = kzalloc(buf_size, GFP_KERNEL);
	if (!list) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		if (ns->nsid <= min_nsid)
			continue;
		list[i++] = cpu_to_le32(ns->nsid);
		if (i == buf_size / sizeof(__le32))
			break;
	}
	rcu_read_unlock();

	status = nvmet_copy_to_sgl(req, 0, list, buf_size);

	kfree(list);
out:
	nvmet_req_complete(req, status);
}

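/*
 * Append one Namespace Identification Descriptor (header plus payload)
 * to the response at *off, advancing *off past what was written.
 */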
static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
				    void *id, off_t *off)
{
	struct nvme_ns_id_desc desc = {
		.nidt = type,
		.nidl = len,
	};
	u16 status;

	status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
	if (status)
		return status;
	*off += sizeof(desc);

	status = nvmet_copy_to_sgl(req, *off, id, len);
	if (status)
		return status;
	*off += len;

	return 0;
}

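/*
 * Return the Namespace Identification Descriptor list: a UUID and/or
 * NGUID descriptor for each identifier that is set, then zero the rest
 * of the payload (a zero NIDT field terminates the list).
 */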
static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
	struct nvmet_ns *ns;
	u16 status = 0;
	off_t off = 0;

	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
	if (!ns) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	if (memchr_inv(&ns->uuid, 0, sizeof(ns->uuid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
						  NVME_NIDT_UUID_LEN,
						  &ns->uuid, &off);
		if (status)
			goto out_put_ns;
	}
	if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
						  NVME_NIDT_NGUID_LEN,
						  &ns->nguid, &off);
		if (status)
			goto out_put_ns;
	}

	if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
			off) != NVME_IDENTIFY_DATA_SIZE - off)
		status = NVME_SC_INTERNAL | NVME_SC_DNR;
out_put_ns:
	nvmet_put_namespace(ns);
out:
	nvmet_req_complete(req, status);
}

/*
 * A "minimum viable" abort implementation: the command is mandatory in the
 * spec, but we are not required to do any useful work.  We couldn't really
 * do a useful abort, so don't even bother waiting for the command to be
 * executed; return immediately, indicating that the command to abort
 * wasn't found.
 */
static void nvmet_execute_abort(struct nvmet_req *req)
{
	nvmet_set_result(req, 1);
	nvmet_req_complete(req, 0);
}

static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
{
	u16 status;

	if (req->ns->file)
		status = nvmet_file_flush(req);
	else
		status = nvmet_bdev_flush(req);

	if (status)
		pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
	return status;
}

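/*
 * Set Features, Write Protect: flush the namespace before reporting it
 * as write protected so no dirty data is lost, and revert the readonly
 * flag if the flush fails.  A successful change is signalled to the
 * host via a namespace changed event.
 */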
static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
	u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u16 status = NVME_SC_FEATURE_NOT_CHANGEABLE;

	req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid);
	if (unlikely(!req->ns)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return status;
	}

	mutex_lock(&subsys->lock);
	switch (write_protect) {
	case NVME_NS_WRITE_PROTECT:
		req->ns->readonly = true;
		status = nvmet_write_protect_flush_sync(req);
		if (status)
			req->ns->readonly = false;
		break;
	case NVME_NS_NO_WRITE_PROTECT:
		req->ns->readonly = false;
		status = 0;
		break;
	default:
		break;
	}

	if (!status)
		nvmet_ns_changed(subsys, req->ns->nsid);
	mutex_unlock(&subsys->lock);
	return status;
}

u16 nvmet_set_feat_kato(struct nvmet_req *req)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);

	nvmet_set_result(req, req->sq->ctrl->kato);

	return 0;
}

u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	if (val32 & ~mask) {
		req->error_loc = offsetof(struct nvme_common_command, cdw11);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
	nvmet_set_result(req, val32);

	return 0;
}

static void nvmet_execute_set_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 status = 0;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		status = nvmet_set_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
		break;
	case NVME_FEAT_HOST_ID:
		status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_set_feat_write_protect(req);
		break;
	default:
		req->error_loc = offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 result;

	req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid);
	if (!req->ns) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return NVME_SC_INVALID_NS | NVME_SC_DNR;
	}
	mutex_lock(&subsys->lock);
	if (req->ns->readonly)
		result = NVME_NS_WRITE_PROTECT;
	else
		result = NVME_NS_NO_WRITE_PROTECT;
	nvmet_set_result(req, result);
	mutex_unlock(&subsys->lock);

	return 0;
}

void nvmet_get_feat_kato(struct nvmet_req *req)
{
	nvmet_set_result(req, req->sq->ctrl->kato * 1000);
}

void nvmet_get_feat_async_event(struct nvmet_req *req)
{
	nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
}

static void nvmet_execute_get_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 status = 0;

	switch (cdw10 & 0xff) {
	/*
	 * These features are mandatory in the spec, but we don't
	 * have a useful way to implement them.  We'll eventually
	 * need to come up with some fake values for these.
	 */
#if 0
	case NVME_FEAT_ARBITRATION:
		break;
	case NVME_FEAT_POWER_MGMT:
		break;
	case NVME_FEAT_TEMP_THRESH:
		break;
	case NVME_FEAT_ERR_RECOVERY:
		break;
	case NVME_FEAT_IRQ_COALESCE:
		break;
	case NVME_FEAT_IRQ_CONFIG:
		break;
	case NVME_FEAT_WRITE_ATOMIC:
		break;
#endif
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_get_feat_async_event(req);
		break;
	case NVME_FEAT_VOLATILE_WC:
		nvmet_set_result(req, 1);
		break;
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		nvmet_get_feat_kato(req);
		break;
	case NVME_FEAT_HOST_ID:
		/* need 128-bit host identifier flag */
		if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
			req->error_loc =
				offsetof(struct nvme_common_command, cdw11);
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}

		status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
				sizeof(req->sq->ctrl->hostid));
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_get_feat_write_protect(req);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

void nvmet_execute_async_event(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
		return;
	}
	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}

void nvmet_execute_keep_alive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	pr_debug("ctrl %d update keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
	nvmet_req_complete(req, 0);
}

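/*
 * Admin command parser: validate the controller state, then set up
 * req->execute and req->data_len for the opcode (and, for Get Log Page
 * and Identify, for the LID/CNS value).  Unknown opcodes fail with
 * Invalid Opcode and Do Not Retry set.
 */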
u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	ret = nvmet_check_ctrl_status(req, cmd);
	if (unlikely(ret))
		return ret;

	switch (cmd->common.opcode) {
	case nvme_admin_get_log_page:
		req->data_len = nvmet_get_log_page_len(cmd);

		switch (cmd->get_log_page.lid) {
		case NVME_LOG_ERROR:
			req->execute = nvmet_execute_get_log_page_error;
			return 0;
		case NVME_LOG_SMART:
			req->execute = nvmet_execute_get_log_page_smart;
			return 0;
		case NVME_LOG_FW_SLOT:
			/*
			 * We only support a single firmware slot which always
			 * is active, so we can zero out the whole firmware slot
			 * log and still claim to fully implement this mandatory
			 * log page.
			 */
			req->execute = nvmet_execute_get_log_page_noop;
			return 0;
		case NVME_LOG_CHANGED_NS:
			req->execute = nvmet_execute_get_log_changed_ns;
			return 0;
		case NVME_LOG_CMD_EFFECTS:
			req->execute = nvmet_execute_get_log_cmd_effects_ns;
			return 0;
		case NVME_LOG_ANA:
			req->execute = nvmet_execute_get_log_page_ana;
			return 0;
		}
		break;
	case nvme_admin_identify:
		req->data_len = NVME_IDENTIFY_DATA_SIZE;
		switch (cmd->identify.cns) {
		case NVME_ID_CNS_NS:
			req->execute = nvmet_execute_identify_ns;
			return 0;
		case NVME_ID_CNS_CTRL:
			req->execute = nvmet_execute_identify_ctrl;
			return 0;
		case NVME_ID_CNS_NS_ACTIVE_LIST:
			req->execute = nvmet_execute_identify_nslist;
			return 0;
		case NVME_ID_CNS_NS_DESC_LIST:
			req->execute = nvmet_execute_identify_desclist;
			return 0;
		}
		break;
	case nvme_admin_abort_cmd:
		req->execute = nvmet_execute_abort;
		req->data_len = 0;
		return 0;
	case nvme_admin_set_features:
		req->execute = nvmet_execute_set_features;
		req->data_len = 0;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_get_features;
		req->data_len = 0;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		req->data_len = 0;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		req->data_len = 0;
		return 0;
	}

	pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
	       req->sq->qid);
	req->error_loc = offsetof(struct nvme_common_command, opcode);
	return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}
887