xref: /openbmc/linux/drivers/nvme/target/admin-cmd.c (revision a977d045)
/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/rculist.h>

#include <generated/utsrelease.h>
#include <asm/unaligned.h>
#include "nvmet.h"

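/*
 * Compute the Get Log Page transfer length in bytes from the NUMDU and
 * NUMDL fields of the command.  NUMD is a 0's based dword count.
 */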
u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
	u32 len = le16_to_cpu(cmd->get_log_page.numdu);

	len <<= 16;
	len += le16_to_cpu(cmd->get_log_page.numdl);
	/* NUMD is a 0's based value */
	len += 1;
	len *= sizeof(u32);

	return len;
}

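/*
 * Fill in the I/O statistics of the SMART / Health log for a single
 * namespace, based on the request counters of the backing block device.
 */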
static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u16 status;
	struct nvmet_ns *ns;
	u64 host_reads, host_writes, data_units_read, data_units_written;

	status = NVME_SC_SUCCESS;
	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
	if (!ns) {
		status = NVME_SC_INVALID_NS;
		pr_err("nvmet: could not find namespace id %u\n",
				le32_to_cpu(req->cmd->get_log_page.nsid));
		goto out;
	}

	host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
	data_units_read = part_stat_read(ns->bdev->bd_part, sectors[READ]);
	host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
	data_units_written = part_stat_read(ns->bdev->bd_part, sectors[WRITE]);

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
	nvmet_put_namespace(ns);
out:
	return status;
}

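/*
 * Fill in the I/O statistics of the SMART / Health log accumulated over
 * all namespaces in the subsystem, for the broadcast NSID case.
 */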
static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u16 status;
	u64 host_reads = 0, host_writes = 0;
	u64 data_units_read = 0, data_units_written = 0;
	struct nvmet_ns *ns;
	struct nvmet_ctrl *ctrl;

	status = NVME_SC_SUCCESS;
	ctrl = req->sq->ctrl;

	rcu_read_lock();
	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
		data_units_read +=
			part_stat_read(ns->bdev->bd_part, sectors[READ]);
		host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
		data_units_written +=
			part_stat_read(ns->bdev->bd_part, sectors[WRITE]);
	}
	rcu_read_unlock();

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return status;
}

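/* Dispatch a SMART / Health log request based on the NSID in the command. */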
static u16 nvmet_get_smart_log(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u16 status;

	WARN_ON(req == NULL || slog == NULL);
	if (req->cmd->get_log_page.nsid == cpu_to_le32(0xFFFFFFFF))
		status = nvmet_get_smart_log_all(req, slog);
	else
		status = nvmet_get_smart_log_nsid(req, slog);
	return status;
}

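/*
 * Handler for the Get Log Page command: allocate a zeroed buffer of the
 * requested length, fill it in based on the log page identifier (LID) and
 * copy it back to the host through the request SGL.
 */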
static void nvmet_execute_get_log_page(struct nvmet_req *req)
{
	struct nvme_smart_log *smart_log;
	size_t data_len = nvmet_get_log_page_len(req->cmd);
	void *buf;
	u16 status = 0;

	buf = kzalloc(data_len, GFP_KERNEL);
	if (!buf) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	switch (req->cmd->get_log_page.lid) {
	case NVME_LOG_ERROR:
		/*
		 * We currently never set the More bit in the status field,
		 * so all error log entries are invalid and can be zeroed out.
		 * This is a minimum viable implementation (TM) of this
		 * mandatory log page.
		 */
		break;
	case NVME_LOG_SMART:
		/*
		 * XXX: fill out actual smart log
		 *
		 * We might have a hard time coming up with useful values for
		 * many of the fields, and even when we have useful data
		 * available (e.g. units or commands read/written) those aren't
		 * persistent over power loss.
		 */
		if (data_len != sizeof(*smart_log)) {
			status = NVME_SC_INTERNAL;
			goto err;
		}
		smart_log = buf;
		status = nvmet_get_smart_log(req, smart_log);
		if (status) {
			memset(buf, '\0', data_len);
			goto err;
		}
		break;
	case NVME_LOG_FW_SLOT:
		/*
		 * We only support a single firmware slot which always is
		 * active, so we can zero out the whole firmware slot log and
		 * still claim to fully implement this mandatory log page.
		 */
		break;
	default:
		BUG();
	}

	status = nvmet_copy_to_sgl(req, 0, buf, data_len);

err:
	kfree(buf);
out:
	nvmet_req_complete(req, status);
}

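/*
 * Copy src to dst and pad the remainder with spaces, as required for the
 * ASCII identifier strings in the Identify Controller data structure.
 */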
static void copy_and_pad(char *dst, int dst_len, const char *src, int src_len)
{
	int len = min(src_len, dst_len);

	memcpy(dst, src, len);
	if (dst_len > len)
		memset(dst + len, ' ', dst_len - len);
}

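/*
 * Handler for Identify Controller (CNS 01h): build the 4096-byte controller
 * data structure from subsystem and transport parameters and copy it back
 * to the host.
 */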
static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl *id;
	u16 status = 0;
	const char model[] = "Linux";

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* XXX: figure out how to assign real vendor IDs. */
	id->vid = 0;
	id->ssvid = 0;

	bin2hex(id->sn, &ctrl->subsys->serial,
		min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
	copy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1);
	copy_and_pad(id->fr, sizeof(id->fr), UTS_RELEASE, strlen(UTS_RELEASE));

	id->rab = 6;

	/*
	 * XXX: figure out how we can assign an IEEE OUI, but until then
	 * the safest is to leave it as zeroes.
	 */

	/* we support multiple ports and multiple hosts: */
	id->cmic = (1 << 0) | (1 << 1);

	/* no limit on data transfer sizes for now */
	id->mdts = 0;
	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/* XXX: figure out what to do about RTD3R/RTD3E */
	id->oaes = cpu_to_le32(1 << 8);
	id->ctratt = cpu_to_le32(1 << 0);

	id->oacs = 0;

	/*
	 * We don't really have a practical limit on the number of abort
	 * commands.  But we don't do anything useful for abort either, so
	 * no point in allowing more abort commands than the spec requires.
	 */
	id->acl = 3;

	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* first slot is read-only, only one slot supported */
	id->frmw = (1 << 0) | (1 << 1);
	id->lpa = (1 << 0) | (1 << 2);
	id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
	id->npss = 0;

	/* We support keep-alive timeout in granularity of seconds */
	id->kas = cpu_to_le16(NVMET_KAS);

	id->sqes = (0x6 << 4) | 0x6;
	id->cqes = (0x4 << 4) | 0x4;

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
			NVME_CTRL_ONCS_WRITE_ZEROES);

	/* XXX: don't report vwc if the underlying device is write through */
	id->vwc = NVME_CTRL_VWC_PRESENT;

	/*
	 * We can't support atomic writes bigger than an LBA without support
	 * from the backend device.
	 */
	id->awun = 0;
	id->awupf = 0;

	id->sgls = cpu_to_le32(1 << 0);	/* we always support SGLs */
	if (ctrl->ops->has_keyed_sgls)
		id->sgls |= cpu_to_le32(1 << 2);
	if (ctrl->ops->sqe_inline_size)
		id->sgls |= cpu_to_le32(1 << 20);

	strcpy(id->subnqn, ctrl->subsys->subsysnqn);

	/* Max command capsule size is sqe + single page of in-capsule data */
	id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
				  ctrl->ops->sqe_inline_size) / 16);
	/* Max response capsule size is cqe */
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

	id->msdbd = ctrl->ops->msdbd;

	/*
	 * Meh, we don't really support any power state.  Fake up the same
	 * values that qemu does.
	 */
	id->psd[0].max_power = cpu_to_le16(0x9c4);
	id->psd[0].entry_lat = cpu_to_le32(0x10);
	id->psd[0].exit_lat = cpu_to_le32(0x4);

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

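/*
 * Handler for Identify Namespace (CNS 00h): report the capacity and the
 * single supported LBA format of the backing device.
 */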
static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
	struct nvmet_ns *ns;
	struct nvme_id_ns *id;
	u16 status = 0;

	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
	if (!ns) {
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out_put_ns;
	}

	/*
	 * nuse = ncap = nsze isn't always true, but we have no way to find
	 * that out from the underlying device.
	 */
	id->ncap = id->nuse = id->nsze =
		cpu_to_le64(ns->size >> ns->blksize_shift);

	/*
	 * We just provide a single LBA format that matches what the
	 * underlying device reports.
	 */
	id->nlbaf = 0;
	id->flbas = 0;

	/*
	 * Our namespace might always be shared.  Not just with other
	 * controllers, but also with any other user of the block device.
	 */
	id->nmic = (1 << 0);

	memcpy(&id->nguid, &ns->nguid, sizeof(uuid_le));

	id->lbaf[0].ds = ns->blksize_shift;

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out_put_ns:
	nvmet_put_namespace(ns);
out:
	nvmet_req_complete(req, status);
}

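/*
 * Handler for Identify Active Namespace ID List (CNS 02h): return the IDs
 * of all namespaces above the NSID given in the command, as many as fit in
 * the buffer, with unused entries zeroed.
 */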
static void nvmet_execute_identify_nslist(struct nvmet_req *req)
{
	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
	__le32 *list;
	u16 status = 0;
	int i = 0;

	list = kzalloc(buf_size, GFP_KERNEL);
	if (!list) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		if (ns->nsid <= min_nsid)
			continue;
		list[i++] = cpu_to_le32(ns->nsid);
		if (i == buf_size / sizeof(__le32))
			break;
	}
	rcu_read_unlock();

	status = nvmet_copy_to_sgl(req, 0, list, buf_size);

	kfree(list);
out:
	nvmet_req_complete(req, status);
}

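/*
 * Append a single Namespace Identification Descriptor (header plus
 * identifier payload) to the response at *off, advancing *off on success.
 */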
static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
				    void *id, off_t *off)
{
	struct nvme_ns_id_desc desc = {
		.nidt = type,
		.nidl = len,
	};
	u16 status;

	status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
	if (status)
		return status;
	*off += sizeof(desc);

	status = nvmet_copy_to_sgl(req, *off, id, len);
	if (status)
		return status;
	*off += len;

	return 0;
}

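/*
 * Handler for Identify Namespace Identification Descriptor List (CNS 03h):
 * emit a descriptor for each non-zero identifier (UUID, NGUID) of the
 * namespace and zero-fill the remainder of the response buffer.
 */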
static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
	struct nvmet_ns *ns;
	u16 status = 0;
	off_t off = 0;

	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
	if (!ns) {
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	if (memchr_inv(&ns->uuid, 0, sizeof(ns->uuid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
						  NVME_NIDT_UUID_LEN,
						  &ns->uuid, &off);
		if (status)
			goto out_put_ns;
	}
	if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
						  NVME_NIDT_NGUID_LEN,
						  &ns->nguid, &off);
		if (status)
			goto out_put_ns;
	}

	if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
			off) != NVME_IDENTIFY_DATA_SIZE - off)
		status = NVME_SC_INTERNAL | NVME_SC_DNR;
out_put_ns:
	nvmet_put_namespace(ns);
out:
	nvmet_req_complete(req, status);
}

/*
 * A "minimum viable" abort implementation: the command is mandatory in the
 * spec, but we are not required to do any useful work.  We couldn't really
 * do a useful abort, so we don't even bother waiting for the command to be
 * executed, and immediately return indicating that the command to abort
 * wasn't found.
 */
static void nvmet_execute_abort(struct nvmet_req *req)
{
	nvmet_set_result(req, 1);
	nvmet_req_complete(req, 0);
}

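/*
 * Handler for the Set Features command.  Only the Number of Queues and
 * Keep Alive Timer features are implemented; the feature identifier is
 * carried in the low byte of CDW10.
 */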
static void nvmet_execute_set_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
	u32 val32;
	u16 status = 0;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		val32 = le32_to_cpu(req->cmd->common.cdw10[1]);
		req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
		nvmet_set_result(req, req->sq->ctrl->kato);
		break;
	default:
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

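/*
 * Handler for the Get Features command.  Several mandatory features are
 * still missing (see the #if 0 block); only Volatile Write Cache, Number
 * of Queues and Keep Alive Timer are reported.
 */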
static void nvmet_execute_get_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
	u16 status = 0;

	switch (cdw10 & 0xff) {
	/*
	 * These features are mandatory in the spec, but we don't
	 * have a useful way to implement them.  We'll eventually
	 * need to come up with some fake values for these.
	 */
#if 0
	case NVME_FEAT_ARBITRATION:
		break;
	case NVME_FEAT_POWER_MGMT:
		break;
	case NVME_FEAT_TEMP_THRESH:
		break;
	case NVME_FEAT_ERR_RECOVERY:
		break;
	case NVME_FEAT_IRQ_COALESCE:
		break;
	case NVME_FEAT_IRQ_CONFIG:
		break;
	case NVME_FEAT_WRITE_ATOMIC:
		break;
	case NVME_FEAT_ASYNC_EVENT:
		break;
#endif
	case NVME_FEAT_VOLATILE_WC:
		nvmet_set_result(req, 1);
		break;
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		nvmet_set_result(req, req->sq->ctrl->kato * 1000);
		break;
	default:
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

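/*
 * Handler for the Asynchronous Event Request command: park the request in
 * the controller's async_event_cmds array until an event fires, or fail it
 * immediately if the host already has NVMET_ASYNC_EVENTS requests
 * outstanding.
 */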
static void nvmet_execute_async_event(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
		return;
	}
	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}

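/*
 * Handler for the Keep Alive command: restart the keep-alive timer with
 * the currently programmed KATO value.
 */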
static void nvmet_execute_keep_alive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	pr_debug("ctrl %d update keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
	nvmet_req_complete(req, 0);
}

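/*
 * Admin command parser: validate the controller state, then set up
 * req->execute and req->data_len based on the opcode.  Returns 0 on
 * success or a status code for unsupported commands.
 */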
u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	req->ns = NULL;

	ret = nvmet_check_ctrl_status(req, cmd);
	if (unlikely(ret))
		return ret;

	switch (cmd->common.opcode) {
	case nvme_admin_get_log_page:
		req->data_len = nvmet_get_log_page_len(cmd);

		switch (cmd->get_log_page.lid) {
		case NVME_LOG_ERROR:
		case NVME_LOG_SMART:
		case NVME_LOG_FW_SLOT:
			req->execute = nvmet_execute_get_log_page;
			return 0;
		}
		break;
	case nvme_admin_identify:
		req->data_len = NVME_IDENTIFY_DATA_SIZE;
		switch (cmd->identify.cns) {
		case NVME_ID_CNS_NS:
			req->execute = nvmet_execute_identify_ns;
			return 0;
		case NVME_ID_CNS_CTRL:
			req->execute = nvmet_execute_identify_ctrl;
			return 0;
		case NVME_ID_CNS_NS_ACTIVE_LIST:
			req->execute = nvmet_execute_identify_nslist;
			return 0;
		case NVME_ID_CNS_NS_DESC_LIST:
			req->execute = nvmet_execute_identify_desclist;
			return 0;
		}
		break;
	case nvme_admin_abort_cmd:
		req->execute = nvmet_execute_abort;
		req->data_len = 0;
		return 0;
	case nvme_admin_set_features:
		req->execute = nvmet_execute_set_features;
		req->data_len = 0;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_get_features;
		req->data_len = 0;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		req->data_len = 0;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		req->data_len = 0;
		return 0;
	}

	pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
	       req->sq->qid);
	return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}
619