admin-cmd.c — old: 8be98d2f2a0a262f8bf8a0bc1fdf522b3c7aab17, new: ab7a2737ac5acd7d485ca45d8772497717fbc781 (unchanged context is shown once, marked "common:"; changed hunks appear as "old:" / "new:" blocks)
common:

1// SPDX-License-Identifier: GPL-2.0
2/*
3 * NVMe admin command implementation.
4 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
5 */
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7#include <linux/module.h>
8#include <linux/rculist.h>

--- 148 unchanged lines hidden ---

157
158 status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
159out_free_log:
160 kfree(log);
161out:
162 nvmet_req_complete(req, status);
163}
164
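The context above shows the tail of one log-page handler, and it is the shape every log/identify handler in this file follows: allocate a response buffer, fill it, copy it into the request's scatter-gather list with nvmet_copy_to_sgl(), free the buffer, then complete the request with a status code. A minimal sketch of that pattern — fill_log() and LOG_SIZE are hypothetical stand-ins, not names from this file:

static void nvmet_execute_some_log(struct nvmet_req *req)
{
	u16 status = NVME_SC_INTERNAL;
	void *log;

	log = kzalloc(LOG_SIZE, GFP_KERNEL);	/* LOG_SIZE: hypothetical */
	if (!log)
		goto out;

	fill_log(log);				/* hypothetical fill step */
	status = nvmet_copy_to_sgl(req, 0, log, LOG_SIZE);
	kfree(log);
out:
	nvmet_req_complete(req, status);
}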
old:

165static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
166{
167 u16 status = NVME_SC_INTERNAL;
168 struct nvme_effects_log *log;
169
170 log = kzalloc(sizeof(*log), GFP_KERNEL);
171 if (!log)
172 goto out;
173
174 log->acs[nvme_admin_get_log_page] = cpu_to_le32(1 << 0);
175 log->acs[nvme_admin_identify] = cpu_to_le32(1 << 0);
176 log->acs[nvme_admin_abort_cmd] = cpu_to_le32(1 << 0);
177 log->acs[nvme_admin_set_features] = cpu_to_le32(1 << 0);
178 log->acs[nvme_admin_get_features] = cpu_to_le32(1 << 0);
179 log->acs[nvme_admin_async_event] = cpu_to_le32(1 << 0);
180 log->acs[nvme_admin_keep_alive] = cpu_to_le32(1 << 0);
181
182 log->iocs[nvme_cmd_read] = cpu_to_le32(1 << 0);
183 log->iocs[nvme_cmd_write] = cpu_to_le32(1 << 0);
184 log->iocs[nvme_cmd_flush] = cpu_to_le32(1 << 0);
185 log->iocs[nvme_cmd_dsm] = cpu_to_le32(1 << 0);
186 log->iocs[nvme_cmd_write_zeroes] = cpu_to_le32(1 << 0);
187
188 status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
189
190 kfree(log);
191out:
192 nvmet_req_complete(req, status);
193}
194

new:

165static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log)
166{
167 log->acs[nvme_admin_get_log_page] = cpu_to_le32(1 << 0);
168 log->acs[nvme_admin_identify] = cpu_to_le32(1 << 0);
169 log->acs[nvme_admin_abort_cmd] = cpu_to_le32(1 << 0);
170 log->acs[nvme_admin_set_features] = cpu_to_le32(1 << 0);
171 log->acs[nvme_admin_get_features] = cpu_to_le32(1 << 0);
172 log->acs[nvme_admin_async_event] = cpu_to_le32(1 << 0);
173 log->acs[nvme_admin_keep_alive] = cpu_to_le32(1 << 0);
174
175 log->iocs[nvme_cmd_read] = cpu_to_le32(1 << 0);
176 log->iocs[nvme_cmd_write] = cpu_to_le32(1 << 0);
177 log->iocs[nvme_cmd_flush] = cpu_to_le32(1 << 0);
178 log->iocs[nvme_cmd_dsm] = cpu_to_le32(1 << 0);
179 log->iocs[nvme_cmd_write_zeroes] = cpu_to_le32(1 << 0);
180}
181
182static void nvmet_get_cmd_effects_zns(struct nvme_effects_log *log)
183{
184 log->iocs[nvme_cmd_zone_append] = cpu_to_le32(1 << 0);
185 log->iocs[nvme_cmd_zone_mgmt_send] = cpu_to_le32(1 << 0);
186 log->iocs[nvme_cmd_zone_mgmt_recv] = cpu_to_le32(1 << 0);
187}
188
189static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
190{
191 struct nvme_effects_log *log;
192 u16 status = NVME_SC_SUCCESS;
193
194 log = kzalloc(sizeof(*log), GFP_KERNEL);
195 if (!log) {
196 status = NVME_SC_INTERNAL;
197 goto out;
198 }
199
200 switch (req->cmd->get_log_page.csi) {
201 case NVME_CSI_NVM:
202 nvmet_get_cmd_effects_nvm(log);
203 break;
204 case NVME_CSI_ZNS:
205 if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
206 status = NVME_SC_INVALID_IO_CMD_SET;
207 goto free;
208 }
209 nvmet_get_cmd_effects_nvm(log);
210 nvmet_get_cmd_effects_zns(log);
211 break;
212 default:
213 status = NVME_SC_INVALID_LOG_PAGE;
214 goto free;
215 }
216
217 status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
218free:
219 kfree(log);
220out:
221 nvmet_req_complete(req, status);
222}
223
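The repeated cpu_to_le32(1 << 0) stores set bit 0 of each Commands Supported and Effects entry, which the NVMe specification defines as CSUPP ("command supported"). A sketch of how the same stores could be written with the bit named instead of spelled as a literal, assuming the NVME_CMD_EFFECTS_CSUPP flag from include/linux/nvme.h:

/* Sketch only: hypothetical helper, not part of either commit. */
static void nvmet_mark_csupp(__le32 *effects, u8 opcode)
{
	effects[opcode] = cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
}

With that, log->iocs[nvme_cmd_read] = cpu_to_le32(1 << 0) would read as nvmet_mark_csupp(log->iocs, nvme_cmd_read).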
common:

224static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
225{
226 struct nvmet_ctrl *ctrl = req->sq->ctrl;

--- 110 unchanged lines hidden ---

337 return nvmet_execute_get_log_page_ana(req);
338 }
339 pr_debug("unhandled lid %d on qid %d\n",
340 req->cmd->get_log_page.lid, req->sq->qid);
341 req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
342 nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
343}
344
old:

316static u16 nvmet_set_model_number(struct nvmet_subsys *subsys)
317{
318 u16 status = 0;
319
320 mutex_lock(&subsys->lock);
321 if (!subsys->model_number) {
322 subsys->model_number =
323 kstrdup(NVMET_DEFAULT_CTRL_MODEL, GFP_KERNEL);
324 if (!subsys->model_number)
325 status = NVME_SC_INTERNAL;
326 }
327 mutex_unlock(&subsys->lock);
328
329 return status;
330}
331
332static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
333{
334 struct nvmet_ctrl *ctrl = req->sq->ctrl;
335 struct nvmet_subsys *subsys = ctrl->subsys;
336 struct nvme_id_ctrl *id;
337 u32 cmd_capsule_size;
338 u16 status = 0;
339
340 /*
341 * If there is no model number yet, set it now. It will then remain
342 * stable for the lifetime of the subsystem.
343 */
344 if (!subsys->model_number) {
345 status = nvmet_set_model_number(subsys);
346 if (status)
347 goto out;
348 }
349
350 id = kzalloc(sizeof(*id), GFP_KERNEL);
351 if (!id) {
352 status = NVME_SC_INTERNAL;
353 goto out;
354 }
355
356 /* XXX: figure out how to assign real vendor IDs. */
357 id->vid = 0;
358 id->ssvid = 0;
359
360 memset(id->sn, ' ', sizeof(id->sn));
361 bin2hex(id->sn, &ctrl->subsys->serial,
362 min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));

new:

345static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
346{
347 struct nvmet_ctrl *ctrl = req->sq->ctrl;
348 struct nvmet_subsys *subsys = ctrl->subsys;
349 struct nvme_id_ctrl *id;
350 u32 cmd_capsule_size;
351 u16 status = 0;
352
353 if (!subsys->subsys_discovered) {
354 mutex_lock(&subsys->lock);
355 subsys->subsys_discovered = true;
356 mutex_unlock(&subsys->lock);
357 }
358
359 id = kzalloc(sizeof(*id), GFP_KERNEL);
360 if (!id) {
361 status = NVME_SC_INTERNAL;
362 goto out;
363 }
364
365 /* XXX: figure out how to assign real vendor IDs. */
366 id->vid = 0;
367 id->ssvid = 0;
368
369 memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE)
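In the old code the serial number is a binary value rendered with bin2hex() into the space-padded 20-byte sn field; in the new code it is already a preformatted string of NVMET_SN_MAX_SIZE bytes copied verbatim. The neighboring mn/fr fields use memcpy_and_pad(), whose behavior (copy the source, then fill the tail of the fixed-width field with the pad byte) can be sketched as follows — an illustrative reimplementation of my understanding of the include/linux/string.h helper, not the kernel's actual code:

#include <string.h>

static void sketch_memcpy_and_pad(void *dest, size_t dest_len,
				  const void *src, size_t count, int pad)
{
	if (count >= dest_len) {
		memcpy(dest, src, dest_len);	/* source truncated to fit */
	} else {
		memcpy(dest, src, count);
		memset((char *)dest + count, pad,
		       dest_len - count);	/* e.g. space-pad the tail */
	}
}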
common:

370 memcpy_and_pad(id->mn, sizeof(id->mn), subsys->model_number,
371 strlen(subsys->model_number), ' ');
372 memcpy_and_pad(id->fr, sizeof(id->fr),
373 UTS_RELEASE, strlen(UTS_RELEASE), ' ');
374
375 id->rab = 6;
376
377 /*

--- 39 unchanged lines hidden (view full) ---

417 id->kas = cpu_to_le16(NVMET_KAS);
418
419 id->sqes = (0x6 << 4) | 0x6;
420 id->cqes = (0x4 << 4) | 0x4;
421
422 /* no enforcement soft-limit for maxcmd - pick arbitrary high value */
423 id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
424
old:
418 id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
new:
425 id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES);
common:

426 id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
427 id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
428 NVME_CTRL_ONCS_WRITE_ZEROES);
429
430 /* XXX: don't report vwc if the underlying device is write through */
431 id->vwc = NVME_CTRL_VWC_PRESENT;
432
433 /*

--- 203 unchanged lines hidden ---

637 if (memchr_inv(req->ns->nguid, 0, sizeof(req->ns->nguid))) {
638 status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
639 NVME_NIDT_NGUID_LEN,
640 &req->ns->nguid, &off);
641 if (status)
642 goto out;
643 }
644
new (added):

645 status = nvmet_copy_ns_identifier(req, NVME_NIDT_CSI,
646 NVME_NIDT_CSI_LEN,
647 &req->ns->csi, &off);
648 if (status)
649 goto out;
650
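This added hunk appends one more Namespace Identification Descriptor carrying the namespace's Command Set Identifier. Each descriptor that nvmet_copy_ns_identifier() emits is a small header followed by the identifier payload; a sketch of that wire format, assuming the descriptor layout from the NVMe Identify (CNS 03h) definition:

struct ns_id_desc_sketch {
	__u8	nidt;		/* descriptor type, e.g. NVME_NIDT_CSI */
	__u8	nidl;		/* payload length; NVME_NIDT_CSI_LEN is 1 */
	__le16	reserved;
	__u8	nid[];		/* nidl bytes of identifier follow */
};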
common:

651 if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
652 off) != NVME_IDENTIFY_DATA_SIZE - off)
653 status = NVME_SC_INTERNAL | NVME_SC_DNR;
654
655out:
656 nvmet_req_complete(req, status);
657}
658
new (added):

659static bool nvmet_handle_identify_desclist(struct nvmet_req *req)
660{
661 switch (req->cmd->identify.csi) {
662 case NVME_CSI_NVM:
663 nvmet_execute_identify_desclist(req);
664 return true;
665 case NVME_CSI_ZNS:
666 if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
667 nvmet_execute_identify_desclist(req);
668 return true;
669 }
670 return false;
671 default:
672 return false;
673 }
674}
675
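nvmet_handle_identify_desclist() returns false both for unknown CSIs and for NVME_CSI_ZNS when zoned block support is compiled out, and the caller then falls through to the error path. IS_ENABLED() expands to a compile-time 0 or 1, so the ZNS arm vanishes entirely from non-zoned builds. A minimal sketch of the same gating pattern, with sketch_csi_supported() as a hypothetical helper mirroring the logic above:

#include <linux/kconfig.h>

static bool sketch_csi_supported(u8 csi)
{
	if (csi == NVME_CSI_NVM)
		return true;
	/* Dead-code-eliminated when CONFIG_BLK_DEV_ZONED is not set. */
	if (csi == NVME_CSI_ZNS && IS_ENABLED(CONFIG_BLK_DEV_ZONED))
		return true;
	return false;
}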
common:

676static void nvmet_execute_identify(struct nvmet_req *req)
677{
678 if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
679 return;
680
681 switch (req->cmd->identify.cns) {
682 case NVME_ID_CNS_NS:
old:

653 return nvmet_execute_identify_ns(req);
654 case NVME_ID_CNS_CTRL:
655 return nvmet_execute_identify_ctrl(req);
656 case NVME_ID_CNS_NS_ACTIVE_LIST:
657 return nvmet_execute_identify_nslist(req);
658 case NVME_ID_CNS_NS_DESC_LIST:
659 return nvmet_execute_identify_desclist(req);
660 }
661
662 pr_debug("unhandled identify cns %d on qid %d\n",
663 req->cmd->identify.cns, req->sq->qid);
664 req->error_loc = offsetof(struct nvme_identify, cns);
665 nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
666}
667

new:

683 switch (req->cmd->identify.csi) {
684 case NVME_CSI_NVM:
685 return nvmet_execute_identify_ns(req);
686 default:
687 break;
688 }
689 break;
690 case NVME_ID_CNS_CS_NS:
691 if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
692 switch (req->cmd->identify.csi) {
693 case NVME_CSI_ZNS:
694 return nvmet_execute_identify_cns_cs_ns(req);
695 default:
696 break;
697 }
698 }
699 break;
700 case NVME_ID_CNS_CTRL:
701 switch (req->cmd->identify.csi) {
702 case NVME_CSI_NVM:
703 return nvmet_execute_identify_ctrl(req);
704 }
705 break;
706 case NVME_ID_CNS_CS_CTRL:
707 if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
708 switch (req->cmd->identify.csi) {
709 case NVME_CSI_ZNS:
710 return nvmet_execute_identify_cns_cs_ctrl(req);
711 default:
712 break;
713 }
714 }
715 break;
716 case NVME_ID_CNS_NS_ACTIVE_LIST:
717 switch (req->cmd->identify.csi) {
718 case NVME_CSI_NVM:
719 return nvmet_execute_identify_nslist(req);
720 default:
721 break;
722 }
723 break;
724 case NVME_ID_CNS_NS_DESC_LIST:
725 if (nvmet_handle_identify_desclist(req) == true)
726 return;
727 break;
728 }
729
730 nvmet_req_cns_error_complete(req);
731}
732
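nvmet_req_cns_error_complete() folds the old four-line error path into a single helper. Its body is not shown in this hunk; a plausible reconstruction, mirroring the open-coded lines it replaces (old 662-665) and assuming it lives as an inline in nvmet.h:

static inline void nvmet_req_cns_error_complete(struct nvmet_req *req)
{
	pr_debug("unhandled identify cns %d on qid %d\n",
		 req->cmd->identify.cns, req->sq->qid);
	req->error_loc = offsetof(struct nvme_identify, cns);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}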
common:

733/*
734 * A "minimum viable" abort implementation: the command is mandatory in the
735 * spec, but we are not required to do any useful work. We couldn't really
736 * do a useful abort, so don't even bother waiting for the command to be
737 * executed; return immediately, reporting that the command to abort
738 * wasn't found.

--- 271 unchanged lines hidden ---

1010 return nvmet_parse_fabrics_cmd(req);
1011 if (nvmet_req_subsys(req)->type == NVME_NQN_DISC)
1012 return nvmet_parse_discovery_cmd(req);
1013
1014 ret = nvmet_check_ctrl_status(req);
1015 if (unlikely(ret))
1016 return ret;
1017
old:
953 if (nvmet_req_passthru_ctrl(req))
new:
1018 if (nvmet_is_passthru_req(req))
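The rename from nvmet_req_passthru_ctrl() to nvmet_is_passthru_req() makes the call read as the boolean predicate it is used as. The helper's body is not part of this hunk; a plausible shape, assuming the subsystem carries a passthru_ctrl pointer as elsewhere in nvmet:

/* Assumed sketch of the renamed predicate, likely in nvmet.h. */
static inline bool nvmet_is_passthru_req(struct nvmet_req *req)
{
	return nvmet_req_subsys(req)->passthru_ctrl;
}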
common:

1019 return nvmet_parse_passthru_admin_cmd(req);
1020
1021 switch (cmd->common.opcode) {
1022 case nvme_admin_get_log_page:
1023 req->execute = nvmet_execute_get_log_page;
1024 return 0;
1025 case nvme_admin_identify:
1026 req->execute = nvmet_execute_identify;

--- 20 unchanged lines hidden ---