/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <generated/utsrelease.h>
#include "nvmet.h"

u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
        u32 len = le16_to_cpu(cmd->get_log_page.numdu);

        len <<= 16;
        len += le16_to_cpu(cmd->get_log_page.numdl);
        /* NUMD is a 0's based value */
        len += 1;
        len *= sizeof(u32);

        return len;
}

static void nvmet_execute_get_log_page(struct nvmet_req *req)
{
        size_t data_len = nvmet_get_log_page_len(req->cmd);
        void *buf;
        u16 status = 0;

        buf = kzalloc(data_len, GFP_KERNEL);
        if (!buf) {
                status = NVME_SC_INTERNAL;
                goto out;
        }

        switch (req->cmd->get_log_page.lid) {
        case 0x01:
                /*
                 * We currently never set the More bit in the status field,
                 * so all error log entries are invalid and can be zeroed out.
                 * This is called a minimum viable implementation (TM) of this
                 * mandatory log page.
                 */
                break;
        case 0x02:
                /*
                 * XXX: fill out actual smart log
                 *
                 * We might have a hard time coming up with useful values for
                 * many of the fields, and even when we have useful data
                 * available (e.g. units or commands read/written) those aren't
                 * persistent over power loss.
                 */
                break;
        case 0x03:
                /*
                 * We only support a single firmware slot which always is
                 * active, so we can zero out the whole firmware slot log and
                 * still claim to fully implement this mandatory log page.
                 */
                break;
        default:
                BUG();
        }

        status = nvmet_copy_to_sgl(req, 0, buf, data_len);

        kfree(buf);
out:
        nvmet_req_complete(req, status);
}

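/*
 * Build the Identify Controller data structure for this fabrics controller
 * and copy it back to the host through the request SGL.
 */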
static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvme_id_ctrl *id;
        u16 status = 0;

        id = kzalloc(sizeof(*id), GFP_KERNEL);
        if (!id) {
                status = NVME_SC_INTERNAL;
                goto out;
        }

        /* XXX: figure out how to assign real vendor IDs. */
        id->vid = 0;
        id->ssvid = 0;

        memset(id->sn, ' ', sizeof(id->sn));
        snprintf(id->sn, sizeof(id->sn), "%llx", ctrl->serial);

        memset(id->mn, ' ', sizeof(id->mn));
        strncpy((char *)id->mn, "Linux", sizeof(id->mn));

        memset(id->fr, ' ', sizeof(id->fr));
        strncpy((char *)id->fr, UTS_RELEASE, sizeof(id->fr));

        id->rab = 6;

        /*
         * XXX: figure out how we can assign an IEEE OUI, but until then
         * the safest is to leave it as zeroes.
         */

        /* we support multiple ports and multiple hosts: */
        id->cmic = (1 << 0) | (1 << 1);

        /* no limit on data transfer sizes for now */
        id->mdts = 0;
        id->cntlid = cpu_to_le16(ctrl->cntlid);
        id->ver = cpu_to_le32(ctrl->subsys->ver);

        /* XXX: figure out what to do about RTD3R/RTD3 */
        id->oaes = cpu_to_le32(1 << 8);
        id->ctratt = cpu_to_le32(1 << 0);

        id->oacs = 0;

        /*
         * We don't really have a practical limit on the number of abort
         * commands.  But we don't do anything useful for abort either, so
         * no point in allowing more abort commands than the spec requires.
         */
        id->acl = 3;

        id->aerl = NVMET_ASYNC_EVENTS - 1;

        /* first slot is read-only, only one slot supported */
        id->frmw = (1 << 0) | (1 << 1);
        id->lpa = (1 << 0) | (1 << 2);
        id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
        id->npss = 0;

        /* We support keep-alive timeout in granularity of seconds */
        id->kas = cpu_to_le16(NVMET_KAS);

        id->sqes = (0x6 << 4) | 0x6;
        id->cqes = (0x4 << 4) | 0x4;

        /* no enforcement soft-limit for maxcmd - pick arbitrary high value */
        id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

        id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
        id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM);

        /* XXX: don't report vwc if the underlying device is write through */
        id->vwc = NVME_CTRL_VWC_PRESENT;

        /*
         * We can't support atomic writes bigger than an LBA without support
         * from the backend device.
         */
        id->awun = 0;
        id->awupf = 0;

        id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
        if (ctrl->ops->has_keyed_sgls)
                id->sgls |= cpu_to_le32(1 << 2);
        if (ctrl->ops->sqe_inline_size)
                id->sgls |= cpu_to_le32(1 << 20);

        strcpy(id->subnqn, ctrl->subsys->subsysnqn);

        /* Max command capsule size is sqe + single page of in-capsule data */
        id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
                        ctrl->ops->sqe_inline_size) / 16);
        /* Max response capsule size is cqe */
        id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

        id->msdbd = ctrl->ops->msdbd;

        /*
         * Meh, we don't really support any power state.  Fake up the same
         * values that qemu does.
         */
        id->psd[0].max_power = cpu_to_le16(0x9c4);
        id->psd[0].entry_lat = cpu_to_le32(0x10);
        id->psd[0].exit_lat = cpu_to_le32(0x4);

        status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

        kfree(id);
out:
        nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
        struct nvmet_ns *ns;
        struct nvme_id_ns *id;
        u16 status = 0;

        ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
        if (!ns) {
                status = NVME_SC_INVALID_NS | NVME_SC_DNR;
                goto out;
        }

        id = kzalloc(sizeof(*id), GFP_KERNEL);
        if (!id) {
                status = NVME_SC_INTERNAL;
                goto out_put_ns;
        }

        /*
         * nuse = ncap = nsze isn't always true, but we have no way to find
         * that out from the underlying device.
         */
        id->ncap = id->nuse = id->nsze =
                cpu_to_le64(ns->size >> ns->blksize_shift);

        /*
         * We just provide a single LBA format that matches what the
         * underlying device reports.
         */
        id->nlbaf = 0;
        id->flbas = 0;

        /*
         * Our namespace might always be shared.  Not just with other
         * controllers, but also with any other user of the block device.
         */
        id->nmic = (1 << 0);

        memcpy(&id->nguid, &ns->nguid, sizeof(uuid_le));

        id->lbaf[0].ds = ns->blksize_shift;

        status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

        kfree(id);
out_put_ns:
        nvmet_put_namespace(ns);
out:
        nvmet_req_complete(req, status);
}

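/*
 * Return the Active Namespace ID list: fill a 4k buffer with the IDs of all
 * namespaces whose NSID is greater than the one given in the command, and
 * leave the remaining slots zeroed.
 */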
static void nvmet_execute_identify_nslist(struct nvmet_req *req)
{
        static const int buf_size = 4096;
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvmet_ns *ns;
        u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
        __le32 *list;
        u16 status = 0;
        int i = 0;

        list = kzalloc(buf_size, GFP_KERNEL);
        if (!list) {
                status = NVME_SC_INTERNAL;
                goto out;
        }

        rcu_read_lock();
        list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
                if (ns->nsid <= min_nsid)
                        continue;
                list[i++] = cpu_to_le32(ns->nsid);
                if (i == buf_size / sizeof(__le32))
                        break;
        }
        rcu_read_unlock();

        status = nvmet_copy_to_sgl(req, 0, list, buf_size);

        kfree(list);
out:
        nvmet_req_complete(req, status);
}

/*
 * A "minimum viable" abort implementation: the command is mandatory in the
 * spec, but we are not required to do any useful work.  We couldn't really
 * do a useful abort, so don't even bother waiting for the command to be
 * executed; return immediately, reporting that the command to abort wasn't
 * found.
 */
static void nvmet_execute_abort(struct nvmet_req *req)
{
        nvmet_set_result(req, 1);
        nvmet_req_complete(req, 0);
}

static void nvmet_execute_set_features(struct nvmet_req *req)
{
        struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
        u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
        u64 val;
        u32 val32;
        u16 status = 0;

        switch (cdw10 & 0xf) {
        case NVME_FEAT_NUM_QUEUES:
                nvmet_set_result(req,
                        (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
                break;
        case NVME_FEAT_KATO:
                val = le64_to_cpu(req->cmd->prop_set.value);
                val32 = val & 0xffff;
                req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
                nvmet_set_result(req, req->sq->ctrl->kato);
                break;
        default:
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                break;
        }

        nvmet_req_complete(req, status);
}

static void nvmet_execute_get_features(struct nvmet_req *req)
{
        struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
        u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
        u16 status = 0;

        switch (cdw10 & 0xf) {
        /*
         * These features are mandatory in the spec, but we don't
         * have a useful way to implement them.  We'll eventually
         * need to come up with some fake values for these.
         */
#if 0
        case NVME_FEAT_ARBITRATION:
                break;
        case NVME_FEAT_POWER_MGMT:
                break;
        case NVME_FEAT_TEMP_THRESH:
                break;
        case NVME_FEAT_ERR_RECOVERY:
                break;
        case NVME_FEAT_IRQ_COALESCE:
                break;
        case NVME_FEAT_IRQ_CONFIG:
                break;
        case NVME_FEAT_WRITE_ATOMIC:
                break;
        case NVME_FEAT_ASYNC_EVENT:
                break;
#endif
        case NVME_FEAT_VOLATILE_WC:
                nvmet_set_result(req, 1);
                break;
        case NVME_FEAT_NUM_QUEUES:
                nvmet_set_result(req,
                        (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
                break;
        case NVME_FEAT_KATO:
                nvmet_set_result(req, req->sq->ctrl->kato * 1000);
                break;
        default:
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                break;
        }

        nvmet_req_complete(req, status);
}

static void nvmet_execute_async_event(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;

        mutex_lock(&ctrl->lock);
        if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
                mutex_unlock(&ctrl->lock);
                nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
                return;
        }
        ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
        mutex_unlock(&ctrl->lock);

        schedule_work(&ctrl->async_event_work);
}

static void nvmet_execute_keep_alive(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;

        pr_debug("ctrl %d update keep-alive timer for %d secs\n",
                ctrl->cntlid, ctrl->kato);

        mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
        nvmet_req_complete(req, 0);
}

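/*
 * Validate an incoming admin command and set up req->execute and
 * req->data_len for the opcodes we support.  Commands received before the
 * controller is enabled and ready, or with an unsupported opcode, are
 * rejected with an appropriate status code.
 */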
int nvmet_parse_admin_cmd(struct nvmet_req *req)
{
        struct nvme_command *cmd = req->cmd;

        req->ns = NULL;

        if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
                pr_err("nvmet: got admin cmd %d while CC.EN == 0\n",
                        cmd->common.opcode);
                return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
        }
        if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
                pr_err("nvmet: got admin cmd %d while CSTS.RDY == 0\n",
                        cmd->common.opcode);
                return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
        }

        switch (cmd->common.opcode) {
        case nvme_admin_get_log_page:
                req->data_len = nvmet_get_log_page_len(cmd);

                switch (cmd->get_log_page.lid) {
                case 0x01:
                case 0x02:
                case 0x03:
                        req->execute = nvmet_execute_get_log_page;
                        return 0;
                }
                break;
        case nvme_admin_identify:
                req->data_len = 4096;
                switch (le32_to_cpu(cmd->identify.cns)) {
                case 0x00:
                        req->execute = nvmet_execute_identify_ns;
                        return 0;
                case 0x01:
                        req->execute = nvmet_execute_identify_ctrl;
                        return 0;
                case 0x02:
                        req->execute = nvmet_execute_identify_nslist;
                        return 0;
                }
                break;
        case nvme_admin_abort_cmd:
                req->execute = nvmet_execute_abort;
                req->data_len = 0;
                return 0;
        case nvme_admin_set_features:
                req->execute = nvmet_execute_set_features;
                req->data_len = 0;
                return 0;
        case nvme_admin_get_features:
                req->execute = nvmet_execute_get_features;
                req->data_len = 0;
                return 0;
        case nvme_admin_async_event:
                req->execute = nvmet_execute_async_event;
                req->data_len = 0;
                return 0;
        case nvme_admin_keep_alive:
                req->execute = nvmet_execute_keep_alive;
                req->data_len = 0;
                return 0;
        }

        pr_err("nvmet: unhandled cmd %d\n", cmd->common.opcode);
        return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}