/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <generated/utsrelease.h>
#include <asm/unaligned.h>
#include "nvmet.h"

/*
 * The Get Log Page transfer length is expressed in dwords (NUMD), split
 * across the NUMDU (upper) and NUMDL (lower) command fields.
 */
u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
	u32 len = le16_to_cpu(cmd->get_log_page.numdu);

	len <<= 16;
	len += le16_to_cpu(cmd->get_log_page.numdl);
	/* NUMD is a 0's based value */
	len += 1;
	len *= sizeof(u32);

	return len;
}

static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u16 status;
	struct nvmet_ns *ns;
	u64 host_reads, host_writes, data_units_read, data_units_written;

	status = NVME_SC_SUCCESS;
	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
	if (!ns) {
		status = NVME_SC_INVALID_NS;
		pr_err("nvmet: could not find namespace id: %d\n",
				le32_to_cpu(req->cmd->get_log_page.nsid));
		goto out;
	}

	host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
	data_units_read = part_stat_read(ns->bdev->bd_part, sectors[READ]);
	host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
	data_units_written = part_stat_read(ns->bdev->bd_part, sectors[WRITE]);

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
	nvmet_put_namespace(ns);
out:
	return status;
}

static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u16 status;
	u64 host_reads = 0, host_writes = 0;
	u64 data_units_read = 0, data_units_written = 0;
	struct nvmet_ns *ns;
	struct nvmet_ctrl *ctrl;

	status = NVME_SC_SUCCESS;
	ctrl = req->sq->ctrl;

	rcu_read_lock();
	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
		data_units_read +=
			part_stat_read(ns->bdev->bd_part, sectors[READ]);
		host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
		data_units_written +=
			part_stat_read(ns->bdev->bd_part, sectors[WRITE]);
	}
	rcu_read_unlock();

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return status;
}

static u16 nvmet_get_smart_log(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u16 status;

	WARN_ON(req == NULL || slog == NULL);
	if (req->cmd->get_log_page.nsid == 0xFFFFFFFF)
		status = nvmet_get_smart_log_all(req, slog);
	else
		status = nvmet_get_smart_log_nsid(req, slog);
	return status;
}

static void nvmet_execute_get_log_page(struct nvmet_req *req)
{
	struct nvme_smart_log *smart_log;
	size_t data_len = nvmet_get_log_page_len(req->cmd);
	void *buf;
	u16 status = 0;

	buf = kzalloc(data_len, GFP_KERNEL);
	if (!buf) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	switch (req->cmd->get_log_page.lid) {
	case 0x01:
		/*
		 * We currently never set the More bit in the status field,
		 * so all error log entries are invalid and can be zeroed out.
		 * This is called a minimum viable implementation (TM) of this
		 * mandatory log page.
		 */
		break;
	case 0x02:
		/*
		 * XXX: fill out actual smart log
		 *
		 * We might have a hard time coming up with useful values for
		 * many of the fields, and even when we have useful data
		 * available (e.g. units or commands read/written) those aren't
		 * persistent over power loss.
		 */
		if (data_len != sizeof(*smart_log)) {
			status = NVME_SC_INTERNAL;
			goto err;
		}
		smart_log = buf;
		status = nvmet_get_smart_log(req, smart_log);
		if (status) {
			memset(buf, '\0', data_len);
			goto err;
		}
		break;
	case 0x03:
		/*
		 * We only support a single firmware slot which always is
		 * active, so we can zero out the whole firmware slot log and
		 * still claim to fully implement this mandatory log page.
		 */
		break;
	default:
		BUG();
	}

	status = nvmet_copy_to_sgl(req, 0, buf, data_len);

err:
	kfree(buf);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl *id;
	u16 status = 0;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* XXX: figure out how to assign real vendor IDs. */
	id->vid = 0;
	id->ssvid = 0;

	memset(id->sn, ' ', sizeof(id->sn));
	snprintf(id->sn, sizeof(id->sn), "%llx", ctrl->serial);

	memset(id->mn, ' ', sizeof(id->mn));
	strncpy((char *)id->mn, "Linux", sizeof(id->mn));

	memset(id->fr, ' ', sizeof(id->fr));
	strncpy((char *)id->fr, UTS_RELEASE, sizeof(id->fr));

	id->rab = 6;

	/*
	 * XXX: figure out how we can assign an IEEE OUI, but until then
	 * the safest is to leave it as zeroes.
	 */

	/* we support multiple ports and multiple hosts: */
	id->cmic = (1 << 0) | (1 << 1);

	/* no limit on data transfer sizes for now */
	id->mdts = 0;
	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/* XXX: figure out what to do about RTD3R/RTD3 */
	id->oaes = cpu_to_le32(1 << 8);
	id->ctratt = cpu_to_le32(1 << 0);

	id->oacs = 0;

	/*
	 * We don't really have a practical limit on the number of abort
	 * commands.  But we don't do anything useful for abort either, so
	 * no point in allowing more abort commands than the spec requires.
	 */
	id->acl = 3;

	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* first slot is read-only, only one slot supported */
	id->frmw = (1 << 0) | (1 << 1);
	id->lpa = (1 << 0) | (1 << 2);
	id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
	id->npss = 0;

	/* We support keep-alive timeout in granularity of seconds */
	id->kas = cpu_to_le16(NVMET_KAS);

	id->sqes = (0x6 << 4) | 0x6;
	id->cqes = (0x4 << 4) | 0x4;

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
			NVME_CTRL_ONCS_WRITE_ZEROES);

	/* XXX: don't report vwc if the underlying device is write through */
	id->vwc = NVME_CTRL_VWC_PRESENT;

	/*
	 * We can't support atomic writes bigger than an LBA without support
	 * from the backend device.
	 */
	id->awun = 0;
	id->awupf = 0;

	id->sgls = cpu_to_le32(1 << 0);	/* we always support SGLs */
	if (ctrl->ops->has_keyed_sgls)
		id->sgls |= cpu_to_le32(1 << 2);
	if (ctrl->ops->sqe_inline_size)
		id->sgls |= cpu_to_le32(1 << 20);

	strcpy(id->subnqn, ctrl->subsys->subsysnqn);

	/* Max command capsule size is sqe + single page of in-capsule data */
	id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
				  ctrl->ops->sqe_inline_size) / 16);
	/* Max response capsule size is cqe */
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

	id->msdbd = ctrl->ops->msdbd;

	/*
	 * Meh, we don't really support any power state.  Fake up the same
	 * values that qemu does.
	 */
	id->psd[0].max_power = cpu_to_le16(0x9c4);
	id->psd[0].entry_lat = cpu_to_le32(0x10);
	id->psd[0].exit_lat = cpu_to_le32(0x4);

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
	struct nvmet_ns *ns;
	struct nvme_id_ns *id;
	u16 status = 0;

	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
	if (!ns) {
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out_put_ns;
	}

	/*
	 * nuse = ncap = nsze isn't always true, but we have no way to find
	 * that out from the underlying device.
	 */
	id->ncap = id->nuse = id->nsze =
		cpu_to_le64(ns->size >> ns->blksize_shift);

	/*
	 * We just provide a single LBA format that matches what the
	 * underlying device reports.
	 */
	id->nlbaf = 0;
	id->flbas = 0;

	/*
	 * Our namespace might always be shared.  Not just with other
	 * controllers, but also with any other user of the block device.
	 */
	id->nmic = (1 << 0);

	memcpy(&id->nguid, &ns->nguid, sizeof(uuid_le));

	id->lbaf[0].ds = ns->blksize_shift;

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out_put_ns:
	nvmet_put_namespace(ns);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_nslist(struct nvmet_req *req)
{
	static const int buf_size = 4096;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
	__le32 *list;
	u16 status = 0;
	int i = 0;

	list = kzalloc(buf_size, GFP_KERNEL);
	if (!list) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		if (ns->nsid <= min_nsid)
			continue;
		list[i++] = cpu_to_le32(ns->nsid);
		if (i == buf_size / sizeof(__le32))
			break;
	}
	rcu_read_unlock();

	status = nvmet_copy_to_sgl(req, 0, list, buf_size);

	kfree(list);
out:
	nvmet_req_complete(req, status);
}

/*
 * A "minimum viable" abort implementation: the command is mandatory in the
 * spec, but we are not required to do any useful work.  We couldn't really
 * do a useful abort, so don't even bother waiting for the command to be
 * executed; return immediately indicating that the command to abort
 * wasn't found.
 */
static void nvmet_execute_abort(struct nvmet_req *req)
{
	nvmet_set_result(req, 1);
	nvmet_req_complete(req, 0);
}

static void nvmet_execute_set_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
	u32 val32;
	u16 status = 0;

	switch (cdw10 & 0xf) {
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		val32 = le32_to_cpu(req->cmd->common.cdw10[1]);
		req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
		nvmet_set_result(req, req->sq->ctrl->kato);
		break;
	default:
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
	u16 status = 0;

	switch (cdw10 & 0xf) {
	/*
	 * These features are mandatory in the spec, but we don't
	 * have a useful way to implement them.  We'll eventually
	 * need to come up with some fake values for these.
	 */
#if 0
	case NVME_FEAT_ARBITRATION:
		break;
	case NVME_FEAT_POWER_MGMT:
		break;
	case NVME_FEAT_TEMP_THRESH:
		break;
	case NVME_FEAT_ERR_RECOVERY:
		break;
	case NVME_FEAT_IRQ_COALESCE:
		break;
	case NVME_FEAT_IRQ_CONFIG:
		break;
	case NVME_FEAT_WRITE_ATOMIC:
		break;
	case NVME_FEAT_ASYNC_EVENT:
		break;
#endif
	case NVME_FEAT_VOLATILE_WC:
		nvmet_set_result(req, 1);
		break;
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		nvmet_set_result(req, req->sq->ctrl->kato * 1000);
		break;
	default:
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

static void nvmet_execute_async_event(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
		return;
	}
	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}

static void nvmet_execute_keep_alive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	pr_debug("ctrl %d update keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
	nvmet_req_complete(req, 0);
}

int nvmet_parse_admin_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	req->ns = NULL;

	if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
		pr_err("nvmet: got admin cmd %d while CC.EN == 0\n",
				cmd->common.opcode);
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}
	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
		pr_err("nvmet: got admin cmd %d while CSTS.RDY == 0\n",
				cmd->common.opcode);
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}

	switch (cmd->common.opcode) {
	case nvme_admin_get_log_page:
		req->data_len = nvmet_get_log_page_len(cmd);

		switch (cmd->get_log_page.lid) {
		case 0x01:
		case 0x02:
		case 0x03:
			req->execute = nvmet_execute_get_log_page;
			return 0;
		}
		break;
	case nvme_admin_identify:
		req->data_len = 4096;
		switch (le32_to_cpu(cmd->identify.cns)) {
		case NVME_ID_CNS_NS:
			req->execute = nvmet_execute_identify_ns;
			return 0;
		case NVME_ID_CNS_CTRL:
			req->execute = nvmet_execute_identify_ctrl;
			return 0;
		case NVME_ID_CNS_NS_ACTIVE_LIST:
			req->execute = nvmet_execute_identify_nslist;
			return 0;
		}
		break;
	case nvme_admin_abort_cmd:
		req->execute = nvmet_execute_abort;
		req->data_len = 0;
		return 0;
	case nvme_admin_set_features:
		req->execute = nvmet_execute_set_features;
		req->data_len = 0;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_get_features;
		req->data_len = 0;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		req->data_len = 0;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		req->data_len = 0;
		return 0;
	}

	pr_err("nvmet: unhandled cmd %d\n", cmd->common.opcode);
	return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}