// SPDX-License-Identifier: GPL-2.0
/*
 * Discovery service for the NVMe over Fabrics target.
 * Copyright (C) 2016 Intel Corporation. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/slab.h>
#include <generated/utsrelease.h>
#include "nvmet.h"

struct nvmet_subsys *nvmet_disc_subsys;

static u64 nvmet_genctr;

static void __nvmet_disc_changed(struct nvmet_port *port,
				 struct nvmet_ctrl *ctrl)
{
	if (ctrl->port != port)
		return;

	if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_DISC_CHANGE))
		return;

	nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
			      NVME_AER_NOTICE_DISC_CHANGED, NVME_LOG_DISC);
}

void nvmet_port_disc_changed(struct nvmet_port *port,
			     struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	lockdep_assert_held(&nvmet_config_sem);
	nvmet_genctr++;

	mutex_lock(&nvmet_disc_subsys->lock);
	list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) {
		if (subsys && !nvmet_host_allowed(subsys, ctrl->hostnqn))
			continue;

		__nvmet_disc_changed(port, ctrl);
	}
	mutex_unlock(&nvmet_disc_subsys->lock);

	/* If transport can signal change, notify transport */
	if (port->tr_ops && port->tr_ops->discovery_chg)
		port->tr_ops->discovery_chg(port);
}

static void __nvmet_subsys_disc_changed(struct nvmet_port *port,
					struct nvmet_subsys *subsys,
					struct nvmet_host *host)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&nvmet_disc_subsys->lock);
	list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) {
		if (host && strcmp(nvmet_host_name(host), ctrl->hostnqn))
			continue;

		__nvmet_disc_changed(port, ctrl);
	}
	mutex_unlock(&nvmet_disc_subsys->lock);
}

void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
			       struct nvmet_host *host)
{
	struct nvmet_port *port;
	struct nvmet_subsys_link *s;

	lockdep_assert_held(&nvmet_config_sem);
	nvmet_genctr++;

	list_for_each_entry(port, nvmet_ports, global_entry)
		list_for_each_entry(s, &port->subsystems, entry) {
			if (s->subsys != subsys)
				continue;
			__nvmet_subsys_disc_changed(port, subsys, host);
		}
}

void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port)
{
	down_write(&nvmet_config_sem);
	if (list_empty(&port->entry)) {
		list_add_tail(&port->entry, &parent->referrals);
		port->enabled = true;
		nvmet_port_disc_changed(parent, NULL);
	}
	up_write(&nvmet_config_sem);
}

void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port)
{
	down_write(&nvmet_config_sem);
	if (!list_empty(&port->entry)) {
		port->enabled = false;
		list_del_init(&port->entry);
		nvmet_port_disc_changed(parent, NULL);
	}
	up_write(&nvmet_config_sem);
}

static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr,
		struct nvmet_port *port, char *subsys_nqn, char *traddr,
		u8 type, u32 numrec)
{
	struct nvmf_disc_rsp_page_entry *e = &hdr->entries[numrec];

	e->trtype = port->disc_addr.trtype;
	e->adrfam = port->disc_addr.adrfam;
	e->treq = port->disc_addr.treq;
	e->portid = port->disc_addr.portid;
	/* we support only dynamic controllers */
	e->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC);
	e->asqsz = cpu_to_le16(NVME_AQ_DEPTH);
	e->subtype = type;
	memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE);
	memcpy(e->traddr, traddr, NVMF_TRADDR_SIZE);
	memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE);
	strncpy(e->subnqn, subsys_nqn, NVMF_NQN_SIZE);
}
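
/*
 * Illustrative example (hypothetical values): a subsystem exported over
 * RDMA at 192.168.1.1:4420 with port ID 1 produces an entry with
 * trtype = NVMF_TRTYPE_RDMA, traddr = "192.168.1.1", trsvcid = "4420",
 * portid = 1, cntlid = NVME_CNTLID_DYNAMIC and subnqn set to the
 * subsystem NQN; a host uses these fields verbatim to build its
 * connect command for the referenced port.
 */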

/*
 * nvmet_set_disc_traddr - set a correct discovery log entry traddr
 *
 * IP based transports (e.g. RDMA) can listen on "any" ipv4/ipv6 address
 * (INADDR_ANY or IN6ADDR_ANY_INIT). The discovery log page traddr reply
 * must not contain that "any" IP address. If the transport implements
 * .disc_traddr, use it: the callback sets the discovery traddr from the
 * req->port address when the port in question listens on an "any" IP
 * address.
 */
static void nvmet_set_disc_traddr(struct nvmet_req *req, struct nvmet_port *port,
		char *traddr)
{
	if (req->ops->disc_traddr)
		req->ops->disc_traddr(req, port, traddr);
	else
		memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
}

static size_t discovery_log_entries(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_subsys_link *p;
	struct nvmet_port *r;
	size_t entries = 0;

	list_for_each_entry(p, &req->port->subsystems, entry) {
		if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
			continue;
		entries++;
	}
	list_for_each_entry(r, &req->port->referrals, entry)
		entries++;
	return entries;
}
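
/*
 * Sizing sketch (worked example): both the response header and each
 * nvmf_disc_rsp_page_entry are 1024 bytes, so a port exporting two
 * allowed subsystems plus one referral makes discovery_log_entries()
 * return 3 and the buffer allocated below come to
 * 1024 + 3 * 1024 = 4096 bytes.
 */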

static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
{
	const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry);
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmf_disc_rsp_page_hdr *hdr;
	u64 offset = nvmet_get_log_page_offset(req->cmd);
	size_t data_len = nvmet_get_log_page_len(req->cmd);
	size_t alloc_len;
	struct nvmet_subsys_link *p;
	struct nvmet_port *r;
	u32 numrec = 0;
	u16 status = 0;
	void *buffer;

	if (!nvmet_check_transfer_len(req, data_len))
		return;

	if (req->cmd->get_log_page.lid != NVME_LOG_DISC) {
		req->error_loc =
			offsetof(struct nvme_get_log_page_command, lid);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	/* Spec requires dword aligned offsets */
	if (offset & 0x3) {
		req->error_loc =
			offsetof(struct nvme_get_log_page_command, lpo);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	/*
	 * Make sure we're allocating at least a buffer of response header
	 * size. If the host-provided data length is less than the header
	 * size, only the number of bytes requested by the host is sent back.
	 */
	down_read(&nvmet_config_sem);
	alloc_len = sizeof(*hdr) + entry_size * discovery_log_entries(req);
	buffer = kzalloc(alloc_len, GFP_KERNEL);
	if (!buffer) {
		up_read(&nvmet_config_sem);
		status = NVME_SC_INTERNAL;
		goto out;
	}

	hdr = buffer;
	list_for_each_entry(p, &req->port->subsystems, entry) {
		char traddr[NVMF_TRADDR_SIZE];

		if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
			continue;

		nvmet_set_disc_traddr(req, req->port, traddr);
		nvmet_format_discovery_entry(hdr, req->port,
				p->subsys->subsysnqn, traddr,
				NVME_NQN_NVME, numrec);
		numrec++;
	}

	list_for_each_entry(r, &req->port->referrals, entry) {
		nvmet_format_discovery_entry(hdr, r,
				NVME_DISC_SUBSYS_NAME,
				r->disc_addr.traddr,
				NVME_NQN_DISC, numrec);
		numrec++;
	}

	hdr->genctr = cpu_to_le64(nvmet_genctr);
	hdr->numrec = cpu_to_le64(numrec);
	hdr->recfmt = cpu_to_le16(0);

	nvmet_clear_aen_bit(req, NVME_AEN_BIT_DISC_CHANGE);

	up_read(&nvmet_config_sem);

	/*
	 * Both offset and data_len are host-controlled; never read past
	 * the end of the allocated log buffer.
	 */
	if (offset > alloc_len)
		data_len = 0;
	else if (offset + data_len > alloc_len)
		data_len = alloc_len - offset;

	status = nvmet_copy_to_sgl(req, 0, buffer + offset, data_len);
	kfree(buffer);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_disc_identify(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl *id;
	const char model[] = "Linux";
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
		return;

	if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) {
		req->error_loc = offsetof(struct nvme_identify, cns);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	memset(id->sn, ' ', sizeof(id->sn));
	bin2hex(id->sn, &ctrl->subsys->serial,
		min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
	memset(id->fr, ' ', sizeof(id->fr));
	memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' ');
	memcpy_and_pad(id->fr, sizeof(id->fr),
		       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

	/* no limit on data transfer sizes for now */
	id->mdts = 0;
	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);
	/* bit 2: Get Log Page supports extended data (log page offset) */
	id->lpa = (1 << 2);

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	id->oaes = cpu_to_le32(NVMET_DISC_AEN_CFG_OPTIONAL);

	strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_disc_set_features(struct nvmet_req *req)
{
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 stat;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_KATO:
		stat = nvmet_set_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		stat = nvmet_set_feat_async_event(req,
						  NVMET_DISC_AEN_CFG_OPTIONAL);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, stat);
}
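
/*
 * Per the NVMe over Fabrics specification, a discovery controller
 * implements only a minimal feature set: Keep Alive Timer and
 * Asynchronous Event Configuration are the only features accepted
 * above and reported below.
 */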
static void nvmet_execute_disc_get_features(struct nvmet_req *req)
{
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 stat = 0;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_KATO:
		nvmet_get_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_get_feat_async_event(req);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, stat);
}

u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
		pr_err("got cmd %d while not ready\n",
		       cmd->common.opcode);
		req->error_loc =
			offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}

	switch (cmd->common.opcode) {
	case nvme_admin_set_features:
		req->execute = nvmet_execute_disc_set_features;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_disc_get_features;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		return 0;
	case nvme_admin_get_log_page:
		req->execute = nvmet_execute_disc_get_log_page;
		return 0;
	case nvme_admin_identify:
		req->execute = nvmet_execute_disc_identify;
		return 0;
	default:
		pr_debug("unhandled cmd %d\n", cmd->common.opcode);
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}

int __init nvmet_init_discovery(void)
{
	nvmet_disc_subsys =
		nvmet_subsys_alloc(NVME_DISC_SUBSYS_NAME, NVME_NQN_DISC);
	return PTR_ERR_OR_ZERO(nvmet_disc_subsys);
}

void nvmet_exit_discovery(void)
{
	nvmet_subsys_put(nvmet_disc_subsys);
}
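
/*
 * nvmet_init_discovery() is expected to run from the core target module
 * init path so that nvmet_disc_subsys exists before any port or
 * controller is configured. NVME_DISC_SUBSYS_NAME is the well-known
 * discovery NQN ("nqn.2014-08.org.nvmexpress.discovery") that hosts
 * connect to in order to read this log page.
 */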