// SPDX-License-Identifier: GPL-2.0
/*
 * Discovery service for the NVMe over Fabrics target.
 * Copyright (C) 2016 Intel Corporation. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/slab.h>
#include <generated/utsrelease.h>
#include "nvmet.h"

struct nvmet_subsys *nvmet_disc_subsys;

static u64 nvmet_genctr;

static void __nvmet_disc_changed(struct nvmet_port *port,
				 struct nvmet_ctrl *ctrl)
{
	if (ctrl->port != port)
		return;

	if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_DISC_CHANGE))
		return;

	nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
			      NVME_AER_NOTICE_DISC_CHANGED, NVME_LOG_DISC);
}

void nvmet_port_disc_changed(struct nvmet_port *port,
			     struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	nvmet_genctr++;

	list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) {
		if (subsys && !nvmet_host_allowed(subsys, ctrl->hostnqn))
			continue;

		__nvmet_disc_changed(port, ctrl);
	}
}

static void __nvmet_subsys_disc_changed(struct nvmet_port *port,
					struct nvmet_subsys *subsys,
					struct nvmet_host *host)
{
	struct nvmet_ctrl *ctrl;

	list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) {
		if (host && strcmp(nvmet_host_name(host), ctrl->hostnqn))
			continue;

		__nvmet_disc_changed(port, ctrl);
	}
}

void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
			       struct nvmet_host *host)
{
	struct nvmet_port *port;
	struct nvmet_subsys_link *s;

	nvmet_genctr++;

	list_for_each_entry(port, nvmet_ports, global_entry)
		list_for_each_entry(s, &port->subsystems, entry) {
			if (s->subsys != subsys)
				continue;
			__nvmet_subsys_disc_changed(port, subsys, host);
		}
}

void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port)
{
	down_write(&nvmet_config_sem);
	if (list_empty(&port->entry)) {
		list_add_tail(&port->entry, &parent->referrals);
		port->enabled = true;
		nvmet_port_disc_changed(parent, NULL);
	}
	up_write(&nvmet_config_sem);
}

void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port)
{
	down_write(&nvmet_config_sem);
	if (!list_empty(&port->entry)) {
		port->enabled = false;
		list_del_init(&port->entry);
		nvmet_port_disc_changed(parent, NULL);
	}
	up_write(&nvmet_config_sem);
}
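
/*
 * Fill one discovery log page entry: transport type/address, port ID,
 * subsystem NQN, and the entry subtype (an NVM subsystem or a referral
 * to another discovery service).  Entries are written at index @numrec
 * in the log page buffer headed by @hdr.
 */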
static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr,
		struct nvmet_port *port, char *subsys_nqn, char *traddr,
		u8 type, u32 numrec)
{
	struct nvmf_disc_rsp_page_entry *e = &hdr->entries[numrec];

	e->trtype = port->disc_addr.trtype;
	e->adrfam = port->disc_addr.adrfam;
	e->treq = port->disc_addr.treq;
	e->portid = port->disc_addr.portid;
	/* we support only dynamic controllers */
	e->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC);
	e->asqsz = cpu_to_le16(NVME_AQ_DEPTH);
	e->subtype = type;
	memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE);
	memcpy(e->traddr, traddr, NVMF_TRADDR_SIZE);
	memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE);
	strncpy(e->subnqn, subsys_nqn, NVMF_NQN_SIZE);
}

/*
 * nvmet_set_disc_traddr - set a correct discovery log entry traddr
 *
 * IP based transports (e.g. RDMA) can listen on "any" ipv4/ipv6 addresses
 * (INADDR_ANY or IN6ADDR_ANY_INIT). The discovery log page traddr reply
 * must not contain that "any" IP address. If the transport implements
 * .disc_traddr, use it; this callback will set the discovery traddr
 * from the req->port address in case the port in question listens on
 * an "any" IP address.
 */
static void nvmet_set_disc_traddr(struct nvmet_req *req, struct nvmet_port *port,
		char *traddr)
{
	if (req->ops->disc_traddr)
		req->ops->disc_traddr(req, port, traddr);
	else
		memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
}

static size_t discovery_log_entries(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_subsys_link *p;
	struct nvmet_port *r;
	size_t entries = 0;

	list_for_each_entry(p, &req->port->subsystems, entry) {
		if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
			continue;
		entries++;
	}
	list_for_each_entry(r, &req->port->referrals, entry)
		entries++;
	return entries;
}

static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
{
	const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry);
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmf_disc_rsp_page_hdr *hdr;
	u64 offset = nvmet_get_log_page_offset(req->cmd);
	size_t data_len = nvmet_get_log_page_len(req->cmd);
	size_t alloc_len;
	struct nvmet_subsys_link *p;
	struct nvmet_port *r;
	u32 numrec = 0;
	u16 status = 0;
	void *buffer;

	/* Spec requires dword aligned offsets */
	if (offset & 0x3) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	/*
	 * Make sure we allocate at least a buffer the size of the response
	 * header.  If the host-provided data length is less than the header
	 * size, only the number of bytes requested by the host is returned.
	 */
	down_read(&nvmet_config_sem);
	alloc_len = sizeof(*hdr) + entry_size * discovery_log_entries(req);
	buffer = kzalloc(alloc_len, GFP_KERNEL);
	if (!buffer) {
		up_read(&nvmet_config_sem);
		status = NVME_SC_INTERNAL;
		goto out;
	}

	hdr = buffer;
	list_for_each_entry(p, &req->port->subsystems, entry) {
		char traddr[NVMF_TRADDR_SIZE];

		if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
			continue;

		nvmet_set_disc_traddr(req, req->port, traddr);
		nvmet_format_discovery_entry(hdr, req->port,
				p->subsys->subsysnqn, traddr,
				NVME_NQN_NVME, numrec);
		numrec++;
	}

	list_for_each_entry(r, &req->port->referrals, entry) {
		nvmet_format_discovery_entry(hdr, r,
				NVME_DISC_SUBSYS_NAME,
				r->disc_addr.traddr,
				NVME_NQN_DISC, numrec);
		numrec++;
	}

	hdr->genctr = cpu_to_le64(nvmet_genctr);
	hdr->numrec = cpu_to_le64(numrec);
	hdr->recfmt = cpu_to_le16(0);

	nvmet_clear_aen_bit(req, NVME_AEN_BIT_DISC_CHANGE);

	up_read(&nvmet_config_sem);

	status = nvmet_copy_to_sgl(req, 0, buffer + offset, data_len);
	kfree(buffer);
out:
	nvmet_req_complete(req, status);
}
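
/*
 * Identify Controller for the discovery controller: only the fields that
 * are meaningful for a discovery controller (firmware revision, cntlid,
 * version, log page attributes, SGL support and the optional discovery
 * AEN) are reported.
 */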
static void nvmet_execute_identify_disc_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl *id;
	u16 status = 0;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	memset(id->fr, ' ', sizeof(id->fr));
	strncpy((char *)id->fr, UTS_RELEASE, sizeof(id->fr));

	/* no limit on data transfer sizes for now */
	id->mdts = 0;
	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);
	id->lpa = (1 << 2);

	/* no enforcement soft limit for maxcmd - pick an arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	id->sgls = cpu_to_le32(1 << 0);	/* we always support SGLs */
	if (ctrl->ops->has_keyed_sgls)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	id->oaes = cpu_to_le32(NVMET_DISC_AEN_CFG_OPTIONAL);

	strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_disc_set_features(struct nvmet_req *req)
{
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 stat;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_KATO:
		stat = nvmet_set_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		stat = nvmet_set_feat_async_event(req,
						  NVMET_DISC_AEN_CFG_OPTIONAL);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, stat);
}

static void nvmet_execute_disc_get_features(struct nvmet_req *req)
{
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 stat = 0;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_KATO:
		nvmet_get_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_get_feat_async_event(req);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, stat);
}
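
/*
 * Discovery controllers implement only a minimal admin command set:
 * Set/Get Features (KATO and async event config only), Asynchronous
 * Event Request, Keep Alive, Get Log Page (discovery log only) and
 * Identify (CNS 01h only).  Everything else is rejected with
 * Invalid Opcode.
 */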
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
		pr_err("got cmd %d while not ready\n",
		       cmd->common.opcode);
		req->error_loc =
			offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}

	switch (cmd->common.opcode) {
	case nvme_admin_set_features:
		req->execute = nvmet_execute_disc_set_features;
		req->data_len = 0;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_disc_get_features;
		req->data_len = 0;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		req->data_len = 0;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		req->data_len = 0;
		return 0;
	case nvme_admin_get_log_page:
		req->data_len = nvmet_get_log_page_len(cmd);

		switch (cmd->get_log_page.lid) {
		case NVME_LOG_DISC:
			req->execute = nvmet_execute_get_disc_log_page;
			return 0;
		default:
			pr_err("unsupported get_log_page lid %d\n",
			       cmd->get_log_page.lid);
			req->error_loc =
				offsetof(struct nvme_get_log_page_command, lid);
			return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		}
	case nvme_admin_identify:
		req->data_len = NVME_IDENTIFY_DATA_SIZE;
		switch (cmd->identify.cns) {
		case NVME_ID_CNS_CTRL:
			req->execute =
				nvmet_execute_identify_disc_ctrl;
			return 0;
		default:
			pr_err("unsupported identify cns %d\n",
			       cmd->identify.cns);
			req->error_loc = offsetof(struct nvme_identify, cns);
			return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		}
	default:
		pr_err("unhandled cmd %d\n", cmd->common.opcode);
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}

int __init nvmet_init_discovery(void)
{
	nvmet_disc_subsys =
		nvmet_subsys_alloc(NVME_DISC_SUBSYS_NAME, NVME_NQN_DISC);
	if (!nvmet_disc_subsys)
		return -ENOMEM;
	return 0;
}

void nvmet_exit_discovery(void)
{
	nvmet_subsys_put(nvmet_disc_subsys);
}
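
/*
 * Note: assuming the usual nvmet module layout, nvmet_init_discovery() is
 * called from the core's module init (nvmet_init() in core.c) before
 * configfs is registered, so the discovery subsystem exists before any
 * port or subsystem can be configured.
 */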