// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe Over Fabrics Target File I/O commands implementation.
 * Copyright (c) 2017-2018 Western Digital Corporation or its
 * affiliates.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/uio.h>
#include <linux/falloc.h>
#include <linux/file.h>
#include "nvmet.h"

#define NVMET_MAX_MPOOL_BVEC	16
#define NVMET_MIN_MPOOL_OBJ	16

void nvmet_file_ns_disable(struct nvmet_ns *ns)
{
	if (ns->file) {
		if (ns->buffered_io)
			flush_workqueue(buffered_io_wq);
		mempool_destroy(ns->bvec_pool);
		ns->bvec_pool = NULL;
		kmem_cache_destroy(ns->bvec_cache);
		ns->bvec_cache = NULL;
		fput(ns->file);
		ns->file = NULL;
	}
}

int nvmet_file_ns_enable(struct nvmet_ns *ns)
{
	int flags = O_RDWR | O_LARGEFILE;
	struct kstat stat;
	int ret;

	if (!ns->buffered_io)
		flags |= O_DIRECT;

	ns->file = filp_open(ns->device_path, flags, 0);
	if (IS_ERR(ns->file)) {
		pr_err("failed to open file %s: (%ld)\n",
				ns->device_path, PTR_ERR(ns->file));
		return PTR_ERR(ns->file);
	}

	ret = vfs_getattr(&ns->file->f_path,
			&stat, STATX_SIZE, AT_STATX_FORCE_SYNC);
	if (ret)
		goto err;

	ns->size = stat.size;
	/*
	 * i_blkbits can be greater than the universally accepted upper bound,
	 * so make sure we export a sane namespace lba_shift.
	 */
	ns->blksize_shift = min_t(u8,
			file_inode(ns->file)->i_blkbits, 12);

	ns->bvec_cache = kmem_cache_create("nvmet-bvec",
			NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec),
			0, SLAB_HWCACHE_ALIGN, NULL);
	if (!ns->bvec_cache) {
		ret = -ENOMEM;
		goto err;
	}

	ns->bvec_pool = mempool_create(NVMET_MIN_MPOOL_OBJ, mempool_alloc_slab,
			mempool_free_slab, ns->bvec_cache);

	if (!ns->bvec_pool) {
		ret = -ENOMEM;
		goto err;
	}

	return ret;
err:
	ns->size = 0;
	ns->blksize_shift = 0;
	nvmet_file_ns_disable(ns);
	return ret;
}

static void nvmet_file_init_bvec(struct bio_vec *bv, struct scatterlist *sg)
{
	bv->bv_page = sg_page(sg);
	bv->bv_offset = sg->offset;
	bv->bv_len = sg->length;
}

static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
		unsigned long nr_segs, size_t count, int ki_flags)
{
	struct kiocb *iocb = &req->f.iocb;
	ssize_t (*call_iter)(struct kiocb *iocb, struct iov_iter *iter);
	struct iov_iter iter;
	int rw;

	if (req->cmd->rw.opcode == nvme_cmd_write) {
		if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
			ki_flags |= IOCB_DSYNC;
		call_iter = req->ns->file->f_op->write_iter;
		rw = WRITE;
	} else {
		call_iter = req->ns->file->f_op->read_iter;
		rw = READ;
	}

	iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count);

	iocb->ki_pos = pos;
	iocb->ki_filp = req->ns->file;
	iocb->ki_flags = ki_flags | iocb_flags(req->ns->file);

	return call_iter(iocb, &iter);
}

static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
{
	struct nvmet_req *req = container_of(iocb, struct nvmet_req, f.iocb);
	u16 status = NVME_SC_SUCCESS;

	if (req->f.bvec != req->inline_bvec) {
		if (likely(req->f.mpool_alloc == false))
			kfree(req->f.bvec);
		else
			mempool_free(req->f.bvec, req->ns->bvec_pool);
	}

	if (unlikely(ret != req->transfer_len))
		status = errno_to_nvme_status(req, ret);
	nvmet_req_complete(req, status);
}

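/*
 * Build a bvec array from the request scatterlist and submit it through
 * the backing file's ->read_iter/->write_iter. Returns true when the
 * request has been handled (queued or completed), false when the caller
 * should retry without IOCB_NOWAIT. If the bvec array came from the
 * mempool and covers more than NVMET_MAX_MPOOL_BVEC segments, the I/O is
 * issued synchronously in NVMET_MAX_MPOOL_BVEC sized chunks.
 */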
static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
{
	ssize_t nr_bvec = req->sg_cnt;
	unsigned long bv_cnt = 0;
	bool is_sync = false;
	size_t len = 0, total_len = 0;
	ssize_t ret = 0;
	loff_t pos;
	int i;
	struct scatterlist *sg;

	if (req->f.mpool_alloc && nr_bvec > NVMET_MAX_MPOOL_BVEC)
		is_sync = true;

	pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
	if (unlikely(pos + req->transfer_len > req->ns->size)) {
		nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
		return true;
	}

	memset(&req->f.iocb, 0, sizeof(struct kiocb));
	for_each_sg(req->sg, sg, req->sg_cnt, i) {
		nvmet_file_init_bvec(&req->f.bvec[bv_cnt], sg);
		len += req->f.bvec[bv_cnt].bv_len;
		total_len += req->f.bvec[bv_cnt].bv_len;
		bv_cnt++;

		WARN_ON_ONCE((nr_bvec - 1) < 0);

		if (unlikely(is_sync) &&
		    (nr_bvec - 1 == 0 || bv_cnt == NVMET_MAX_MPOOL_BVEC)) {
			ret = nvmet_file_submit_bvec(req, pos, bv_cnt, len, 0);
			if (ret < 0)
				goto complete;

			pos += len;
			bv_cnt = 0;
			len = 0;
		}
		nr_bvec--;
	}

	if (WARN_ON_ONCE(total_len != req->transfer_len)) {
		ret = -EIO;
		goto complete;
	}

	if (unlikely(is_sync)) {
		ret = total_len;
		goto complete;
	}

	/*
	 * A NULL ki_complete asks for synchronous execution, which we want
	 * for the IOCB_NOWAIT case.
	 */
	if (!(ki_flags & IOCB_NOWAIT))
		req->f.iocb.ki_complete = nvmet_file_io_done;

	ret = nvmet_file_submit_bvec(req, pos, bv_cnt, total_len, ki_flags);

	switch (ret) {
	case -EIOCBQUEUED:
		return true;
	case -EAGAIN:
		if (WARN_ON_ONCE(!(ki_flags & IOCB_NOWAIT)))
			goto complete;
		return false;
	case -EOPNOTSUPP:
		/*
		 * For file systems returning error -EOPNOTSUPP, handle the
		 * IOCB_NOWAIT case separately and retry without IOCB_NOWAIT.
		 */
		if ((ki_flags & IOCB_NOWAIT))
			return false;
		break;
	}

complete:
	nvmet_file_io_done(&req->f.iocb, ret, 0);
	return true;
}

static void nvmet_file_buffered_io_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

	nvmet_file_execute_io(req, 0);
}

static void nvmet_file_submit_buffered_io(struct nvmet_req *req)
{
	INIT_WORK(&req->f.work, nvmet_file_buffered_io_work);
	queue_work(buffered_io_wq, &req->f.work);
}

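/*
 * Read/Write handler: pick a bvec array for the transfer (the inline
 * array for small transfers, kmalloc_array() for larger ones, and the
 * per-namespace mempool as a last resort under memory pressure), then
 * either attempt a non-blocking IOCB_NOWAIT submission for buffered I/O
 * or issue the I/O directly. Requests that cannot make progress without
 * blocking are bounced to the buffered_io_wq workqueue.
 */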
static void nvmet_file_execute_rw(struct nvmet_req *req)
{
	ssize_t nr_bvec = req->sg_cnt;

	if (!nvmet_check_data_len(req, nvmet_rw_len(req)))
		return;

	if (!req->sg_cnt || !nr_bvec) {
		nvmet_req_complete(req, 0);
		return;
	}

	if (nr_bvec > NVMET_MAX_INLINE_BIOVEC)
		req->f.bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
				GFP_KERNEL);
	else
		req->f.bvec = req->inline_bvec;

	if (unlikely(!req->f.bvec)) {
		/* fallback under memory pressure */
		req->f.bvec = mempool_alloc(req->ns->bvec_pool, GFP_KERNEL);
		req->f.mpool_alloc = true;
	} else
		req->f.mpool_alloc = false;

	if (req->ns->buffered_io) {
		if (likely(!req->f.mpool_alloc) &&
		    nvmet_file_execute_io(req, IOCB_NOWAIT))
			return;
		nvmet_file_submit_buffered_io(req);
	} else
		nvmet_file_execute_io(req, 0);
}

u16 nvmet_file_flush(struct nvmet_req *req)
{
	return errno_to_nvme_status(req, vfs_fsync(req->ns->file, 1));
}

static void nvmet_file_flush_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

	nvmet_req_complete(req, nvmet_file_flush(req));
}

static void nvmet_file_execute_flush(struct nvmet_req *req)
{
	if (!nvmet_check_data_len(req, 0))
		return;
	INIT_WORK(&req->f.work, nvmet_file_flush_work);
	schedule_work(&req->f.work);
}

static void nvmet_file_execute_discard(struct nvmet_req *req)
{
	int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
	struct nvme_dsm_range range;
	loff_t offset, len;
	u16 status = 0;
	int ret;
	int i;

	for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
		status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
					sizeof(range));
		if (status)
			break;

		offset = le64_to_cpu(range.slba) << req->ns->blksize_shift;
		len = le32_to_cpu(range.nlb);
		len <<= req->ns->blksize_shift;
		if (offset + len > req->ns->size) {
			req->error_slba = le64_to_cpu(range.slba);
			status = errno_to_nvme_status(req, -ENOSPC);
			break;
		}

		ret = vfs_fallocate(req->ns->file, mode, offset, len);
		if (ret && ret != -EOPNOTSUPP) {
			req->error_slba = le64_to_cpu(range.slba);
			status = errno_to_nvme_status(req, ret);
			break;
		}
	}

	nvmet_req_complete(req, status);
}

static void nvmet_file_dsm_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

	switch (le32_to_cpu(req->cmd->dsm.attributes)) {
	case NVME_DSMGMT_AD:
		nvmet_file_execute_discard(req);
		return;
	case NVME_DSMGMT_IDR:
	case NVME_DSMGMT_IDW:
	default:
		/* Not supported yet */
		nvmet_req_complete(req, 0);
		return;
	}
}

static void nvmet_file_execute_dsm(struct nvmet_req *req)
{
	if (!nvmet_check_data_len(req, nvmet_dsm_len(req)))
		return;
	INIT_WORK(&req->f.work, nvmet_file_dsm_work);
	schedule_work(&req->f.work);
}

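/*
 * Write Zeroes is implemented by zeroing the target byte range with
 * vfs_fallocate(FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE), run from a
 * work item because the fallocate call may sleep.
 */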
static void nvmet_file_write_zeroes_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
	struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
	int mode = FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE;
	loff_t offset;
	loff_t len;
	int ret;

	offset = le64_to_cpu(write_zeroes->slba) << req->ns->blksize_shift;
	len = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
			req->ns->blksize_shift);

	if (unlikely(offset + len > req->ns->size)) {
		nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
		return;
	}

	ret = vfs_fallocate(req->ns->file, mode, offset, len);
	nvmet_req_complete(req, ret < 0 ? errno_to_nvme_status(req, ret) : 0);
}

static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
{
	if (!nvmet_check_data_len(req, 0))
		return;
	INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
	schedule_work(&req->f.work);
}

u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	switch (cmd->common.opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
		req->execute = nvmet_file_execute_rw;
		return 0;
	case nvme_cmd_flush:
		req->execute = nvmet_file_execute_flush;
		return 0;
	case nvme_cmd_dsm:
		req->execute = nvmet_file_execute_dsm;
		return 0;
	case nvme_cmd_write_zeroes:
		req->execute = nvmet_file_execute_write_zeroes;
		return 0;
	default:
		pr_err("unhandled cmd for file ns %d on qid %d\n",
				cmd->common.opcode, req->sq->qid);
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}