// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe Over Fabrics Target File I/O commands implementation.
 * Copyright (c) 2017-2018 Western Digital Corporation or its
 * affiliates.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/uio.h>
#include <linux/falloc.h>
#include <linux/file.h>
#include "nvmet.h"

#define NVMET_MAX_MPOOL_BVEC		16
#define NVMET_MIN_MPOOL_OBJ		16

void nvmet_file_ns_disable(struct nvmet_ns *ns)
{
	if (ns->file) {
		if (ns->buffered_io)
			flush_workqueue(buffered_io_wq);
		mempool_destroy(ns->bvec_pool);
		ns->bvec_pool = NULL;
		kmem_cache_destroy(ns->bvec_cache);
		ns->bvec_cache = NULL;
		fput(ns->file);
		ns->file = NULL;
	}
}

int nvmet_file_ns_enable(struct nvmet_ns *ns)
{
	int flags = O_RDWR | O_LARGEFILE;
	struct kstat stat;
	int ret;

	if (!ns->buffered_io)
		flags |= O_DIRECT;

	ns->file = filp_open(ns->device_path, flags, 0);
	if (IS_ERR(ns->file)) {
		pr_err("failed to open file %s: (%ld)\n",
				ns->device_path, PTR_ERR(ns->file));
		return PTR_ERR(ns->file);
	}

	ret = vfs_getattr(&ns->file->f_path,
			&stat, STATX_SIZE, AT_STATX_FORCE_SYNC);
	if (ret)
		goto err;

	ns->size = stat.size;
	ns->blksize_shift = file_inode(ns->file)->i_blkbits;

	ns->bvec_cache = kmem_cache_create("nvmet-bvec",
			NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec),
			0, SLAB_HWCACHE_ALIGN, NULL);
	if (!ns->bvec_cache) {
		ret = -ENOMEM;
		goto err;
	}

	ns->bvec_pool = mempool_create(NVMET_MIN_MPOOL_OBJ, mempool_alloc_slab,
			mempool_free_slab, ns->bvec_cache);

	if (!ns->bvec_pool) {
		ret = -ENOMEM;
		goto err;
	}

	return ret;
err:
	ns->size = 0;
	ns->blksize_shift = 0;
	nvmet_file_ns_disable(ns);
	return ret;
}

static void nvmet_file_init_bvec(struct bio_vec *bv, struct sg_page_iter *iter)
{
	bv->bv_page = sg_page_iter_page(iter);
	bv->bv_offset = iter->sg->offset;
	bv->bv_len = PAGE_SIZE - iter->sg->offset;
}

static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
		unsigned long nr_segs, size_t count, int ki_flags)
{
	struct kiocb *iocb = &req->f.iocb;
	ssize_t (*call_iter)(struct kiocb *iocb, struct iov_iter *iter);
	struct iov_iter iter;
	int rw;

	if (req->cmd->rw.opcode == nvme_cmd_write) {
		if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
			ki_flags |= IOCB_DSYNC;
		call_iter = req->ns->file->f_op->write_iter;
		rw = WRITE;
	} else {
		call_iter = req->ns->file->f_op->read_iter;
		rw = READ;
	}

	iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count);

	iocb->ki_pos = pos;
	iocb->ki_filp = req->ns->file;
	iocb->ki_flags = ki_flags | iocb_flags(req->ns->file);

	return call_iter(iocb, &iter);
}
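
/*
 * Completion handler shared by the synchronous and AIO paths: release the
 * bio_vec array unless it is the inline one embedded in the request
 * (kfree() for a kmalloc'd array, mempool_free() for the mempool-backed
 * fallback, matching how nvmet_file_execute_rw() allocated it), then
 * complete the request. A short read or write (ret != req->data_len) is
 * reported to the host as an internal error.
 */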
static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
{
	struct nvmet_req *req = container_of(iocb, struct nvmet_req, f.iocb);

	if (req->f.bvec != req->inline_bvec) {
		if (likely(!req->f.mpool_alloc))
			kfree(req->f.bvec);
		else
			mempool_free(req->f.bvec, req->ns->bvec_pool);
	}

	nvmet_req_complete(req, ret != req->data_len ?
			NVME_SC_INTERNAL | NVME_SC_DNR : 0);
}

static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
{
	ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE);
	struct sg_page_iter sg_pg_iter;
	unsigned long bv_cnt = 0;
	bool is_sync = false;
	size_t len = 0, total_len = 0;
	ssize_t ret = 0;
	loff_t pos;

	if (req->f.mpool_alloc && nr_bvec > NVMET_MAX_MPOOL_BVEC)
		is_sync = true;

	pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
	if (unlikely(pos + req->data_len > req->ns->size)) {
		nvmet_req_complete(req, NVME_SC_LBA_RANGE | NVME_SC_DNR);
		return true;
	}

	memset(&req->f.iocb, 0, sizeof(struct kiocb));
	for_each_sg_page(req->sg, &sg_pg_iter, req->sg_cnt, 0) {
		nvmet_file_init_bvec(&req->f.bvec[bv_cnt], &sg_pg_iter);
		len += req->f.bvec[bv_cnt].bv_len;
		total_len += req->f.bvec[bv_cnt].bv_len;
		bv_cnt++;

		WARN_ON_ONCE((nr_bvec - 1) < 0);

		if (unlikely(is_sync) &&
		    (nr_bvec - 1 == 0 || bv_cnt == NVMET_MAX_MPOOL_BVEC)) {
			ret = nvmet_file_submit_bvec(req, pos, bv_cnt, len, 0);
			if (ret < 0)
				goto complete;

			pos += len;
			bv_cnt = 0;
			len = 0;
		}
		nr_bvec--;
	}

	if (WARN_ON_ONCE(total_len != req->data_len)) {
		ret = -EIO;
		goto complete;
	}

	if (unlikely(is_sync)) {
		ret = total_len;
		goto complete;
	}

	/*
	 * A NULL ki_complete asks for synchronous execution, which we want
	 * for the IOCB_NOWAIT case.
	 */
	if (!(ki_flags & IOCB_NOWAIT))
		req->f.iocb.ki_complete = nvmet_file_io_done;

	ret = nvmet_file_submit_bvec(req, pos, bv_cnt, total_len, ki_flags);

	switch (ret) {
	case -EIOCBQUEUED:
		return true;
	case -EAGAIN:
		if (WARN_ON_ONCE(!(ki_flags & IOCB_NOWAIT)))
			goto complete;
		return false;
	case -EOPNOTSUPP:
		/*
		 * For file systems returning error -EOPNOTSUPP, handle
		 * IOCB_NOWAIT error case separately and retry without
		 * IOCB_NOWAIT.
		 */
		if ((ki_flags & IOCB_NOWAIT))
			return false;
		break;
	}

complete:
	nvmet_file_io_done(&req->f.iocb, ret, 0);
	return true;
}
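
/*
 * Buffered I/O that cannot complete without blocking (IOCB_NOWAIT came
 * back with -EAGAIN or -EOPNOTSUPP above, making nvmet_file_execute_io()
 * return false) is retried from the buffered_io_wq workqueue, where
 * nvmet_file_execute_io() is free to sleep.
 */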
static void nvmet_file_buffered_io_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

	nvmet_file_execute_io(req, 0);
}

static void nvmet_file_submit_buffered_io(struct nvmet_req *req)
{
	INIT_WORK(&req->f.work, nvmet_file_buffered_io_work);
	queue_work(buffered_io_wq, &req->f.work);
}

static void nvmet_file_execute_rw(struct nvmet_req *req)
{
	ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE);

	if (!req->sg_cnt || !nr_bvec) {
		nvmet_req_complete(req, 0);
		return;
	}

	if (nr_bvec > NVMET_MAX_INLINE_BIOVEC)
		req->f.bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
				GFP_KERNEL);
	else
		req->f.bvec = req->inline_bvec;

	if (unlikely(!req->f.bvec)) {
		/* fallback under memory pressure */
		req->f.bvec = mempool_alloc(req->ns->bvec_pool, GFP_KERNEL);
		req->f.mpool_alloc = true;
	} else
		req->f.mpool_alloc = false;

	if (req->ns->buffered_io) {
		if (likely(!req->f.mpool_alloc) &&
		    nvmet_file_execute_io(req, IOCB_NOWAIT))
			return;
		nvmet_file_submit_buffered_io(req);
	} else
		nvmet_file_execute_io(req, 0);
}

u16 nvmet_file_flush(struct nvmet_req *req)
{
	if (vfs_fsync(req->ns->file, 1) < 0)
		return NVME_SC_INTERNAL | NVME_SC_DNR;
	return 0;
}

static void nvmet_file_flush_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

	nvmet_req_complete(req, nvmet_file_flush(req));
}

static void nvmet_file_execute_flush(struct nvmet_req *req)
{
	INIT_WORK(&req->f.work, nvmet_file_flush_work);
	schedule_work(&req->f.work);
}

static void nvmet_file_execute_discard(struct nvmet_req *req)
{
	int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
	struct nvme_dsm_range range;
	loff_t offset, len;
	u16 ret;
	int i;

	for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
		ret = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
					sizeof(range));
		if (ret)
			break;

		offset = le64_to_cpu(range.slba) << req->ns->blksize_shift;
		len = le32_to_cpu(range.nlb);
		len <<= req->ns->blksize_shift;
		if (offset + len > req->ns->size) {
			ret = NVME_SC_LBA_RANGE | NVME_SC_DNR;
			break;
		}

		if (vfs_fallocate(req->ns->file, mode, offset, len)) {
			ret = NVME_SC_INTERNAL | NVME_SC_DNR;
			break;
		}
	}

	nvmet_req_complete(req, ret);
}

static void nvmet_file_dsm_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

	switch (le32_to_cpu(req->cmd->dsm.attributes)) {
	case NVME_DSMGMT_AD:
		nvmet_file_execute_discard(req);
		return;
	case NVME_DSMGMT_IDR:
	case NVME_DSMGMT_IDW:
	default:
		/* Not supported yet */
		nvmet_req_complete(req, 0);
		return;
	}
}

static void nvmet_file_execute_dsm(struct nvmet_req *req)
{
	INIT_WORK(&req->f.work, nvmet_file_dsm_work);
	schedule_work(&req->f.work);
}
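
/*
 * Write Zeroes maps to vfs_fallocate(FALLOC_FL_ZERO_RANGE); like Flush
 * and DSM it runs from a work item so it can block in process context.
 * The NLB field in the command is 0's based, hence the "+ 1" below.
 */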
static void nvmet_file_write_zeroes_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
	struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
	int mode = FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE;
	loff_t offset;
	loff_t len;
	int ret;

	offset = le64_to_cpu(write_zeroes->slba) << req->ns->blksize_shift;
	len = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
			req->ns->blksize_shift);

	if (unlikely(offset + len > req->ns->size)) {
		nvmet_req_complete(req, NVME_SC_LBA_RANGE | NVME_SC_DNR);
		return;
	}

	ret = vfs_fallocate(req->ns->file, mode, offset, len);
	nvmet_req_complete(req, ret < 0 ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);
}

static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
{
	INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
	schedule_work(&req->f.work);
}

u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	switch (cmd->common.opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
		req->execute = nvmet_file_execute_rw;
		req->data_len = nvmet_rw_len(req);
		return 0;
	case nvme_cmd_flush:
		req->execute = nvmet_file_execute_flush;
		req->data_len = 0;
		return 0;
	case nvme_cmd_dsm:
		req->execute = nvmet_file_execute_dsm;
		req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
				sizeof(struct nvme_dsm_range);
		return 0;
	case nvme_cmd_write_zeroes:
		req->execute = nvmet_file_execute_write_zeroes;
		req->data_len = 0;
		return 0;
	default:
		pr_err("unhandled cmd for file ns %d on qid %d\n",
				cmd->common.opcode, req->sq->qid);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}