// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe Over Fabrics Target File I/O commands implementation.
 * Copyright (c) 2017-2018 Western Digital Corporation or its
 * affiliates.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/uio.h>
#include <linux/falloc.h>
#include <linux/file.h>
#include <linux/fs.h>
#include "nvmet.h"

#define NVMET_MIN_MPOOL_OBJ		16

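/*
 * Refresh the cached namespace size from the backing file's inode so the
 * capacity reported to the host tracks the file.
 */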
void nvmet_file_ns_revalidate(struct nvmet_ns *ns)
{
	ns->size = i_size_read(ns->file->f_mapping->host);
}

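/*
 * Tear down a file-backed namespace: drain any buffered-io work still
 * queued on buffered_io_wq, then release the bvec mempool and the file
 * reference.
 */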
void nvmet_file_ns_disable(struct nvmet_ns *ns)
{
	if (ns->file) {
		if (ns->buffered_io)
			flush_workqueue(buffered_io_wq);
		mempool_destroy(ns->bvec_pool);
		ns->bvec_pool = NULL;
		fput(ns->file);
		ns->file = NULL;
	}
}

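/*
 * Open the backing file and set up per-namespace resources. The file is
 * opened with O_DIRECT unless the namespace is configured for buffered
 * I/O, and a small bvec mempool is created so that I/O can still make
 * forward progress under memory pressure.
 */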
int nvmet_file_ns_enable(struct nvmet_ns *ns)
{
	int flags = O_RDWR | O_LARGEFILE;
	int ret = 0;

	if (!ns->buffered_io)
		flags |= O_DIRECT;

	ns->file = filp_open(ns->device_path, flags, 0);
	if (IS_ERR(ns->file)) {
		ret = PTR_ERR(ns->file);
		pr_err("failed to open file %s: (%d)\n",
			ns->device_path, ret);
		ns->file = NULL;
		return ret;
	}

	nvmet_file_ns_revalidate(ns);

	/*
	 * i_blkbits can be greater than the universally accepted upper bound,
	 * so make sure we export a sane namespace lba_shift.
	 */
	ns->blksize_shift = min_t(u8,
			file_inode(ns->file)->i_blkbits, 12);

	ns->bvec_pool = mempool_create(NVMET_MIN_MPOOL_OBJ, mempool_alloc_slab,
			mempool_free_slab, nvmet_bvec_cache);

	if (!ns->bvec_pool) {
		ret = -ENOMEM;
		goto err;
	}

	return ret;
err:
	fput(ns->file);
	ns->file = NULL;
	ns->size = 0;
	ns->blksize_shift = 0;
	return ret;
}

static void nvmet_file_init_bvec(struct bio_vec *bv, struct scatterlist *sg)
{
	bv->bv_page = sg_page(sg);
	bv->bv_offset = sg->offset;
	bv->bv_len = sg->length;
}

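/*
 * Build an iov_iter over the bvec array and call the backing file's
 * ->read_iter or ->write_iter directly. A write carrying the FUA bit is
 * submitted with IOCB_DSYNC so the data is durable when it completes.
 */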
static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
		unsigned long nr_segs, size_t count, int ki_flags)
{
	struct kiocb *iocb = &req->f.iocb;
	ssize_t (*call_iter)(struct kiocb *iocb, struct iov_iter *iter);
	struct iov_iter iter;
	int rw;

	if (req->cmd->rw.opcode == nvme_cmd_write) {
		if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
			ki_flags |= IOCB_DSYNC;
		call_iter = req->ns->file->f_op->write_iter;
		rw = ITER_SOURCE;
	} else {
		call_iter = req->ns->file->f_op->read_iter;
		rw = ITER_DEST;
	}

	iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count);

	iocb->ki_pos = pos;
	iocb->ki_filp = req->ns->file;
	iocb->ki_flags = ki_flags | iocb->ki_filp->f_iocb_flags;

	return call_iter(iocb, &iter);
}

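/*
 * Completion path shared by the sync and async cases: free the bvec
 * array unless the inline one was used, then translate a short or failed
 * transfer into an NVMe status code.
 */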
static void nvmet_file_io_done(struct kiocb *iocb, long ret)
{
	struct nvmet_req *req = container_of(iocb, struct nvmet_req, f.iocb);
	u16 status = NVME_SC_SUCCESS;

	if (req->f.bvec != req->inline_bvec) {
		if (likely(req->f.mpool_alloc == false))
			kfree(req->f.bvec);
		else
			mempool_free(req->f.bvec, req->ns->bvec_pool);
	}

	if (unlikely(ret != req->transfer_len))
		status = errno_to_nvme_status(req, ret);
	nvmet_req_complete(req, status);
}

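/*
 * Returns true if the request was executed or queued (completion is then
 * signalled through nvmet_file_io_done()), false if the caller should
 * retry the submission without IOCB_NOWAIT.
 */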
static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
{
	ssize_t nr_bvec = req->sg_cnt;
	unsigned long bv_cnt = 0;
	bool is_sync = false;
	size_t len = 0, total_len = 0;
	ssize_t ret = 0;
	loff_t pos;
	int i;
	struct scatterlist *sg;

	if (req->f.mpool_alloc && nr_bvec > NVMET_MAX_MPOOL_BVEC)
		is_sync = true;

	pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
	if (unlikely(pos + req->transfer_len > req->ns->size)) {
		nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
		return true;
	}

	memset(&req->f.iocb, 0, sizeof(struct kiocb));
	for_each_sg(req->sg, sg, req->sg_cnt, i) {
		nvmet_file_init_bvec(&req->f.bvec[bv_cnt], sg);
		len += req->f.bvec[bv_cnt].bv_len;
		total_len += req->f.bvec[bv_cnt].bv_len;
		bv_cnt++;

		WARN_ON_ONCE((nr_bvec - 1) < 0);

		if (unlikely(is_sync) &&
		    (nr_bvec - 1 == 0 || bv_cnt == NVMET_MAX_MPOOL_BVEC)) {
			ret = nvmet_file_submit_bvec(req, pos, bv_cnt, len, 0);
			if (ret < 0)
				goto complete;

			pos += len;
			bv_cnt = 0;
			len = 0;
		}
		nr_bvec--;
	}

	if (WARN_ON_ONCE(total_len != req->transfer_len)) {
		ret = -EIO;
		goto complete;
	}

	if (unlikely(is_sync)) {
		ret = total_len;
		goto complete;
	}

	/*
	 * A NULL ki_complete asks for synchronous execution, which we want
	 * for the IOCB_NOWAIT case.
	 */
	if (!(ki_flags & IOCB_NOWAIT))
		req->f.iocb.ki_complete = nvmet_file_io_done;

	ret = nvmet_file_submit_bvec(req, pos, bv_cnt, total_len, ki_flags);

	switch (ret) {
	case -EIOCBQUEUED:
		return true;
	case -EAGAIN:
		if (WARN_ON_ONCE(!(ki_flags & IOCB_NOWAIT)))
			goto complete;
		return false;
	case -EOPNOTSUPP:
		/*
		 * Some file systems return -EOPNOTSUPP for an IOCB_NOWAIT
		 * submission; report that back so the request is retried
		 * without IOCB_NOWAIT.
		 */
		if (ki_flags & IOCB_NOWAIT)
			return false;
		break;
	}

complete:
	nvmet_file_io_done(&req->f.iocb, ret);
	return true;
}

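/*
 * Buffered I/O can block on the page cache, so it is executed from
 * buffered_io_wq instead of the submission context.
 */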
static void nvmet_file_buffered_io_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

	nvmet_file_execute_io(req, 0);
}

static void nvmet_file_submit_buffered_io(struct nvmet_req *req)
{
	INIT_WORK(&req->f.work, nvmet_file_buffered_io_work);
	queue_work(buffered_io_wq, &req->f.work);
}

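/*
 * Read/write entry point: use the inline bvec array for small transfers,
 * kmalloc_array() for larger ones, and fall back to the namespace
 * mempool if that allocation fails. Buffered namespaces first try a
 * non-blocking IOCB_NOWAIT submission when the file supports it.
 */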
static void nvmet_file_execute_rw(struct nvmet_req *req)
{
	ssize_t nr_bvec = req->sg_cnt;

	if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))
		return;

	if (!req->sg_cnt || !nr_bvec) {
		nvmet_req_complete(req, 0);
		return;
	}

	if (nr_bvec > NVMET_MAX_INLINE_BIOVEC)
		req->f.bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
				GFP_KERNEL);
	else
		req->f.bvec = req->inline_bvec;

	if (unlikely(!req->f.bvec)) {
		/* fallback under memory pressure */
		req->f.bvec = mempool_alloc(req->ns->bvec_pool, GFP_KERNEL);
		req->f.mpool_alloc = true;
	} else
		req->f.mpool_alloc = false;

	if (req->ns->buffered_io) {
		if (likely(!req->f.mpool_alloc) &&
		    (req->ns->file->f_mode & FMODE_NOWAIT) &&
		    nvmet_file_execute_io(req, IOCB_NOWAIT))
			return;
		nvmet_file_submit_buffered_io(req);
	} else
		nvmet_file_execute_io(req, 0);
}

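/*
 * An NVMe flush maps to a data-integrity sync of the whole backing file
 * (vfs_fsync() with datasync set).
 */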
u16 nvmet_file_flush(struct nvmet_req *req)
{
	return errno_to_nvme_status(req, vfs_fsync(req->ns->file, 1));
}

static void nvmet_file_flush_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

	nvmet_req_complete(req, nvmet_file_flush(req));
}

static void nvmet_file_execute_flush(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, 0))
		return;
	INIT_WORK(&req->f.work, nvmet_file_flush_work);
	queue_work(nvmet_wq, &req->f.work);
}

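/*
 * Deallocate each DSM range by punching a hole in the backing file.
 * -EOPNOTSUPP is tolerated so that discards still succeed on file
 * systems without hole punching support.
 */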
static void nvmet_file_execute_discard(struct nvmet_req *req)
{
	int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
	struct nvme_dsm_range range;
	loff_t offset, len;
	u16 status = 0;
	int ret;
	int i;

	for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
		status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
					sizeof(range));
		if (status)
			break;

		offset = le64_to_cpu(range.slba) << req->ns->blksize_shift;
		len = le32_to_cpu(range.nlb);
		len <<= req->ns->blksize_shift;
		if (offset + len > req->ns->size) {
			req->error_slba = le64_to_cpu(range.slba);
			status = errno_to_nvme_status(req, -ENOSPC);
			break;
		}

		ret = vfs_fallocate(req->ns->file, mode, offset, len);
		if (ret && ret != -EOPNOTSUPP) {
			req->error_slba = le64_to_cpu(range.slba);
			status = errno_to_nvme_status(req, ret);
			break;
		}
	}

	nvmet_req_complete(req, status);
}

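/*
 * Only the deallocate (AD) attribute does real work; the other dataset
 * management attributes are completed successfully as no-ops.
 */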
static void nvmet_file_dsm_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

	switch (le32_to_cpu(req->cmd->dsm.attributes)) {
	case NVME_DSMGMT_AD:
		nvmet_file_execute_discard(req);
		return;
	case NVME_DSMGMT_IDR:
	case NVME_DSMGMT_IDW:
	default:
		/* Not supported yet */
		nvmet_req_complete(req, 0);
		return;
	}
}

static void nvmet_file_execute_dsm(struct nvmet_req *req)
{
	if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
		return;
	INIT_WORK(&req->f.work, nvmet_file_dsm_work);
	queue_work(nvmet_wq, &req->f.work);
}

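/*
 * Write Zeroes is implemented with fallocate(FALLOC_FL_ZERO_RANGE),
 * executed from nvmet_wq since vfs_fallocate() can sleep.
 */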
static void nvmet_file_write_zeroes_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
	struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
	int mode = FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE;
	loff_t offset;
	loff_t len;
	int ret;

	offset = le64_to_cpu(write_zeroes->slba) << req->ns->blksize_shift;
	len = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
			req->ns->blksize_shift);

	if (unlikely(offset + len > req->ns->size)) {
		nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
		return;
	}

	ret = vfs_fallocate(req->ns->file, mode, offset, len);
	nvmet_req_complete(req, ret < 0 ? errno_to_nvme_status(req, ret) : 0);
}

static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, 0))
		return;
	INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
	queue_work(nvmet_wq, &req->f.work);
}

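/*
 * Set up the execute handler for the supported I/O opcodes; anything
 * else is rejected as an invalid opcode.
 */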
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
{
	switch (req->cmd->common.opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
		req->execute = nvmet_file_execute_rw;
		return 0;
	case nvme_cmd_flush:
		req->execute = nvmet_file_execute_flush;
		return 0;
	case nvme_cmd_dsm:
		req->execute = nvmet_file_execute_dsm;
		return 0;
	case nvme_cmd_write_zeroes:
		req->execute = nvmet_file_execute_write_zeroes;
		return 0;
	default:
		return nvmet_report_invalid_opcode(req);
	}
}