/*
 * NVMe I/O command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
#include <linux/module.h>
#include "nvmet.h"

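/*
 * Open the backing block device for a namespace and cache its size and
 * logical block size. -ENOTBLK is deliberately not logged, since the core
 * is expected to fall back to file-backed namespaces in that case.
 */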
int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
{
	int ret;

	ns->bdev = blkdev_get_by_path(ns->device_path,
			FMODE_READ | FMODE_WRITE, NULL);
	if (IS_ERR(ns->bdev)) {
		ret = PTR_ERR(ns->bdev);
		if (ret != -ENOTBLK) {
			pr_err("failed to open block device %s: (%ld)\n",
					ns->device_path, PTR_ERR(ns->bdev));
		}
		ns->bdev = NULL;
		return ret;
	}
	ns->size = i_size_read(ns->bdev->bd_inode);
	ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));
	return 0;
}

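/*
 * Release the backing block device, if one was opened by
 * nvmet_bdev_ns_enable().
 */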
void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
{
	if (ns->bdev) {
		blkdev_put(ns->bdev, FMODE_WRITE | FMODE_READ);
		ns->bdev = NULL;
	}
}

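/*
 * Translate a block layer status code back into an NVMe status code, and
 * record the error location and affected LBA in the request so they can be
 * reported through the error log.
 */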
static u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
{
	u16 status = NVME_SC_SUCCESS;

	if (likely(blk_sts == BLK_STS_OK))
		return status;
	/*
	 * Right now there exists an M : 1 mapping between block layer error
	 * codes and NVMe status codes (see nvme_error_status()). For
	 * consistency, when we reverse map we pick the most appropriate NVMe
	 * status code from the group of codes used in nvme_error_status().
	 */
	switch (blk_sts) {
	case BLK_STS_NOSPC:
		status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
		req->error_loc = offsetof(struct nvme_rw_command, length);
		break;
	case BLK_STS_TARGET:
		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
		req->error_loc = offsetof(struct nvme_rw_command, slba);
		break;
	case BLK_STS_NOTSUPP:
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		switch (req->cmd->common.opcode) {
		case nvme_cmd_dsm:
		case nvme_cmd_write_zeroes:
			status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
			break;
		default:
			status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		}
		break;
	case BLK_STS_MEDIUM:
		status = NVME_SC_ACCESS_DENIED;
		req->error_loc = offsetof(struct nvme_rw_command, nsid);
		break;
	case BLK_STS_IOERR:
		/* fallthru */
	default:
		status = NVME_SC_INTERNAL | NVME_SC_DNR;
		req->error_loc = offsetof(struct nvme_common_command, opcode);
	}

	switch (req->cmd->common.opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
		req->error_slba = le64_to_cpu(req->cmd->rw.slba);
		break;
	case nvme_cmd_write_zeroes:
		req->error_slba =
			le64_to_cpu(req->cmd->write_zeroes.slba);
		break;
	default:
		req->error_slba = 0;
	}
	return status;
}

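/*
 * Completion handler shared by all bios submitted from this file: translate
 * the bio status, complete the request, and drop the bio reference unless
 * it is the inline bio embedded in the request itself.
 */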
static void nvmet_bio_done(struct bio *bio)
{
	struct nvmet_req *req = bio->bi_private;

	nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
	if (bio != &req->b.inline_bio)
		bio_put(bio);
}

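/*
 * Read/write handler: build one or more chained bios directly from the
 * request's scatterlist and submit them to the backing device. Only the
 * parent bio carries the completion callback; chained bios complete
 * through it.
 */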
static void nvmet_bdev_execute_rw(struct nvmet_req *req)
{
	int sg_cnt = req->sg_cnt;
	struct bio *bio;
	struct scatterlist *sg;
	sector_t sector;
	int op, op_flags = 0, i;

	if (!req->sg_cnt) {
		nvmet_req_complete(req, 0);
		return;
	}

	if (req->cmd->rw.opcode == nvme_cmd_write) {
		op = REQ_OP_WRITE;
		op_flags = REQ_SYNC | REQ_IDLE;
		if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
			op_flags |= REQ_FUA;
	} else {
		op = REQ_OP_READ;
	}

	if (is_pci_p2pdma_page(sg_page(req->sg)))
		op_flags |= REQ_NOMERGE;

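	/* Convert the starting LBA from namespace blocks to 512-byte sectors. */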
	sector = le64_to_cpu(req->cmd->rw.slba);
	sector <<= (req->ns->blksize_shift - 9);

	if (req->data_len <= NVMET_MAX_INLINE_DATA_LEN) {
		bio = &req->b.inline_bio;
		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
	} else {
		bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
	}
	bio_set_dev(bio, req->ns->bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_private = req;
	bio->bi_end_io = nvmet_bio_done;
	bio_set_op_attrs(bio, op, op_flags);

	for_each_sg(req->sg, sg, req->sg_cnt, i) {
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			struct bio *prev = bio;

			bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
			bio_set_dev(bio, req->ns->bdev);
			bio->bi_iter.bi_sector = sector;
			bio_set_op_attrs(bio, op, op_flags);

			bio_chain(bio, prev);
			submit_bio(prev);
		}

		sector += sg->length >> 9;
		sg_cnt--;
	}

	submit_bio(bio);
}

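/*
 * Asynchronous handler for the NVMe Flush command: submit an empty
 * preflush bio and complete the request from its completion callback.
 */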
static void nvmet_bdev_execute_flush(struct nvmet_req *req)
{
	struct bio *bio = &req->b.inline_bio;

	bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
	bio_set_dev(bio, req->ns->bdev);
	bio->bi_private = req;
	bio->bi_end_io = nvmet_bio_done;
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

	submit_bio(bio);
}

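/*
 * Synchronous flush helper for callers outside the normal I/O execution
 * path (e.g. the write-protect feature handling in the core).
 */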
u16 nvmet_bdev_flush(struct nvmet_req *req)
{
	if (blkdev_issue_flush(req->ns->bdev, GFP_KERNEL, NULL))
		return NVME_SC_INTERNAL | NVME_SC_DNR;
	return 0;
}

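/*
 * Issue a discard for a single DSM range, converting its LBA and length
 * from namespace blocks to 512-byte sectors. The block layer accumulates
 * the resulting bios in *bio so the whole command completes as one chain.
 */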
static u16 nvmet_bdev_discard_range(struct nvmet_req *req,
		struct nvme_dsm_range *range, struct bio **bio)
{
	struct nvmet_ns *ns = req->ns;
	int ret;

	ret = __blkdev_issue_discard(ns->bdev,
			le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
			le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
			GFP_KERNEL, 0, bio);

	if (ret)
		req->error_slba = le64_to_cpu(range->slba);

	return blk_to_nvme_status(req, errno_to_blk_status(ret));
}

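/*
 * Deallocate handler: copy each DSM range descriptor from the SGL and
 * issue a discard for it (the NR field is 0-based, hence the <= bound).
 * If any bios were built they are submitted, or failed, as one chain;
 * otherwise the request is completed immediately.
 */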
static void nvmet_bdev_execute_discard(struct nvmet_req *req)
{
	struct nvme_dsm_range range;
	struct bio *bio = NULL;
	int i;
	u16 status;

	for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
		status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
				sizeof(range));
		if (status)
			break;

		status = nvmet_bdev_discard_range(req, &range, &bio);
		if (status)
			break;
	}

	if (bio) {
		bio->bi_private = req;
		bio->bi_end_io = nvmet_bio_done;
		if (status) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
		} else {
			submit_bio(bio);
		}
	} else {
		nvmet_req_complete(req, status);
	}
}

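/*
 * Dispatch a Dataset Management command. Only the deallocate attribute is
 * implemented; the integral read/write hints are advisory, so they are
 * completed successfully without action.
 */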
static void nvmet_bdev_execute_dsm(struct nvmet_req *req)
{
	switch (le32_to_cpu(req->cmd->dsm.attributes)) {
	case NVME_DSMGMT_AD:
		nvmet_bdev_execute_discard(req);
		return;
	case NVME_DSMGMT_IDR:
	case NVME_DSMGMT_IDW:
	default:
		/* Not supported yet */
		nvmet_req_complete(req, 0);
		return;
	}
}

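/*
 * Write Zeroes handler: convert the 0-based LBA range to 512-byte sectors
 * and let the block layer build the zeroout bio chain, which completes
 * asynchronously through nvmet_bio_done().
 */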
static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
{
	struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
	struct bio *bio = NULL;
	u16 status = NVME_SC_SUCCESS;
	sector_t sector;
	sector_t nr_sector;
	int ret;

	sector = le64_to_cpu(write_zeroes->slba) <<
		(req->ns->blksize_shift - 9);
	nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
		(req->ns->blksize_shift - 9));

	ret = __blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
			GFP_KERNEL, &bio, 0);
	status = blk_to_nvme_status(req, errno_to_blk_status(ret));
	if (bio) {
		bio->bi_private = req;
		bio->bi_end_io = nvmet_bio_done;
		submit_bio(bio);
	} else {
		nvmet_req_complete(req, status);
	}
}

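/*
 * Parse stage for bdev-backed namespaces: validate the opcode, set the
 * execute handler, and record the expected data transfer length.
 */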
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	switch (cmd->common.opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
		req->execute = nvmet_bdev_execute_rw;
		req->data_len = nvmet_rw_len(req);
		return 0;
	case nvme_cmd_flush:
		req->execute = nvmet_bdev_execute_flush;
		req->data_len = 0;
		return 0;
	case nvme_cmd_dsm:
		req->execute = nvmet_bdev_execute_dsm;
		req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
			sizeof(struct nvme_dsm_range);
		return 0;
	case nvme_cmd_write_zeroes:
		req->execute = nvmet_bdev_execute_write_zeroes;
		req->data_len = 0;
		return 0;
	default:
		pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
		       req->sq->qid);
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}