xref: /openbmc/linux/block/bsg-lib.c (revision 1ab142d4)
/*
 *  BSG helper library
 *
 *  Copyright (C) 2008   James Smart, Emulex Corporation
 *  Copyright (C) 2011   Red Hat, Inc.  All rights reserved.
 *  Copyright (C) 2011   Mike Christie
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/bsg-lib.h>
#include <linux/export.h>
#include <scsi/scsi_cmnd.h>

/**
 * bsg_destroy_job - routine to tear down/delete a bsg job
 * @job: bsg_job that is to be torn down
 */
static void bsg_destroy_job(struct bsg_job *job)
{
	put_device(job->dev);	/* release reference for the request */

	kfree(job->request_payload.sg_list);
	kfree(job->reply_payload.sg_list);
	kfree(job);
}

/**
 * bsg_job_done - completion routine for bsg requests
 * @job: bsg_job that is complete
 * @result: job reply result
 * @reply_payload_rcv_len: length of payload received
 *
 * The LLD should call this when the bsg job has completed.
 */
void bsg_job_done(struct bsg_job *job, int result,
		  unsigned int reply_payload_rcv_len)
{
	struct request *req = job->req;
	struct request *rsp = req->next_rq;
	int err;

	err = job->req->errors = result;
	if (err < 0)
		/* we're only returning the result field in the reply */
		job->req->sense_len = sizeof(u32);
	else
		job->req->sense_len = job->reply_len;
	/* we assume all request payload was transferred, residual == 0 */
	req->resid_len = 0;

	if (rsp) {
		WARN_ON(reply_payload_rcv_len > rsp->resid_len);

		/* set reply (bidi) residual */
		rsp->resid_len -= min(reply_payload_rcv_len, rsp->resid_len);
	}
	blk_complete_request(req);
}
EXPORT_SYMBOL_GPL(bsg_job_done);
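
/*
 * Example (illustrative sketch only, not kernel API): a hypothetical
 * LLD's hardware-completion path might finish a bsg job like this,
 * where "foo_evt" and its members are made-up driver names:
 *
 *	static void foo_bsg_complete(struct foo_evt *evt)
 *	{
 *		struct bsg_job *job = evt->job;
 *
 *		job->reply_len = evt->reply_len;
 *		bsg_job_done(job, evt->status ? -EIO : 0, evt->rcv_len);
 *	}
 */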

/**
 * bsg_softirq_done - softirq done routine for destroying the bsg requests
 * @rq: BSG request that holds the job to be destroyed
 */
static void bsg_softirq_done(struct request *rq)
{
	struct bsg_job *job = rq->special;

	blk_end_request_all(rq, rq->errors);
	bsg_destroy_job(job);
}

static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
{
	size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments);

	BUG_ON(!req->nr_phys_segments);

	buf->sg_list = kzalloc(sz, GFP_KERNEL);
	if (!buf->sg_list)
		return -ENOMEM;
	sg_init_table(buf->sg_list, req->nr_phys_segments);
	buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
	buf->payload_len = blk_rq_bytes(req);
	return 0;
}
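
/*
 * Example (illustrative sketch): after the payload has been mapped, an
 * LLD usually DMA-maps buf->sg_list with dma_map_sg() and then walks
 * the table; "foo_queue_segment" is a hypothetical driver helper:
 *
 *	struct scatterlist *sg;
 *	int i;
 *
 *	for_each_sg(job->request_payload.sg_list, sg,
 *		    job->request_payload.sg_cnt, i)
 *		foo_queue_segment(sg_dma_address(sg), sg_dma_len(sg));
 */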

/**
 * bsg_create_job - create the bsg_job structure for the bsg request
 * @dev: device that is being sent the bsg request
 * @req: BSG request that needs a job structure
 */
static int bsg_create_job(struct device *dev, struct request *req)
{
	struct request *rsp = req->next_rq;
	struct request_queue *q = req->q;
	struct bsg_job *job;
	int ret;

	BUG_ON(req->special);

	job = kzalloc(sizeof(struct bsg_job) + q->bsg_job_size, GFP_KERNEL);
	if (!job)
		return -ENOMEM;

	req->special = job;
	job->req = req;
	if (q->bsg_job_size)
		job->dd_data = (void *)&job[1];
	job->request = req->cmd;
	job->request_len = req->cmd_len;
	job->reply = req->sense;
	job->reply_len = SCSI_SENSE_BUFFERSIZE;	/* size of allocated sense buffer */
	if (req->bio) {
		ret = bsg_map_buffer(&job->request_payload, req);
		if (ret)
			goto failjob_rls_job;
	}
	if (rsp && rsp->bio) {
		ret = bsg_map_buffer(&job->reply_payload, rsp);
		if (ret)
			goto failjob_rls_rqst_payload;
	}
	job->dev = dev;
	/* take a reference for the request */
	get_device(job->dev);
	return 0;

failjob_rls_rqst_payload:
	kfree(job->request_payload.sg_list);
failjob_rls_job:
	kfree(job);
	return -ENOMEM;
}
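
/*
 * Illustrative note: the dd_job_size bytes requested at queue setup are
 * allocated directly behind struct bsg_job, so a driver can reach its
 * per-job data with a plain cast ("struct foo_job" is hypothetical):
 *
 *	struct foo_job *fj = job->dd_data;
 */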

/**
 * bsg_goose_queue - restart queue in case it was stopped
 * @q: request queue to be restarted
 */
void bsg_goose_queue(struct request_queue *q)
{
	if (!q)
		return;

	blk_run_queue_async(q);
}
EXPORT_SYMBOL_GPL(bsg_goose_queue);
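
/*
 * Illustrative note: an LLD would typically call bsg_goose_queue() from
 * a resource-release path, so that bsg requests queued while the
 * hardware was busy are dispatched again once room frees up.
 */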

/**
 * bsg_request_fn - generic handler for bsg requests
 * @q: request queue to manage
 *
 * On error, bsg_create_job() should return a -Exyz error value that
 * will be set in req->errors.
 *
 * Drivers/subsys should pass this to the queue init function.
 */
void bsg_request_fn(struct request_queue *q)
{
	struct device *dev = q->queuedata;
	struct request *req;
	struct bsg_job *job;
	int ret;

	if (!get_device(dev))
		return;

	while (1) {
		req = blk_fetch_request(q);
		if (!req)
			break;
		spin_unlock_irq(q->queue_lock);

		ret = bsg_create_job(dev, req);
		if (ret) {
			req->errors = ret;
			blk_end_request_all(req, ret);
			spin_lock_irq(q->queue_lock);
			continue;
		}

		job = req->special;
		ret = q->bsg_job_fn(job);
		spin_lock_irq(q->queue_lock);
		if (ret)
			break;
	}

	spin_unlock_irq(q->queue_lock);
	put_device(dev);
	spin_lock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(bsg_request_fn);
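
/*
 * Example (illustrative sketch): a minimal bsg_job_fn as an LLD might
 * implement it.  The handler hands the command to the hardware and
 * returns 0; the job is completed later via bsg_job_done().  All "foo"
 * names are hypothetical:
 *
 *	static int foo_bsg_job(struct bsg_job *job)
 *	{
 *		struct foo_hba *hba = foo_hba_from_dev(job->dev);
 *
 *		return foo_send_cmd(hba, job->request, job->request_len,
 *				    &job->request_payload);
 *	}
 */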

/**
 * bsg_setup_queue - Create and add the bsg hooks so we can receive requests
 * @dev: device to attach bsg device to
 * @q: request queue setup by caller
 * @name: name to give the bsg device
 * @job_fn: bsg job handler
 * @dd_job_size: size of LLD data needed for each job
 *
 * The caller should have set up the request queue with bsg_request_fn
 * as the request_fn.
 */
int bsg_setup_queue(struct device *dev, struct request_queue *q,
		    char *name, bsg_job_fn *job_fn, int dd_job_size)
{
	int ret;

	q->queuedata = dev;
	q->bsg_job_size = dd_job_size;
	q->bsg_job_fn = job_fn;
	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
	blk_queue_softirq_done(q, bsg_softirq_done);
	blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);

	ret = bsg_register_queue(q, dev, name, NULL);
	if (ret) {
		printk(KERN_ERR "%s: bsg interface failed to initialize - register queue\n",
		       dev->kobj.name);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(bsg_setup_queue);
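
/*
 * Example (illustrative sketch): queue setup as a transport class might
 * do it, pairing bsg_request_fn() with this helper.  Error handling is
 * trimmed and the "foo" names are hypothetical:
 *
 *	q = blk_init_queue(bsg_request_fn, NULL);
 *	if (!q)
 *		return -ENOMEM;
 *	ret = bsg_setup_queue(dev, q, "foo_bsg", foo_bsg_job,
 *			      sizeof(struct foo_job));
 *	if (ret)
 *		blk_cleanup_queue(q);
 */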

/**
 * bsg_remove_queue - remove the bsg device from the queue
 * @q:	the request_queue that is to be torn down.
 *
 * Notes:
 *   Before unregistering the queue, drain any requests that are blocked.
 */
void bsg_remove_queue(struct request_queue *q)
{
	struct request *req; /* block request */
	int counts; /* totals for request_list count and starved */

	if (!q)
		return;

	/* Stop taking in new requests */
	spin_lock_irq(q->queue_lock);
	blk_stop_queue(q);

	/* drain all requests in the queue */
	while (1) {
		/* need the lock to fetch a request;
		 * this may fetch the same request as the previous pass
		 */
		req = blk_fetch_request(q);
		/* save requests in use and starved */
		counts = q->rq.count[0] + q->rq.count[1] +
			 q->rq.starved[0] + q->rq.starved[1];
		spin_unlock_irq(q->queue_lock);
		/* any requests still outstanding? */
		if (counts == 0)
			break;

		/* This may be the same req as the previous iteration;
		 * the request must always be ended after a fetch, since
		 * blk_fetch_request() started it.
		 */
		if (req) {
			/* return -ENXIO to indicate that this queue is
			 * going away
			 */
			req->errors = -ENXIO;
			blk_end_request_all(req, -ENXIO);
		}

		msleep(200); /* allow bsg to possibly finish */
		spin_lock_irq(q->queue_lock);
	}
	bsg_unregister_queue(q);
}
EXPORT_SYMBOL_GPL(bsg_remove_queue);
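
/*
 * Illustrative teardown sketch, mirroring the setup example above; a
 * driver's remove path would typically do:
 *
 *	bsg_remove_queue(q);
 *	blk_cleanup_queue(q);
 */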