xref: /openbmc/linux/block/bsg-lib.c (revision ec5c05e5ac8bcb4a6bcd92970e15494a85400d34)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  BSG helper library
 *
 *  Copyright (C) 2008   James Smart, Emulex Corporation
 *  Copyright (C) 2011   Red Hat, Inc.  All rights reserved.
 *  Copyright (C) 2011   Mike Christie
 */
#include <linux/bsg.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/bsg-lib.h>
#include <linux/export.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/sg.h>

#define uptr64(val) ((void __user *)(uintptr_t)(val))

struct bsg_set {
	struct blk_mq_tag_set	tag_set;
	struct bsg_device	*bd;
	bsg_job_fn		*job_fn;
	bsg_timeout_fn		*timeout_fn;
};

static int bsg_transport_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
		fmode_t mode, unsigned int timeout)
{
	struct bsg_job *job;
	struct request *rq;
	struct bio *bio;
	void *reply;
	int ret;

	if (hdr->protocol != BSG_PROTOCOL_SCSI  ||
	    hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_TRANSPORT)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	rq = blk_mq_alloc_request(q, hdr->dout_xfer_len ?
			     REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	rq->timeout = timeout;

	job = blk_mq_rq_to_pdu(rq);
	reply = job->reply;
	memset(job, 0, sizeof(*job));
	job->reply = reply;
	job->reply_len = SCSI_SENSE_BUFFERSIZE;
	job->dd_data = job + 1;

	job->request_len = hdr->request_len;
	job->request = memdup_user(uptr64(hdr->request), hdr->request_len);
	if (IS_ERR(job->request)) {
		ret = PTR_ERR(job->request);
		goto out_free_rq;
	}

	if (hdr->dout_xfer_len && hdr->din_xfer_len) {
		job->bidi_rq = blk_mq_alloc_request(rq->q, REQ_OP_DRV_IN, 0);
		if (IS_ERR(job->bidi_rq)) {
			ret = PTR_ERR(job->bidi_rq);
			goto out_free_job_request;
		}

		ret = blk_rq_map_user(rq->q, job->bidi_rq, NULL,
				uptr64(hdr->din_xferp), hdr->din_xfer_len,
				GFP_KERNEL);
		if (ret)
			goto out_free_bidi_rq;

		job->bidi_bio = job->bidi_rq->bio;
	} else {
		job->bidi_rq = NULL;
		job->bidi_bio = NULL;
	}

	ret = 0;
	if (hdr->dout_xfer_len) {
		ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->dout_xferp),
				hdr->dout_xfer_len, GFP_KERNEL);
	} else if (hdr->din_xfer_len) {
		ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->din_xferp),
				hdr->din_xfer_len, GFP_KERNEL);
	}

	if (ret)
		goto out_unmap_bidi_rq;

	bio = rq->bio;
	blk_execute_rq(rq, !(hdr->flags & BSG_FLAG_Q_AT_TAIL));

	/*
	 * The assignments below don't make much sense, but are kept for
	 * bug-for-bug backwards compatibility:
	 */
	hdr->device_status = job->result & 0xff;
	hdr->transport_status = host_byte(job->result);
	hdr->driver_status = 0;
	hdr->info = 0;
	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->response_len = 0;

	if (job->result < 0) {
		/* we're only returning the result field in the reply */
		job->reply_len = sizeof(u32);
		ret = job->result;
	}

	if (job->reply_len && hdr->response) {
		int len = min(hdr->max_response_len, job->reply_len);

		if (copy_to_user(uptr64(hdr->response), job->reply, len))
			ret = -EFAULT;
		else
			hdr->response_len = len;
	}

	/* we assume all request payload was transferred, residual == 0 */
	hdr->dout_resid = 0;

	if (job->bidi_rq) {
		unsigned int rsp_len = job->reply_payload.payload_len;

		if (WARN_ON(job->reply_payload_rcv_len > rsp_len))
			hdr->din_resid = 0;
		else
			hdr->din_resid = rsp_len - job->reply_payload_rcv_len;
	} else {
		hdr->din_resid = 0;
	}

	blk_rq_unmap_user(bio);
out_unmap_bidi_rq:
	if (job->bidi_rq)
		blk_rq_unmap_user(job->bidi_bio);
out_free_bidi_rq:
	if (job->bidi_rq)
		blk_mq_free_request(job->bidi_rq);
out_free_job_request:
	kfree(job->request);
out_free_rq:
	blk_mq_free_request(rq);
	return ret;
}
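
/*
 * Illustrative sketch (not part of bsg-lib): how user space might drive the
 * SG_IO v4 path handled by bsg_transport_sg_io_fn() above.  The device node
 * name and the request/response sizes are assumptions; a real caller encodes
 * the transport-specific request the way the attached transport class
 * (FC, SAS, ...) defines it.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <scsi/sg.h>		// SG_IO
 *	#include <linux/bsg.h>		// struct sg_io_v4, BSG_PROTOCOL_SCSI, ...
 *
 *	struct sg_io_v4 hdr;
 *	unsigned char req[64], rsp[96], din[512];
 *	int fd = open("/dev/bsg/fc_host0", O_RDWR);	// hypothetical node
 *
 *	memset(&hdr, 0, sizeof(hdr));
 *	hdr.guard = 'Q';
 *	hdr.protocol = BSG_PROTOCOL_SCSI;
 *	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
 *	hdr.request_len = sizeof(req);
 *	hdr.request = (__u64)(uintptr_t)req;
 *	hdr.max_response_len = sizeof(rsp);
 *	hdr.response = (__u64)(uintptr_t)rsp;
 *	hdr.din_xfer_len = sizeof(din);		// data-in only: REQ_OP_DRV_IN
 *	hdr.din_xferp = (__u64)(uintptr_t)din;
 *	hdr.timeout = 30000;			// milliseconds
 *
 *	if (ioctl(fd, SG_IO, &hdr) < 0)
 *		perror("SG_IO");
 *	// on success, hdr.response_len and hdr.din_resid report what came back
 */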

/**
 * bsg_teardown_job - routine to tear down a bsg job
 * @kref: kref inside bsg_job that is to be torn down
 */
static void bsg_teardown_job(struct kref *kref)
{
	struct bsg_job *job = container_of(kref, struct bsg_job, kref);
	struct request *rq = blk_mq_rq_from_pdu(job);

	put_device(job->dev);	/* release reference for the request */

	kfree(job->request_payload.sg_list);
	kfree(job->reply_payload.sg_list);

	blk_mq_end_request(rq, BLK_STS_OK);
}

void bsg_job_put(struct bsg_job *job)
{
	kref_put(&job->kref, bsg_teardown_job);
}
EXPORT_SYMBOL_GPL(bsg_job_put);

int bsg_job_get(struct bsg_job *job)
{
	return kref_get_unless_zero(&job->kref);
}
EXPORT_SYMBOL_GPL(bsg_job_get);

/**
 * bsg_job_done - completion routine for bsg requests
 * @job: bsg_job that is complete
 * @result: job reply result
 * @reply_payload_rcv_len: length of the reply payload received
 *
 * The LLD should call this when the bsg job has completed.
 */
void bsg_job_done(struct bsg_job *job, int result,
		  unsigned int reply_payload_rcv_len)
{
	struct request *rq = blk_mq_rq_from_pdu(job);

	job->result = result;
	job->reply_payload_rcv_len = reply_payload_rcv_len;
	if (likely(!blk_should_fake_timeout(rq->q)))
		blk_mq_complete_request(rq);
}
EXPORT_SYMBOL_GPL(bsg_job_done);
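
/*
 * Illustrative sketch (not part of bsg-lib): how an LLD completion path
 * typically reports back.  The my_hw_cmd structure and my_lld_hw_done()
 * are hypothetical; only the bsg_job fields and the bsg_job_done() call
 * come from this file.  job->reply points at the SCSI_SENSE_BUFFERSIZE
 * byte buffer allocated in bsg_init_rq() below.
 *
 *	static void my_lld_hw_done(struct my_hw_cmd *cmd)
 *	{
 *		struct bsg_job *job = cmd->bsg_job;
 *
 *		// return the transport response through the reply buffer
 *		memcpy(job->reply, cmd->rsp,
 *		       min_t(unsigned int, cmd->rsp_len, SCSI_SENSE_BUFFERSIZE));
 *
 *		// 0 on success, or a -Exyz value on failure; the last argument
 *		// becomes job->reply_payload_rcv_len (data actually received)
 *		bsg_job_done(job, cmd->status ? -EIO : 0, cmd->din_bytes);
 *	}
 */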

/**
 * bsg_complete - softirq done routine for destroying the bsg requests
 * @rq: BSG request that holds the job to be destroyed
 */
static void bsg_complete(struct request *rq)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(rq);

	bsg_job_put(job);
}

static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
{
	size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments);

	BUG_ON(!req->nr_phys_segments);

	buf->sg_list = kmalloc(sz, GFP_KERNEL);
	if (!buf->sg_list)
		return -ENOMEM;
	sg_init_table(buf->sg_list, req->nr_phys_segments);
	buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
	buf->payload_len = blk_rq_bytes(req);
	return 0;
}
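
/*
 * Illustrative sketch (not part of bsg-lib): consuming the payload that
 * bsg_map_buffer() mapped into job->request_payload from an LLD job
 * handler.  my_lld_bsg_job_fn() and the bounce-buffer approach are
 * assumptions; an LLD that can DMA the scatterlist would hand
 * sg_list/sg_cnt to the hardware directly.
 *
 *	static int my_lld_bsg_job_fn(struct bsg_job *job)
 *	{
 *		struct bsg_buffer *pay = &job->request_payload;
 *		void *buf = kmalloc(pay->payload_len, GFP_KERNEL);
 *
 *		if (!buf)
 *			return -ENOMEM;
 *		// linearize the mapped user data for the hardware
 *		sg_copy_to_buffer(pay->sg_list, pay->sg_cnt, buf,
 *				  pay->payload_len);
 *		// ... hand buf to the hardware, wait for the answer ...
 *		kfree(buf);
 *		bsg_job_done(job, 0, 0);
 *		return 0;
 *	}
 *
 * For a bidirectional job the answer would be copied back into
 * job->reply_payload with sg_copy_from_buffer() before completing.
 */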

/**
 * bsg_prepare_job - create the bsg_job structure for the bsg request
 * @dev: device that is being sent the bsg request
 * @req: BSG request that needs a job structure
 */
static bool bsg_prepare_job(struct device *dev, struct request *req)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(req);
	int ret;

	job->timeout = req->timeout;

	if (req->bio) {
		ret = bsg_map_buffer(&job->request_payload, req);
		if (ret)
			goto failjob_rls_job;
	}
	if (job->bidi_rq) {
		ret = bsg_map_buffer(&job->reply_payload, job->bidi_rq);
		if (ret)
			goto failjob_rls_rqst_payload;
	}
	job->dev = dev;
	/* take a reference for the request */
	get_device(job->dev);
	kref_init(&job->kref);
	return true;

failjob_rls_rqst_payload:
	kfree(job->request_payload.sg_list);
failjob_rls_job:
	job->result = -ENOMEM;
	return false;
}

/**
 * bsg_queue_rq - generic handler for bsg requests
 * @hctx: hardware queue
 * @bd: queue data
 *
 * On error the LLD's job_fn callback should return a -Exyz error value
 * that will be set to ->result.
 *
 * Drivers/subsystems should pass this to the queue init function.
 */
static blk_status_t bsg_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct request_queue *q = hctx->queue;
	struct device *dev = q->queuedata;
	struct request *req = bd->rq;
	struct bsg_set *bset =
		container_of(q->tag_set, struct bsg_set, tag_set);
	blk_status_t sts = BLK_STS_IOERR;
	int ret;

	blk_mq_start_request(req);

	if (!get_device(dev))
		return BLK_STS_IOERR;

	if (!bsg_prepare_job(dev, req))
		goto out;

	ret = bset->job_fn(blk_mq_rq_to_pdu(req));
	if (!ret)
		sts = BLK_STS_OK;

out:
	put_device(dev);
	return sts;
}

/* called right after the request is allocated for the request_queue */
static int bsg_init_rq(struct blk_mq_tag_set *set, struct request *req,
		       unsigned int hctx_idx, unsigned int numa_node)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(req);

	job->reply = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
	if (!job->reply)
		return -ENOMEM;
	return 0;
}

static void bsg_exit_rq(struct blk_mq_tag_set *set, struct request *req,
		       unsigned int hctx_idx)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(req);

	kfree(job->reply);
}

void bsg_remove_queue(struct request_queue *q)
{
	if (q) {
		struct bsg_set *bset =
			container_of(q->tag_set, struct bsg_set, tag_set);

		bsg_unregister_queue(bset->bd);
		blk_mq_destroy_queue(q);
		blk_put_queue(q);
		blk_mq_free_tag_set(&bset->tag_set);
		kfree(bset);
	}
}
EXPORT_SYMBOL_GPL(bsg_remove_queue);

static enum blk_eh_timer_return bsg_timeout(struct request *rq)
{
	struct bsg_set *bset =
		container_of(rq->q->tag_set, struct bsg_set, tag_set);

	if (!bset->timeout_fn)
		return BLK_EH_DONE;
	return bset->timeout_fn(rq);
}
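
/*
 * Illustrative sketch (not part of bsg-lib): a transport timeout_fn.  The
 * my_lld_abort() helper is hypothetical; the return values are the
 * blk_eh_timer_return codes that bsg_timeout() above propagates to the
 * block layer.
 *
 *	static enum blk_eh_timer_return my_lld_bsg_timeout(struct request *rq)
 *	{
 *		struct bsg_job *job = blk_mq_rq_to_pdu(rq);
 *
 *		if (my_lld_abort(job))
 *			return BLK_EH_DONE;		// LLD will complete the job
 *		return BLK_EH_RESET_TIMER;		// give the hardware more time
 *	}
 */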

static const struct blk_mq_ops bsg_mq_ops = {
	.queue_rq		= bsg_queue_rq,
	.init_request		= bsg_init_rq,
	.exit_request		= bsg_exit_rq,
	.complete		= bsg_complete,
	.timeout		= bsg_timeout,
};

/**
 * bsg_setup_queue - Create and add the bsg hooks so we can receive requests
 * @dev: device to attach bsg device to
 * @name: name to give the bsg device
 * @job_fn: bsg job handler
 * @timeout: timeout handler function pointer
 * @dd_job_size: size of LLD data needed for each job
 */
struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
		bsg_job_fn *job_fn, bsg_timeout_fn *timeout, int dd_job_size)
{
	struct bsg_set *bset;
	struct blk_mq_tag_set *set;
	struct request_queue *q;
	int ret = -ENOMEM;

	bset = kzalloc(sizeof(*bset), GFP_KERNEL);
	if (!bset)
		return ERR_PTR(-ENOMEM);

	bset->job_fn = job_fn;
	bset->timeout_fn = timeout;

	set = &bset->tag_set;
	set->ops = &bsg_mq_ops;
	set->nr_hw_queues = 1;
	set->queue_depth = 128;
	set->numa_node = NUMA_NO_NODE;
	set->cmd_size = sizeof(struct bsg_job) + dd_job_size;
	set->flags = BLK_MQ_F_NO_SCHED | BLK_MQ_F_BLOCKING;
	if (blk_mq_alloc_tag_set(set))
		goto out_tag_set;

	q = blk_mq_init_queue(set);
	if (IS_ERR(q)) {
		ret = PTR_ERR(q);
		goto out_queue;
	}

	q->queuedata = dev;
	blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);

	bset->bd = bsg_register_queue(q, dev, name, bsg_transport_sg_io_fn);
	if (IS_ERR(bset->bd)) {
		ret = PTR_ERR(bset->bd);
		goto out_cleanup_queue;
	}

	return q;
out_cleanup_queue:
	blk_mq_destroy_queue(q);
	blk_put_queue(q);
out_queue:
	blk_mq_free_tag_set(set);
out_tag_set:
	kfree(bset);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(bsg_setup_queue);
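
/*
 * Illustrative sketch (not part of bsg-lib): registering a bsg queue from a
 * transport class or LLD probe path and tearing it down on remove.  The
 * struct my_lld, my_lld_bsg_probe() and the 64-byte per-job driver data size
 * are assumptions; bsg_setup_queue() and bsg_remove_queue() are the exported
 * entry points above, and my_lld_bsg_job_fn()/my_lld_bsg_timeout() refer to
 * the hypothetical handlers sketched earlier in this file.
 *
 *	static int my_lld_bsg_probe(struct my_lld *lld)
 *	{
 *		struct request_queue *q;
 *
 *		q = bsg_setup_queue(lld->dev, dev_name(lld->dev),
 *				    my_lld_bsg_job_fn, my_lld_bsg_timeout, 64);
 *		if (IS_ERR(q))
 *			return PTR_ERR(q);
 *		lld->bsg_q = q;		// each job's ->dd_data gets 64 bytes
 *		return 0;
 *	}
 *
 *	// ... and on remove:
 *	bsg_remove_queue(lld->bsg_q);
 */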