// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * BSG helper library
 *
 * Copyright (C) 2008 James Smart, Emulex Corporation
 * Copyright (C) 2011 Red Hat, Inc.  All rights reserved.
 * Copyright (C) 2011 Mike Christie
 */
#include <linux/bsg.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/bsg-lib.h>
#include <linux/export.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/sg.h>

#define uptr64(val) ((void __user *)(uintptr_t)(val))

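/*
 * Per-transport state: the blk-mq tag set backing the bsg queue plus the
 * LLD callbacks and the registered bsg device, allocated in
 * bsg_setup_queue() and torn down in bsg_remove_queue().
 */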
struct bsg_set {
	struct blk_mq_tag_set tag_set;
	struct bsg_device *bd;
	bsg_job_fn *job_fn;
	bsg_timeout_fn *timeout_fn;
};

static int bsg_transport_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
		bool open_for_write, unsigned int timeout)
{
	struct bsg_job *job;
	struct request *rq;
	struct bio *bio;
	void *reply;
	int ret;

	if (hdr->protocol != BSG_PROTOCOL_SCSI ||
	    hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_TRANSPORT)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	rq = blk_mq_alloc_request(q, hdr->dout_xfer_len ?
				  REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	rq->timeout = timeout;

	job = blk_mq_rq_to_pdu(rq);
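	/*
	 * job->reply was allocated by bsg_init_rq() when the request was set
	 * up; preserve it across the memset() that clears the rest of the
	 * per-request data.
	 */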
	reply = job->reply;
	memset(job, 0, sizeof(*job));
	job->reply = reply;
	job->reply_len = SCSI_SENSE_BUFFERSIZE;
	job->dd_data = job + 1;

	job->request_len = hdr->request_len;
	job->request = memdup_user(uptr64(hdr->request), hdr->request_len);
	if (IS_ERR(job->request)) {
		ret = PTR_ERR(job->request);
		goto out_free_rq;
	}

	if (hdr->dout_xfer_len && hdr->din_xfer_len) {
		job->bidi_rq = blk_mq_alloc_request(rq->q, REQ_OP_DRV_IN, 0);
		if (IS_ERR(job->bidi_rq)) {
			ret = PTR_ERR(job->bidi_rq);
			goto out_free_job_request;
		}

		ret = blk_rq_map_user(rq->q, job->bidi_rq, NULL,
				uptr64(hdr->din_xferp), hdr->din_xfer_len,
				GFP_KERNEL);
		if (ret)
			goto out_free_bidi_rq;

		job->bidi_bio = job->bidi_rq->bio;
	} else {
		job->bidi_rq = NULL;
		job->bidi_bio = NULL;
	}

	ret = 0;
	if (hdr->dout_xfer_len) {
		ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->dout_xferp),
				hdr->dout_xfer_len, GFP_KERNEL);
	} else if (hdr->din_xfer_len) {
		ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->din_xferp),
				hdr->din_xfer_len, GFP_KERNEL);
	}

	if (ret)
		goto out_unmap_bidi_rq;

	bio = rq->bio;
	blk_execute_rq(rq, !(hdr->flags & BSG_FLAG_Q_AT_TAIL));

	/*
	 * The assignments below don't make much sense, but are kept for
	 * bug by bug backwards compatibility:
	 */
	hdr->device_status = job->result & 0xff;
	hdr->transport_status = host_byte(job->result);
	hdr->driver_status = 0;
	hdr->info = 0;
	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->response_len = 0;

	if (job->result < 0) {
		/* we're only returning the result field in the reply */
		job->reply_len = sizeof(u32);
		ret = job->result;
	}

	if (job->reply_len && hdr->response) {
		int len = min(hdr->max_response_len, job->reply_len);

		if (copy_to_user(uptr64(hdr->response), job->reply, len))
			ret = -EFAULT;
		else
			hdr->response_len = len;
	}

	/* we assume all request payload was transferred, residual == 0 */
	hdr->dout_resid = 0;

	if (job->bidi_rq) {
		unsigned int rsp_len = job->reply_payload.payload_len;

		if (WARN_ON(job->reply_payload_rcv_len > rsp_len))
			hdr->din_resid = 0;
		else
			hdr->din_resid = rsp_len - job->reply_payload_rcv_len;
	} else {
		hdr->din_resid = 0;
	}

	blk_rq_unmap_user(bio);
out_unmap_bidi_rq:
	if (job->bidi_rq)
		blk_rq_unmap_user(job->bidi_bio);
out_free_bidi_rq:
	if (job->bidi_rq)
		blk_mq_free_request(job->bidi_rq);
out_free_job_request:
	kfree(job->request);
out_free_rq:
	blk_mq_free_request(rq);
	return ret;
}
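
/*
 * For reference, a minimal userspace sketch of driving the handler above
 * through a transport bsg node.  This is illustrative only and therefore
 * kept in a comment: the node name and the contents of the request buffer
 * are assumptions (they are transport specific), not part of this file.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <scsi/sg.h>
 *	#include <linux/bsg.h>
 *
 *	unsigned char req[64], rsp[96], din[512];
 *	struct sg_io_v4 hdr;
 *	int ret, fd = open("/dev/bsg/fc_host0", O_RDWR);  // hypothetical node
 *
 *	memset(&hdr, 0, sizeof(hdr));
 *	hdr.guard = 'Q';
 *	hdr.protocol = BSG_PROTOCOL_SCSI;
 *	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
 *	hdr.request_len = sizeof(req);
 *	hdr.request = (__u64)(uintptr_t)req;	// transport-defined request
 *	hdr.din_xfer_len = sizeof(din);
 *	hdr.din_xferp = (__u64)(uintptr_t)din;
 *	hdr.max_response_len = sizeof(rsp);
 *	hdr.response = (__u64)(uintptr_t)rsp;
 *	hdr.timeout = 30000;			// milliseconds
 *
 *	ret = ioctl(fd, SG_IO, &hdr);
 *	// on success, check hdr.info & SG_INFO_CHECK and hdr.din_resid
 */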

/**
 * bsg_teardown_job - routine to teardown a bsg job
 * @kref: kref inside bsg_job that is to be torn down
 */
static void bsg_teardown_job(struct kref *kref)
{
	struct bsg_job *job = container_of(kref, struct bsg_job, kref);
	struct request *rq = blk_mq_rq_from_pdu(job);

	put_device(job->dev);	/* release reference for the request */

	kfree(job->request_payload.sg_list);
	kfree(job->reply_payload.sg_list);

	blk_mq_end_request(rq, BLK_STS_OK);
}

void bsg_job_put(struct bsg_job *job)
{
	kref_put(&job->kref, bsg_teardown_job);
}
EXPORT_SYMBOL_GPL(bsg_job_put);

int bsg_job_get(struct bsg_job *job)
{
	return kref_get_unless_zero(&job->kref);
}
EXPORT_SYMBOL_GPL(bsg_job_get);

/**
 * bsg_job_done - completion routine for bsg requests
 * @job: bsg_job that is complete
 * @result: job reply result
 * @reply_payload_rcv_len: length of payload recvd
 *
 * The LLD should call this when the bsg job has completed.
 */
void bsg_job_done(struct bsg_job *job, int result,
		  unsigned int reply_payload_rcv_len)
{
	struct request *rq = blk_mq_rq_from_pdu(job);

	job->result = result;
	job->reply_payload_rcv_len = reply_payload_rcv_len;
	if (likely(!blk_should_fake_timeout(rq->q)))
		blk_mq_complete_request(rq);
}
EXPORT_SYMBOL_GPL(bsg_job_done);
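
/*
 * A sketch of the other half of this contract: an LLD's job handler (the
 * job_fn passed to bsg_setup_queue()) accepts the job, kicks off the
 * transport command and later calls bsg_job_done() from its completion
 * path.  The example_lld_* names below are hypothetical.
 *
 *	static int example_lld_bsg_request(struct bsg_job *job)
 *	{
 *		struct example_lld_cmd *cmd = job->dd_data;  // dd_job_size bytes
 *
 *		// job->request/job->request_len carry the transport request,
 *		// job->request_payload/job->reply_payload the mapped sg lists.
 *		if (example_lld_send(cmd, job))
 *			return -EIO;	// bsg_queue_rq() turns this into BLK_STS_IOERR
 *		return 0;		// command in flight, completes later
 *	}
 *
 *	static void example_lld_bsg_complete(struct example_lld_cmd *cmd)
 *	{
 *		bsg_job_done(cmd->job, cmd->status, cmd->rcv_len);
 *	}
 */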

/**
 * bsg_complete - softirq done routine for destroying the bsg requests
 * @rq: BSG request that holds the job to be destroyed
 */
static void bsg_complete(struct request *rq)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(rq);

	bsg_job_put(job);
}

static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
{
	size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments);

	BUG_ON(!req->nr_phys_segments);

	buf->sg_list = kmalloc(sz, GFP_KERNEL);
	if (!buf->sg_list)
		return -ENOMEM;
	sg_init_table(buf->sg_list, req->nr_phys_segments);
	buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
	buf->payload_len = blk_rq_bytes(req);
	return 0;
}

/**
 * bsg_prepare_job - create the bsg_job structure for the bsg request
 * @dev: device that is being sent the bsg request
 * @req: BSG request that needs a job structure
 */
static bool bsg_prepare_job(struct device *dev, struct request *req)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(req);
	int ret;

	job->timeout = req->timeout;

	if (req->bio) {
		ret = bsg_map_buffer(&job->request_payload, req);
		if (ret)
			goto failjob_rls_job;
	}
	if (job->bidi_rq) {
		ret = bsg_map_buffer(&job->reply_payload, job->bidi_rq);
		if (ret)
			goto failjob_rls_rqst_payload;
	}
	job->dev = dev;
	/* take a reference for the request */
	get_device(job->dev);
	kref_init(&job->kref);
	return true;

failjob_rls_rqst_payload:
	kfree(job->request_payload.sg_list);
failjob_rls_job:
	job->result = -ENOMEM;
	return false;
}

/**
 * bsg_queue_rq - generic handler for bsg requests
 * @hctx: hardware queue
 * @bd: queue data
 *
 * On error the create_bsg_job function should return a -Exyz error value
 * that will be set to ->result.
 *
 * Drivers/subsys should pass this to the queue init function.
 */
static blk_status_t bsg_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct request_queue *q = hctx->queue;
	struct device *dev = q->queuedata;
	struct request *req = bd->rq;
	struct bsg_set *bset =
		container_of(q->tag_set, struct bsg_set, tag_set);
	blk_status_t sts = BLK_STS_IOERR;
	int ret;

	blk_mq_start_request(req);

	if (!get_device(dev))
		return BLK_STS_IOERR;

	if (!bsg_prepare_job(dev, req))
		goto out;

	ret = bset->job_fn(blk_mq_rq_to_pdu(req));
	if (!ret)
		sts = BLK_STS_OK;

out:
	put_device(dev);
	return sts;
}

/* called right after the request is allocated for the request_queue */
static int bsg_init_rq(struct blk_mq_tag_set *set, struct request *req,
		       unsigned int hctx_idx, unsigned int numa_node)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(req);

	job->reply = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
	if (!job->reply)
		return -ENOMEM;
	return 0;
}

static void bsg_exit_rq(struct blk_mq_tag_set *set, struct request *req,
		       unsigned int hctx_idx)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(req);

	kfree(job->reply);
}

void bsg_remove_queue(struct request_queue *q)
{
	if (q) {
		struct bsg_set *bset =
			container_of(q->tag_set, struct bsg_set, tag_set);

		bsg_unregister_queue(bset->bd);
		blk_mq_destroy_queue(q);
		blk_put_queue(q);
		blk_mq_free_tag_set(&bset->tag_set);
		kfree(bset);
	}
}
EXPORT_SYMBOL_GPL(bsg_remove_queue);

static enum blk_eh_timer_return bsg_timeout(struct request *rq)
{
	struct bsg_set *bset =
		container_of(rq->q->tag_set, struct bsg_set, tag_set);

	if (!bset->timeout_fn)
		return BLK_EH_DONE;
	return bset->timeout_fn(rq);
}

static const struct blk_mq_ops bsg_mq_ops = {
	.queue_rq	= bsg_queue_rq,
	.init_request	= bsg_init_rq,
	.exit_request	= bsg_exit_rq,
	.complete	= bsg_complete,
	.timeout	= bsg_timeout,
};

/**
 * bsg_setup_queue - Create and add the bsg hooks so we can receive requests
 * @dev: device to attach bsg device to
 * @name: device to give bsg device
 * @job_fn: bsg job handler
 * @timeout: timeout handler function pointer
 * @dd_job_size: size of LLD data needed for each job
 */
struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
		bsg_job_fn *job_fn, bsg_timeout_fn *timeout, int dd_job_size)
{
	struct bsg_set *bset;
	struct blk_mq_tag_set *set;
	struct request_queue *q;
	int ret = -ENOMEM;

	bset = kzalloc(sizeof(*bset), GFP_KERNEL);
	if (!bset)
		return ERR_PTR(-ENOMEM);

	bset->job_fn = job_fn;
	bset->timeout_fn = timeout;

	set = &bset->tag_set;
	set->ops = &bsg_mq_ops;
	set->nr_hw_queues = 1;
	set->queue_depth = 128;
	set->numa_node = NUMA_NO_NODE;
	set->cmd_size = sizeof(struct bsg_job) + dd_job_size;
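	/*
	 * The LLD's job_fn may sleep, hence BLK_MQ_F_BLOCKING; bsg requests
	 * are pure passthrough, so no I/O scheduler is wanted either.
	 */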
	set->flags = BLK_MQ_F_NO_SCHED | BLK_MQ_F_BLOCKING;
	if (blk_mq_alloc_tag_set(set))
		goto out_tag_set;

	q = blk_mq_init_queue(set);
	if (IS_ERR(q)) {
		ret = PTR_ERR(q);
		goto out_queue;
	}

	q->queuedata = dev;
	blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);

	bset->bd = bsg_register_queue(q, dev, name, bsg_transport_sg_io_fn);
	if (IS_ERR(bset->bd)) {
		ret = PTR_ERR(bset->bd);
		goto out_cleanup_queue;
	}

	return q;
out_cleanup_queue:
	blk_mq_destroy_queue(q);
	blk_put_queue(q);
out_queue:
	blk_mq_free_tag_set(set);
out_tag_set:
	kfree(bset);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(bsg_setup_queue);
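
/*
 * A minimal sketch of how a transport class is expected to wire this up,
 * assuming the hypothetical example_lld_* callbacks from the sketch above
 * (compare the FC and iSCSI transport classes for real users):
 *
 *	struct request_queue *q;
 *
 *	q = bsg_setup_queue(dev, dev_name(dev), example_lld_bsg_request,
 *			    example_lld_bsg_timeout,
 *			    sizeof(struct example_lld_cmd));
 *	if (IS_ERR(q))
 *		return PTR_ERR(q);
 *
 *	// ... device is live, bsg requests arrive via example_lld_bsg_request()
 *
 *	bsg_remove_queue(q);	// on teardown; a NULL queue is tolerated
 */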