// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * BSG helper library
 *
 * Copyright (C) 2008 James Smart, Emulex Corporation
 * Copyright (C) 2011 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2011 Mike Christie
 */
#include <linux/bsg.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/bsg-lib.h>
#include <linux/export.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/sg.h>

#define uptr64(val) ((void __user *)(uintptr_t)(val))

struct bsg_set {
	struct blk_mq_tag_set	tag_set;
	struct bsg_device	*bd;
	bsg_job_fn		*job_fn;
	bsg_timeout_fn		*timeout_fn;
};

static int bsg_transport_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
		bool open_for_write, unsigned int timeout)
{
	struct bsg_job *job;
	struct request *rq;
	struct bio *bio;
	void *reply;
	int ret;

	if (hdr->protocol != BSG_PROTOCOL_SCSI ||
	    hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_TRANSPORT)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	rq = blk_mq_alloc_request(q, hdr->dout_xfer_len ?
				  REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	rq->timeout = timeout;

	job = blk_mq_rq_to_pdu(rq);
	reply = job->reply;
	memset(job, 0, sizeof(*job));
	job->reply = reply;
	job->reply_len = SCSI_SENSE_BUFFERSIZE;
	job->dd_data = job + 1;

	job->request_len = hdr->request_len;
	job->request = memdup_user(uptr64(hdr->request), hdr->request_len);
	if (IS_ERR(job->request)) {
		ret = PTR_ERR(job->request);
		goto out_free_rq;
	}

	if (hdr->dout_xfer_len && hdr->din_xfer_len) {
		job->bidi_rq = blk_mq_alloc_request(rq->q, REQ_OP_DRV_IN, 0);
		if (IS_ERR(job->bidi_rq)) {
			ret = PTR_ERR(job->bidi_rq);
			goto out_free_job_request;
		}

		ret = blk_rq_map_user(rq->q, job->bidi_rq, NULL,
				uptr64(hdr->din_xferp), hdr->din_xfer_len,
				GFP_KERNEL);
		if (ret)
			goto out_free_bidi_rq;

		job->bidi_bio = job->bidi_rq->bio;
	} else {
		job->bidi_rq = NULL;
		job->bidi_bio = NULL;
	}

	ret = 0;
	if (hdr->dout_xfer_len) {
		ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->dout_xferp),
				hdr->dout_xfer_len, GFP_KERNEL);
	} else if (hdr->din_xfer_len) {
		ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->din_xferp),
				hdr->din_xfer_len, GFP_KERNEL);
	}

	if (ret)
		goto out_unmap_bidi_rq;

	bio = rq->bio;
	blk_execute_rq(rq, !(hdr->flags & BSG_FLAG_Q_AT_TAIL));

	/*
	 * The assignments below don't make much sense, but are kept for
	 * bug by bug backwards compatibility:
	 */
	hdr->device_status = job->result & 0xff;
	hdr->transport_status = host_byte(job->result);
	hdr->driver_status = 0;
	hdr->info = 0;
	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->response_len = 0;

	if (job->result < 0) {
		/* we're only returning the result field in the reply */
		job->reply_len = sizeof(u32);
		ret = job->result;
	}

	if (job->reply_len && hdr->response) {
		int len = min(hdr->max_response_len, job->reply_len);

		if (copy_to_user(uptr64(hdr->response), job->reply, len))
			ret = -EFAULT;
		else
			hdr->response_len = len;
	}

	/* we assume all request payload was transferred, residual == 0 */
	hdr->dout_resid = 0;

	if (job->bidi_rq) {
		unsigned int rsp_len = job->reply_payload.payload_len;

		if (WARN_ON(job->reply_payload_rcv_len > rsp_len))
			hdr->din_resid = 0;
		else
			hdr->din_resid = rsp_len - job->reply_payload_rcv_len;
	} else {
		hdr->din_resid = 0;
	}

	blk_rq_unmap_user(bio);
out_unmap_bidi_rq:
	if (job->bidi_rq)
		blk_rq_unmap_user(job->bidi_bio);
out_free_bidi_rq:
	if (job->bidi_rq)
		blk_mq_free_request(job->bidi_rq);
out_free_job_request:
	kfree(job->request);
out_free_rq:
	blk_mq_free_request(rq);
	return ret;
}
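
/*
 * For reference, a minimal user-space sketch of the ioctl path handled
 * above.  The bsg node name and the request blob layout are hypothetical;
 * the actual request format is defined by each transport class, and the
 * caller needs CAP_SYS_RAWIO:
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/bsg.h>
 *	#include <scsi/sg.h>
 *
 *	int send_transport_request(const char *node, void *req, __u32 req_len)
 *	{
 *		struct sg_io_v4 hdr;
 *		int fd, ret;
 *
 *		fd = open(node, O_RDWR);	// e.g. "/dev/bsg/fc_host0" (hypothetical)
 *		if (fd < 0)
 *			return -1;
 *		memset(&hdr, 0, sizeof(hdr));
 *		hdr.guard = 'Q';
 *		hdr.protocol = BSG_PROTOCOL_SCSI;
 *		hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
 *		hdr.request = (uintptr_t)req;
 *		hdr.request_len = req_len;
 *		ret = ioctl(fd, SG_IO, &hdr);	// ends up in bsg_transport_sg_io_fn()
 *		close(fd);
 *		return ret;
 *	}
 */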

/**
 * bsg_teardown_job - routine to teardown a bsg job
 * @kref: kref inside bsg_job that is to be torn down
 */
static void bsg_teardown_job(struct kref *kref)
{
	struct bsg_job *job = container_of(kref, struct bsg_job, kref);
	struct request *rq = blk_mq_rq_from_pdu(job);

	put_device(job->dev);	/* release reference for the request */

	kfree(job->request_payload.sg_list);
	kfree(job->reply_payload.sg_list);

	blk_mq_end_request(rq, BLK_STS_OK);
}

void bsg_job_put(struct bsg_job *job)
{
	kref_put(&job->kref, bsg_teardown_job);
}
EXPORT_SYMBOL_GPL(bsg_job_put);

int bsg_job_get(struct bsg_job *job)
{
	return kref_get_unless_zero(&job->kref);
}
EXPORT_SYMBOL_GPL(bsg_job_get);
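
/*
 * A sketch of the intended get/put pairing for an LLD that completes jobs
 * asynchronously; the xxx_* names are placeholders, not a real driver API:
 *
 *	static int xxx_queue_job(struct bsg_job *job)
 *	{
 *		if (!bsg_job_get(job))
 *			return -ENXIO;	// job is already being torn down
 *		xxx_hw_submit(job);	// hypothetical asynchronous submission
 *		return 0;
 *	}
 *
 *	static void xxx_hw_irq(struct bsg_job *job)	// hypothetical completion path
 *	{
 *		bsg_job_done(job, 0, job->reply_payload.payload_len);
 *		bsg_job_put(job);	// drop the reference taken at submission
 *	}
 */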

/**
 * bsg_job_done - completion routine for bsg requests
 * @job: bsg_job that is complete
 * @result: job reply result
 * @reply_payload_rcv_len: length of payload received
 *
 * The LLD should call this when the bsg job has completed.
 */
void bsg_job_done(struct bsg_job *job, int result,
		  unsigned int reply_payload_rcv_len)
{
	struct request *rq = blk_mq_rq_from_pdu(job);

	job->result = result;
	job->reply_payload_rcv_len = reply_payload_rcv_len;
	if (likely(!blk_should_fake_timeout(rq->q)))
		blk_mq_complete_request(rq);
}
EXPORT_SYMBOL_GPL(bsg_job_done);
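
/*
 * A sketch of an LLD job handler wired up through job_fn; the xxx_* names
 * and the request validation are illustrative only:
 *
 *	static int xxx_bsg_request(struct bsg_job *job)	// a bsg_job_fn
 *	{
 *		if (job->request_len < sizeof(struct xxx_request))
 *			return -ENOMSG;		// negative value ends up in ->result
 *
 *		xxx_handle_request(job);	// hypothetical; must eventually call
 *						// bsg_job_done() on the job
 *		return 0;
 *	}
 */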

/**
 * bsg_complete - softirq done routine for destroying the bsg requests
 * @rq: BSG request that holds the job to be destroyed
 */
static void bsg_complete(struct request *rq)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(rq);

	bsg_job_put(job);
}

static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
{
	size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments);

	BUG_ON(!req->nr_phys_segments);

	buf->sg_list = kmalloc(sz, GFP_KERNEL);
	if (!buf->sg_list)
		return -ENOMEM;
	sg_init_table(buf->sg_list, req->nr_phys_segments);
	buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
	buf->payload_len = blk_rq_bytes(req);
	return 0;
}
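
/*
 * The job->request_payload and job->reply_payload buffers built here are
 * what an LLD consumes.  A sketch of linearizing the request payload with
 * the standard scatterlist helpers, assuming a driver-provided bounce
 * buffer:
 *
 *	static int xxx_copy_payload(struct bsg_job *job, void *buf, size_t len)
 *	{
 *		struct bsg_buffer *payload = &job->request_payload;
 *
 *		if (len > payload->payload_len)
 *			len = payload->payload_len;
 *		if (sg_copy_to_buffer(payload->sg_list, payload->sg_cnt,
 *				      buf, len) != len)
 *			return -EIO;
 *		return 0;
 *	}
 */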

/**
 * bsg_prepare_job - create the bsg_job structure for the bsg request
 * @dev: device that is being sent the bsg request
 * @req: BSG request that needs a job structure
 */
static bool bsg_prepare_job(struct device *dev, struct request *req)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(req);
	int ret;

	job->timeout = req->timeout;

	if (req->bio) {
		ret = bsg_map_buffer(&job->request_payload, req);
		if (ret)
			goto failjob_rls_job;
	}
	if (job->bidi_rq) {
		ret = bsg_map_buffer(&job->reply_payload, job->bidi_rq);
		if (ret)
			goto failjob_rls_rqst_payload;
	}
	job->dev = dev;
	/* take a reference for the request */
	get_device(job->dev);
	kref_init(&job->kref);
	return true;

failjob_rls_rqst_payload:
	kfree(job->request_payload.sg_list);
failjob_rls_job:
	job->result = -ENOMEM;
	return false;
}

/**
 * bsg_queue_rq - generic handler for bsg requests
 * @hctx: hardware queue
 * @bd: queue data
 *
 * On error the LLD's job_fn should return a -Exyz error value
 * that will be set to ->result.
 *
 * Drivers/subsys should pass this to the queue init function.
 */
static blk_status_t bsg_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct request_queue *q = hctx->queue;
	struct device *dev = q->queuedata;
	struct request *req = bd->rq;
	struct bsg_set *bset =
		container_of(q->tag_set, struct bsg_set, tag_set);
	blk_status_t sts = BLK_STS_IOERR;
	int ret;

	blk_mq_start_request(req);

	if (!get_device(dev))
		return BLK_STS_IOERR;

	if (!bsg_prepare_job(dev, req))
		goto out;

	ret = bset->job_fn(blk_mq_rq_to_pdu(req));
	if (!ret)
		sts = BLK_STS_OK;

out:
	put_device(dev);
	return sts;
}

/* called right after the request is allocated for the request_queue */
static int bsg_init_rq(struct blk_mq_tag_set *set, struct request *req,
		       unsigned int hctx_idx, unsigned int numa_node)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(req);

	job->reply = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
	if (!job->reply)
		return -ENOMEM;
	return 0;
}

static void bsg_exit_rq(struct blk_mq_tag_set *set, struct request *req,
		       unsigned int hctx_idx)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(req);

	kfree(job->reply);
}

void bsg_remove_queue(struct request_queue *q)
{
	if (q) {
		struct bsg_set *bset =
			container_of(q->tag_set, struct bsg_set, tag_set);

		bsg_unregister_queue(bset->bd);
		blk_mq_destroy_queue(q);
		blk_put_queue(q);
		blk_mq_free_tag_set(&bset->tag_set);
		kfree(bset);
	}
}
EXPORT_SYMBOL_GPL(bsg_remove_queue);

static enum blk_eh_timer_return bsg_timeout(struct request *rq)
{
	struct bsg_set *bset =
		container_of(rq->q->tag_set, struct bsg_set, tag_set);

	if (!bset->timeout_fn)
		return BLK_EH_DONE;
	return bset->timeout_fn(rq);
}
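
/*
 * A sketch of an LLD timeout handler reached via bsg_timeout(); whether a
 * timed-out job can be aborted or should simply be given more time is a
 * driver policy decision, and xxx_abort_job() is hypothetical:
 *
 *	static enum blk_eh_timer_return xxx_bsg_timeout(struct request *rq)
 *	{
 *		struct bsg_job *job = blk_mq_rq_to_pdu(rq);
 *
 *		if (!xxx_abort_job(job))
 *			return BLK_EH_RESET_TIMER;	// couldn't abort, wait longer
 *		return BLK_EH_DONE;	// the abort path will complete the job
 *	}
 */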

static const struct blk_mq_ops bsg_mq_ops = {
	.queue_rq		= bsg_queue_rq,
	.init_request		= bsg_init_rq,
	.exit_request		= bsg_exit_rq,
	.complete		= bsg_complete,
	.timeout		= bsg_timeout,
};

/**
 * bsg_setup_queue - Create and add the bsg hooks so we can receive requests
 * @dev: device to attach bsg device to
 * @name: name to give the bsg device
 * @lim: queue limits for the bsg queue
 * @job_fn: bsg job handler
 * @timeout: timeout handler function pointer
 * @dd_job_size: size of LLD data needed for each job
 */
struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
		struct queue_limits *lim, bsg_job_fn *job_fn,
		bsg_timeout_fn *timeout, int dd_job_size)
{
	struct bsg_set *bset;
	struct blk_mq_tag_set *set;
	struct request_queue *q;
	int ret = -ENOMEM;

	bset = kzalloc(sizeof(*bset), GFP_KERNEL);
	if (!bset)
		return ERR_PTR(-ENOMEM);

	bset->job_fn = job_fn;
	bset->timeout_fn = timeout;

	set = &bset->tag_set;
	set->ops = &bsg_mq_ops;
	set->nr_hw_queues = 1;
	set->queue_depth = 128;
	set->numa_node = NUMA_NO_NODE;
	set->cmd_size = sizeof(struct bsg_job) + dd_job_size;
	set->flags = BLK_MQ_F_NO_SCHED | BLK_MQ_F_BLOCKING;
	if (blk_mq_alloc_tag_set(set))
		goto out_tag_set;

	q = blk_mq_alloc_queue(set, lim, dev);
	if (IS_ERR(q)) {
		ret = PTR_ERR(q);
		goto out_queue;
	}

	blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);

	bset->bd = bsg_register_queue(q, dev, name, bsg_transport_sg_io_fn);
	if (IS_ERR(bset->bd)) {
		ret = PTR_ERR(bset->bd);
		goto out_cleanup_queue;
	}

	return q;
out_cleanup_queue:
	blk_mq_destroy_queue(q);
	blk_put_queue(q);
out_queue:
	blk_mq_free_tag_set(set);
out_tag_set:
	kfree(bset);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(bsg_setup_queue);
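
/*
 * A sketch of a transport class wiring this library up, with hypothetical
 * xxx_* callbacks; NULL queue limits are assumed here (callers may pass
 * their own), and the returned queue must be torn down with
 * bsg_remove_queue():
 *
 *	struct request_queue *q;
 *
 *	q = bsg_setup_queue(dev, dev_name(dev), NULL, xxx_bsg_request,
 *			    xxx_bsg_timeout, sizeof(struct xxx_job_data));
 *	if (IS_ERR(q))
 *		return PTR_ERR(q);
 */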