// SPDX-License-Identifier: GPL-2.0
/*
 * bsg.c - block layer implementation of the sg v4 interface
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/cdev.h>
#include <linux/jiffies.h>
#include <linux/percpu.h>
#include <linux/idr.h>
#include <linux/bsg.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/sg.h>

#define BSG_DESCRIPTION	"Block layer SCSI generic (bsg) driver"
#define BSG_VERSION	"0.4"

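/*
 * Per-queue bsg state: the character device node plus the per-device
 * defaults (queue depth reported via SG_{GET,SET}_COMMAND_Q, the SG_IO
 * timeout, the reserved buffer size) and the transport-provided sg_io
 * handler.
 */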
struct bsg_device {
	struct request_queue *queue;
	struct device device;
	struct cdev cdev;
	int max_queue;
	unsigned int timeout;
	unsigned int reserved_size;
	bsg_sg_io_fn *sg_io_fn;
};

static inline struct bsg_device *to_bsg_device(struct inode *inode)
{
	return container_of(inode->i_cdev, struct bsg_device, cdev);
}

#define BSG_DEFAULT_CMDS	64
#define BSG_MAX_DEVS		(1 << MINORBITS)

static DEFINE_IDA(bsg_minor_ida);
static const struct class bsg_class;
static int bsg_major;

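/*
 * Pick the SG_IO timeout: a non-zero per-command hdr->timeout (in ms) wins,
 * otherwise fall back to the per-device default set via SG_SET_TIMEOUT,
 * otherwise BLK_DEFAULT_SG_TIMEOUT; never go below BLK_MIN_SG_TIMEOUT.
 */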
static unsigned int bsg_timeout(struct bsg_device *bd, struct sg_io_v4 *hdr)
{
	unsigned int timeout = BLK_DEFAULT_SG_TIMEOUT;

	if (hdr->timeout)
		timeout = msecs_to_jiffies(hdr->timeout);
	else if (bd->timeout)
		timeout = bd->timeout;

	return max_t(unsigned int, timeout, BLK_MIN_SG_TIMEOUT);
}

static int bsg_sg_io(struct bsg_device *bd, bool open_for_write,
		     void __user *uarg)
{
	struct sg_io_v4 hdr;
	int ret;

	if (copy_from_user(&hdr, uarg, sizeof(hdr)))
		return -EFAULT;
	if (hdr.guard != 'Q')
		return -EINVAL;
	ret = bd->sg_io_fn(bd->queue, &hdr, open_for_write,
			   bsg_timeout(bd, &hdr));
	if (!ret && copy_to_user(uarg, &hdr, sizeof(hdr)))
		return -EFAULT;
	return ret;
}
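
/*
 * Illustrative sketch only (not part of this driver): a minimal user-space
 * caller of the SG_IO path above.  The device node name is hypothetical;
 * struct sg_io_v4 and the BSG_* constants come from <linux/bsg.h>, the
 * SG_IO ioctl number from <scsi/sg.h>.  The guard must be 'Q' or
 * bsg_sg_io() rejects the header with -EINVAL.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <scsi/sg.h>		// SG_IO
 *	#include <linux/bsg.h>		// struct sg_io_v4, BSG_PROTOCOL_SCSI
 *
 *	unsigned char cdb[6] = { 0 };	// TEST UNIT READY
 *	unsigned char sense[32];
 *	struct sg_io_v4 hdr;
 *
 *	int fd = open("/dev/bsg/0:0:0:0", O_RDONLY);	// hypothetical node
 *	memset(&hdr, 0, sizeof(hdr));
 *	hdr.guard = 'Q';				// required by bsg_sg_io()
 *	hdr.protocol = BSG_PROTOCOL_SCSI;
 *	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
 *	hdr.request = (uintptr_t)cdb;
 *	hdr.request_len = sizeof(cdb);
 *	hdr.response = (uintptr_t)sense;
 *	hdr.max_response_len = sizeof(sense);
 *	hdr.timeout = 10000;				// ms; see bsg_timeout()
 *	ioctl(fd, SG_IO, &hdr);
 */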

static int bsg_open(struct inode *inode, struct file *file)
{
	if (!blk_get_queue(to_bsg_device(inode)->queue))
		return -ENXIO;
	return 0;
}

static int bsg_release(struct inode *inode, struct file *file)
{
	blk_put_queue(to_bsg_device(inode)->queue);
	return 0;
}

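/*
 * SG_GET_COMMAND_Q/SG_SET_COMMAND_Q: nothing else in this file consumes
 * max_queue; the value is only stored here and reported back for
 * compatibility with the sg interface.
 */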
static int bsg_get_command_q(struct bsg_device *bd, int __user *uarg)
{
	return put_user(READ_ONCE(bd->max_queue), uarg);
}

static int bsg_set_command_q(struct bsg_device *bd, int __user *uarg)
{
	int max_queue;

	if (get_user(max_queue, uarg))
		return -EFAULT;
	if (max_queue < 1)
		return -EINVAL;
	WRITE_ONCE(bd->max_queue, max_queue);
	return 0;
}

static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct bsg_device *bd = to_bsg_device(file_inode(file));
	struct request_queue *q = bd->queue;
	void __user *uarg = (void __user *) arg;
	int __user *intp = uarg;
	int val;

	switch (cmd) {
	/*
	 * Our own ioctls
	 */
	case SG_GET_COMMAND_Q:
		return bsg_get_command_q(bd, uarg);
	case SG_SET_COMMAND_Q:
		return bsg_set_command_q(bd, uarg);

	/*
	 * SCSI/sg ioctls
	 */
	case SG_GET_VERSION_NUM:
		return put_user(30527, intp);
	case SCSI_IOCTL_GET_IDLUN:
		return put_user(0, intp);
	case SCSI_IOCTL_GET_BUS_NUMBER:
		return put_user(0, intp);
	case SG_SET_TIMEOUT:
		if (get_user(val, intp))
			return -EFAULT;
		bd->timeout = clock_t_to_jiffies(val);
		return 0;
	case SG_GET_TIMEOUT:
		return jiffies_to_clock_t(bd->timeout);
	case SG_GET_RESERVED_SIZE:
		return put_user(min(bd->reserved_size, queue_max_bytes(q)),
				intp);
	case SG_SET_RESERVED_SIZE:
		if (get_user(val, intp))
			return -EFAULT;
		if (val < 0)
			return -EINVAL;
		bd->reserved_size =
			min_t(unsigned int, val, queue_max_bytes(q));
		return 0;
	case SG_EMULATED_HOST:
		return put_user(1, intp);
	case SG_IO:
		return bsg_sg_io(bd, file->f_mode & FMODE_WRITE, uarg);
	case SCSI_IOCTL_SEND_COMMAND:
		pr_warn_ratelimited("%s: calling unsupported SCSI_IOCTL_SEND_COMMAND\n",
				    current->comm);
		return -EINVAL;
	default:
		return -ENOTTY;
	}
}

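/*
 * compat_ptr_ioctl() is sufficient here: struct sg_io_v4 uses only
 * fixed-width fields with the same layout for 32-bit and 64-bit callers,
 * so only the compat_ptr() conversion of the argument is needed.
 */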
static const struct file_operations bsg_fops = {
	.open		= bsg_open,
	.release	= bsg_release,
	.unlocked_ioctl	= bsg_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.owner		= THIS_MODULE,
	.llseek		= default_llseek,
};

static void bsg_device_release(struct device *dev)
{
	struct bsg_device *bd = container_of(dev, struct bsg_device, device);

	ida_free(&bsg_minor_ida, MINOR(bd->device.devt));
	kfree(bd);
}

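/**
 * bsg_unregister_queue - remove the bsg device associated with a queue
 * @bd: the bsg_device returned by bsg_register_queue()
 */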
void bsg_unregister_queue(struct bsg_device *bd)
{
	struct gendisk *disk = bd->queue->disk;

	if (disk && disk->queue_kobj.sd)
		sysfs_remove_link(&disk->queue_kobj, "bsg");
	cdev_device_del(&bd->cdev, &bd->device);
	put_device(&bd->device);
}
EXPORT_SYMBOL_GPL(bsg_unregister_queue);

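/**
 * bsg_register_queue - create a /dev/bsg node for a request queue
 * @q: request queue the bsg device operates on
 * @parent: device the bsg node hangs off (also used for error reporting)
 * @name: name of the character device node
 * @sg_io_fn: transport callback that executes an SG_IO request
 *
 * Returns the new bsg_device on success or an ERR_PTR() on failure.
 */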
struct bsg_device *bsg_register_queue(struct request_queue *q,
		struct device *parent, const char *name, bsg_sg_io_fn *sg_io_fn)
{
	struct bsg_device *bd;
	int ret;

	bd = kzalloc(sizeof(*bd), GFP_KERNEL);
	if (!bd)
		return ERR_PTR(-ENOMEM);
	bd->max_queue = BSG_DEFAULT_CMDS;
	bd->reserved_size = INT_MAX;
	bd->queue = q;
	bd->sg_io_fn = sg_io_fn;

	ret = ida_alloc_max(&bsg_minor_ida, BSG_MAX_DEVS - 1, GFP_KERNEL);
	if (ret < 0) {
		if (ret == -ENOSPC)
			dev_err(parent, "bsg: too many bsg devices\n");
		kfree(bd);
		return ERR_PTR(ret);
	}
	bd->device.devt = MKDEV(bsg_major, ret);
	bd->device.class = &bsg_class;
	bd->device.parent = parent;
	bd->device.release = bsg_device_release;
	dev_set_name(&bd->device, "%s", name);
	device_initialize(&bd->device);

	cdev_init(&bd->cdev, &bsg_fops);
	bd->cdev.owner = THIS_MODULE;
	ret = cdev_device_add(&bd->cdev, &bd->device);
	if (ret)
		goto out_put_device;

	if (q->disk && q->disk->queue_kobj.sd) {
		ret = sysfs_create_link(&q->disk->queue_kobj, &bd->device.kobj,
					"bsg");
		if (ret)
			goto out_device_del;
	}

	return bd;

out_device_del:
	cdev_device_del(&bd->cdev, &bd->device);
out_put_device:
	put_device(&bd->device);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(bsg_register_queue);
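
/*
 * Illustrative sketch only (not part of this file): how a transport driver
 * might attach a bsg node to its request queue.  The callback name
 * my_transport_sg_io() and the surrounding names are hypothetical; the
 * callback signature follows the bsg_sg_io_fn typedef used above.
 *
 *	static int my_transport_sg_io(struct request_queue *q,
 *			struct sg_io_v4 *hdr, bool open_for_write,
 *			unsigned int timeout)
 *	{
 *		// build, dispatch and complete the request described by *hdr
 *		return 0;
 *	}
 *
 *	bd = bsg_register_queue(q, dev, dev_name(dev), my_transport_sg_io);
 *	if (IS_ERR(bd))
 *		return PTR_ERR(bd);
 *	...
 *	bsg_unregister_queue(bd);	// on teardown
 */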

static char *bsg_devnode(const struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "bsg/%s", dev_name(dev));
}

static const struct class bsg_class = {
	.name		= "bsg",
	.devnode	= bsg_devnode,
};

static int __init bsg_init(void)
{
	dev_t devid;
	int ret;

	ret = class_register(&bsg_class);
	if (ret)
		return ret;

	ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
	if (ret)
		goto destroy_bsg_class;
	bsg_major = MAJOR(devid);

	printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
	       " loaded (major %d)\n", bsg_major);
	return 0;

destroy_bsg_class:
	class_unregister(&bsg_class);
	return ret;
}

MODULE_AUTHOR("Jens Axboe");
MODULE_DESCRIPTION(BSG_DESCRIPTION);
MODULE_LICENSE("GPL");

device_initcall(bsg_init);