// SPDX-License-Identifier: GPL-2.0
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/blkdev.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/blkpg.h>
#include <linux/hdreg.h>
#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/blktrace_api.h>
#include <linux/pr.h>
#include <linux/uaccess.h>
#include "blk.h"

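/*
 * Common handler for the BLKPG partition ioctl: validate the request from
 * user space and dispatch to the add/resize/delete partition helpers.
 * Offsets and lengths arrive in bytes and are converted to sectors below.
 */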
static int blkpg_do_ioctl(struct block_device *bdev,
			  struct blkpg_partition __user *upart, int op)
{
	struct gendisk *disk = bdev->bd_disk;
	struct blkpg_partition p;
	sector_t start, length;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&p, upart, sizeof(struct blkpg_partition)))
		return -EFAULT;
	if (bdev_is_partition(bdev))
		return -EINVAL;

	if (p.pno <= 0)
		return -EINVAL;

	if (op == BLKPG_DEL_PARTITION)
		return bdev_del_partition(disk, p.pno);

	if (p.start < 0 || p.length <= 0 || LLONG_MAX - p.length < p.start)
		return -EINVAL;
	/* Check that the partition is aligned to the block size */
	if (!IS_ALIGNED(p.start | p.length, bdev_logical_block_size(bdev)))
		return -EINVAL;

	start = p.start >> SECTOR_SHIFT;
	length = p.length >> SECTOR_SHIFT;

	switch (op) {
	case BLKPG_ADD_PARTITION:
		return bdev_add_partition(disk, p.pno, start, length);
	case BLKPG_RESIZE_PARTITION:
		return bdev_resize_partition(disk, p.pno, start, length);
	default:
		return -EINVAL;
	}
}

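/*
 * Fetch the BLKPG op code and payload pointer, then hand off to the
 * common handler.
 */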
static int blkpg_ioctl(struct block_device *bdev,
		       struct blkpg_ioctl_arg __user *arg)
{
	struct blkpg_partition __user *udata;
	int op;

	if (get_user(op, &arg->op) || get_user(udata, &arg->data))
		return -EFAULT;

	return blkpg_do_ioctl(bdev, udata, op);
}

#ifdef CONFIG_COMPAT
struct compat_blkpg_ioctl_arg {
	compat_int_t op;
	compat_int_t flags;
	compat_int_t datalen;
	compat_caddr_t data;
};

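/* 32-bit variant of blkpg_ioctl(); only the argument layout differs. */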
static int compat_blkpg_ioctl(struct block_device *bdev,
			      struct compat_blkpg_ioctl_arg __user *arg)
{
	compat_caddr_t udata;
	int op;

	if (get_user(op, &arg->op) || get_user(udata, &arg->data))
		return -EFAULT;

	return blkpg_do_ioctl(bdev, compat_ptr(udata), op);
}
#endif

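/*
 * BLKDISCARD: discard a byte range described by a uint64_t pair of
 * { start, length }. Both values must be 512-byte aligned and the range must
 * fit within the device. The page cache for the range is invalidated under
 * the mapping's invalidate_lock before the discard is issued.
 *
 * Rough user-space usage sketch (illustrative only; "fd" is assumed to be a
 * block device opened for writing):
 *
 *	uint64_t range[2] = { start_bytes, len_bytes };
 *	ioctl(fd, BLKDISCARD, &range);
 */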
static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode,
		unsigned long arg)
{
	uint64_t range[2];
	uint64_t start, len, end;
	struct inode *inode = bdev->bd_inode;
	int err;

	if (!(mode & BLK_OPEN_WRITE))
		return -EBADF;

	if (!bdev_max_discard_sectors(bdev))
		return -EOPNOTSUPP;

	if (copy_from_user(range, (void __user *)arg, sizeof(range)))
		return -EFAULT;

	start = range[0];
	len = range[1];

	if (start & 511)
		return -EINVAL;
	if (len & 511)
		return -EINVAL;

	if (check_add_overflow(start, len, &end) ||
	    end > bdev_nr_bytes(bdev))
		return -EINVAL;

	filemap_invalidate_lock(inode->i_mapping);
	err = truncate_bdev_range(bdev, mode, start, start + len - 1);
	if (err)
		goto fail;
	err = blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL);
 fail:
	filemap_invalidate_unlock(inode->i_mapping);
	return err;
}

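/*
 * BLKSECDISCARD: like BLKDISCARD, but the discarded data is securely erased.
 * The same { start, length } byte-range format and 512-byte alignment rules
 * apply.
 */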
static int blk_ioctl_secure_erase(struct block_device *bdev, blk_mode_t mode,
		void __user *argp)
{
	uint64_t start, len;
	uint64_t range[2];
	int err;

	if (!(mode & BLK_OPEN_WRITE))
		return -EBADF;
	if (!bdev_max_secure_erase_sectors(bdev))
		return -EOPNOTSUPP;
	if (copy_from_user(range, argp, sizeof(range)))
		return -EFAULT;

	start = range[0];
	len = range[1];
	if ((start & 511) || (len & 511))
		return -EINVAL;
	if (start + len > bdev_nr_bytes(bdev))
		return -EINVAL;

	filemap_invalidate_lock(bdev->bd_inode->i_mapping);
	err = truncate_bdev_range(bdev, mode, start, start + len - 1);
	if (!err)
		err = blkdev_issue_secure_erase(bdev, start >> 9, len >> 9,
						GFP_KERNEL);
	filemap_invalidate_unlock(bdev->bd_inode->i_mapping);
	return err;
}

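/*
 * BLKZEROOUT: zero a { start, length } byte range on the device without
 * unmapping it (BLKDEV_ZERO_NOUNMAP), invalidating the page cache first.
 */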
static int blk_ioctl_zeroout(struct block_device *bdev, blk_mode_t mode,
		unsigned long arg)
{
	uint64_t range[2];
	uint64_t start, end, len;
	struct inode *inode = bdev->bd_inode;
	int err;

	if (!(mode & BLK_OPEN_WRITE))
		return -EBADF;

	if (copy_from_user(range, (void __user *)arg, sizeof(range)))
		return -EFAULT;

	start = range[0];
	len = range[1];
	end = start + len - 1;

	if (start & 511)
		return -EINVAL;
	if (len & 511)
		return -EINVAL;
	if (end >= (uint64_t)bdev_nr_bytes(bdev))
		return -EINVAL;
	if (end < start)
		return -EINVAL;

	/* Invalidate the page cache, including dirty pages */
	filemap_invalidate_lock(inode->i_mapping);
	err = truncate_bdev_range(bdev, mode, start, end);
	if (err)
		goto fail;

	err = blkdev_issue_zeroout(bdev, start >> 9, len >> 9, GFP_KERNEL,
				   BLKDEV_ZERO_NOUNMAP);

fail:
	filemap_invalidate_unlock(inode->i_mapping);
	return err;
}

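/*
 * Typed wrappers around put_user() so the ioctl handlers below copy a result
 * of exactly the width that user space expects for each command.
 */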
static int put_ushort(unsigned short __user *argp, unsigned short val)
{
	return put_user(val, argp);
}

static int put_int(int __user *argp, int val)
{
	return put_user(val, argp);
}

static int put_uint(unsigned int __user *argp, unsigned int val)
{
	return put_user(val, argp);
}

static int put_long(long __user *argp, long val)
{
	return put_user(val, argp);
}

static int put_ulong(unsigned long __user *argp, unsigned long val)
{
	return put_user(val, argp);
}

static int put_u64(u64 __user *argp, u64 val)
{
	return put_user(val, argp);
}

#ifdef CONFIG_COMPAT
static int compat_put_long(compat_long_t __user *argp, long val)
{
	return put_user(val, argp);
}

static int compat_put_ulong(compat_ulong_t __user *argp, compat_ulong_t val)
{
	return put_user(val, argp);
}
#endif

#ifdef CONFIG_COMPAT
/*
 * This is the equivalent of compat_ptr_ioctl(), to be used by block
 * drivers that implement only commands that are completely compatible
 * between 32-bit and 64-bit user space
 */
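/*
 * Minimal usage sketch (illustrative only; "mydrv_ioctl" and "mydrv_fops"
 * are made-up names): a driver whose native ioctl handler is already
 * 32/64-bit clean can point .compat_ioctl straight at this helper:
 *
 *	static const struct block_device_operations mydrv_fops = {
 *		.owner		= THIS_MODULE,
 *		.ioctl		= mydrv_ioctl,
 *		.compat_ioctl	= blkdev_compat_ptr_ioctl,
 *	};
 */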
int blkdev_compat_ptr_ioctl(struct block_device *bdev, blk_mode_t mode,
			unsigned cmd, unsigned long arg)
{
	struct gendisk *disk = bdev->bd_disk;

	if (disk->fops->ioctl)
		return disk->fops->ioctl(bdev, mode, cmd,
					 (unsigned long)compat_ptr(arg));

	return -ENOIOCTLCMD;
}
EXPORT_SYMBOL(blkdev_compat_ptr_ioctl);
#endif

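/*
 * Persistent reservation (IOC_PR_*) handlers. Reservations are only allowed
 * on whole devices, and unprivileged callers must hold a file descriptor
 * that is open for writing.
 */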
static bool blkdev_pr_allowed(struct block_device *bdev, blk_mode_t mode)
{
	/* no sense to make reservations for partitions */
	if (bdev_is_partition(bdev))
		return false;

	if (capable(CAP_SYS_ADMIN))
		return true;
	/*
	 * Only allow unprivileged reservations if the file descriptor is open
	 * for writing.
	 */
	return mode & BLK_OPEN_WRITE;
}

static int blkdev_pr_register(struct block_device *bdev, blk_mode_t mode,
		struct pr_registration __user *arg)
{
	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
	struct pr_registration reg;

	if (!blkdev_pr_allowed(bdev, mode))
		return -EPERM;
	if (!ops || !ops->pr_register)
		return -EOPNOTSUPP;
	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;

	if (reg.flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;
	return ops->pr_register(bdev, reg.old_key, reg.new_key, reg.flags);
}

static int blkdev_pr_reserve(struct block_device *bdev, blk_mode_t mode,
		struct pr_reservation __user *arg)
{
	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
	struct pr_reservation rsv;

	if (!blkdev_pr_allowed(bdev, mode))
		return -EPERM;
	if (!ops || !ops->pr_reserve)
		return -EOPNOTSUPP;
	if (copy_from_user(&rsv, arg, sizeof(rsv)))
		return -EFAULT;

	if (rsv.flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;
	return ops->pr_reserve(bdev, rsv.key, rsv.type, rsv.flags);
}

static int blkdev_pr_release(struct block_device *bdev, blk_mode_t mode,
		struct pr_reservation __user *arg)
{
	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
	struct pr_reservation rsv;

	if (!blkdev_pr_allowed(bdev, mode))
		return -EPERM;
	if (!ops || !ops->pr_release)
		return -EOPNOTSUPP;
	if (copy_from_user(&rsv, arg, sizeof(rsv)))
		return -EFAULT;

	if (rsv.flags)
		return -EOPNOTSUPP;
	return ops->pr_release(bdev, rsv.key, rsv.type);
}

static int blkdev_pr_preempt(struct block_device *bdev, blk_mode_t mode,
		struct pr_preempt __user *arg, bool abort)
{
	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
	struct pr_preempt p;

	if (!blkdev_pr_allowed(bdev, mode))
		return -EPERM;
	if (!ops || !ops->pr_preempt)
		return -EOPNOTSUPP;
	if (copy_from_user(&p, arg, sizeof(p)))
		return -EFAULT;

	if (p.flags)
		return -EOPNOTSUPP;
	return ops->pr_preempt(bdev, p.old_key, p.new_key, p.type, abort);
}

static int blkdev_pr_clear(struct block_device *bdev, blk_mode_t mode,
		struct pr_clear __user *arg)
{
	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
	struct pr_clear c;

	if (!blkdev_pr_allowed(bdev, mode))
		return -EPERM;
	if (!ops || !ops->pr_clear)
		return -EOPNOTSUPP;
	if (copy_from_user(&c, arg, sizeof(c)))
		return -EFAULT;

	if (c.flags)
		return -EOPNOTSUPP;
	return ops->pr_clear(bdev, c.key);
}

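/*
 * BLKFLSBUF: sync and drop the block device's page cache. If a holder such
 * as a mounted filesystem registered a sync callback, defer to it so it can
 * write out its own state first.
 */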
static int blkdev_flushbuf(struct block_device *bdev, unsigned cmd,
		unsigned long arg)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	mutex_lock(&bdev->bd_holder_lock);
	if (bdev->bd_holder_ops && bdev->bd_holder_ops->sync)
		bdev->bd_holder_ops->sync(bdev);
	else
		sync_blockdev(bdev);
	mutex_unlock(&bdev->bd_holder_lock);

	invalidate_bdev(bdev);
	return 0;
}

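/*
 * BLKROSET: set or clear the per-bdev read-only flag, giving the driver a
 * chance to veto the change via its set_read_only() method.
 */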
static int blkdev_roset(struct block_device *bdev, unsigned cmd,
		unsigned long arg)
{
	int ret, n;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (get_user(n, (int __user *)arg))
		return -EFAULT;
	if (bdev->bd_disk->fops->set_read_only) {
		ret = bdev->bd_disk->fops->set_read_only(bdev, n);
		if (ret)
			return ret;
	}
	bdev->bd_read_only = n;
	return 0;
}

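/* HDIO_GETGEO: report the (largely legacy) CHS geometry of the device. */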
static int blkdev_getgeo(struct block_device *bdev,
		struct hd_geometry __user *argp)
{
	struct gendisk *disk = bdev->bd_disk;
	struct hd_geometry geo;
	int ret;

	if (!argp)
		return -EINVAL;
	if (!disk->fops->getgeo)
		return -ENOTTY;

	/*
	 * We need to set the startsect first, the driver may
	 * want to override it.
	 */
	memset(&geo, 0, sizeof(geo));
	geo.start = get_start_sect(bdev);
	ret = disk->fops->getgeo(bdev, &geo);
	if (ret)
		return ret;
	if (copy_to_user(argp, &geo, sizeof(geo)))
		return -EFAULT;
	return 0;
}

#ifdef CONFIG_COMPAT
struct compat_hd_geometry {
	unsigned char heads;
	unsigned char sectors;
	unsigned short cylinders;
	u32 start;
};

static int compat_hdio_getgeo(struct block_device *bdev,
			      struct compat_hd_geometry __user *ugeo)
{
	struct gendisk *disk = bdev->bd_disk;
	struct hd_geometry geo;
	int ret;

	if (!ugeo)
		return -EINVAL;
	if (!disk->fops->getgeo)
		return -ENOTTY;

	memset(&geo, 0, sizeof(geo));
	/*
	 * We need to set the startsect first, the driver may
	 * want to override it.
	 */
	geo.start = get_start_sect(bdev);
	ret = disk->fops->getgeo(bdev, &geo);
	if (ret)
		return ret;

	ret = copy_to_user(ugeo, &geo, 4);
	ret |= put_user(geo.start, &ugeo->start);
	if (ret)
		ret = -EFAULT;

	return ret;
}
#endif

/* set the logical block size */
static int blkdev_bszset(struct block_device *bdev, blk_mode_t mode,
		int __user *argp)
{
	int ret, n;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (!argp)
		return -EINVAL;
	if (get_user(n, argp))
		return -EFAULT;

	if (mode & BLK_OPEN_EXCL)
		return set_blocksize(bdev, n);

	if (IS_ERR(blkdev_get_by_dev(bdev->bd_dev, mode, &bdev, NULL)))
		return -EBUSY;
	ret = set_blocksize(bdev, n);
	blkdev_put(bdev, &bdev);

	return ret;
}

/*
 * Common commands that are handled the same way on native and compat
 * user space. Note the separate arg/argp parameters that are needed
 * to deal with the compat_ptr() conversion.
 */
static int blkdev_common_ioctl(struct block_device *bdev, blk_mode_t mode,
			       unsigned int cmd, unsigned long arg,
			       void __user *argp)
{
	unsigned int max_sectors;

	switch (cmd) {
	case BLKFLSBUF:
		return blkdev_flushbuf(bdev, cmd, arg);
	case BLKROSET:
		return blkdev_roset(bdev, cmd, arg);
	case BLKDISCARD:
		return blk_ioctl_discard(bdev, mode, arg);
	case BLKSECDISCARD:
		return blk_ioctl_secure_erase(bdev, mode, argp);
	case BLKZEROOUT:
		return blk_ioctl_zeroout(bdev, mode, arg);
	case BLKGETDISKSEQ:
		return put_u64(argp, bdev->bd_disk->diskseq);
	case BLKREPORTZONE:
		return blkdev_report_zones_ioctl(bdev, cmd, arg);
	case BLKRESETZONE:
	case BLKOPENZONE:
	case BLKCLOSEZONE:
	case BLKFINISHZONE:
		return blkdev_zone_mgmt_ioctl(bdev, mode, cmd, arg);
	case BLKGETZONESZ:
		return put_uint(argp, bdev_zone_sectors(bdev));
	case BLKGETNRZONES:
		return put_uint(argp, bdev_nr_zones(bdev));
	case BLKROGET:
		return put_int(argp, bdev_read_only(bdev) != 0);
	case BLKSSZGET: /* get block device logical block size */
		return put_int(argp, bdev_logical_block_size(bdev));
	case BLKPBSZGET: /* get block device physical block size */
		return put_uint(argp, bdev_physical_block_size(bdev));
	case BLKIOMIN:
		return put_uint(argp, bdev_io_min(bdev));
	case BLKIOOPT:
		return put_uint(argp, bdev_io_opt(bdev));
	case BLKALIGNOFF:
		return put_int(argp, bdev_alignment_offset(bdev));
	case BLKDISCARDZEROES:
		return put_uint(argp, 0);
	case BLKSECTGET:
		max_sectors = min_t(unsigned int, USHRT_MAX,
				    queue_max_sectors(bdev_get_queue(bdev)));
		return put_ushort(argp, max_sectors);
	case BLKROTATIONAL:
		return put_ushort(argp, !bdev_nonrot(bdev));
	case BLKRASET:
	case BLKFRASET:
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
		bdev->bd_disk->bdi->ra_pages = (arg * 512) / PAGE_SIZE;
		return 0;
	case BLKRRPART:
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
		if (bdev_is_partition(bdev))
			return -EINVAL;
		return disk_scan_partitions(bdev->bd_disk, mode);
	case BLKTRACESTART:
	case BLKTRACESTOP:
	case BLKTRACETEARDOWN:
		return blk_trace_ioctl(bdev, cmd, argp);
	case IOC_PR_REGISTER:
		return blkdev_pr_register(bdev, mode, argp);
	case IOC_PR_RESERVE:
		return blkdev_pr_reserve(bdev, mode, argp);
	case IOC_PR_RELEASE:
		return blkdev_pr_release(bdev, mode, argp);
	case IOC_PR_PREEMPT:
		return blkdev_pr_preempt(bdev, mode, argp, false);
	case IOC_PR_PREEMPT_ABORT:
		return blkdev_pr_preempt(bdev, mode, argp, true);
	case IOC_PR_CLEAR:
		return blkdev_pr_clear(bdev, mode, argp);
	default:
		return -ENOIOCTLCMD;
	}
}

/*
 * Always keep this in sync with compat_blkdev_ioctl()
 * to handle all incompatible commands in both functions.
 *
 * New commands must be compatible and go into blkdev_common_ioctl
 */
long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	struct block_device *bdev = I_BDEV(file->f_mapping->host);
	void __user *argp = (void __user *)arg;
	blk_mode_t mode = file_to_blk_mode(file);
	int ret;

	switch (cmd) {
	/* These need separate implementations for the data structure */
	case HDIO_GETGEO:
		return blkdev_getgeo(bdev, argp);
	case BLKPG:
		return blkpg_ioctl(bdev, argp);

	/* Compat mode returns 32-bit data instead of 'long' */
	case BLKRAGET:
	case BLKFRAGET:
		if (!argp)
			return -EINVAL;
		return put_long(argp,
			(bdev->bd_disk->bdi->ra_pages * PAGE_SIZE) / 512);
	case BLKGETSIZE:
		if (bdev_nr_sectors(bdev) > ~0UL)
			return -EFBIG;
		return put_ulong(argp, bdev_nr_sectors(bdev));

	/* The data is compatible, but the command number is different */
	case BLKBSZGET: /* get block device soft block size (cf. BLKSSZGET) */
		return put_int(argp, block_size(bdev));
	case BLKBSZSET:
		return blkdev_bszset(bdev, mode, argp);
	case BLKGETSIZE64:
		return put_u64(argp, bdev_nr_bytes(bdev));

	/* Incompatible alignment on i386 */
	case BLKTRACESETUP:
		return blk_trace_ioctl(bdev, cmd, argp);
	default:
		break;
	}

	ret = blkdev_common_ioctl(bdev, mode, cmd, arg, argp);
	if (ret != -ENOIOCTLCMD)
		return ret;

	if (!bdev->bd_disk->fops->ioctl)
		return -ENOTTY;
	return bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
}

#ifdef CONFIG_COMPAT

#define BLKBSZGET_32		_IOR(0x12, 112, int)
#define BLKBSZSET_32		_IOW(0x12, 113, int)
#define BLKGETSIZE64_32		_IOR(0x12, 114, int)

/* Most of the generic ioctls are handled in the normal fallback path.
   This assumes the blkdev's low level compat_ioctl always returns
   ENOIOCTLCMD for unknown ioctls. */
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	int ret;
	void __user *argp = compat_ptr(arg);
	struct block_device *bdev = I_BDEV(file->f_mapping->host);
	struct gendisk *disk = bdev->bd_disk;
	blk_mode_t mode = file_to_blk_mode(file);

	switch (cmd) {
	/* These need separate implementations for the data structure */
	case HDIO_GETGEO:
		return compat_hdio_getgeo(bdev, argp);
	case BLKPG:
		return compat_blkpg_ioctl(bdev, argp);

	/* Compat mode returns 32-bit data instead of 'long' */
	case BLKRAGET:
	case BLKFRAGET:
		if (!argp)
			return -EINVAL;
		return compat_put_long(argp,
			(bdev->bd_disk->bdi->ra_pages * PAGE_SIZE) / 512);
	case BLKGETSIZE:
		if (bdev_nr_sectors(bdev) > ~(compat_ulong_t)0)
			return -EFBIG;
		return compat_put_ulong(argp, bdev_nr_sectors(bdev));

	/* The data is compatible, but the command number is different */
	case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */
		return put_int(argp, bdev_logical_block_size(bdev));
	case BLKBSZSET_32:
		return blkdev_bszset(bdev, mode, argp);
	case BLKGETSIZE64_32:
		return put_u64(argp, bdev_nr_bytes(bdev));

	/* Incompatible alignment on i386 */
	case BLKTRACESETUP32:
		return blk_trace_ioctl(bdev, cmd, argp);
	default:
		break;
	}

	ret = blkdev_common_ioctl(bdev, mode, cmd, arg, argp);
	if (ret == -ENOIOCTLCMD && disk->fops->compat_ioctl)
		ret = disk->fops->compat_ioctl(bdev, mode, cmd, arg);

	return ret;
}
#endif