// SPDX-License-Identifier: GPL-2.0
/*
 * (C) 2001 Clemson University and The University of Chicago
 *
 * Changes by Acxiom Corporation to add protocol version to kernel
 * communication, Copyright Acxiom Corporation, 2005.
 *
 * See COPYING in top-level directory.
 */

#include "protocol.h"
#include "orangefs-kernel.h"
#include "orangefs-dev-proto.h"
#include "orangefs-bufmap.h"
#include "orangefs-debugfs.h"

#include <linux/debugfs.h>
#include <linux/slab.h>

/* this file implements the /dev/pvfs2-req device node */

uint32_t orangefs_userspace_version;

static int open_access_count;

static DEFINE_MUTEX(devreq_mutex);

#define DUMP_DEVICE_ERROR()                                                   \
do {                                                                          \
	gossip_err("*****************************************************\n");\
	gossip_err("ORANGEFS Device Error: You cannot open the device file "); \
	gossip_err("\n/dev/%s more than once. Please make sure that\nthere "  \
		   "are no ", ORANGEFS_REQDEVICE_NAME);                       \
	gossip_err("instances of a program using this device\ncurrently "     \
		   "running. (You must verify this!)\n");                     \
	gossip_err("For example, you can use the lsof program as follows:\n");\
	gossip_err("'lsof | grep %s' (run this as root)\n",                   \
		   ORANGEFS_REQDEVICE_NAME);                                  \
	gossip_err("  open_access_count = %d\n", open_access_count);          \
	gossip_err("*****************************************************\n");\
} while (0)

static int hash_func(__u64 tag, int table_size)
{
	return do_div(tag, (unsigned int)table_size);
}

static void orangefs_devreq_add_op(struct orangefs_kernel_op_s *op)
{
	int index = hash_func(op->tag, hash_table_size);

	list_add_tail(&op->list, &orangefs_htable_ops_in_progress[index]);
}
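
/*
 * Ops waiting to be picked up by client-core sit on orangefs_request_list;
 * once orangefs_devreq_read() has handed an op to userspace it is hashed by
 * tag into orangefs_htable_ops_in_progress, and orangefs_devreq_write_iter()
 * finds it again via the tag echoed back in the downcall.
 */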

/*
 * find the op with this tag and remove it from the in progress
 * hash table.
 */
static struct orangefs_kernel_op_s *orangefs_devreq_remove_op(__u64 tag)
{
	struct orangefs_kernel_op_s *op, *next;
	int index;

	index = hash_func(tag, hash_table_size);

	spin_lock(&orangefs_htable_ops_in_progress_lock);
	list_for_each_entry_safe(op,
				 next,
				 &orangefs_htable_ops_in_progress[index],
				 list) {
		if (op->tag == tag && !op_state_purged(op) &&
		    !op_state_given_up(op)) {
			list_del_init(&op->list);
			spin_unlock(&orangefs_htable_ops_in_progress_lock);
			return op;
		}
	}

	spin_unlock(&orangefs_htable_ops_in_progress_lock);
	return NULL;
}

/*
 * Mark all mounted filesystems as pending remount.  Returns 1 if there
 * were none to mark.
 */
static int mark_all_pending_mounts(void)
{
	int unmounted = 1;
	struct orangefs_sb_info_s *orangefs_sb = NULL;

	spin_lock(&orangefs_superblocks_lock);
	list_for_each_entry(orangefs_sb, &orangefs_superblocks, list) {
		/* All of these filesystems require a remount. */
		orangefs_sb->mount_pending = 1;
		unmounted = 0;
	}
	spin_unlock(&orangefs_superblocks_lock);
	return unmounted;
}

/*
 * Determine if a given filesystem needs to be remounted or not.
 *  Returns -1 on error
 *           0 if already mounted
 *           1 if needs remount
 */
static int fs_mount_pending(__s32 fsid)
{
	int mount_pending = -1;
	struct orangefs_sb_info_s *orangefs_sb = NULL;

	spin_lock(&orangefs_superblocks_lock);
	list_for_each_entry(orangefs_sb, &orangefs_superblocks, list) {
		if (orangefs_sb->fs_id == fsid) {
			mount_pending = orangefs_sb->mount_pending;
			break;
		}
	}
	spin_unlock(&orangefs_superblocks_lock);
	return mount_pending;
}

static int orangefs_devreq_open(struct inode *inode, struct file *file)
{
	int ret = -EINVAL;

	/* in order to ensure that the filesystem driver sees correct UIDs */
	if (file->f_cred->user_ns != &init_user_ns) {
		gossip_err("%s: device cannot be opened outside init_user_ns\n",
			   __func__);
		goto out;
	}

	if (!(file->f_flags & O_NONBLOCK)) {
		gossip_err("%s: device cannot be opened in blocking mode\n",
			   __func__);
		goto out;
	}
	ret = -EACCES;
	gossip_debug(GOSSIP_DEV_DEBUG, "client-core: opening device\n");
	mutex_lock(&devreq_mutex);

	if (open_access_count == 0) {
		open_access_count = 1;
		ret = 0;
	} else {
		DUMP_DEVICE_ERROR();
	}
	mutex_unlock(&devreq_mutex);

out:

	gossip_debug(GOSSIP_DEV_DEBUG,
		     "pvfs2-client-core: open device complete (ret = %d)\n",
		     ret);
	return ret;
}

/* Function for read() callers into the device */
static ssize_t orangefs_devreq_read(struct file *file,
				    char __user *buf,
				    size_t count, loff_t *offset)
{
	struct orangefs_kernel_op_s *op, *temp;
	__s32 proto_ver = ORANGEFS_KERNEL_PROTO_VERSION;
	static __s32 magic = ORANGEFS_DEVREQ_MAGIC;
	struct orangefs_kernel_op_s *cur_op;
	unsigned long ret;

	/* We do not support blocking IO. */
	if (!(file->f_flags & O_NONBLOCK)) {
		gossip_err("%s: blocking read from client-core.\n",
			   __func__);
		return -EINVAL;
	}

	/*
	 * The client will do an ioctl to find MAX_DEV_REQ_UPSIZE, then
	 * always read with that size buffer.
	 */
	if (count != MAX_DEV_REQ_UPSIZE) {
		gossip_err("orangefs: client-core tried to read wrong size\n");
		return -EINVAL;
	}
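
	/*
	 * Checking the list without the lock below is a deliberate fast
	 * path: if a request is queued concurrently we simply return
	 * -EAGAIN and the nonblocking client picks it up on its next
	 * poll()/read() cycle.
	 */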
	/* Check for an empty list before locking. */
	if (list_empty(&orangefs_request_list))
		return -EAGAIN;

restart:
	cur_op = NULL;
	/* Get next op (if any) from top of list. */
	spin_lock(&orangefs_request_list_lock);
	list_for_each_entry_safe(op, temp, &orangefs_request_list, list) {
		__s32 fsid;
		/* This lock is held past the end of the loop when we break. */
		spin_lock(&op->lock);
		if (unlikely(op_state_purged(op) || op_state_given_up(op))) {
			spin_unlock(&op->lock);
			continue;
		}

		fsid = fsid_of_op(op);
		if (fsid != ORANGEFS_FS_ID_NULL) {
			int ret;
			/* Skip ops whose filesystem needs to be mounted. */
			ret = fs_mount_pending(fsid);
			if (ret == 1) {
				gossip_debug(GOSSIP_DEV_DEBUG,
					     "%s: mount pending, skipping op tag "
					     "%llu %s\n",
					     __func__,
					     llu(op->tag),
					     get_opname_string(op));
				spin_unlock(&op->lock);
				continue;
			/*
			 * Skip ops whose filesystem we don't know about unless
			 * it is being mounted or unmounted.  It is possible for
			 * a filesystem we don't know about to be unmounted if
			 * it fails to mount in the kernel after userspace has
			 * been sent the mount request.
			 */
			/* XXX: is there a better way to detect this? */
			} else if (ret == -1 &&
				   !(op->upcall.type ==
					ORANGEFS_VFS_OP_FS_MOUNT ||
				     op->upcall.type ==
					ORANGEFS_VFS_OP_GETATTR ||
				     op->upcall.type ==
					ORANGEFS_VFS_OP_FS_UMOUNT)) {
				gossip_debug(GOSSIP_DEV_DEBUG,
					     "orangefs: skipping op tag %llu %s\n",
					     llu(op->tag),
					     get_opname_string(op));
				gossip_err(
				    "orangefs: ERROR: fs_mount_pending %d\n",
				    fsid);
				spin_unlock(&op->lock);
				continue;
			}
		}
		/*
		 * Either this op does not pertain to a filesystem, is mounting
		 * a filesystem, or pertains to a mounted filesystem. Let it
		 * through.
		 */
		cur_op = op;
		break;
	}

	/*
	 * At this point we either have a valid op and can continue or have not
	 * found an op and must ask the client to try again later.
	 */
	if (!cur_op) {
		spin_unlock(&orangefs_request_list_lock);
		return -EAGAIN;
	}

	gossip_debug(GOSSIP_DEV_DEBUG, "%s: reading op tag %llu %s\n",
		     __func__,
		     llu(cur_op->tag),
		     get_opname_string(cur_op));

	/*
	 * Such an op should never be on the list in the first place. If so, we
	 * will abort.
	 */
	if (op_state_in_progress(cur_op) || op_state_serviced(cur_op)) {
		gossip_err("orangefs: ERROR: Current op already queued.\n");
		list_del_init(&cur_op->list);
		spin_unlock(&cur_op->lock);
		spin_unlock(&orangefs_request_list_lock);
		return -EAGAIN;
	}

	list_del_init(&cur_op->list);
	spin_unlock(&orangefs_request_list_lock);

	spin_unlock(&cur_op->lock);
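
	/*
	 * The upcall handed to userspace is laid out as:
	 *   __s32 proto_ver, __s32 magic, __u64 tag,
	 *   struct orangefs_upcall_s upcall
	 * which together account for the MAX_DEV_REQ_UPSIZE buffer the
	 * client reads with.
	 */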
	/* Push the upcall out. */
	ret = copy_to_user(buf, &proto_ver, sizeof(__s32));
	if (ret != 0)
		goto error;
	ret = copy_to_user(buf + sizeof(__s32), &magic, sizeof(__s32));
	if (ret != 0)
		goto error;
	ret = copy_to_user(buf + 2 * sizeof(__s32),
			   &cur_op->tag,
			   sizeof(__u64));
	if (ret != 0)
		goto error;
	ret = copy_to_user(buf + 2 * sizeof(__s32) + sizeof(__u64),
			   &cur_op->upcall,
			   sizeof(struct orangefs_upcall_s));
	if (ret != 0)
		goto error;

	spin_lock(&orangefs_htable_ops_in_progress_lock);
	spin_lock(&cur_op->lock);
	if (unlikely(op_state_given_up(cur_op))) {
		spin_unlock(&cur_op->lock);
		spin_unlock(&orangefs_htable_ops_in_progress_lock);
		complete(&cur_op->waitq);
		goto restart;
	}

	/*
	 * Set the operation to be in progress and move it between lists since
	 * it has been sent to the client.
	 */
	set_op_state_inprogress(cur_op);
	gossip_debug(GOSSIP_DEV_DEBUG,
		     "%s: 1 op:%s: op_state:%d: process:%s:\n",
		     __func__,
		     get_opname_string(cur_op),
		     cur_op->op_state,
		     current->comm);
	orangefs_devreq_add_op(cur_op);
	spin_unlock(&cur_op->lock);
	spin_unlock(&orangefs_htable_ops_in_progress_lock);

	/* The client only asks to read one size buffer. */
	return MAX_DEV_REQ_UPSIZE;
error:
	/*
	 * We were unable to copy the op data to the client. Put the op back on
	 * the list. If the client has crashed, the op will be purged later
	 * when the device is released.
	 */
	gossip_err("orangefs: Failed to copy data to user space\n");
	spin_lock(&orangefs_request_list_lock);
	spin_lock(&cur_op->lock);
	if (likely(!op_state_given_up(cur_op))) {
		set_op_state_waiting(cur_op);
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "%s: 2 op:%s: op_state:%d: process:%s:\n",
			     __func__,
			     get_opname_string(cur_op),
			     cur_op->op_state,
			     current->comm);
		list_add(&cur_op->list, &orangefs_request_list);
		spin_unlock(&cur_op->lock);
	} else {
		spin_unlock(&cur_op->lock);
		complete(&cur_op->waitq);
	}
	spin_unlock(&orangefs_request_list_lock);
	return -EFAULT;
}
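
/*
 * Illustrative sketch (not part of this driver) of how a client-core
 * process might push a reply back down with writev(); devfd, client_version,
 * tag_from_upcall, reply, and reply_len are placeholder names.  The whole
 * write must be at least MAX_DEV_REQ_DOWNSIZE bytes, as checked below:
 *
 *	struct { __u32 version; __u32 magic; __u64 tag; } head = {
 *		.version = client_version,
 *		.magic = ORANGEFS_DEVREQ_MAGIC,
 *		.tag = tag_from_upcall,
 *	};
 *	struct iovec vec[2] = {
 *		{ .iov_base = &head, .iov_len = sizeof(head) },
 *		{ .iov_base = reply, .iov_len = reply_len },
 *	};
 *	writev(devfd, vec, 2);
 */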
/*
 * Function for writev() callers into the device.
 *
 * Userspace should have written:
 *  - __u32 version
 *  - __u32 magic
 *  - __u64 tag
 *  - struct orangefs_downcall_s
 *  - trailer buffer (in the case of READDIR operations)
 */
static ssize_t orangefs_devreq_write_iter(struct kiocb *iocb,
					  struct iov_iter *iter)
{
	ssize_t ret;
	struct orangefs_kernel_op_s *op = NULL;
	struct {
		__u32 version;
		__u32 magic;
		__u64 tag;
	} head;
	int total = ret = iov_iter_count(iter);
	int downcall_size = sizeof(struct orangefs_downcall_s);
	int head_size = sizeof(head);

	gossip_debug(GOSSIP_DEV_DEBUG, "%s: total:%d: ret:%zd:\n",
		     __func__,
		     total,
		     ret);

	if (total < MAX_DEV_REQ_DOWNSIZE) {
		gossip_err("%s: total:%d: must be at least:%u:\n",
			   __func__,
			   total,
			   (unsigned int) MAX_DEV_REQ_DOWNSIZE);
		return -EFAULT;
	}

	if (!copy_from_iter_full(&head, head_size, iter)) {
		gossip_err("%s: failed to copy head.\n", __func__);
		return -EFAULT;
	}

	if (head.version < ORANGEFS_MINIMUM_USERSPACE_VERSION) {
		gossip_err("%s: userspace claims version "
			   "%d, minimum version required: %d.\n",
			   __func__,
			   head.version,
			   ORANGEFS_MINIMUM_USERSPACE_VERSION);
		return -EPROTO;
	}

	if (head.magic != ORANGEFS_DEVREQ_MAGIC) {
		gossip_err("Error: Device magic number does not match.\n");
		return -EPROTO;
	}

	if (!orangefs_userspace_version) {
		orangefs_userspace_version = head.version;
	} else if (orangefs_userspace_version != head.version) {
		gossip_err("Error: userspace version changed\n");
		return -EPROTO;
	}

	/* remove the op from the in progress hash table */
	op = orangefs_devreq_remove_op(head.tag);
	if (!op) {
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "%s: No one's waiting for tag %llu\n",
			     __func__, llu(head.tag));
		return ret;
	}

	if (!copy_from_iter_full(&op->downcall, downcall_size, iter)) {
		gossip_err("%s: failed to copy downcall.\n", __func__);
		goto Efault;
	}

	if (op->downcall.status)
		goto wakeup;

	/*
	 * We've successfully peeled off the head and the downcall.
	 * Something has gone awry if total doesn't equal the
	 * sum of head_size, downcall_size and trailer_size.
	 */
	if ((head_size + downcall_size + op->downcall.trailer_size) != total) {
		gossip_err("%s: funky write, head_size:%d"
			   ": downcall_size:%d: trailer_size:%lld"
			   ": total size:%d:\n",
			   __func__,
			   head_size,
			   downcall_size,
			   op->downcall.trailer_size,
			   total);
		goto Efault;
	}
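
	/*
	 * The directory listing itself is variable length, so READDIR
	 * replies carry it in a trailer that follows the fixed-size
	 * downcall struct rather than inside it.
	 */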
	/* Only READDIR operations should have trailers. */
	if ((op->downcall.type != ORANGEFS_VFS_OP_READDIR) &&
	    (op->downcall.trailer_size != 0)) {
		gossip_err("%s: %x operation with trailer.\n",
			   __func__,
			   op->downcall.type);
		goto Efault;
	}

	/* READDIR operations should always have trailers. */
	if ((op->downcall.type == ORANGEFS_VFS_OP_READDIR) &&
	    (op->downcall.trailer_size == 0)) {
		gossip_err("%s: %x operation with no trailer.\n",
			   __func__,
			   op->downcall.type);
		goto Efault;
	}

	if (op->downcall.type != ORANGEFS_VFS_OP_READDIR)
		goto wakeup;

	op->downcall.trailer_buf = vzalloc(op->downcall.trailer_size);
	if (!op->downcall.trailer_buf)
		goto Enomem;

	if (!copy_from_iter_full(op->downcall.trailer_buf,
				 op->downcall.trailer_size, iter)) {
		gossip_err("%s: failed to copy trailer.\n", __func__);
		vfree(op->downcall.trailer_buf);
		goto Efault;
	}

wakeup:
	/*
	 * Return to vfs waitqueue, and back to service_operation
	 * through wait_for_matching_downcall.
	 */
	spin_lock(&op->lock);
	if (unlikely(op_is_cancel(op))) {
		spin_unlock(&op->lock);
		put_cancel(op);
	} else if (unlikely(op_state_given_up(op))) {
		spin_unlock(&op->lock);
		complete(&op->waitq);
	} else {
		set_op_state_serviced(op);
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "%s: op:%s: op_state:%d: process:%s:\n",
			     __func__,
			     get_opname_string(op),
			     op->op_state,
			     current->comm);
		spin_unlock(&op->lock);
	}
	return ret;

Efault:
	op->downcall.status = -(ORANGEFS_ERROR_BIT | 9);
	ret = -EFAULT;
	goto wakeup;

Enomem:
	op->downcall.status = -(ORANGEFS_ERROR_BIT | 8);
	ret = -ENOMEM;
	goto wakeup;
}

/*
 * NOTE: gets called when the last reference to this device is dropped.
 * Using the open_access_count variable, we enforce a reference count
 * on this file so that it can be opened by only one process at a time.
 * The devreq_mutex is used to make sure all I/O has completed before
 * we call orangefs_bufmap_finalize() and to handle similarly tricky
 * teardown ordering.
 */
static int orangefs_devreq_release(struct inode *inode, struct file *file)
{
	int unmounted = 0;

	gossip_debug(GOSSIP_DEV_DEBUG,
		     "%s:pvfs2-client-core: exiting, closing device\n",
		     __func__);

	mutex_lock(&devreq_mutex);
	orangefs_bufmap_finalize();

	open_access_count = -1;

	unmounted = mark_all_pending_mounts();
	gossip_debug(GOSSIP_DEV_DEBUG, "ORANGEFS Device Close: Filesystem(s) %s\n",
		     (unmounted ? "UNMOUNTED" : "MOUNTED"));

	purge_waiting_ops();
	purge_inprogress_ops();

	orangefs_bufmap_run_down();

	gossip_debug(GOSSIP_DEV_DEBUG,
		     "pvfs2-client-core: device close complete\n");
	open_access_count = 0;
	orangefs_userspace_version = 0;
	mutex_unlock(&devreq_mutex);
	return 0;
}

int is_daemon_in_service(void)
{
	int in_service;

	/*
	 * Check whether client-core is alive, based on the access
	 * count we maintain on the device.
	 */
	mutex_lock(&devreq_mutex);
	in_service = open_access_count == 1 ? 0 : -EIO;
	mutex_unlock(&devreq_mutex);
	return in_service;
}

bool __is_daemon_in_service(void)
{
	return open_access_count == 1;
}
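
/*
 * Illustrative sketch (not part of this driver) of how a client-core
 * process is expected to drive the request side of this device; devfd,
 * upsize, and buf are placeholder names:
 *
 *	int devfd = open("/dev/pvfs2-req", O_RDWR | O_NONBLOCK);
 *	__s32 upsize;
 *
 *	ioctl(devfd, ORANGEFS_DEV_GET_MAX_UPSIZE, &upsize);
 *	// then poll() for EPOLLIN and read exactly upsize bytes per upcall:
 *	read(devfd, buf, upsize);
 */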

static inline long check_ioctl_command(unsigned int command)
{
	/* Check for valid ioctl codes */
	if (_IOC_TYPE(command) != ORANGEFS_DEV_MAGIC) {
		gossip_err("device ioctl magic numbers don't match! Did you rebuild pvfs2-client-core/libpvfs2? [cmd %x, magic %x != %x]\n",
			   command,
			   _IOC_TYPE(command),
			   ORANGEFS_DEV_MAGIC);
		return -EINVAL;
	}
	/* and valid ioctl commands */
	if (_IOC_NR(command) >= ORANGEFS_DEV_MAXNR || _IOC_NR(command) <= 0) {
		gossip_err("Invalid ioctl command number [%d >= %d]\n",
			   _IOC_NR(command), ORANGEFS_DEV_MAXNR);
		return -ENOIOCTLCMD;
	}
	return 0;
}

static long dispatch_ioctl_command(unsigned int command, unsigned long arg)
{
	static __s32 magic = ORANGEFS_DEVREQ_MAGIC;
	static __s32 max_up_size = MAX_DEV_REQ_UPSIZE;
	static __s32 max_down_size = MAX_DEV_REQ_DOWNSIZE;
	struct ORANGEFS_dev_map_desc user_desc;
	int ret = 0;
	int upstream_kmod = 1;
	struct orangefs_sb_info_s *orangefs_sb;

	/* mtmoore: add locking here */

	switch (command) {
	case ORANGEFS_DEV_GET_MAGIC:
		return ((put_user(magic, (__s32 __user *) arg) == -EFAULT) ?
			-EIO :
			0);
	case ORANGEFS_DEV_GET_MAX_UPSIZE:
		return ((put_user(max_up_size,
				  (__s32 __user *) arg) == -EFAULT) ?
			-EIO :
			0);
	case ORANGEFS_DEV_GET_MAX_DOWNSIZE:
		return ((put_user(max_down_size,
				  (__s32 __user *) arg) == -EFAULT) ?
			-EIO :
			0);
	case ORANGEFS_DEV_MAP:
		ret = copy_from_user(&user_desc,
				     (struct ORANGEFS_dev_map_desc __user *)
				     arg,
				     sizeof(struct ORANGEFS_dev_map_desc));
		/* WTF -EIO and not -EFAULT? */
		return ret ? -EIO : orangefs_bufmap_initialize(&user_desc);
	case ORANGEFS_DEV_REMOUNT_ALL:
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "%s: got ORANGEFS_DEV_REMOUNT_ALL\n",
			     __func__);

		/*
		 * Remount all mounted orangefs volumes to regain the lost
		 * dynamic mount tables (if any).  NOTE: this is done
		 * without keeping the superblock list locked, because of
		 * the upcall/downcall waiting involved.  The request mutex
		 * is also held so that no operations are serviced until
		 * all of the remounts have been serviced (so that ops
		 * issued between remounts do not fail).
		 */
		ret = mutex_lock_interruptible(&orangefs_request_mutex);
		if (ret < 0)
			return ret;
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "%s: priority remount in progress\n",
			     __func__);
		spin_lock(&orangefs_superblocks_lock);
		list_for_each_entry(orangefs_sb, &orangefs_superblocks, list) {
			/*
			 * We have to drop the spinlock, so entries can be
			 * removed.  They can't be freed, though, so we just
			 * keep the forward pointers and zero the back ones -
			 * that way we can get to the rest of the list.
			 */
			if (!orangefs_sb->list.prev)
				continue;
			gossip_debug(GOSSIP_DEV_DEBUG,
				     "%s: Remounting SB %p\n",
				     __func__,
				     orangefs_sb);

			spin_unlock(&orangefs_superblocks_lock);
			ret = orangefs_remount(orangefs_sb);
			spin_lock(&orangefs_superblocks_lock);
			if (ret) {
				gossip_debug(GOSSIP_DEV_DEBUG,
					     "SB %p remount failed\n",
					     orangefs_sb);
				break;
			}
		}
		spin_unlock(&orangefs_superblocks_lock);
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "%s: priority remount complete\n",
			     __func__);
		mutex_unlock(&orangefs_request_mutex);
		return ret;

	case ORANGEFS_DEV_UPSTREAM:
		ret = copy_to_user((void __user *)arg,
				   &upstream_kmod,
				   sizeof(upstream_kmod));

		if (ret != 0)
			return -EIO;
		else
			return ret;

	case ORANGEFS_DEV_CLIENT_MASK:
		return orangefs_debugfs_new_client_mask((void __user *)arg);
	case ORANGEFS_DEV_CLIENT_STRING:
		return orangefs_debugfs_new_client_string((void __user *)arg);
	case ORANGEFS_DEV_DEBUG:
		return orangefs_debugfs_new_debug((void __user *)arg);
	default:
		return -ENOIOCTLCMD;
	}
	return -ENOIOCTLCMD;
}

static long orangefs_devreq_ioctl(struct file *file,
				  unsigned int command, unsigned long arg)
{
	long ret;

	/* Check for properly constructed commands */
	ret = check_ioctl_command(command);
	if (ret < 0)
		return (int)ret;

	return (int)dispatch_ioctl_command(command, arg);
}

#ifdef CONFIG_COMPAT		/* CONFIG_COMPAT is in .config */

/* Compat structure for the ORANGEFS_DEV_MAP ioctl */
struct ORANGEFS_dev_map_desc32 {
	compat_uptr_t ptr;
	__s32 total_size;
	__s32 size;
	__s32 count;
};

/*
 * ioctl handler for 32-bit user-space apps when the kernel module
 * is compiled as 64-bit.
 */
static long orangefs_devreq_compat_ioctl(struct file *filp, unsigned int cmd,
					 unsigned long args)
{
	long ret;

	/* Check for properly constructed commands */
	ret = check_ioctl_command(cmd);
	if (ret < 0)
		return ret;
	if (cmd == ORANGEFS_DEV_MAP) {
		struct ORANGEFS_dev_map_desc desc;
		struct ORANGEFS_dev_map_desc32 d32;

		if (copy_from_user(&d32, (void __user *)args, sizeof(d32)))
			return -EFAULT;

		desc.ptr = compat_ptr(d32.ptr);
		desc.total_size = d32.total_size;
		desc.size = d32.size;
		desc.count = d32.count;
		return orangefs_bufmap_initialize(&desc);
	}
	/* no other ioctl requires translation */
	return dispatch_ioctl_command(cmd, args);
}

#endif /* CONFIG_COMPAT is in .config */

static __poll_t orangefs_devreq_poll(struct file *file,
				     struct poll_table_struct *poll_table)
{
	__poll_t poll_revent_mask = 0;

	poll_wait(file, &orangefs_request_list_waitq, poll_table);

	if (!list_empty(&orangefs_request_list))
		poll_revent_mask |= EPOLLIN;
	return poll_revent_mask;
}

/* the assigned character device major number */
static int orangefs_dev_major;
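
/*
 * client-core pulls upcalls with read(), pushes downcalls back with
 * writev() (handled by write_iter), learns that upcalls are pending via
 * poll(), and uses the ioctls above for everything else (shared buffer
 * mapping, transfer sizes, remounts, debug settings).
 */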
static const struct file_operations orangefs_devreq_file_operations = {
	.owner = THIS_MODULE,
	.read = orangefs_devreq_read,
	.write_iter = orangefs_devreq_write_iter,
	.open = orangefs_devreq_open,
	.release = orangefs_devreq_release,
	.unlocked_ioctl = orangefs_devreq_ioctl,

#ifdef CONFIG_COMPAT		/* CONFIG_COMPAT is in .config */
	.compat_ioctl = orangefs_devreq_compat_ioctl,
#endif
	.poll = orangefs_devreq_poll
};

/*
 * Initialize orangefs device specific state:
 * Must be called at module load time only
 */
int orangefs_dev_init(void)
{
	/* register orangefs-req device */
	orangefs_dev_major = register_chrdev(0,
					     ORANGEFS_REQDEVICE_NAME,
					     &orangefs_devreq_file_operations);
	if (orangefs_dev_major < 0) {
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "Failed to register /dev/%s (error %d)\n",
			     ORANGEFS_REQDEVICE_NAME, orangefs_dev_major);
		return orangefs_dev_major;
	}

	gossip_debug(GOSSIP_DEV_DEBUG,
		     "*** /dev/%s character device registered ***\n",
		     ORANGEFS_REQDEVICE_NAME);
	gossip_debug(GOSSIP_DEV_DEBUG, "'mknod /dev/%s c %d 0'.\n",
		     ORANGEFS_REQDEVICE_NAME, orangefs_dev_major);
	return 0;
}

void orangefs_dev_cleanup(void)
{
	unregister_chrdev(orangefs_dev_major, ORANGEFS_REQDEVICE_NAME);
	gossip_debug(GOSSIP_DEV_DEBUG,
		     "*** /dev/%s character device unregistered ***\n",
		     ORANGEFS_REQDEVICE_NAME);
}