// SPDX-License-Identifier: GPL-2.0-only
/*
 * umh - the kernel usermode helper
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/binfmts.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/cred.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/resource.h>
#include <linux/notifier.h>
#include <linux/suspend.h>
#include <linux/rwsem.h>
#include <linux/ptrace.h>
#include <linux/async.h>
#include <linux/uaccess.h>
#include <linux/shmem_fs.h>
#include <linux/pipe_fs_i.h>

#include <trace/events/module.h>

#define CAP_BSET	(void *)1
#define CAP_PI		(void *)2

static kernel_cap_t usermodehelper_bset = CAP_FULL_SET;
static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET;
static DEFINE_SPINLOCK(umh_sysctl_lock);
static DECLARE_RWSEM(umhelper_sem);
static LIST_HEAD(umh_list);
static DEFINE_MUTEX(umh_list_lock);

static void call_usermodehelper_freeinfo(struct subprocess_info *info)
{
	if (info->cleanup)
		(*info->cleanup)(info);
	kfree(info);
}

static void umh_complete(struct subprocess_info *sub_info)
{
	struct completion *comp = xchg(&sub_info->complete, NULL);
	/*
	 * See call_usermodehelper_exec(). If xchg() returns NULL
	 * we own sub_info, the UMH_KILLABLE caller has gone away
	 * or the caller used UMH_NO_WAIT.
	 */
	if (comp)
		complete(comp);
	else
		call_usermodehelper_freeinfo(sub_info);
}

/*
 * This is the task which runs the usermode application
 */
static int call_usermodehelper_exec_async(void *data)
{
	struct subprocess_info *sub_info = data;
	struct cred *new;
	int retval;

	spin_lock_irq(&current->sighand->siglock);
	flush_signal_handlers(current, 1);
	spin_unlock_irq(&current->sighand->siglock);

	/*
	 * Our parent (unbound workqueue) runs with elevated scheduling
	 * priority. Avoid propagating that into the userspace child.
	 */
	set_user_nice(current, 0);

	retval = -ENOMEM;
	new = prepare_kernel_cred(current);
	if (!new)
		goto out;

	spin_lock(&umh_sysctl_lock);
	new->cap_bset = cap_intersect(usermodehelper_bset, new->cap_bset);
	new->cap_inheritable = cap_intersect(usermodehelper_inheritable,
					     new->cap_inheritable);
	spin_unlock(&umh_sysctl_lock);

	if (sub_info->init) {
		retval = sub_info->init(sub_info, new);
		if (retval) {
			abort_creds(new);
			goto out;
		}
	}

	commit_creds(new);

	sub_info->pid = task_pid_nr(current);
	if (sub_info->file) {
		retval = do_execve_file(sub_info->file,
					sub_info->argv, sub_info->envp);
		if (!retval)
			current->flags |= PF_UMH;
	} else
		retval = do_execve(getname_kernel(sub_info->path),
				   (const char __user *const __user *)sub_info->argv,
				   (const char __user *const __user *)sub_info->envp);
out:
	sub_info->retval = retval;
	/*
	 * call_usermodehelper_exec_sync() will call umh_complete
	 * if UMH_WAIT_PROC.
	 */
	if (!(sub_info->wait & UMH_WAIT_PROC))
		umh_complete(sub_info);
	if (!retval)
		return 0;
	do_exit(0);
}
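
/*
 * Illustrative sketch (not part of the original file): a caller-supplied
 * init() hook runs in the child, after the sysctl-driven capability clamp
 * above but before the exec. The helper name and the policy below are made
 * up for the example; only sub_info/new come from this file's API:
 *
 *	static int example_umh_init(struct subprocess_info *sub_info,
 *				    struct cred *new)
 *	{
 *		// Hypothetical policy: additionally drop CAP_NET_ADMIN
 *		// from the child's bounding set.
 *		cap_lower(new->cap_bset, CAP_NET_ADMIN);
 *		return 0;	// non-zero aborts the exec
 *	}
 */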

/* Handles UMH_WAIT_PROC. */
static void call_usermodehelper_exec_sync(struct subprocess_info *sub_info)
{
	pid_t pid;

	/* If SIGCLD is ignored, kernel_wait4() won't populate the status. */
	kernel_sigaction(SIGCHLD, SIG_DFL);
	pid = kernel_thread(call_usermodehelper_exec_async, sub_info, SIGCHLD);
	if (pid < 0) {
		sub_info->retval = pid;
	} else {
		int ret = -ECHILD;
		/*
		 * Normally it is bogus to call wait4() from in-kernel because
		 * wait4() wants to write the exit code to a userspace address.
		 * But call_usermodehelper_exec_sync() always runs as a kernel
		 * thread (workqueue) and put_user() to a kernel address works
		 * OK for kernel threads, due to their having an mm_segment_t
		 * which spans the entire address space.
		 *
		 * Thus the __user pointer cast is valid here.
		 */
		kernel_wait4(pid, (int __user *)&ret, 0, NULL);

		/*
		 * If ret is 0, either call_usermodehelper_exec_async failed and
		 * the real error code is already in sub_info->retval or
		 * sub_info->retval is 0 anyway, so don't mess with it then.
		 */
		if (ret)
			sub_info->retval = ret;
	}

	/* Restore default kernel sig handler */
	kernel_sigaction(SIGCHLD, SIG_IGN);

	umh_complete(sub_info);
}

/*
 * We need to create the usermodehelper kernel threads from a task that is
 * affine to an optimized set of CPUs (or the nohz housekeeping ones) so that
 * they inherit the widest possible affinity, irrespective of
 * call_usermodehelper() callers with possibly reduced affinity (eg: per-cpu
 * workqueues). We don't want usermodehelper targets to contend for a busy CPU.
 *
 * Unbound workqueues provide such wide affinity and allow blocking on
 * UMH_WAIT_PROC requests without holding up pending requests (up to some
 * limit).
 *
 * Besides, workqueues provide the privilege level that the caller might not
 * have to perform the usermodehelper request.
 */
static void call_usermodehelper_exec_work(struct work_struct *work)
{
	struct subprocess_info *sub_info =
		container_of(work, struct subprocess_info, work);

	if (sub_info->wait & UMH_WAIT_PROC) {
		call_usermodehelper_exec_sync(sub_info);
	} else {
		pid_t pid;
		/*
		 * Use CLONE_PARENT to reparent it to kthreadd; we do not
		 * want to pollute current->children, and we need a parent
		 * that always ignores SIGCHLD to ensure auto-reaping.
		 */
		pid = kernel_thread(call_usermodehelper_exec_async, sub_info,
				    CLONE_PARENT | SIGCHLD);
		if (pid < 0) {
			sub_info->retval = pid;
			umh_complete(sub_info);
		}
	}
}
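
/*
 * Illustrative sketch (not in the original file): for UMH_WAIT_PROC the
 * retval handed back to call_usermodehelper_exec() is the raw wait4()
 * status word, not a plain errno, so a caller that cares about the helper's
 * exit code typically decodes it along these lines:
 *
 *	ret = call_usermodehelper(path, argv, envp, UMH_WAIT_PROC);
 *	if (ret) {
 *		if ((ret & 0x7f) == 0)			// cf. WIFEXITED()
 *			pr_debug("helper exited with %d\n",
 *				 (ret >> 8) & 0xff);	// cf. WEXITSTATUS()
 *		else
 *			pr_debug("helper killed by signal %d\n", ret & 0x7f);
 *	}
 */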

/*
 * If set, call_usermodehelper_exec() will exit immediately returning -EBUSY
 * (used for preventing user land processes from being created after the user
 * land has been frozen during a system-wide hibernation or suspend operation).
 * Should always be manipulated under umhelper_sem acquired for write.
 */
static enum umh_disable_depth usermodehelper_disabled = UMH_DISABLED;

/* Number of helpers running */
static atomic_t running_helpers = ATOMIC_INIT(0);

/*
 * Wait queue head used by usermodehelper_disable() to wait for all running
 * helpers to finish.
 */
static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);

/*
 * Used by usermodehelper_read_lock_wait() to wait for usermodehelper_disabled
 * to become 'false'.
 */
static DECLARE_WAIT_QUEUE_HEAD(usermodehelper_disabled_waitq);

/*
 * Time to wait for running_helpers to become zero before the setting of
 * usermodehelper_disabled in usermodehelper_disable() fails
 */
#define RUNNING_HELPERS_TIMEOUT	(5 * HZ)

int usermodehelper_read_trylock(void)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	down_read(&umhelper_sem);
	for (;;) {
		prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
				TASK_INTERRUPTIBLE);
		if (!usermodehelper_disabled)
			break;

		if (usermodehelper_disabled == UMH_DISABLED)
			ret = -EAGAIN;

		up_read(&umhelper_sem);

		if (ret)
			break;

		schedule();
		try_to_freeze();

		down_read(&umhelper_sem);
	}
	finish_wait(&usermodehelper_disabled_waitq, &wait);
	return ret;
}
EXPORT_SYMBOL_GPL(usermodehelper_read_trylock);

long usermodehelper_read_lock_wait(long timeout)
{
	DEFINE_WAIT(wait);

	if (timeout < 0)
		return -EINVAL;

	down_read(&umhelper_sem);
	for (;;) {
		prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
				TASK_UNINTERRUPTIBLE);
		if (!usermodehelper_disabled)
			break;

		up_read(&umhelper_sem);

		timeout = schedule_timeout(timeout);
		if (!timeout)
			break;

		down_read(&umhelper_sem);
	}
	finish_wait(&usermodehelper_disabled_waitq, &wait);
	return timeout;
}
EXPORT_SYMBOL_GPL(usermodehelper_read_lock_wait);

void usermodehelper_read_unlock(void)
{
	up_read(&umhelper_sem);
}
EXPORT_SYMBOL_GPL(usermodehelper_read_unlock);

/**
 * __usermodehelper_set_disable_depth - Modify usermodehelper_disabled.
 * @depth: New value to assign to usermodehelper_disabled.
 *
 * Change the value of usermodehelper_disabled (under umhelper_sem locked for
 * writing) and wake up tasks waiting for it to change.
 */
void __usermodehelper_set_disable_depth(enum umh_disable_depth depth)
{
	down_write(&umhelper_sem);
	usermodehelper_disabled = depth;
	wake_up(&usermodehelper_disabled_waitq);
	up_write(&umhelper_sem);
}
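
/*
 * Illustrative sketch (not in the original file): subsystems that must not
 * race with suspend/hibernate typically bracket their helper invocation
 * with the read lock above, bailing out while helpers are disabled:
 *
 *	ret = usermodehelper_read_trylock();
 *	if (ret)
 *		return ret;	// -EAGAIN: userspace is frozen
 *	ret = call_usermodehelper(path, argv, envp, UMH_WAIT_EXEC);
 *	usermodehelper_read_unlock();
 */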

/**
 * __usermodehelper_disable - Prevent new helpers from being started.
 * @depth: New value to assign to usermodehelper_disabled.
 *
 * Set usermodehelper_disabled to @depth and wait for running helpers to exit.
 */
int __usermodehelper_disable(enum umh_disable_depth depth)
{
	long retval;

	if (!depth)
		return -EINVAL;

	down_write(&umhelper_sem);
	usermodehelper_disabled = depth;
	up_write(&umhelper_sem);

	/*
	 * From now on call_usermodehelper_exec() won't start any new
	 * helpers, so it is sufficient if running_helpers turns out to
	 * be zero at one point (it may be increased later, but that
	 * doesn't matter).
	 */
	retval = wait_event_timeout(running_helpers_waitq,
				    atomic_read(&running_helpers) == 0,
				    RUNNING_HELPERS_TIMEOUT);
	if (retval)
		return 0;

	__usermodehelper_set_disable_depth(UMH_ENABLED);
	return -EAGAIN;
}

static void helper_lock(void)
{
	atomic_inc(&running_helpers);
	smp_mb__after_atomic();
}

static void helper_unlock(void)
{
	if (atomic_dec_and_test(&running_helpers))
		wake_up(&running_helpers_waitq);
}

/**
 * call_usermodehelper_setup - prepare to call a usermode helper
 * @path: path to usermode executable
 * @argv: arg vector for process
 * @envp: environment for process
 * @gfp_mask: gfp mask for memory allocation
 * @cleanup: a cleanup function
 * @init: an init function
 * @data: arbitrary context sensitive data
 *
 * Returns either %NULL on allocation failure, or a subprocess_info
 * structure. This should be passed to call_usermodehelper_exec to
 * exec the process and free the structure.
 *
 * The init function is used to customize the helper process prior to
 * exec. A non-zero return code causes the process to error out, exit,
 * and return the failure to the calling process.
 *
 * The cleanup function is called just before the subprocess_info is
 * freed. This can be used for freeing the argv and envp. The
 * function must be runnable in either a process context or the
 * context in which call_usermodehelper_exec is called.
 */
struct subprocess_info *call_usermodehelper_setup(const char *path, char **argv,
		char **envp, gfp_t gfp_mask,
		int (*init)(struct subprocess_info *info, struct cred *new),
		void (*cleanup)(struct subprocess_info *info),
		void *data)
{
	struct subprocess_info *sub_info;

	sub_info = kzalloc(sizeof(struct subprocess_info), gfp_mask);
	if (!sub_info)
		goto out;

	INIT_WORK(&sub_info->work, call_usermodehelper_exec_work);

#ifdef CONFIG_STATIC_USERMODEHELPER
	sub_info->path = CONFIG_STATIC_USERMODEHELPER_PATH;
#else
	sub_info->path = path;
#endif
	sub_info->argv = argv;
	sub_info->envp = envp;

	sub_info->cleanup = cleanup;
	sub_info->init = init;
	sub_info->data = data;
out:
	return sub_info;
}
EXPORT_SYMBOL(call_usermodehelper_setup);
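
/*
 * Illustrative sketch (not in the original file): the setup/exec pair with
 * a cleanup that frees a dynamically built argv, which is the usual reason
 * to prefer it over plain call_usermodehelper(). The helper path and
 * example_umh_cleanup() are made-up names:
 *
 *	static void example_umh_cleanup(struct subprocess_info *info)
 *	{
 *		argv_free(info->argv);	// argv came from argv_split()
 *	}
 *
 *	char **argv = argv_split(GFP_KERNEL, "/sbin/example --oneshot", NULL);
 *	struct subprocess_info *info;
 *
 *	if (!argv)
 *		return -ENOMEM;
 *	info = call_usermodehelper_setup(argv[0], argv, NULL, GFP_KERNEL,
 *					 NULL, example_umh_cleanup, NULL);
 *	if (!info) {
 *		argv_free(argv);
 *		return -ENOMEM;
 *	}
 *	return call_usermodehelper_exec(info, UMH_WAIT_EXEC);
 *
 * Note that exec frees info and runs the cleanup even on failure, so argv
 * must not be freed again by the caller after this point.
 */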

struct subprocess_info *call_usermodehelper_setup_file(struct file *file,
		int (*init)(struct subprocess_info *info, struct cred *new),
		void (*cleanup)(struct subprocess_info *info), void *data)
{
	struct subprocess_info *sub_info;
	struct umh_info *info = data;
	const char *cmdline = (info->cmdline) ? info->cmdline : "usermodehelper";

	sub_info = kzalloc(sizeof(struct subprocess_info), GFP_KERNEL);
	if (!sub_info)
		return NULL;

	sub_info->argv = argv_split(GFP_KERNEL, cmdline, NULL);
	if (!sub_info->argv) {
		kfree(sub_info);
		return NULL;
	}

	INIT_WORK(&sub_info->work, call_usermodehelper_exec_work);
	sub_info->path = "none";
	sub_info->file = file;
	sub_info->init = init;
	sub_info->cleanup = cleanup;
	sub_info->data = data;
	return sub_info;
}

static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
{
	struct umh_info *umh_info = info->data;
	struct file *from_umh[2];
	struct file *to_umh[2];
	int err;

	/* create pipe to send data to umh */
	err = create_pipe_files(to_umh, 0);
	if (err)
		return err;
	err = replace_fd(0, to_umh[0], 0);
	fput(to_umh[0]);
	if (err < 0) {
		fput(to_umh[1]);
		return err;
	}

	/* create pipe to receive data from umh */
	err = create_pipe_files(from_umh, 0);
	if (err) {
		fput(to_umh[1]);
		replace_fd(0, NULL, 0);
		return err;
	}
	err = replace_fd(1, from_umh[1], 0);
	fput(from_umh[1]);
	if (err < 0) {
		fput(to_umh[1]);
		replace_fd(0, NULL, 0);
		fput(from_umh[0]);
		return err;
	}

	umh_info->pipe_to_umh = to_umh[1];
	umh_info->pipe_from_umh = from_umh[0];
	return 0;
}

static void umh_clean_and_save_pid(struct subprocess_info *info)
{
	struct umh_info *umh_info = info->data;

	/* cleanup if umh_pipe_setup() was successful but exec failed */
	if (info->pid && info->retval) {
		fput(umh_info->pipe_to_umh);
		fput(umh_info->pipe_from_umh);
	}

	argv_free(info->argv);
	umh_info->pid = info->pid;
}
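
/*
 * Illustrative sketch (not in the original file): once umh_pipe_setup()
 * has run, the kernel side of the conversation uses the two struct file
 * pointers saved in umh_info, along these lines:
 *
 *	loff_t pos = 0;
 *	ssize_t n;
 *
 *	n = kernel_write(info->pipe_to_umh, req, req_len, &pos);
 *	...
 *	pos = 0;
 *	n = kernel_read(info->pipe_from_umh, reply, reply_len, &pos);
 *
 * where req/reply are caller-owned buffers (made up for the example); the
 * helper process sees them as its stdin (fd 0) and stdout (fd 1).
 */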

/**
 * fork_usermode_blob - fork a blob of bytes as a usermode process
 * @data: a blob of bytes that can be do_execv-ed as a file
 * @len: length of the blob
 * @info: information about usermode process (shouldn't be NULL)
 *
 * If info->cmdline is set it will be used as command line for the
 * user process, else "usermodehelper" is used.
 *
 * Returns a negative error on failure, or zero on success in executing
 * the blob of bytes as a usermode process. On success
 * 'struct umh_info *info' is populated with two pipes and the pid of the
 * process. The caller is responsible for health checks of the user
 * process, for killing it via pid, and for closing the pipes when the
 * user process is no longer needed.
 */
int fork_usermode_blob(void *data, size_t len, struct umh_info *info)
{
	struct subprocess_info *sub_info;
	struct file *file;
	ssize_t written;
	loff_t pos = 0;
	int err;

	file = shmem_kernel_file_setup("", len, 0);
	if (IS_ERR(file))
		return PTR_ERR(file);

	written = kernel_write(file, data, len, &pos);
	if (written != len) {
		err = written;
		if (err >= 0)
			err = -ENOMEM;
		goto out;
	}

	err = -ENOMEM;
	sub_info = call_usermodehelper_setup_file(file, umh_pipe_setup,
						  umh_clean_and_save_pid, info);
	if (!sub_info)
		goto out;

	err = call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC);
	if (!err) {
		mutex_lock(&umh_list_lock);
		list_add(&info->list, &umh_list);
		mutex_unlock(&umh_list_lock);
	}
out:
	fput(file);
	return err;
}
EXPORT_SYMBOL_GPL(fork_usermode_blob);
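
/*
 * Illustrative sketch (not in the original file): a caller that links an
 * ELF image into the kernel (the way bpfilter does) can launch it like
 * this. The _start/_end symbols and names are made up for the example:
 *
 *	extern char example_umh_start[], example_umh_end[];
 *	static struct umh_info example_info = { .cmdline = "example_umh" };
 *
 *	err = fork_usermode_blob(example_umh_start,
 *				 example_umh_end - example_umh_start,
 *				 &example_info);
 *	if (err)
 *		return err;
 *	// talk to the process over example_info.pipe_to_umh /
 *	// example_info.pipe_from_umh; signal example_info.pid to stop it.
 */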

/**
 * call_usermodehelper_exec - start a usermode application
 * @sub_info: information about the subprocess
 * @wait: wait for the application to finish and return status.
 *        When UMH_NO_WAIT is set, don't wait at all, but you get no useful
 *        error back when the program couldn't be exec'ed. This makes it
 *        safe to call from interrupt context.
 *
 * Runs a user-space application. The application is started
 * asynchronously if wait is not set, and runs as a child of system
 * workqueues (i.e. it runs with full root capabilities and optimized
 * affinity).
 *
 * Note: a successful return value does not guarantee the helper was called at
 * all. You can't rely on sub_info->{init,cleanup} being called even for
 * UMH_WAIT_* wait modes as STATIC_USERMODEHELPER_PATH="" turns all helpers
 * into a successful no-op.
 */
int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int retval = 0;

	if (!sub_info->path) {
		call_usermodehelper_freeinfo(sub_info);
		return -EINVAL;
	}
	helper_lock();
	if (usermodehelper_disabled) {
		retval = -EBUSY;
		goto out;
	}

	/*
	 * If there is no binary for us to call, then just return and get out of
	 * here. This allows us to set STATIC_USERMODEHELPER_PATH to "" and
	 * disable all call_usermodehelper() calls.
	 */
	if (strlen(sub_info->path) == 0)
		goto out;

	/*
	 * Set the completion pointer only if there is a waiter.
	 * This makes it possible to use umh_complete to free
	 * the data structure in case of UMH_NO_WAIT.
	 */
	sub_info->complete = (wait == UMH_NO_WAIT) ? NULL : &done;
	sub_info->wait = wait;

	queue_work(system_unbound_wq, &sub_info->work);
	if (wait == UMH_NO_WAIT)	/* task has freed sub_info */
		goto unlock;

	if (wait & UMH_KILLABLE) {
		retval = wait_for_completion_killable(&done);
		if (!retval)
			goto wait_done;

		/* umh_complete() will see NULL and free sub_info */
		if (xchg(&sub_info->complete, NULL))
			goto unlock;
		/* fallthrough, umh_complete() was already called */
	}

	wait_for_completion(&done);
wait_done:
	retval = sub_info->retval;
out:
	call_usermodehelper_freeinfo(sub_info);
unlock:
	helper_unlock();
	return retval;
}
EXPORT_SYMBOL(call_usermodehelper_exec);

/**
 * call_usermodehelper() - prepare and start a usermode application
 * @path: path to usermode executable
 * @argv: arg vector for process
 * @envp: environment for process
 * @wait: wait for the application to finish and return status.
 *        When UMH_NO_WAIT is set, don't wait at all, but you get no useful
 *        error back when the program couldn't be exec'ed. This makes it
 *        safe to call from interrupt context.
 *
 * This function is equivalent to calling call_usermodehelper_setup()
 * followed by call_usermodehelper_exec().
 */
int call_usermodehelper(const char *path, char **argv, char **envp, int wait)
{
	struct subprocess_info *info;
	gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;

	info = call_usermodehelper_setup(path, argv, envp, gfp_mask,
					 NULL, NULL, NULL);
	if (info == NULL)
		return -ENOMEM;

	return call_usermodehelper_exec(info, wait);
}
EXPORT_SYMBOL(call_usermodehelper);
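
/*
 * Illustrative sketch (not in the original file): the common one-shot
 * pattern built on the wrapper above. The path and arguments are made up:
 *
 *	static int example_run_helper(void)
 *	{
 *		char *argv[] = { "/sbin/example-agent", "--ping", NULL };
 *		char *envp[] = { "HOME=/",
 *				 "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL };
 *
 *		return call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
 *	}
 *
 * UMH_WAIT_EXEC reports a failed exec but does not wait for the program to
 * exit; use UMH_WAIT_PROC to also collect the exit status.
 */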

static int proc_cap_handler(struct ctl_table *table, int write,
			    void *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
	kernel_cap_t new_cap;
	int err, i;

	if (write && (!capable(CAP_SETPCAP) ||
		      !capable(CAP_SYS_MODULE)))
		return -EPERM;

	/*
	 * convert from the global kernel_cap_t to the ulong array to print to
	 * userspace if this is a read.
	 */
	spin_lock(&umh_sysctl_lock);
	for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++) {
		if (table->data == CAP_BSET)
			cap_array[i] = usermodehelper_bset.cap[i];
		else if (table->data == CAP_PI)
			cap_array[i] = usermodehelper_inheritable.cap[i];
		else
			BUG();
	}
	spin_unlock(&umh_sysctl_lock);

	t = *table;
	t.data = &cap_array;

	/*
	 * actually read or write an array of ulongs from userspace. Remember
	 * these are least significant 32 bits first
	 */
	err = proc_doulongvec_minmax(&t, write, buffer, lenp, ppos);
	if (err < 0)
		return err;

	/*
	 * convert from the sysctl array of ulongs to the kernel_cap_t
	 * internal representation
	 */
	for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++)
		new_cap.cap[i] = cap_array[i];

	/*
	 * Drop everything not in the new_cap (but don't add things)
	 */
	if (write) {
		spin_lock(&umh_sysctl_lock);
		if (table->data == CAP_BSET)
			usermodehelper_bset = cap_intersect(usermodehelper_bset, new_cap);
		if (table->data == CAP_PI)
			usermodehelper_inheritable = cap_intersect(usermodehelper_inheritable, new_cap);
		spin_unlock(&umh_sysctl_lock);
	}

	return 0;
}

void __exit_umh(struct task_struct *tsk)
{
	struct umh_info *info;
	pid_t pid = tsk->pid;

	mutex_lock(&umh_list_lock);
	list_for_each_entry(info, &umh_list, list) {
		if (info->pid == pid) {
			list_del(&info->list);
			mutex_unlock(&umh_list_lock);
			goto out;
		}
	}
	mutex_unlock(&umh_list_lock);
	return;
out:
	if (info->cleanup)
		info->cleanup(info);
}

struct ctl_table usermodehelper_table[] = {
	{
		.procname	= "bset",
		.data		= CAP_BSET,
		.maxlen		= _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
		.mode		= 0600,
		.proc_handler	= proc_cap_handler,
	},
	{
		.procname	= "inheritable",
		.data		= CAP_PI,
		.maxlen		= _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
		.mode		= 0600,
		.proc_handler	= proc_cap_handler,
	},
	{ }
};
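
/*
 * Illustrative sketch (not in the original file): this table is registered
 * under the "usermodehelper" directory of the kernel sysctls (see
 * kernel/sysctl.c), so the capability clamps can be inspected and
 * tightened from userspace, e.g.:
 *
 *	# cat /proc/sys/kernel/usermodehelper/bset
 *	4294967295 63
 *	# echo "4294963199 63" > /proc/sys/kernel/usermodehelper/bset
 *
 * The numbers shown are made up; writes can only clear bits, never set
 * them, because proc_cap_handler() intersects with the current mask.
 */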