/* xref: /openbmc/linux/arch/arm/kernel/signal.c (revision ee89bd6b) */
/*
 *  linux/arch/arm/kernel/signal.c
 *
 *  Copyright (C) 1995-2009 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/uaccess.h>
#include <linux/tracehook.h>

#include <asm/elf.h>
#include <asm/cacheflush.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/vfp.h>

#include "signal.h"

/*
 * For ARM syscalls, we encode the syscall number into the instruction.
 */
#define SWI_SYS_SIGRETURN	(0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE))
#define SWI_SYS_RT_SIGRETURN	(0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE))

/*
 * With EABI, the syscall number has to be loaded into r7.
 */
#define MOV_R7_NR_SIGRETURN	(0xe3a07000 | (__NR_sigreturn - __NR_SYSCALL_BASE))
#define MOV_R7_NR_RT_SIGRETURN	(0xe3a07000 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))

/*
 * For Thumb syscalls, we pass the syscall number via r7.  We therefore
 * need two 16-bit instructions.
 */
#define SWI_THUMB_SIGRETURN	(0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE))
#define SWI_THUMB_RT_SIGRETURN	(0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))

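/*
 * setup_return() below copies two of these words into the retcode[] slot of
 * the user signal frame, indexing the table with (thumb << 1), plus 3 for
 * SA_SIGINFO handlers: entries 0-2 are the sigreturn encodings (ARM/EABI mov,
 * ARM/OABI swi, Thumb pair) and entries 3-5 the rt_sigreturn ones.  Because
 * the OABI-style swi carries the syscall number in the instruction while EABI
 * kernels take it from r7, emitting both lets one sequence work for either
 * ABI.  Purely as an illustration (assuming the usual ARM numbering where
 * sigreturn is syscall 119), the ARM pair for a non-RT signal decodes to:
 *
 *	mov	r7, #119	@ MOV_R7_NR_SIGRETURN
 *	swi	0x900077	@ SWI_SYS_SIGRETURN
 *
 * The Thumb entry packs "movs r7, #nr; svc 0" into a single word, and the
 * unused seventh element keeps the two-word copy from index 5 inside the
 * array.
 */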
const unsigned long sigreturn_codes[7] = {
	MOV_R7_NR_SIGRETURN,    SWI_SYS_SIGRETURN,    SWI_THUMB_SIGRETURN,
	MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
};

#ifdef CONFIG_CRUNCH
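/*
 * The Crunch coprocessor state is staged through a kernel bounce buffer:
 * kbuf is over-allocated by 8 bytes so kframe can be rounded up to the next
 * 64-bit boundary, the block is built (or validated) there, and it is then
 * copied to or from the user frame in one go.
 */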
static int preserve_crunch_context(struct crunch_sigframe __user *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct crunch_sigframe *kframe;

	/* the crunch context must be 64 bit aligned */
	kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	kframe->magic = CRUNCH_MAGIC;
	kframe->size = CRUNCH_STORAGE_SIZE;
	crunch_task_copy(current_thread_info(), &kframe->storage);
	return __copy_to_user(frame, kframe, sizeof(*frame));
}

static int restore_crunch_context(struct crunch_sigframe __user *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct crunch_sigframe *kframe;

	/* the crunch context must be 64 bit aligned */
	kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	if (__copy_from_user(kframe, frame, sizeof(*frame)))
		return -1;
	if (kframe->magic != CRUNCH_MAGIC ||
	    kframe->size != CRUNCH_STORAGE_SIZE)
		return -1;
	crunch_task_restore(current_thread_info(), &kframe->storage);
	return 0;
}
#endif

#ifdef CONFIG_IWMMXT

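/*
 * Same bounce-buffer scheme as the Crunch code above, for the iWMMXt
 * coprocessor state.  Note that the frame pointer is a user-space address
 * (it points into the aux_sigframe of the user signal frame).
 */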
static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct iwmmxt_sigframe *kframe;

	/* the iWMMXt context must be 64 bit aligned */
	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	kframe->magic = IWMMXT_MAGIC;
	kframe->size = IWMMXT_STORAGE_SIZE;
	iwmmxt_task_copy(current_thread_info(), &kframe->storage);
	return __copy_to_user(frame, kframe, sizeof(*frame));
}

static int restore_iwmmxt_context(struct iwmmxt_sigframe __user *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct iwmmxt_sigframe *kframe;

	/* the iWMMXt context must be 64 bit aligned */
	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	if (__copy_from_user(kframe, frame, sizeof(*frame)))
		return -1;
	if (kframe->magic != IWMMXT_MAGIC ||
	    kframe->size != IWMMXT_STORAGE_SIZE)
		return -1;
	iwmmxt_task_restore(current_thread_info(), &kframe->storage);
	return 0;
}

#endif

#ifdef CONFIG_VFP

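/*
 * The VFP state is not staged through a kernel buffer: the magic/size header
 * is written with __put_user_error() and the register state itself is saved
 * and restored by vfp_preserve_user_clear_hwstate() and
 * vfp_restore_user_hwstate().
 */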
static int preserve_vfp_context(struct vfp_sigframe __user *frame)
{
	const unsigned long magic = VFP_MAGIC;
	const unsigned long size = VFP_STORAGE_SIZE;
	int err = 0;

	__put_user_error(magic, &frame->magic, err);
	__put_user_error(size, &frame->size, err);

	if (err)
		return -EFAULT;

	return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
}

static int restore_vfp_context(struct vfp_sigframe __user *frame)
{
	unsigned long magic;
	unsigned long size;
	int err = 0;

	__get_user_error(magic, &frame->magic, err);
	__get_user_error(size, &frame->size, err);

	if (err)
		return -EFAULT;
	if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
		return -EINVAL;

	return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
}

#endif

/*
 * Do a signal return; undo the signal stack.  These are aligned to 64-bit.
 */
struct sigframe {
	struct ucontext uc;
	unsigned long retcode[2];
};

struct rt_sigframe {
	struct siginfo info;
	struct sigframe sig;
};

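/*
 * Read the saved core registers, signal mask and any coprocessor contexts
 * stacked in uc_regspace back out of the user frame into pt_regs.  Any fault
 * or inconsistency makes the caller treat the whole frame as bad.
 */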
static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
{
	struct aux_sigframe __user *aux;
	sigset_t set;
	int err;

	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0)
		set_current_blocked(&set);

	__get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
	__get_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
	__get_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
	__get_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
	__get_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
	__get_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
	__get_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
	__get_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
	__get_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
	__get_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
	__get_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
	__get_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
	__get_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
	__get_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
	__get_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
	__get_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
	__get_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);

	err |= !valid_user_regs(regs);

	aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
	if (err == 0)
		err |= restore_crunch_context(&aux->crunch);
#endif
#ifdef CONFIG_IWMMXT
	if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
		err |= restore_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
	if (err == 0)
		err |= restore_vfp_context(&aux->vfp);
#endif

	return err;
}

asmlinkage int sys_sigreturn(struct pt_regs *regs)
{
	struct sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 64-bit boundary,
	 * 'sp' should be 8-byte aligned here.  If it's not,
	 * then the user is trying to mess with us.
	 */
	if (regs->ARM_sp & 7)
		goto badframe;

	frame = (struct sigframe __user *)regs->ARM_sp;

	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, frame))
		goto badframe;

	return regs->ARM_r0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}

asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 64-bit boundary,
	 * 'sp' should be 8-byte aligned here.  If it's not,
	 * then the user is trying to mess with us.
	 */
	if (regs->ARM_sp & 7)
		goto badframe;

	frame = (struct rt_sigframe __user *)regs->ARM_sp;

	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, &frame->sig))
		goto badframe;

	if (restore_altstack(&frame->sig.uc.uc_stack))
		goto badframe;

	return regs->ARM_r0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}

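/*
 * Write the current register state, fault information and signal mask into
 * the user frame, followed by whichever coprocessor contexts apply, and
 * terminate uc_regspace with a zero end_magic word.
 */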
static int
setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
{
	struct aux_sigframe __user *aux;
	int err = 0;

	__put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
	__put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
	__put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
	__put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
	__put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
	__put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
	__put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
	__put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
	__put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
	__put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
	__put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
	__put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
	__put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
	__put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
	__put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
	__put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
	__put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);

	__put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err);
	__put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err);
	__put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err);
	__put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

	aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
	if (err == 0)
		err |= preserve_crunch_context(&aux->crunch);
#endif
#ifdef CONFIG_IWMMXT
	if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
		err |= preserve_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
	if (err == 0)
		err |= preserve_vfp_context(&aux->vfp);
#endif
	__put_user_error(0, &aux->end_magic, err);

	return err;
}

static inline void __user *
get_sigframe(struct ksignal *ksig, struct pt_regs *regs, int framesize)
{
	unsigned long sp = sigsp(regs->ARM_sp, ksig);
	void __user *frame;

	/*
	 * ATPCS B01 mandates 8-byte alignment
	 */
	frame = (void __user *)((sp - framesize) & ~7);

	/*
	 * Check that we can actually write to the signal frame.
	 */
	if (!access_ok(VERIFY_WRITE, frame, framesize))
		frame = NULL;

	return frame;
}

/*
 * translate the signal
 */
static inline int map_sig(int sig)
{
	struct thread_info *thread = current_thread_info();
	if (sig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap)
		sig = thread->exec_domain->signal_invmap[sig];
	return sig;
}

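/*
 * Arrange for the handler to be entered and for its return to land in a
 * sigreturn trampoline.  Unless SA_RESTORER supplies one, two words of
 * sigreturn_codes[] are written to the frame's retcode slot; 32-bit tasks
 * then return through the kernel's copy of that table at KERN_SIGRETURN_CODE,
 * while others execute the freshly written (and icache-flushed) copy on the
 * stack.  Returns non-zero if the trampoline could not be written.
 */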
static int
setup_return(struct pt_regs *regs, struct ksignal *ksig,
	     unsigned long __user *rc, void __user *frame)
{
	unsigned long handler = (unsigned long)ksig->ka.sa.sa_handler;
	unsigned long retcode;
	int thumb = 0;
	unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT);

	cpsr |= PSR_ENDSTATE;

	/*
	 * Maybe we need to deliver a 32-bit signal to a 26-bit task.
	 */
	if (ksig->ka.sa.sa_flags & SA_THIRTYTWO)
		cpsr = (cpsr & ~MODE_MASK) | USR_MODE;

#ifdef CONFIG_ARM_THUMB
	if (elf_hwcap & HWCAP_THUMB) {
		/*
		 * The LSB of the handler determines if we're going to
		 * be using THUMB or ARM mode for this signal handler.
		 */
		thumb = handler & 1;

		if (thumb) {
			cpsr |= PSR_T_BIT;
#if __LINUX_ARM_ARCH__ >= 7
			/* clear the If-Then Thumb-2 execution state */
			cpsr &= ~PSR_IT_MASK;
#endif
		} else
			cpsr &= ~PSR_T_BIT;
	}
#endif

	if (ksig->ka.sa.sa_flags & SA_RESTORER) {
		retcode = (unsigned long)ksig->ka.sa.sa_restorer;
	} else {
		unsigned int idx = thumb << 1;

		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
			idx += 3;

		if (__put_user(sigreturn_codes[idx],   rc) ||
		    __put_user(sigreturn_codes[idx+1], rc+1))
			return 1;

		if (cpsr & MODE32_BIT) {
			/*
			 * 32-bit code can use the new high-page
			 * signal return code support.
			 */
			retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb;
		} else {
			/*
			 * Ensure that the instruction cache sees
			 * the return code written onto the stack.
			 */
			flush_icache_range((unsigned long)rc,
					   (unsigned long)(rc + 2));

			retcode = ((unsigned long)rc) + thumb;
		}
	}

	regs->ARM_r0 = map_sig(ksig->sig);
	regs->ARM_sp = (unsigned long)frame;
	regs->ARM_lr = retcode;
	regs->ARM_pc = handler;
	regs->ARM_cpsr = cpsr;

	return 0;
}

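/*
 * Frame builders for the two flavours of delivery: setup_frame() lays down a
 * bare sigframe, while setup_rt_frame() additionally copies the siginfo,
 * saves the alternate stack settings and points r1/r2 at the info and
 * ucontext for SA_SIGINFO handlers.
 */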
static int
setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
{
	struct sigframe __user *frame = get_sigframe(ksig, regs, sizeof(*frame));
	int err = 0;

	if (!frame)
		return 1;

	/*
	 * Set uc.uc_flags to a value which sc.trap_no would never have.
	 */
	__put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);

	err |= setup_sigframe(frame, regs, set);
	if (err == 0)
		err = setup_return(regs, ksig, frame->retcode, frame);

	return err;
}

static int
setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame = get_sigframe(ksig, regs, sizeof(*frame));
	int err = 0;

	if (!frame)
		return 1;

	err |= copy_siginfo_to_user(&frame->info, &ksig->info);

	__put_user_error(0, &frame->sig.uc.uc_flags, err);
	__put_user_error(NULL, &frame->sig.uc.uc_link, err);

	err |= __save_altstack(&frame->sig.uc.uc_stack, regs->ARM_sp);
	err |= setup_sigframe(&frame->sig, regs, set);
	if (err == 0)
		err = setup_return(regs, ksig, frame->sig.retcode, frame);

	if (err == 0) {
		/*
		 * For realtime signals we must also set the second and third
		 * arguments for the signal handler.
		 *   -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06
		 */
		regs->ARM_r1 = (unsigned long)&frame->info;
		regs->ARM_r2 = (unsigned long)&frame->sig.uc;
	}

	return err;
}

/*
 * OK, we're invoking a handler
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	int ret;

	/*
	 * Set up the stack frame
	 */
	if (ksig->ka.sa.sa_flags & SA_SIGINFO)
		ret = setup_rt_frame(ksig, oldset, regs);
	else
		ret = setup_frame(ksig, oldset, regs);

	/*
	 * Check that the resulting registers are actually sane.
	 */
	ret |= !valid_user_regs(regs);

	signal_setup_done(ret, ksig, 0);
}

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
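/*
 * The return value matters only when there is no handler and the interrupted
 * system call has to be restarted: +1 asks the caller to re-issue the
 * original system call, -1 (the -ERESTART_RESTARTBLOCK case) to restart via
 * sys_restart_syscall.  The assembly return path (the work-pending glue in
 * entry-common.S) is what is expected to act on this value.
 */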
static int do_signal(struct pt_regs *regs, int syscall)
{
	unsigned int retval = 0, continue_addr = 0, restart_addr = 0;
	struct ksignal ksig;
	int restart = 0;

	/*
	 * If we were from a system call, check for system call restarting...
	 */
	if (syscall) {
		continue_addr = regs->ARM_pc;
		restart_addr = continue_addr - (thumb_mode(regs) ? 2 : 4);
		retval = regs->ARM_r0;

		/*
		 * Prepare for system call restart.  We do this here so that a
		 * debugger will see the already changed PC.
		 */
		switch (retval) {
		case -ERESTART_RESTARTBLOCK:
			restart -= 2;
			/* fall through */
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
			restart++;
			regs->ARM_r0 = regs->ARM_ORIG_r0;
			regs->ARM_pc = restart_addr;
			break;
		}
	}

	/*
	 * Get the signal to deliver.  When running under ptrace, at this
	 * point the debugger may change all our registers ...
	 */
	/*
	 * Depending on the signal settings we may need to revert the
	 * decision to restart the system call.  But skip this if a
	 * debugger has chosen to restart at a different PC.
	 */
	if (get_signal(&ksig)) {
		/* handler */
		if (unlikely(restart) && regs->ARM_pc == restart_addr) {
			if (retval == -ERESTARTNOHAND ||
			    retval == -ERESTART_RESTARTBLOCK
			    || (retval == -ERESTARTSYS
				&& !(ksig.ka.sa.sa_flags & SA_RESTART))) {
				regs->ARM_r0 = -EINTR;
				regs->ARM_pc = continue_addr;
			}
		}
		handle_signal(&ksig, regs);
	} else {
		/* no handler */
		restore_saved_sigmask();
		if (unlikely(restart) && regs->ARM_pc == restart_addr) {
			regs->ARM_pc = continue_addr;
			return restart;
		}
	}
	return 0;
}

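/*
 * Work loop for the return-to-user path, entered from the assembly glue when
 * any _TIF_WORK_MASK flag is set: reschedule, deliver signals or run the
 * tracehook resume notification until no work remains.  A non-zero return
 * propagates a syscall-restart request from do_signal() back to the caller.
 */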
asmlinkage int
do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
	do {
		if (likely(thread_flags & _TIF_NEED_RESCHED)) {
			schedule();
		} else {
			if (unlikely(!user_mode(regs)))
				return 0;
			local_irq_enable();
			if (thread_flags & _TIF_SIGPENDING) {
				int restart = do_signal(regs, syscall);
				if (unlikely(restart)) {
					/*
					 * Restart without handlers.
					 * Deal with it without leaving
					 * the kernel space.
					 */
					return restart;
				}
				syscall = 0;
			} else {
				clear_thread_flag(TIF_NOTIFY_RESUME);
				tracehook_notify_resume(regs);
			}
		}
		local_irq_disable();
		thread_flags = current_thread_info()->flags;
	} while (thread_flags & _TIF_WORK_MASK);
	return 0;
}