process.c (9c75a31c3525a127f70b919856e32be3d8b03755) process.c (ce48b2100785e5ca629fb3aa8e3b50aca808f692)
1/*
2 * Derived from "arch/i386/kernel/process.c"
3 * Copyright (C) 1995 Linus Torvalds
4 *
5 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
6 * Paul Mackerras (paulus@cs.anu.edu.au)
7 *
8 * PowerPC version

--- 39 unchanged lines hidden (view full) ---

48#include <asm/firmware.h>
49#endif
50
51extern unsigned long _get_SP(void);
52
53#ifndef CONFIG_SMP
54struct task_struct *last_task_used_math = NULL;
55struct task_struct *last_task_used_altivec = NULL;
1/*
2 * Derived from "arch/i386/kernel/process.c"
3 * Copyright (C) 1995 Linus Torvalds
4 *
5 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
6 * Paul Mackerras (paulus@cs.anu.edu.au)
7 *
8 * PowerPC version

--- 39 unchanged lines hidden (view full) ---

48#include <asm/firmware.h>
49#endif
50
51extern unsigned long _get_SP(void);
52
53#ifndef CONFIG_SMP
54struct task_struct *last_task_used_math = NULL;
55struct task_struct *last_task_used_altivec = NULL;
56struct task_struct *last_task_used_vsx = NULL;
56struct task_struct *last_task_used_spe = NULL;
57#endif
58
59/*
60 * Make sure the floating-point register state in the
61 * the thread_struct is up to date for task tsk.
62 */
63void flush_fp_to_thread(struct task_struct *tsk)

--- 37 unchanged lines hidden (view full) ---

101#else
102 giveup_fpu(last_task_used_math);
103#endif /* CONFIG_SMP */
104}
105EXPORT_SYMBOL(enable_kernel_fp);
106
107int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
108{
57struct task_struct *last_task_used_spe = NULL;
58#endif
59
60/*
61 * Make sure the floating-point register state in the
62 * the thread_struct is up to date for task tsk.
63 */
64void flush_fp_to_thread(struct task_struct *tsk)

--- 37 unchanged lines hidden (view full) ---

102#else
103 giveup_fpu(last_task_used_math);
104#endif /* CONFIG_SMP */
105}
106EXPORT_SYMBOL(enable_kernel_fp);
107
108int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
109{
110#ifdef CONFIG_VSX
111 int i;
112 elf_fpreg_t *reg;
113#endif
114
109 if (!tsk->thread.regs)
110 return 0;
111 flush_fp_to_thread(current);
112
115 if (!tsk->thread.regs)
116 return 0;
117 flush_fp_to_thread(current);
118
119#ifdef CONFIG_VSX
120 reg = (elf_fpreg_t *)fpregs;
121 for (i = 0; i < ELF_NFPREG - 1; i++, reg++)
122 *reg = tsk->thread.TS_FPR(i);
123 memcpy(reg, &tsk->thread.fpscr, sizeof(elf_fpreg_t));
124#else
113 memcpy(fpregs, &tsk->thread.TS_FPR(0), sizeof(*fpregs));
125 memcpy(fpregs, &tsk->thread.TS_FPR(0), sizeof(*fpregs));
126#endif
114
115 return 1;
116}
117
118#ifdef CONFIG_ALTIVEC
119void enable_kernel_altivec(void)
120{
121 WARN_ON(preemptible());

--- 22 unchanged lines hidden (view full) ---

144 BUG_ON(tsk != current);
145#endif
146 giveup_altivec(tsk);
147 }
148 preempt_enable();
149 }
150}
151
127
128 return 1;
129}
130
131#ifdef CONFIG_ALTIVEC
132void enable_kernel_altivec(void)
133{
134 WARN_ON(preemptible());

--- 22 unchanged lines hidden (view full) ---

157 BUG_ON(tsk != current);
158#endif
159 giveup_altivec(tsk);
160 }
161 preempt_enable();
162 }
163}
164
152int dump_task_altivec(struct task_struct *tsk, elf_vrregset_t *vrregs)
165int dump_task_altivec(struct task_struct *tsk, elf_vrreg_t *vrregs)
153{
154 /* ELF_NVRREG includes the VSCR and VRSAVE which we need to save
155 * separately, see below */
156 const int nregs = ELF_NVRREG - 2;
157 elf_vrreg_t *reg;
158 u32 *dest;
159
160 if (tsk == current)

--- 13 unchanged lines hidden (view full) ---

174 memset(reg, 0, sizeof(*reg));
175 dest = (u32 *)reg;
176 *dest = tsk->thread.vrsave;
177
178 return 1;
179}
180#endif /* CONFIG_ALTIVEC */
181
166{
167 /* ELF_NVRREG includes the VSCR and VRSAVE which we need to save
168 * separately, see below */
169 const int nregs = ELF_NVRREG - 2;
170 elf_vrreg_t *reg;
171 u32 *dest;
172
173 if (tsk == current)

--- 13 unchanged lines hidden (view full) ---

187 memset(reg, 0, sizeof(*reg));
188 dest = (u32 *)reg;
189 *dest = tsk->thread.vrsave;
190
191 return 1;
192}
193#endif /* CONFIG_ALTIVEC */
194
#ifdef CONFIG_VSX
#if 0
/* not currently used, but some crazy RAID module might want to later */
/*
 * Claim the VSX unit for kernel use by forcing the lazy user-state
 * owner (if any) to give it up via giveup_vsx() — mirrors
 * enable_kernel_fp()/enable_kernel_altivec() above.
 * Currently compiled out (#if 0) until an in-kernel user appears.
 */
void enable_kernel_vsx(void)
{
	/* Caller must hold preemption off; VSX ownership is per-CPU. */
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	/* On SMP only current can lazily own VSX on this CPU. */
	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
		giveup_vsx(current);
	else
		giveup_vsx(NULL); /* just enable vsx for kernel - force */
#else
	/* On UP the lazy owner is tracked in last_task_used_vsx. */
	giveup_vsx(last_task_used_vsx);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_vsx);
#endif
213
214void flush_vsx_to_thread(struct task_struct *tsk)
215{
216 if (tsk->thread.regs) {
217 preempt_disable();
218 if (tsk->thread.regs->msr & MSR_VSX) {
219#ifdef CONFIG_SMP
220 BUG_ON(tsk != current);
221#endif
222 giveup_vsx(tsk);
223 }
224 preempt_enable();
225 }
226}
227
228/*
229 * This dumps the lower half 64bits of the first 32 VSX registers.
230 * This needs to be called with dump_task_fp and dump_task_altivec to
231 * get all the VSX state.
232 */
233int dump_task_vsx(struct task_struct *tsk, elf_vrreg_t *vrregs)
234{
235 elf_vrreg_t *reg;
236 double buf[32];
237 int i;
238
239 if (tsk == current)
240 flush_vsx_to_thread(tsk);
241
242 reg = (elf_vrreg_t *)vrregs;
243
244 for (i = 0; i < 32 ; i++)
245 buf[i] = current->thread.fpr[i][TS_VSRLOWOFFSET];
246 memcpy(reg, buf, sizeof(buf));
247
248 return 1;
249}
250#endif /* CONFIG_VSX */
251
252int dump_task_vector(struct task_struct *tsk, elf_vrregset_t *vrregs)
253{
254 int rc = 0;
255 elf_vrreg_t *regs = (elf_vrreg_t *)vrregs;
256#ifdef CONFIG_ALTIVEC
257 rc = dump_task_altivec(tsk, regs);
258 if (rc)
259 return rc;
260 regs += ELF_NVRREG;
261#endif
262
263#ifdef CONFIG_VSX
264 rc = dump_task_vsx(tsk, regs);
265#endif
266 return rc;
267}
268
182#ifdef CONFIG_SPE
183
184void enable_kernel_spe(void)
185{
186 WARN_ON(preemptible());
187
188#ifdef CONFIG_SMP
189 if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))

--- 38 unchanged lines hidden (view full) ---

228{
229 preempt_disable();
230 if (last_task_used_math == current)
231 last_task_used_math = NULL;
232#ifdef CONFIG_ALTIVEC
233 if (last_task_used_altivec == current)
234 last_task_used_altivec = NULL;
235#endif /* CONFIG_ALTIVEC */
269#ifdef CONFIG_SPE
270
271void enable_kernel_spe(void)
272{
273 WARN_ON(preemptible());
274
275#ifdef CONFIG_SMP
276 if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))

--- 38 unchanged lines hidden (view full) ---

315{
316 preempt_disable();
317 if (last_task_used_math == current)
318 last_task_used_math = NULL;
319#ifdef CONFIG_ALTIVEC
320 if (last_task_used_altivec == current)
321 last_task_used_altivec = NULL;
322#endif /* CONFIG_ALTIVEC */
323#ifdef CONFIG_VSX
324 if (last_task_used_vsx == current)
325 last_task_used_vsx = NULL;
326#endif /* CONFIG_VSX */
236#ifdef CONFIG_SPE
237 if (last_task_used_spe == current)
238 last_task_used_spe = NULL;
239#endif
240 preempt_enable();
241}
242#endif /* CONFIG_SMP */
243

--- 48 unchanged lines hidden (view full) ---

292 *
293 * On SMP we always save/restore altivec regs just to avoid the
294 * complexity of changing processors.
295 * -- Cort
296 */
297 if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
298 giveup_altivec(prev);
299#endif /* CONFIG_ALTIVEC */
327#ifdef CONFIG_SPE
328 if (last_task_used_spe == current)
329 last_task_used_spe = NULL;
330#endif
331 preempt_enable();
332}
333#endif /* CONFIG_SMP */
334

--- 48 unchanged lines hidden (view full) ---

383 *
384 * On SMP we always save/restore altivec regs just to avoid the
385 * complexity of changing processors.
386 * -- Cort
387 */
388 if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
389 giveup_altivec(prev);
390#endif /* CONFIG_ALTIVEC */
391#ifdef CONFIG_VSX
392 if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
393 giveup_vsx(prev);
394#endif /* CONFIG_VSX */
300#ifdef CONFIG_SPE
301 /*
302 * If the previous thread used spe in the last quantum
303 * (thus changing spe regs) then save them.
304 *
305 * On SMP we always save/restore spe regs just to avoid the
306 * complexity of changing processors.
307 */

--- 4 unchanged lines hidden (view full) ---

312#else /* CONFIG_SMP */
313#ifdef CONFIG_ALTIVEC
314 /* Avoid the trap. On smp this this never happens since
315 * we don't set last_task_used_altivec -- Cort
316 */
317 if (new->thread.regs && last_task_used_altivec == new)
318 new->thread.regs->msr |= MSR_VEC;
319#endif /* CONFIG_ALTIVEC */
395#ifdef CONFIG_SPE
396 /*
397 * If the previous thread used spe in the last quantum
398 * (thus changing spe regs) then save them.
399 *
400 * On SMP we always save/restore spe regs just to avoid the
401 * complexity of changing processors.
402 */

--- 4 unchanged lines hidden (view full) ---

407#else /* CONFIG_SMP */
408#ifdef CONFIG_ALTIVEC
409 /* Avoid the trap. On smp this this never happens since
410 * we don't set last_task_used_altivec -- Cort
411 */
412 if (new->thread.regs && last_task_used_altivec == new)
413 new->thread.regs->msr |= MSR_VEC;
414#endif /* CONFIG_ALTIVEC */
415#ifdef CONFIG_VSX
416 if (new->thread.regs && last_task_used_vsx == new)
417 new->thread.regs->msr |= MSR_VSX;
418#endif /* CONFIG_VSX */
320#ifdef CONFIG_SPE
321 /* Avoid the trap. On smp this this never happens since
322 * we don't set last_task_used_spe
323 */
324 if (new->thread.regs && last_task_used_spe == new)
325 new->thread.regs->msr |= MSR_SPE;
326#endif /* CONFIG_SPE */
327

--- 84 unchanged lines hidden (view full) ---

412
413static struct regbit {
414 unsigned long bit;
415 const char *name;
416} msr_bits[] = {
417 {MSR_EE, "EE"},
418 {MSR_PR, "PR"},
419 {MSR_FP, "FP"},
419#ifdef CONFIG_SPE
420 /* Avoid the trap. On smp this this never happens since
421 * we don't set last_task_used_spe
422 */
423 if (new->thread.regs && last_task_used_spe == new)
424 new->thread.regs->msr |= MSR_SPE;
425#endif /* CONFIG_SPE */
426

--- 84 unchanged lines hidden (view full) ---

511
512static struct regbit {
513 unsigned long bit;
514 const char *name;
515} msr_bits[] = {
516 {MSR_EE, "EE"},
517 {MSR_PR, "PR"},
518 {MSR_FP, "FP"},
519 {MSR_VEC, "VEC"},
520 {MSR_VSX, "VSX"},
420 {MSR_ME, "ME"},
421 {MSR_IR, "IR"},
422 {MSR_DR, "DR"},
423 {0, NULL}
424};
425
426static void printbits(unsigned long val, struct regbit *bits)
427{

--- 101 unchanged lines hidden (view full) ---

529/*
530 * This gets called before we allocate a new thread and copy
531 * the current task into it.
532 */
533void prepare_to_copy(struct task_struct *tsk)
534{
535 flush_fp_to_thread(current);
536 flush_altivec_to_thread(current);
521 {MSR_ME, "ME"},
522 {MSR_IR, "IR"},
523 {MSR_DR, "DR"},
524 {0, NULL}
525};
526
527static void printbits(unsigned long val, struct regbit *bits)
528{

--- 101 unchanged lines hidden (view full) ---

630/*
631 * This gets called before we allocate a new thread and copy
632 * the current task into it.
633 */
634void prepare_to_copy(struct task_struct *tsk)
635{
636 flush_fp_to_thread(current);
637 flush_altivec_to_thread(current);
638 flush_vsx_to_thread(current);
537 flush_spe_to_thread(current);
538}
539
540/*
541 * Copy a thread..
542 */
543int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
544 unsigned long unused, struct task_struct *p,

--- 139 unchanged lines hidden (view full) ---

684 } else {
685 regs->nip = start;
686 regs->gpr[2] = 0;
687 regs->msr = MSR_USER32;
688 }
689#endif
690
691 discard_lazy_cpu_state();
639 flush_spe_to_thread(current);
640}
641
642/*
643 * Copy a thread..
644 */
645int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
646 unsigned long unused, struct task_struct *p,

--- 139 unchanged lines hidden (view full) ---

786 } else {
787 regs->nip = start;
788 regs->gpr[2] = 0;
789 regs->msr = MSR_USER32;
790 }
791#endif
792
793 discard_lazy_cpu_state();
794#ifdef CONFIG_VSX
795 current->thread.used_vsr = 0;
796#endif
692 memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
693 current->thread.fpscr.val = 0;
694#ifdef CONFIG_ALTIVEC
695 memset(current->thread.vr, 0, sizeof(current->thread.vr));
696 memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
697 current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
698 current->thread.vrsave = 0;
699 current->thread.used_vr = 0;

--- 369 unchanged lines hidden ---
797 memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
798 current->thread.fpscr.val = 0;
799#ifdef CONFIG_ALTIVEC
800 memset(current->thread.vr, 0, sizeof(current->thread.vr));
801 memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
802 current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
803 current->thread.vrsave = 0;
804 current->thread.used_vr = 0;

--- 369 unchanged lines hidden ---