smp.c (8239c25f47d2b318156993b15f33900a86ea5e17) smp.c (e80e7813cd772cf30597024b371e73df9736de8d)
1/*
2 * SMP related functions
3 *
4 * Copyright IBM Corp. 1999,2012
5 * Author(s): Denis Joseph Barrow,
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>,
7 * Heiko Carstens <heiko.carstens@de.ibm.com>,
8 *

--- 71 unchanged lines hidden (view full) ---

80
/*
 * Physical CPU states as tracked in struct pcpu.
 * A standby CPU is present in the machine but must be configured
 * (rc/sysfs) before it can be brought online.
 */
enum {
	CPU_STATE_STANDBY = 0,		/* present, not yet usable */
	CPU_STATE_CONFIGURED = 1,	/* configured, may be brought up */
};
85
86struct pcpu {
87 struct cpu cpu;
1/*
2 * SMP related functions
3 *
4 * Copyright IBM Corp. 1999,2012
5 * Author(s): Denis Joseph Barrow,
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>,
7 * Heiko Carstens <heiko.carstens@de.ibm.com>,
8 *

--- 71 unchanged lines hidden (view full) ---

80
81enum {
82 CPU_STATE_STANDBY,
83 CPU_STATE_CONFIGURED,
84};
85
86struct pcpu {
87 struct cpu cpu;
88 struct task_struct *idle; /* idle process for the cpu */
89 struct _lowcore *lowcore; /* lowcore page(s) for the cpu */
90 unsigned long async_stack; /* async stack for the cpu */
91 unsigned long panic_stack; /* panic stack for the cpu */
92 unsigned long ec_mask; /* bit mask for ec_xxx functions */
93 int state; /* physical cpu state */
94 u32 status; /* last status received via sigp */
95 u16 address; /* physical cpu address */
96};

--- 619 unchanged lines hidden (view full) ---

716 ipi_call_lock();
717 set_cpu_online(smp_processor_id(), true);
718 ipi_call_unlock();
719 local_irq_enable();
720 /* cpu_idle will call schedule for us */
721 cpu_idle();
722}
723
88 struct _lowcore *lowcore; /* lowcore page(s) for the cpu */
89 unsigned long async_stack; /* async stack for the cpu */
90 unsigned long panic_stack; /* panic stack for the cpu */
91 unsigned long ec_mask; /* bit mask for ec_xxx functions */
92 int state; /* physical cpu state */
93 u32 status; /* last status received via sigp */
94 u16 address; /* physical cpu address */
95};

--- 619 unchanged lines hidden (view full) ---

715 ipi_call_lock();
716 set_cpu_online(smp_processor_id(), true);
717 ipi_call_unlock();
718 local_irq_enable();
719 /* cpu_idle will call schedule for us */
720 cpu_idle();
721}
722
724struct create_idle {
725 struct work_struct work;
726 struct task_struct *idle;
727 struct completion done;
728 int cpu;
729};
730
731static void __cpuinit smp_fork_idle(struct work_struct *work)
732{
733 struct create_idle *c_idle;
734
735 c_idle = container_of(work, struct create_idle, work);
736 c_idle->idle = fork_idle(c_idle->cpu);
737 complete(&c_idle->done);
738}
739
740/* Upping and downing of CPUs */
741int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
742{
723/* Upping and downing of CPUs */
724int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
725{
743 struct create_idle c_idle;
744 struct pcpu *pcpu;
745 int rc;
746
747 pcpu = pcpu_devices + cpu;
748 if (pcpu->state != CPU_STATE_CONFIGURED)
749 return -EIO;
750 if (pcpu_sigp_retry(pcpu, sigp_initial_cpu_reset, 0) !=
751 sigp_order_code_accepted)
752 return -EIO;
726 struct pcpu *pcpu;
727 int rc;
728
729 pcpu = pcpu_devices + cpu;
730 if (pcpu->state != CPU_STATE_CONFIGURED)
731 return -EIO;
732 if (pcpu_sigp_retry(pcpu, sigp_initial_cpu_reset, 0) !=
733 sigp_order_code_accepted)
734 return -EIO;
753 if (!pcpu->idle) {
754 c_idle.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done);
755 INIT_WORK_ONSTACK(&c_idle.work, smp_fork_idle);
756 c_idle.cpu = cpu;
757 schedule_work(&c_idle.work);
758 wait_for_completion(&c_idle.done);
759 if (IS_ERR(c_idle.idle))
760 return PTR_ERR(c_idle.idle);
761 pcpu->idle = c_idle.idle;
762 }
763 init_idle(pcpu->idle, cpu);
735
764 rc = pcpu_alloc_lowcore(pcpu, cpu);
765 if (rc)
766 return rc;
767 pcpu_prepare_secondary(pcpu, cpu);
736 rc = pcpu_alloc_lowcore(pcpu, cpu);
737 if (rc)
738 return rc;
739 pcpu_prepare_secondary(pcpu, cpu);
768 pcpu_attach_task(pcpu, pcpu->idle);
740 pcpu_attach_task(pcpu, tidle);
769 pcpu_start_fn(pcpu, smp_start_secondary, NULL);
770 while (!cpu_online(cpu))
771 cpu_relax();
772 return 0;
773}
774
775static int __init setup_possible_cpus(char *s)
776{

--- 70 unchanged lines hidden (view full) ---

847 smp_detect_cpus();
848}
849
850void __init smp_prepare_boot_cpu(void)
851{
852 struct pcpu *pcpu = pcpu_devices;
853
854 boot_cpu_address = stap();
741 pcpu_start_fn(pcpu, smp_start_secondary, NULL);
742 while (!cpu_online(cpu))
743 cpu_relax();
744 return 0;
745}
746
747static int __init setup_possible_cpus(char *s)
748{

--- 70 unchanged lines hidden (view full) ---

819 smp_detect_cpus();
820}
821
822void __init smp_prepare_boot_cpu(void)
823{
824 struct pcpu *pcpu = pcpu_devices;
825
826 boot_cpu_address = stap();
855 pcpu->idle = current;
856 pcpu->state = CPU_STATE_CONFIGURED;
857 pcpu->address = boot_cpu_address;
858 pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
859 pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE;
860 pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE;
861 S390_lowcore.percpu_offset = __per_cpu_offset[0];
862 cpu_set_polarization(0, POLARIZATION_UNKNOWN);
863 set_cpu_present(0, true);

--- 283 unchanged lines hidden ---
827 pcpu->state = CPU_STATE_CONFIGURED;
828 pcpu->address = boot_cpu_address;
829 pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
830 pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE;
831 pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE;
832 S390_lowcore.percpu_offset = __per_cpu_offset[0];
833 cpu_set_polarization(0, POLARIZATION_UNKNOWN);
834 set_cpu_present(0, true);

--- 283 unchanged lines hidden ---