/*
 * linux/arch/arm/mach-spear13xx/hotplug.c
 *
 * Copyright (C) 2012 ST Microelectronics Ltd.
 * Deepak Sikri <deepak.sikri@st.com>
 *
 * based upon linux/arch/arm/mach-realview/hotplug.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/smp.h>
#include <asm/cp15.h>
#include <asm/smp_plat.h>

#include "generic.h"

/*
 * Take this CPU out of the coherency domain in preparation for
 * power-down: invalidate the instruction cache, drop out of SMP
 * coherency via the Auxiliary Control Register, then disable the data
 * cache (SCTLR.C).  After this returns the CPU must not rely on cache
 * coherency until cpu_leave_lowpower() is called.
 */
static inline void cpu_enter_lowpower(void)
{
	unsigned int v;

	asm volatile(
	/* ICIALLU: invalidate entire I-cache (%1 holds the written 0) */
	" mcr p15, 0, %1, c7, c5, 0\n"
	" dsb\n"
	/*
	 * Turn off coherency
	 */
	/*
	 * NOTE(review): clears bit 5 (0x20) of the Auxiliary Control
	 * Register -- assumed to be the SMP/coherency bit, inherited from
	 * the realview hotplug code this file is based on; confirm against
	 * the TRM for the core used on SPEAr13xx.
	 */
	" mrc p15, 0, %0, c1, c0, 1\n"
	" bic %0, %0, #0x20\n"
	" mcr p15, 0, %0, c1, c0, 1\n"
	/* clear SCTLR.C (CR_C) to disable the data cache */
	" mrc p15, 0, %0, c1, c0, 0\n"
	" bic %0, %0, %2\n"
	" mcr p15, 0, %0, c1, c0, 0\n"
	: "=&r" (v)
	: "r" (0), "Ir" (CR_C)
	: "cc", "memory");
}

/*
 * Undo cpu_enter_lowpower() after a (possibly spurious) wakeup:
 * re-enable the data cache (SCTLR.C) and set ACTLR bit 5 again so the
 * CPU rejoins the coherency domain.
 */
static inline void cpu_leave_lowpower(void)
{
	unsigned int v;

	asm volatile("mrc p15, 0, %0, c1, c0, 0\n"
	/* set SCTLR.C: data cache back on */
	" orr %0, %0, %1\n"
	" mcr p15, 0, %0, c1, c0, 0\n"
	/* restore ACTLR bit 5 (coherency -- see note in cpu_enter_lowpower) */
	" mrc p15, 0, %0, c1, c0, 1\n"
	" orr %0, %0, #0x20\n"
	" mcr p15, 0, %0, c1, c0, 1\n"
	: "=&r" (v)
	: "Ir" (CR_C)
	: "cc");
}

/*
 * Park the dying CPU in WFI until the boot CPU writes this CPU's number
 * into spear_pen_release, which is the platform's signal that the CPU
 * is being brought back online.  Any other exit from WFI is unexpected
 * and is tallied in *spurious so the caller can report it once the CPU
 * is coherent again.
 */
static inline void spear13xx_do_lowpower(unsigned int cpu, int *spurious)
{
	for (;;) {
		wfi();

		if (spear_pen_release == cpu) {
			/*
			 * OK, proper wakeup, we're done
			 */
			break;
		}

		/*
		 * Getting here, means that we have come out of WFI without
		 * having been woken up - this shouldn't happen
		 *
		 * Just note it happening - when we're woken, we can report
		 * its occurrence.
		 */
		(*spurious)++;
	}
}

/*
 * platform-specific code to shutdown a CPU
 *
 * Called with IRQs disabled
 */
void spear13xx_cpu_die(unsigned int cpu)
{
	int spurious = 0;

	/*
	 * we're ready for shutdown now, so do it
	 */
	cpu_enter_lowpower();
	spear13xx_do_lowpower(cpu, &spurious);

	/*
	 * bring this CPU back into the world of cache
	 * coherency, and then restore interrupts
	 */
	cpu_leave_lowpower();

	/* deferred until now: printing was unsafe while non-coherent */
	if (spurious)
		pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
}