/*
 *  S/390 helpers - sysemu only
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "s390x-internal.h"
#include "gdbstub/helpers.h"
#include "qemu/timer.h"
#include "hw/s390x/ioinst.h"
#include "target/s390x/kvm/pv.h"
#include "sysemu/hw_accel.h"
#include "sysemu/runstate.h"

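/* Timer callback: inject a clock comparator interrupt into the given CPU. */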
void s390x_tod_timer(void *opaque)
{
    cpu_inject_clock_comparator((S390CPU *) opaque);
}

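/* Timer callback: inject a CPU timer interrupt into the given CPU. */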
void s390x_cpu_timer(void *opaque)
{
    cpu_inject_cpu_timer((S390CPU *) opaque);
}

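/*
 * Translate a virtual address for debug accesses (e.g. the gdbstub reading
 * the code it is single-stepping).  Returns the real address, or -1 if no
 * translation exists.
 */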
hwaddr s390_cpu_get_phys_page_debug(CPUState *cs, vaddr vaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong raddr;
    int prot;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    uint64_t tec;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    /* We want to read the code (e.g., see what we are single-stepping). */
    if (asc != PSW_ASC_HOME) {
        asc = PSW_ASC_PRIMARY;
    }

    /*
     * We want to read code even if IEP is active. Use MMU_DATA_LOAD instead
     * of MMU_INST_FETCH.
     */
    if (mmu_translate(env, vaddr, MMU_DATA_LOAD, asc, &raddr, &prot, &tec)) {
        return -1;
    }
    return raddr;
}

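/*
 * Translate an arbitrary virtual address for debug accesses: translate the
 * containing page and re-add the offset within the page.
 */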
hwaddr s390_cpu_get_phys_addr_debug(CPUState *cs, vaddr vaddr)
{
    hwaddr phys_addr;
    target_ulong page;

    page = vaddr & TARGET_PAGE_MASK;
    phys_addr = cpu_get_phys_page_debug(cs, page);
    phys_addr += (vaddr & ~TARGET_PAGE_MASK);

    return phys_addr;
}

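/*
 * "Signal quiesce": a wait PSW whose address ends in 0xfff is treated as a
 * guest shutdown request rather than as a crash (see s390_handle_wait()).
 */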
static inline bool is_special_wait_psw(uint64_t psw_addr)
{
    /* signal quiesce */
    return (psw_addr & 0xfffUL) == 0xfffUL;
}

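/*
 * Handle a CPU entering the wait state: halt it and, once no CPU is running
 * anymore, either request a guest shutdown (special quiesce PSW) or report
 * the disabled wait as a guest panic.
 */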
void s390_handle_wait(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);

    if (s390_cpu_halt(cpu) == 0) {
        if (is_special_wait_psw(cpu->env.psw.addr)) {
            qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
        } else {
            cpu->env.crash_reason = S390_CRASH_REASON_DISABLED_WAIT;
            qemu_system_guest_panicked(cpu_get_crash_info(cs));
        }
    }
}

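/*
 * Map the guest's lowcore (prefix area) into host memory for direct access;
 * aborts the CPU if the full LowCore cannot be mapped.  Typical usage (as in
 * do_restart_interrupt() below):
 *
 *     LowCore *lowcore = cpu_map_lowcore(env);
 *     lowcore->restart_old_psw.addr = cpu_to_be64(env->psw.addr);
 *     ...
 *     cpu_unmap_lowcore(lowcore);
 */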
LowCore *cpu_map_lowcore(CPUS390XState *env)
{
    LowCore *lowcore;
    hwaddr len = sizeof(LowCore);

    lowcore = cpu_physical_memory_map(env->psa, &len, true);

    if (len < sizeof(LowCore)) {
        cpu_abort(env_cpu(env), "Could not map lowcore\n");
    }

    return lowcore;
}

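/* Undo cpu_map_lowcore(), marking the whole LowCore area as modified. */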
void cpu_unmap_lowcore(LowCore *lowcore)
{
    cpu_physical_memory_unmap(lowcore, sizeof(LowCore), 1, sizeof(LowCore));
}

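/*
 * Deliver a restart interrupt: save the current PSW as the restart old PSW
 * in the lowcore and continue execution from the restart new PSW.
 */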
void do_restart_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->restart_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->restart_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->restart_new_psw.mask);
    addr = be64_to_cpu(lowcore->restart_new_psw.addr);

    cpu_unmap_lowcore(lowcore);
    env->pending_int &= ~INTERRUPT_RESTART;

    s390_cpu_set_psw(env, mask, addr);
}

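/*
 * Recompute the watchpoints that implement PER storage-alteration events,
 * based on control register 9 (event mask) and control registers 10/11
 * (address range).
 */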
void s390_cpu_recompute_watchpoints(CPUState *cs)
{
    const int wp_flags = BP_CPU | BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS;
    CPUS390XState *env = cpu_env(cs);

    /*
     * We are called when the watchpoints have changed. First remove them
     * all.
     */
    cpu_watchpoint_remove_all(cs, BP_CPU);

    /* Return if PER is not enabled */
    if (!(env->psw.mask & PSW_MASK_PER)) {
        return;
    }

    /* Return if storage-alteration event is not enabled.  */
    if (!(env->cregs[9] & PER_CR9_EVENT_STORE)) {
        return;
    }

    if (env->cregs[10] == 0 && env->cregs[11] == -1LL) {
        /*
         * We can't create a watchpoint spanning the whole memory range, so
         * split it into two parts.
         */
        cpu_watchpoint_insert(cs, 0, 1ULL << 63, wp_flags, NULL);
        cpu_watchpoint_insert(cs, 1ULL << 63, 1ULL << 63, wp_flags, NULL);
    } else if (env->cregs[10] > env->cregs[11]) {
        /* The address range wraps around, so create two watchpoints.  */
        cpu_watchpoint_insert(cs, env->cregs[10], -env->cregs[10],
                              wp_flags, NULL);
        cpu_watchpoint_insert(cs, 0, env->cregs[11] + 1, wp_flags, NULL);
    } else {
        /* Default case, create a single watchpoint.  */
        cpu_watchpoint_insert(cs, env->cregs[10],
                              env->cregs[11] - env->cregs[10] + 1,
                              wp_flags, NULL);
    }
}

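/*
 * Layout of the 512-byte status save area filled in by s390_store_status()
 * (SIGP store status); the comments give each field's offset within the area.
 */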
typedef struct SigpSaveArea {
    uint64_t    fprs[16];                       /* 0x0000 */
    uint64_t    grs[16];                        /* 0x0080 */
    PSW         psw;                            /* 0x0100 */
    uint8_t     pad_0x0110[0x0118 - 0x0110];    /* 0x0110 */
    uint32_t    prefix;                         /* 0x0118 */
    uint32_t    fpc;                            /* 0x011c */
    uint8_t     pad_0x0120[0x0124 - 0x0120];    /* 0x0120 */
    uint32_t    todpr;                          /* 0x0124 */
    uint64_t    cputm;                          /* 0x0128 */
    uint64_t    ckc;                            /* 0x0130 */
    uint8_t     pad_0x0138[0x0140 - 0x0138];    /* 0x0138 */
    uint32_t    ars[16];                        /* 0x0140 */
    uint64_t    crs[16];                        /* 0x0180 */
} SigpSaveArea;
QEMU_BUILD_BUG_ON(sizeof(SigpSaveArea) != 512);

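/*
 * Store the CPU status (registers, PSW, prefix, timers, ...) into the
 * 512-byte save area at @addr.  With @store_arch, additionally write the
 * ar_access_id byte in the lowcore.  Returns 0 on success, -EFAULT if the
 * save area cannot be mapped.
 */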
int s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch)
{
    static const uint8_t ar_id = 1;
    SigpSaveArea *sa;
    hwaddr len = sizeof(*sa);
    int i;

    /* For PVMs, storing will occur when this CPU enters SIE again */
    if (s390_is_pv()) {
        return 0;
    }

    sa = cpu_physical_memory_map(addr, &len, true);
    if (!sa) {
        return -EFAULT;
    }
    if (len != sizeof(*sa)) {
        cpu_physical_memory_unmap(sa, len, 1, 0);
        return -EFAULT;
    }

    if (store_arch) {
        cpu_physical_memory_write(offsetof(LowCore, ar_access_id), &ar_id, 1);
    }
    for (i = 0; i < 16; ++i) {
        sa->fprs[i] = cpu_to_be64(*get_freg(&cpu->env, i));
    }
    for (i = 0; i < 16; ++i) {
        sa->grs[i] = cpu_to_be64(cpu->env.regs[i]);
    }
    sa->psw.addr = cpu_to_be64(cpu->env.psw.addr);
    sa->psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(&cpu->env));
    sa->prefix = cpu_to_be32(cpu->env.psa);
    sa->fpc = cpu_to_be32(cpu->env.fpc);
    sa->todpr = cpu_to_be32(cpu->env.todpr);
    sa->cputm = cpu_to_be64(cpu->env.cputm);
    sa->ckc = cpu_to_be64(cpu->env.ckc >> 8);
    for (i = 0; i < 16; ++i) {
        sa->ars[i] = cpu_to_be32(cpu->env.aregs[i]);
    }
    for (i = 0; i < 16; ++i) {
        sa->crs[i] = cpu_to_be64(cpu->env.cregs[i]);
    }

    cpu_physical_memory_unmap(sa, len, 1, len);

    return 0;
}

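/*
 * Layout of the 4 KiB additional status save area (vector registers and
 * guarded-storage control block) filled in by s390_store_adtl_status().
 */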
typedef struct SigpAdtlSaveArea {
    uint64_t    vregs[32][2];                     /* 0x0000 */
    uint8_t     pad_0x0200[0x0400 - 0x0200];      /* 0x0200 */
    uint64_t    gscb[4];                          /* 0x0400 */
    uint8_t     pad_0x0420[0x1000 - 0x0420];      /* 0x0420 */
} SigpAdtlSaveArea;
QEMU_BUILD_BUG_ON(sizeof(SigpAdtlSaveArea) != 4096);

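/*
 * Store the additional status (vector registers and, if the area is large
 * enough, the guarded-storage control block) into the @len-byte save area
 * at @addr.  Returns 0 on success, -EFAULT if the area cannot be mapped.
 */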
#define ADTL_GS_MIN_SIZE 2048 /* minimal size of adtl save area for GS */
int s390_store_adtl_status(S390CPU *cpu, hwaddr addr, hwaddr len)
{
    SigpAdtlSaveArea *sa;
    hwaddr save = len;
    int i;

    sa = cpu_physical_memory_map(addr, &save, true);
    if (!sa) {
        return -EFAULT;
    }
    if (save != len) {
        cpu_physical_memory_unmap(sa, len, 1, 0);
        return -EFAULT;
    }

    if (s390_has_feat(S390_FEAT_VECTOR)) {
        for (i = 0; i < 32; i++) {
            sa->vregs[i][0] = cpu_to_be64(cpu->env.vregs[i][0]);
            sa->vregs[i][1] = cpu_to_be64(cpu->env.vregs[i][1]);
        }
    }
    if (s390_has_feat(S390_FEAT_GUARDED_STORAGE) && len >= ADTL_GS_MIN_SIZE) {
        for (i = 0; i < 4; i++) {
            sa->gscb[i] = cpu_to_be64(cpu->env.gscb[i]);
        }
    }

    cpu_physical_memory_unmap(sa, len, 1, len);
    return 0;
}