From cb48d5d1592e63ebd0d4a3e300ef98e38e6306d7 Mon Sep 17 00:00:00 2001
From: Richard Henderson <richard.henderson@linaro.org>
Date: Wed, 28 Feb 2024 10:25:17 -1000
Subject: [PATCH 4/5] linux-user: Rewrite target_shmat

Handle combined host and guest alignment requirements.
Handle host and guest page size differences.
Handle SHM_EXEC.

Upstream-Status: Submitted [https://www.mail-archive.com/qemu-devel@nongnu.org/msg1026793.html]

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/115
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
---
 linux-user/mmap.c | 166 +++++++++++++++++++++++++++++++++++++---------
 1 file changed, 133 insertions(+), 33 deletions(-)

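[ Editor's note, kept in the notes area so git am ignores it: a worked
  example of the new size/alignment arithmetic and a small guest-side
  sketch of the attach patterns the rewrite has to handle.  The page
  sizes, segment size and address hint below are illustrative
  assumptions only, not values taken from the patch.

  With a 4 KiB guest page and a 64 KiB host page, a 5000-byte segment
  gives t_len = TARGET_PAGE_ALIGN(5000) = 8 KiB and
  h_len = ROUND_UP(5000, 65536) = 64 KiB, so m_len = 64 KiB; likewise
  m_shmlba is the larger of the guest SHMLBA and the host requirement,
  so an SHM_RND hint is rounded down far enough to satisfy both sides.

  Guest-side sketch (plain SysV shm calls, not code from the QEMU tree):

    #include <stdio.h>
    #include <sys/ipc.h>
    #include <sys/shm.h>

    #ifndef SHM_EXEC
    #define SHM_EXEC 0100000   /* Linux-specific: attach with exec permission */
    #endif

    int main(void)
    {
        /* 1-byte segment: the kernel rounds the size to its page size and
         * qemu must round again when host and guest page sizes differ. */
        int id = shmget(IPC_PRIVATE, 1, IPC_CREAT | 0700);
        if (id < 0) {
            perror("shmget");
            return 1;
        }

        /* Let the kernel pick an SHMLBA-aligned address, then detach. */
        char *p = shmat(id, NULL, 0);
        if (p == (void *)-1) {
            perror("shmat");
            return 1;
        }
        shmdt(p);

        /* Re-attach at a deliberately misaligned hint: SHM_RND asks for
         * the address to be rounded down, SHM_EXEC for an executable
         * mapping -- both of the paths this rewrite adds handling for. */
        char *q = shmat(id, p + 1, SHM_RND | SHM_EXEC);
        if (q == (void *)-1) {
            perror("shmat(SHM_RND|SHM_EXEC)");
        } else {
            printf("re-attached at %p (hint %p)\n", (void *)q, (void *)(p + 1));
            shmdt(q);
        }

        shmctl(id, IPC_RMID, NULL);
        return 0;
    }

  Before this rewrite, the SHM_RND rounding honoured only the guest
  SHMLBA and page_set_flags() never set PAGE_EXEC, so attaches like the
  one above could fail or leave the segment non-executable under
  emulation. ]
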
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index 18fb3aaf7..6a2f649bb 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -1062,69 +1062,161 @@ static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
 }
 #endif
 
+#if defined(__arm__) || defined(__mips__) || defined(__sparc__)
+#define HOST_FORCE_SHMLBA 1
+#else
+#define HOST_FORCE_SHMLBA 0
+#endif
+
 abi_ulong target_shmat(CPUArchState *cpu_env, int shmid,
                        abi_ulong shmaddr, int shmflg)
 {
     CPUState *cpu = env_cpu(cpu_env);
-    abi_ulong raddr;
     struct shmid_ds shm_info;
     int ret;
-    abi_ulong shmlba;
+    int h_pagesize;
+    int t_shmlba, h_shmlba, m_shmlba;
+    size_t t_len, h_len, m_len;
 
     /* shmat pointers are always untagged */
 
-    /* find out the length of the shared memory segment */
+    /*
+     * Because we can't use host shmat() unless the address is sufficiently
+     * aligned for the host, we'll need to check both.
+     * TODO: Could be fixed with softmmu.
+     */
+    t_shmlba = target_shmlba(cpu_env);
+    h_pagesize = qemu_real_host_page_size();
+    h_shmlba = (HOST_FORCE_SHMLBA ? SHMLBA : h_pagesize);
+    m_shmlba = MAX(t_shmlba, h_shmlba);
+
+    if (shmaddr) {
+        if (shmaddr & (m_shmlba - 1)) {
+            if (shmflg & SHM_RND) {
+                /*
+                 * The guest is allowing the kernel to round the address.
+                 * Assume that the guest is ok with us rounding to the
+                 * host required alignment too.  Anyway if we don't, we'll
+                 * get an error from the kernel.
+                 */
+                shmaddr &= ~(m_shmlba - 1);
+                if (shmaddr == 0 && (shmflg & SHM_REMAP)) {
+                    return -TARGET_EINVAL;
+                }
+            } else {
+                int require = TARGET_PAGE_SIZE;
+#ifdef TARGET_FORCE_SHMLBA
+                require = t_shmlba;
+#endif
+                /*
+                 * Include host required alignment, as otherwise we cannot
+                 * use host shmat at all.
+                 */
+                require = MAX(require, h_shmlba);
+                if (shmaddr & (require - 1)) {
+                    return -TARGET_EINVAL;
+                }
+            }
+        }
+    } else {
+        if (shmflg & SHM_REMAP) {
+            return -TARGET_EINVAL;
+        }
+    }
+    /* All rounding now manually concluded. */
+    shmflg &= ~SHM_RND;
+
+    /* Find out the length of the shared memory segment. */
     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
     if (is_error(ret)) {
         /* can't get length, bail out */
         return ret;
     }
+    t_len = TARGET_PAGE_ALIGN(shm_info.shm_segsz);
+    h_len = ROUND_UP(shm_info.shm_segsz, h_pagesize);
+    m_len = MAX(t_len, h_len);
 
-    shmlba = target_shmlba(cpu_env);
-
-    if (shmaddr & (shmlba - 1)) {
-        if (shmflg & SHM_RND) {
-            shmaddr &= ~(shmlba - 1);
-        } else {
-            return -TARGET_EINVAL;
-        }
-    }
-    if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
+    if (!guest_range_valid_untagged(shmaddr, m_len)) {
         return -TARGET_EINVAL;
     }
 
     WITH_MMAP_LOCK_GUARD() {
-        void *host_raddr;
+        bool mapped = false;
+        void *want, *test;
         abi_ulong last;
 
-        if (shmaddr) {
-            host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
+        if (!shmaddr) {
+            shmaddr = mmap_find_vma(0, m_len, m_shmlba);
+            if (shmaddr == -1) {
+                return -TARGET_ENOMEM;
+            }
+            mapped = !reserved_va;
+        } else if (shmflg & SHM_REMAP) {
+            /*
+             * If host page size > target page size, the host shmat may map
+             * more memory than the guest expects.  Reject a mapping that
+             * would replace memory in the unexpected gap.
+             * TODO: Could be fixed with softmmu.
+             */
+            if (t_len < h_len &&
+                !page_check_range_empty(shmaddr + t_len,
+                                        shmaddr + h_len - 1)) {
+                return -TARGET_EINVAL;
+            }
         } else {
-            abi_ulong mmap_start;
+            if (!page_check_range_empty(shmaddr, shmaddr + m_len - 1)) {
+                return -TARGET_EINVAL;
+            }
+        }
 
-            /* In order to use the host shmat, we need to honor host SHMLBA.  */
-            mmap_start = mmap_find_vma(0, shm_info.shm_segsz,
-                                       MAX(SHMLBA, shmlba));
+        /* All placement is now complete. */
+        want = (void *)g2h_untagged(shmaddr);
 
-            if (mmap_start == -1) {
-                return -TARGET_ENOMEM;
+        /*
+         * Map anonymous pages across the entire range, then remap with
+         * the shared memory.  This is required for a number of corner
+         * cases for which host and guest page sizes differ.
+         */
+        if (h_len != t_len) {
+            int mmap_p = PROT_READ | (shmflg & SHM_RDONLY ? 0 : PROT_WRITE);
+            int mmap_f = MAP_PRIVATE | MAP_ANONYMOUS
+                       | (reserved_va || (shmflg & SHM_REMAP)
+                          ? MAP_FIXED : MAP_FIXED_NOREPLACE);
+
+            test = mmap(want, m_len, mmap_p, mmap_f, -1, 0);
+            if (unlikely(test != want)) {
+                /* shmat returns EINVAL not EEXIST like mmap. */
+                ret = (test == MAP_FAILED && errno != EEXIST
+                       ? get_errno(-1) : -TARGET_EINVAL);
+                if (mapped) {
+                    do_munmap(want, m_len);
+                }
+                return ret;
             }
-            host_raddr = shmat(shmid, g2h_untagged(mmap_start),
-                               shmflg | SHM_REMAP);
+            mapped = true;
         }
 
-        if (host_raddr == (void *)-1) {
-            return get_errno(-1);
+        if (reserved_va || mapped) {
+            shmflg |= SHM_REMAP;
+        }
+        test = shmat(shmid, want, shmflg);
+        if (test == MAP_FAILED) {
+            ret = get_errno(-1);
+            if (mapped) {
+                do_munmap(want, m_len);
+            }
+            return ret;
         }
-        raddr = h2g(host_raddr);
-        last = raddr + shm_info.shm_segsz - 1;
+        assert(test == want);
 
-        page_set_flags(raddr, last,
+        last = shmaddr + m_len - 1;
+        page_set_flags(shmaddr, last,
                        PAGE_VALID | PAGE_RESET | PAGE_READ |
-                       (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
+                       (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE) |
+                       (shmflg & SHM_EXEC ? PAGE_EXEC : 0));
 
-        shm_region_rm_complete(raddr, last);
-        shm_region_add(raddr, last);
+        shm_region_rm_complete(shmaddr, last);
+        shm_region_add(shmaddr, last);
     }
 
     /*
@@ -1138,7 +1230,15 @@ abi_ulong target_shmat(CPUArchState *cpu_env, int shmid,
         tb_flush(cpu);
     }
 
-    return raddr;
+    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
+        FILE *f = qemu_log_trylock();
+        if (f) {
+            fprintf(f, "page layout changed following shmat\n");
+            page_dump(f);
+            qemu_log_unlock(f);
+        }
+    }
+    return shmaddr;
 }
 
 abi_long target_shmdt(abi_ulong shmaddr)
-- 
2.34.1
