/*
 * QEMU Apple ParavirtualizedGraphics.framework device
 *
 * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 *
 * ParavirtualizedGraphics.framework is a set of libraries that macOS provides
 * which implements 3d graphics passthrough to the host as well as a
 * proprietary guest communication channel to drive it. This device model
 * implements support to drive that library from within QEMU.
 */
13
14#include "qemu/osdep.h"
15#include "qemu/lockable.h"
16#include "qemu/cutils.h"
17#include "qemu/log.h"
18#include "qapi/visitor.h"
19#include "qapi/error.h"
20#include "block/aio-wait.h"
21#include "exec/address-spaces.h"
22#include "system/dma.h"
23#include "migration/blocker.h"
24#include "ui/console.h"
25#include "apple-gfx.h"
26#include "trace.h"
27
28#include <mach/mach.h>
29#include <mach/mach_vm.h>
30#include <dispatch/dispatch.h>
31
32#import <ParavirtualizedGraphics/ParavirtualizedGraphics.h>
33
34static const PGDisplayCoord_t apple_gfx_modes[] = {
35    { .x = 1440, .y = 1080 },
36    { .x = 1280, .y = 1024 },
37};
38
39static Error *apple_gfx_mig_blocker;
40static uint32_t next_pgdisplay_serial_num = 1;
41
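/*
 * Framework calls which may block or re-enter QEMU (MMIO handlers, frame
 * encoding) are dispatched on this concurrent queue rather than run directly
 * on the BQL thread, so that thread can keep servicing AIO and bottom halves
 * while it waits. See the comment in apple_gfx_write() for details.
 */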
static dispatch_queue_t get_background_queue(void)
{
    return dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
}

/* ------ PGTask and task operations: new/destroy/map/unmap ------ */

/*
 * This implements the type declared in <ParavirtualizedGraphics/PGDevice.h>
 * which is opaque from the framework's point of view. It is used in callbacks
 * in the form of its typedef PGTask_t, which also already exists in the
 * framework headers.
 *
 * A "task" in PVG terminology represents a host-virtual contiguous address
 * range which is reserved in a large chunk on task creation. The mapMemory
 * callback then requests ranges of guest system memory (identified by their
 * GPA) to be mapped into subranges of this reserved address space.
 * This type of operation isn't well-supported by QEMU's memory subsystem,
 * but it is fortunately trivial to achieve with Darwin's mach_vm_remap() call,
 * which allows us to refer to the same backing memory via multiple virtual
 * address ranges. The Mach VM APIs are therefore used throughout for managing
 * task memory.
 */
struct PGTask_s {
    QTAILQ_ENTRY(PGTask_s) node;
    AppleGFXState *s;
    mach_vm_address_t address;
    uint64_t len;
    /*
     * All unique MemoryRegions for which a mapping has been created in this
     * task, and on which we have thus called memory_region_ref(). There are
     * usually very few regions of system RAM in total, so we expect this array
     * to be very short. Therefore, no need for sorting or fancy search
     * algorithms, linear search will do.
     * Protected by AppleGFXState's task_mutex.
     */
    GPtrArray *mapped_regions;
};

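/*
 * Reserves a contiguous range of host virtual address space for a new task
 * and adds the task to the device's task list. The reservation is populated
 * with guest RAM later, via the mapMemory callback
 * (apple_gfx_task_map_memory()).
 */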
static PGTask_t *apple_gfx_new_task(AppleGFXState *s, uint64_t len)
{
    mach_vm_address_t task_mem;
    PGTask_t *task;
    kern_return_t r;

    r = mach_vm_allocate(mach_task_self(), &task_mem, len, VM_FLAGS_ANYWHERE);
    if (r != KERN_SUCCESS) {
        return NULL;
    }

    task = g_new0(PGTask_t, 1);
    task->s = s;
    task->address = task_mem;
    task->len = len;
    task->mapped_regions = g_ptr_array_sized_new(2 /* Usually enough */);

    QEMU_LOCK_GUARD(&s->task_mutex);
    QTAILQ_INSERT_TAIL(&s->tasks, task, node);

    return task;
}

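/*
 * Releases the MemoryRegion references held for the task's mappings, returns
 * the task's reserved address range to the system, and unlinks and frees the
 * task.
 */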
static void apple_gfx_destroy_task(AppleGFXState *s, PGTask_t *task)
{
    GPtrArray *regions = task->mapped_regions;
    MemoryRegion *region;
    size_t i;

    for (i = 0; i < regions->len; ++i) {
        region = g_ptr_array_index(regions, i);
        memory_region_unref(region);
    }
    g_ptr_array_unref(regions);

    mach_vm_deallocate(mach_task_self(), task->address, task->len);

    QEMU_LOCK_GUARD(&s->task_mutex);
    QTAILQ_REMOVE(&s->tasks, task, node);
    g_free(task);
}

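/*
 * Returns a host pointer to the guest RAM backing the given GPA range, or
 * NULL if the range is not contiguous, directly accessible RAM. On success,
 * *mapping_in_region is set to the MemoryRegion containing the range; the
 * caller should hold a reference on that region for as long as it uses the
 * pointer. Note that address_space_translate() requires the caller to hold
 * the RCU read lock.
 */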
void *apple_gfx_host_ptr_for_gpa_range(uint64_t guest_physical,
                                       uint64_t length, bool read_only,
                                       MemoryRegion **mapping_in_region)
{
    MemoryRegion *ram_region;
    char *host_ptr;
    hwaddr ram_region_offset = 0;
    hwaddr ram_region_length = length;

    ram_region = address_space_translate(&address_space_memory,
                                         guest_physical,
                                         &ram_region_offset,
                                         &ram_region_length, !read_only,
                                         MEMTXATTRS_UNSPECIFIED);

    if (!ram_region || ram_region_length < length ||
        !memory_access_is_direct(ram_region, !read_only)) {
        return NULL;
    }

    host_ptr = memory_region_get_ram_ptr(ram_region);
    if (!host_ptr) {
        return NULL;
    }
    host_ptr += ram_region_offset;
    *mapping_in_region = ram_region;
    return host_ptr;
}

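/*
 * mapMemory callback implementation: aliases each guest physical range into
 * the task's reserved address range at the given offset via mach_vm_remap(),
 * taking a reference on each newly encountered MemoryRegion so the backing
 * RAM cannot disappear while mapped. Returns false if any range could not be
 * resolved to directly accessible guest RAM.
 */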
static bool apple_gfx_task_map_memory(AppleGFXState *s, PGTask_t *task,
                                      uint64_t virtual_offset,
                                      PGPhysicalMemoryRange_t *ranges,
                                      uint32_t range_count, bool read_only)
{
    kern_return_t r;
    void *source_ptr;
    mach_vm_address_t target;
    vm_prot_t cur_protection, max_protection;
    bool success = true;
    MemoryRegion *region;

    RCU_READ_LOCK_GUARD();
    QEMU_LOCK_GUARD(&s->task_mutex);

    trace_apple_gfx_map_memory(task, range_count, virtual_offset, read_only);
    for (int i = 0; i < range_count; i++) {
        PGPhysicalMemoryRange_t *range = &ranges[i];

        target = task->address + virtual_offset;
        virtual_offset += range->physicalLength;

        trace_apple_gfx_map_memory_range(i, range->physicalAddress,
                                         range->physicalLength);

        region = NULL;
        source_ptr = apple_gfx_host_ptr_for_gpa_range(range->physicalAddress,
                                                      range->physicalLength,
                                                      read_only, &region);
        if (!source_ptr) {
            success = false;
            continue;
        }

        if (!g_ptr_array_find(task->mapped_regions, region, NULL)) {
            g_ptr_array_add(task->mapped_regions, region);
            memory_region_ref(region);
        }

        cur_protection = 0;
        max_protection = 0;
        /* Map guest RAM at range->physicalAddress into PG task memory range */
        r = mach_vm_remap(mach_task_self(),
                          &target, range->physicalLength, vm_page_size - 1,
                          VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
                          mach_task_self(), (mach_vm_address_t)source_ptr,
                          false /* shared mapping, no copy */,
                          &cur_protection, &max_protection,
                          VM_INHERIT_COPY);
        trace_apple_gfx_remap(r, source_ptr, target);
        g_assert(r == KERN_SUCCESS);
    }

    return success;
}

static void apple_gfx_task_unmap_memory(AppleGFXState *s, PGTask_t *task,
                                        uint64_t virtual_offset, uint64_t length)
{
    kern_return_t r;
    mach_vm_address_t range_address;

    trace_apple_gfx_unmap_memory(task, virtual_offset, length);

    /*
     * Replace task memory range with fresh 0 pages, undoing the mapping
     * from guest RAM.
     */
    range_address = task->address + virtual_offset;
    r = mach_vm_allocate(mach_task_self(), &range_address, length,
                         VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE);
    g_assert(r == KERN_SUCCESS);
}

/* ------ Rendering and frame management ------ */

static void apple_gfx_render_frame_completed_bh(void *opaque);

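/*
 * Kicks off asynchronous rendering of the current guest frame into
 * s->texture. The PVG encode call must happen on a background queue, and the
 * command buffer's completion handler schedules
 * apple_gfx_render_frame_completed_bh() on the main AIO context, which copies
 * the rendered pixels into the QEMU display surface.
 */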
static void apple_gfx_render_new_frame(AppleGFXState *s)
{
    bool managed_texture = s->using_managed_texture_storage;
    uint32_t width = surface_width(s->surface);
    uint32_t height = surface_height(s->surface);
    MTLRegion region = MTLRegionMake2D(0, 0, width, height);
    id<MTLCommandBuffer> command_buffer = [s->mtl_queue commandBuffer];
    id<MTLTexture> texture = s->texture;

    assert(bql_locked());
    [texture retain];
    [command_buffer retain];

    s->rendering_frame_width = width;
    s->rendering_frame_height = height;

    dispatch_async(get_background_queue(), ^{
        /*
         * This is not safe to call from the BQL/BH due to PVG-internal locks
         * causing deadlocks.
         */
        bool r = [s->pgdisp encodeCurrentFrameToCommandBuffer:command_buffer
                                                      texture:texture
                                                       region:region];
        if (!r) {
            [texture release];
            [command_buffer release];
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: encodeCurrentFrameToCommandBuffer:texture:region: "
                          "failed\n", __func__);
            bql_lock();
            --s->pending_frames;
            if (s->pending_frames > 0) {
                apple_gfx_render_new_frame(s);
            }
            bql_unlock();
            return;
        }

        if (managed_texture) {
            /* "Managed" textures exist in both VRAM and RAM and must be synced. */
            id<MTLBlitCommandEncoder> blit = [command_buffer blitCommandEncoder];
            [blit synchronizeResource:texture];
            [blit endEncoding];
        }
        [texture release];
        [command_buffer addCompletedHandler:
            ^(id<MTLCommandBuffer> cb)
            {
                aio_bh_schedule_oneshot(qemu_get_aio_context(),
                                        apple_gfx_render_frame_completed_bh, s);
            }];
        [command_buffer commit];
        [command_buffer release];
    });
}

static void copy_mtl_texture_to_surface_mem(id<MTLTexture> texture, void *vram)
{
    /*
     * TODO: Skip this entirely on a pure Metal or headless/guest-only
     * rendering path, else use a blit command encoder? Needs careful
     * (double?) buffering design.
     */
    size_t width = texture.width, height = texture.height;
    MTLRegion region = MTLRegionMake2D(0, 0, width, height);
    [texture getBytes:vram
          bytesPerRow:(width * 4)
        bytesPerImage:(width * height * 4)
           fromRegion:region
          mipmapLevel:0
                slice:0];
}

static void apple_gfx_render_frame_completed_bh(void *opaque)
{
    AppleGFXState *s = opaque;

    @autoreleasepool {
        --s->pending_frames;
        assert(s->pending_frames >= 0);

        /* Only update display if mode hasn't changed since we started rendering. */
        if (s->rendering_frame_width == surface_width(s->surface) &&
            s->rendering_frame_height == surface_height(s->surface)) {
            copy_mtl_texture_to_surface_mem(s->texture, surface_data(s->surface));
            if (s->gfx_update_requested) {
                s->gfx_update_requested = false;
                dpy_gfx_update_full(s->con);
                graphic_hw_update_done(s->con);
                s->new_frame_ready = false;
            } else {
                s->new_frame_ready = true;
            }
        }
        if (s->pending_frames > 0) {
            apple_gfx_render_new_frame(s);
        }
    }
}
static void apple_gfx_fb_update_display(void *opaque)
{
    AppleGFXState *s = opaque;

    assert(bql_locked());
    if (s->new_frame_ready) {
        dpy_gfx_update_full(s->con);
        s->new_frame_ready = false;
        graphic_hw_update_done(s->con);
    } else if (s->pending_frames > 0) {
        s->gfx_update_requested = true;
    } else {
        graphic_hw_update_done(s->con);
    }
}

static const GraphicHwOps apple_gfx_fb_ops = {
    .gfx_update = apple_gfx_fb_update_display,
    .gfx_update_async = true,
};

/* ------ Mouse cursor and display mode setting ------ */

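/*
 * (Re)creates the QEMU display surface and the Metal texture into which PVG
 * renders whenever the guest switches display mode. Does nothing if the
 * dimensions are unchanged.
 */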
static void set_mode(AppleGFXState *s, uint32_t width, uint32_t height)
{
    MTLTextureDescriptor *textureDescriptor;

    if (s->surface &&
        width == surface_width(s->surface) &&
        height == surface_height(s->surface)) {
        return;
    }

    [s->texture release];

    s->surface = qemu_create_displaysurface(width, height);

    @autoreleasepool {
        textureDescriptor =
            [MTLTextureDescriptor
                texture2DDescriptorWithPixelFormat:MTLPixelFormatBGRA8Unorm
                                             width:width
                                            height:height
                                         mipmapped:NO];
        textureDescriptor.usage = s->pgdisp.minimumTextureUsage;
        s->texture = [s->mtl newTextureWithDescriptor:textureDescriptor];
        s->using_managed_texture_storage =
            (s->texture.storageMode == MTLStorageModeManaged);
    }

    dpy_gfx_replace_surface(s->con, s->surface);
}

static void update_cursor(AppleGFXState *s)
{
    assert(bql_locked());
    dpy_mouse_set(s->con, s->pgdisp.cursorPosition.x,
                  s->pgdisp.cursorPosition.y, qatomic_read(&s->cursor_show));
}

static void update_cursor_bh(void *opaque)
{
    AppleGFXState *s = opaque;
    update_cursor(s);
}

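/*
 * Heap-allocated job passed from the PVG cursorGlyphHandler callback to a BH
 * on the main AIO context; it carries a retained reference to the glyph
 * bitmap, which set_cursor_glyph() releases again.
 */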
typedef struct AppleGFXSetCursorGlyphJob {
    AppleGFXState *s;
    NSBitmapImageRep *glyph;
    PGDisplayCoord_t hotspot;
} AppleGFXSetCursorGlyphJob;

static void set_cursor_glyph(void *opaque)
{
    AppleGFXSetCursorGlyphJob *job = opaque;
    AppleGFXState *s = job->s;
    NSBitmapImageRep *glyph = job->glyph;
    uint32_t bpp = glyph.bitsPerPixel;
    size_t width = glyph.pixelsWide;
    size_t height = glyph.pixelsHigh;
    size_t padding_bytes_per_row = glyph.bytesPerRow - width * 4;
    const uint8_t* px_data = glyph.bitmapData;

    trace_apple_gfx_cursor_set(bpp, width, height);

    if (s->cursor) {
        cursor_unref(s->cursor);
        s->cursor = NULL;
    }

    if (bpp == 32) { /* Shouldn't be anything else, but just to be safe... */
        s->cursor = cursor_alloc(width, height);
        s->cursor->hot_x = job->hotspot.x;
        s->cursor->hot_y = job->hotspot.y;

        uint32_t *dest_px = s->cursor->data;

        for (size_t y = 0; y < height; ++y) {
            for (size_t x = 0; x < width; ++x) {
                /*
                 * NSBitmapImageRep's red & blue channels are swapped
                 * compared to QEMUCursor's.
                 */
                *dest_px =
                    (px_data[0] << 16u) |
                    (px_data[1] <<  8u) |
                    (px_data[2] <<  0u) |
                    (px_data[3] << 24u);
                ++dest_px;
                px_data += 4;
            }
            px_data += padding_bytes_per_row;
        }
        dpy_cursor_define(s->con, s->cursor);
        update_cursor(s);
    }
    [glyph release];

    g_free(job);
}

/* ------ DMA (device reading system memory) ------ */

typedef struct AppleGFXReadMemoryJob {
    QemuSemaphore sem;
    hwaddr physical_address;
    uint64_t length;
    void *dst;
    bool success;
} AppleGFXReadMemoryJob;

static void apple_gfx_do_read_memory(void *opaque)
{
    AppleGFXReadMemoryJob *job = opaque;
    MemTxResult r;

    r = dma_memory_read(&address_space_memory, job->physical_address,
                        job->dst, job->length, MEMTXATTRS_UNSPECIFIED);
    job->success = (r == MEMTX_OK);

    qemu_sem_post(&job->sem);
}

static bool apple_gfx_read_memory(AppleGFXState *s, hwaddr physical_address,
                                  uint64_t length, void *dst)
{
    AppleGFXReadMemoryJob job = {
        .physical_address = physical_address, .length = length, .dst = dst
    };

    trace_apple_gfx_read_memory(physical_address, length, dst);

    /* Performing DMA requires BQL, so do it in a BH. */
    qemu_sem_init(&job.sem, 0);
    aio_bh_schedule_oneshot(qemu_get_aio_context(),
                            apple_gfx_do_read_memory, &job);
    qemu_sem_wait(&job.sem);
    qemu_sem_destroy(&job.sem);
    return job.success;
}

/* ------ Memory-mapped device I/O operations ------ */

typedef struct AppleGFXIOJob {
    AppleGFXState *state;
    uint64_t offset;
    uint64_t value;
    bool completed;
} AppleGFXIOJob;

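/*
 * MMIO reads follow the same pattern as writes: the framework call runs on a
 * background dispatch queue while the BQL-holding thread services AIO until
 * it completes. See the comment in apple_gfx_write() for the rationale.
 */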
static void apple_gfx_do_read(void *opaque)
{
    AppleGFXIOJob *job = opaque;
    job->value = [job->state->pgdev mmioReadAtOffset:job->offset];
    qatomic_set(&job->completed, true);
    aio_wait_kick();
}

static uint64_t apple_gfx_read(void *opaque, hwaddr offset, unsigned size)
{
    AppleGFXIOJob job = {
        .state = opaque,
        .offset = offset,
        .completed = false,
    };
    dispatch_queue_t queue = get_background_queue();

    dispatch_async_f(queue, &job, apple_gfx_do_read);
    AIO_WAIT_WHILE(NULL, !qatomic_read(&job.completed));

    trace_apple_gfx_read(offset, job.value);
    return job.value;
}

static void apple_gfx_do_write(void *opaque)
{
    AppleGFXIOJob *job = opaque;
    [job->state->pgdev mmioWriteAtOffset:job->offset value:job->value];
    qatomic_set(&job->completed, true);
    aio_wait_kick();
}

static void apple_gfx_write(void *opaque, hwaddr offset, uint64_t val,
                            unsigned size)
{
    /*
     * The methods mmioReadAtOffset: and especially mmioWriteAtOffset: can
     * trigger synchronous operations on other dispatch queues, which in turn
     * may call back out on one or more of the callback blocks. For this reason,
     * and as we are holding the BQL, we invoke the I/O methods on a pool
     * thread and handle AIO tasks while we wait. Any work in the callbacks
     * requiring the BQL will in turn schedule BHs which this thread will
     * process while waiting.
     */
    AppleGFXIOJob job = {
        .state = opaque,
        .offset = offset,
        .value = val,
        .completed = false,
    };
    dispatch_queue_t queue = get_background_queue();

    dispatch_async_f(queue, &job, apple_gfx_do_write);
    AIO_WAIT_WHILE(NULL, !qatomic_read(&job.completed));

    trace_apple_gfx_write(offset, val);
}

static const MemoryRegionOps apple_gfx_ops = {
    .read = apple_gfx_read,
    .write = apple_gfx_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

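/*
 * The framework reports the size of the device's MMIO region via a freshly
 * constructed PGDeviceDescriptor; create and release one just to query the
 * default.
 */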
static size_t apple_gfx_get_default_mmio_range_size(void)
{
    size_t mmio_range_size;
    @autoreleasepool {
        PGDeviceDescriptor *desc = [PGDeviceDescriptor new];
        mmio_range_size = desc.mmioLength;
        [desc release];
    }
    return mmio_range_size;
}

/* ------ Initialisation and startup ------ */

void apple_gfx_common_init(Object *obj, AppleGFXState *s, const char* obj_name)
{
    size_t mmio_range_size = apple_gfx_get_default_mmio_range_size();

    trace_apple_gfx_common_init(obj_name, mmio_range_size);
    memory_region_init_io(&s->iomem_gfx, obj, &apple_gfx_ops, s, obj_name,
                          mmio_range_size);

    /* TODO: PVG framework supports serialising device state: integrate it! */
}

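/*
 * Wires up the PGDeviceDescriptor callbacks through which the framework
 * creates and destroys tasks, maps and unmaps guest memory into them, and
 * reads guest memory directly. The blocks capture the AppleGFXState and
 * forward to the corresponding helpers above.
 */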
static void apple_gfx_register_task_mapping_handlers(AppleGFXState *s,
                                                     PGDeviceDescriptor *desc)
{
    desc.createTask = ^(uint64_t vmSize, void * _Nullable * _Nonnull baseAddress) {
        PGTask_t *task = apple_gfx_new_task(s, vmSize);
        *baseAddress = (void *)task->address;
        trace_apple_gfx_create_task(vmSize, *baseAddress);
        return task;
    };

    desc.destroyTask = ^(PGTask_t * _Nonnull task) {
        trace_apple_gfx_destroy_task(task, task->mapped_regions->len);

        apple_gfx_destroy_task(s, task);
    };

    desc.mapMemory = ^bool(PGTask_t * _Nonnull task, uint32_t range_count,
                           uint64_t virtual_offset, bool read_only,
                           PGPhysicalMemoryRange_t * _Nonnull ranges) {
        return apple_gfx_task_map_memory(s, task, virtual_offset,
                                         ranges, range_count, read_only);
    };

    desc.unmapMemory = ^bool(PGTask_t * _Nonnull task, uint64_t virtual_offset,
                             uint64_t length) {
        apple_gfx_task_unmap_memory(s, task, virtual_offset, length);
        return true;
    };

    desc.readMemory = ^bool(uint64_t physical_address, uint64_t length,
                            void * _Nonnull dst) {
        return apple_gfx_read_memory(s, physical_address, length, dst);
    };
}

static void new_frame_handler_bh(void *opaque)
{
    AppleGFXState *s = opaque;

    /* Drop frames if guest gets too far ahead. */
    if (s->pending_frames >= 2) {
        return;
    }
    ++s->pending_frames;
    if (s->pending_frames > 1) {
        return;
    }

    @autoreleasepool {
        apple_gfx_render_new_frame(s);
    }
}

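/*
 * Builds the PGDisplayDescriptor whose handler blocks bridge PVG display
 * events (new frame, mode change, cursor updates) back into QEMU, either by
 * scheduling a BH on the main AIO context or, for mode changes, by taking
 * the BQL directly.
 */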
static PGDisplayDescriptor *apple_gfx_prepare_display_descriptor(AppleGFXState *s)
{
    PGDisplayDescriptor *disp_desc = [PGDisplayDescriptor new];

    disp_desc.name = @"QEMU display";
    disp_desc.sizeInMillimeters = NSMakeSize(400., 300.); /* A 20" display */
    disp_desc.queue = dispatch_get_main_queue();
    disp_desc.newFrameEventHandler = ^(void) {
        trace_apple_gfx_new_frame();
        aio_bh_schedule_oneshot(qemu_get_aio_context(), new_frame_handler_bh, s);
    };
    disp_desc.modeChangeHandler = ^(PGDisplayCoord_t sizeInPixels,
                                    OSType pixelFormat) {
        trace_apple_gfx_mode_change(sizeInPixels.x, sizeInPixels.y);

        BQL_LOCK_GUARD();
        set_mode(s, sizeInPixels.x, sizeInPixels.y);
    };
    disp_desc.cursorGlyphHandler = ^(NSBitmapImageRep *glyph,
                                     PGDisplayCoord_t hotspot) {
        AppleGFXSetCursorGlyphJob *job = g_malloc0(sizeof(*job));
        job->s = s;
        job->glyph = glyph;
        job->hotspot = hotspot;
        [glyph retain];
        aio_bh_schedule_oneshot(qemu_get_aio_context(),
                                set_cursor_glyph, job);
    };
    disp_desc.cursorShowHandler = ^(BOOL show) {
        trace_apple_gfx_cursor_show(show);
        qatomic_set(&s->cursor_show, show);
        aio_bh_schedule_oneshot(qemu_get_aio_context(),
                                update_cursor_bh, s);
    };
    disp_desc.cursorMoveHandler = ^(void) {
        trace_apple_gfx_cursor_move();
        aio_bh_schedule_oneshot(qemu_get_aio_context(),
                                update_cursor_bh, s);
    };

    return disp_desc;
}

static NSArray<PGDisplayMode*>* apple_gfx_prepare_display_mode_array(void)
{
    PGDisplayMode *modes[ARRAY_SIZE(apple_gfx_modes)];
    NSArray<PGDisplayMode*>* mode_array;
    int i;

    for (i = 0; i < ARRAY_SIZE(apple_gfx_modes); i++) {
        modes[i] =
            [[PGDisplayMode alloc] initWithSizeInPixels:apple_gfx_modes[i] refreshRateInHz:60.];
    }

    mode_array = [NSArray arrayWithObjects:modes count:ARRAY_SIZE(apple_gfx_modes)];

    for (i = 0; i < ARRAY_SIZE(apple_gfx_modes); i++) {
        [modes[i] release];
        modes[i] = nil;
    }

    return mode_array;
}

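/*
 * Returns a retained (+1 reference, as the "copy" naming convention implies)
 * Metal device; the caller is responsible for releasing it.
 */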
static id<MTLDevice> copy_suitable_metal_device(void)
{
    id<MTLDevice> dev = nil;
    NSArray<id<MTLDevice>> *devs = MTLCopyAllDevices();

    /* Prefer a unified memory GPU. Failing that, pick a non-removable GPU. */
    for (size_t i = 0; i < devs.count; ++i) {
        if (devs[i].hasUnifiedMemory) {
            dev = devs[i];
            break;
        }
        if (!devs[i].removable) {
            dev = devs[i];
        }
    }

    if (dev != nil) {
        [dev retain];
    } else {
        dev = MTLCreateSystemDefaultDevice();
    }
    [devs release];

    return dev;
}

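/*
 * Common realize: installs the (one-off, global) migration blocker, picks a
 * Metal device and command queue, creates the PGDevice from the descriptor,
 * and attaches a single PGDisplay and QEMU graphics console to it.
 */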
bool apple_gfx_common_realize(AppleGFXState *s, DeviceState *dev,
                              PGDeviceDescriptor *desc, Error **errp)
{
    PGDisplayDescriptor *disp_desc;

    if (apple_gfx_mig_blocker == NULL) {
        error_setg(&apple_gfx_mig_blocker,
                  "Migration state blocked by apple-gfx display device");
        if (migrate_add_blocker(&apple_gfx_mig_blocker, errp) < 0) {
            return false;
        }
    }

    qemu_mutex_init(&s->task_mutex);
    QTAILQ_INIT(&s->tasks);
    s->mtl = copy_suitable_metal_device();
    s->mtl_queue = [s->mtl newCommandQueue];

    desc.device = s->mtl;

    apple_gfx_register_task_mapping_handlers(s, desc);

    s->cursor_show = true;

    s->pgdev = PGNewDeviceWithDescriptor(desc);

    disp_desc = apple_gfx_prepare_display_descriptor(s);
    /*
     * Although the framework supports multiple virtual displays per graphics
     * device, this integration currently does not. It is however possible to
     * create more than one instance of the device, each with one display. The
     * macOS guest will ignore these displays if they share the same serial
     * number, so ensure each instance gets a unique one.
     */
    s->pgdisp = [s->pgdev newDisplayWithDescriptor:disp_desc
                                              port:0
                                         serialNum:next_pgdisplay_serial_num++];
    [disp_desc release];
    s->pgdisp.modeList = apple_gfx_prepare_display_mode_array();

    s->con = graphic_console_init(dev, 0, &apple_gfx_fb_ops, s);
    return true;
}