1 // Copyright 2024 Red Hat, Inc.
2 // Author(s): Paolo Bonzini <pbonzini@redhat.com>
3 // SPDX-License-Identifier: GPL-2.0-or-later
4
5 //! Bindings for `MemoryRegion`, `MemoryRegionOps` and `MemTxAttrs`
6
7 use std::{
8 ffi::{CStr, CString},
9 marker::PhantomData,
10 os::raw::{c_uint, c_void},
11 };
12
13 pub use bindings::{hwaddr, MemTxAttrs};
14
15 use crate::{
16 bindings::{self, device_endian, memory_region_init_io},
17 callbacks::FnCall,
18 cell::Opaque,
19 prelude::*,
20 zeroable::Zeroable,
21 };
22
/// A safe, owner-typed wrapper around [`bindings::MemoryRegionOps`].
///
/// The type parameter `T` is the type of the device that owns the memory
/// region; the stored read/write callbacks logically receive a `&T`, even
/// though the type is erased before they are put into the C struct.
/// Build values of this type with [`MemoryRegionOpsBuilder`].
pub struct MemoryRegionOps<T>(
    bindings::MemoryRegionOps,
    // Note: quite often you'll see PhantomData<fn(&T)> mentioned when discussing
    // covariance and contravariance; you don't need any of those to understand
    // this usage of PhantomData. Quite simply, MemoryRegionOps<T> *logically*
    // holds callbacks that take an argument of type &T, except the type is erased
    // before the callback is stored in the bindings::MemoryRegionOps field.
    // The argument of PhantomData is a function pointer in order to represent
    // that relationship; while that will also provide desirable and safe variance
    // for T, variance is not the point but just a consequence.
    PhantomData<fn(&T)>,
);
35
// SAFETY: When a *const T is passed to the callbacks, the call itself
// is done in a thread-safe manner. The invocation is okay as long as
// T itself is `Sync`, because the callbacks only ever see a shared `&T`.
unsafe impl<T: Sync> Sync for MemoryRegionOps<T> {}
40
/// Builder for [`MemoryRegionOps`].
///
/// Starts from an all-zero [`bindings::MemoryRegionOps`] (see [`Self::new`])
/// and fills in callbacks, endianness and access-size constraints through
/// chained `const fn` setters; [`Self::build`] produces the final value.
#[derive(Clone)]
pub struct MemoryRegionOpsBuilder<T>(bindings::MemoryRegionOps, PhantomData<fn(&T)>);
43
/// Type-erased C-ABI trampoline stored in `MemoryRegionOps.read`; recovers
/// the owner `&T` from `opaque` and forwards to the Rust callback `F`.
///
/// # Safety
///
/// `opaque` must be a valid, aligned pointer to a live `T` for the duration
/// of the call — it is the owner pointer that was registered together with
/// these ops (see `do_init_io`, which passes the owner as the opaque value).
unsafe extern "C" fn memory_region_ops_read_cb<T, F: for<'a> FnCall<(&'a T, hwaddr, u32), u64>>(
    opaque: *mut c_void,
    addr: hwaddr,
    size: c_uint,
) -> u64 {
    // SAFETY: per this function's contract, `opaque` points to a live T;
    // only a shared reference is created, so no aliasing rules are broken.
    F::call((unsafe { &*(opaque.cast::<T>()) }, addr, size))
}
51
/// Type-erased C-ABI trampoline stored in `MemoryRegionOps.write`; recovers
/// the owner `&T` from `opaque` and forwards to the Rust callback `F`.
///
/// # Safety
///
/// `opaque` must be a valid, aligned pointer to a live `T` for the duration
/// of the call — it is the owner pointer that was registered together with
/// these ops (see `do_init_io`, which passes the owner as the opaque value).
unsafe extern "C" fn memory_region_ops_write_cb<T, F: for<'a> FnCall<(&'a T, hwaddr, u64, u32)>>(
    opaque: *mut c_void,
    addr: hwaddr,
    data: u64,
    size: c_uint,
) {
    // SAFETY: per this function's contract, `opaque` points to a live T;
    // only a shared reference is created, so no aliasing rules are broken.
    F::call((unsafe { &*(opaque.cast::<T>()) }, addr, data, size))
}
60
61 impl<T> MemoryRegionOpsBuilder<T> {
62 #[must_use]
read<F: for<'a> FnCall<(&'a T, hwaddr, u32), u64>>(mut self, _f: &F) -> Self63 pub const fn read<F: for<'a> FnCall<(&'a T, hwaddr, u32), u64>>(mut self, _f: &F) -> Self {
64 self.0.read = Some(memory_region_ops_read_cb::<T, F>);
65 self
66 }
67
68 #[must_use]
write<F: for<'a> FnCall<(&'a T, hwaddr, u64, u32)>>(mut self, _f: &F) -> Self69 pub const fn write<F: for<'a> FnCall<(&'a T, hwaddr, u64, u32)>>(mut self, _f: &F) -> Self {
70 self.0.write = Some(memory_region_ops_write_cb::<T, F>);
71 self
72 }
73
74 #[must_use]
big_endian(mut self) -> Self75 pub const fn big_endian(mut self) -> Self {
76 self.0.endianness = device_endian::DEVICE_BIG_ENDIAN;
77 self
78 }
79
80 #[must_use]
little_endian(mut self) -> Self81 pub const fn little_endian(mut self) -> Self {
82 self.0.endianness = device_endian::DEVICE_LITTLE_ENDIAN;
83 self
84 }
85
86 #[must_use]
native_endian(mut self) -> Self87 pub const fn native_endian(mut self) -> Self {
88 self.0.endianness = device_endian::DEVICE_NATIVE_ENDIAN;
89 self
90 }
91
92 #[must_use]
valid_sizes(mut self, min: u32, max: u32) -> Self93 pub const fn valid_sizes(mut self, min: u32, max: u32) -> Self {
94 self.0.valid.min_access_size = min;
95 self.0.valid.max_access_size = max;
96 self
97 }
98
99 #[must_use]
valid_unaligned(mut self) -> Self100 pub const fn valid_unaligned(mut self) -> Self {
101 self.0.valid.unaligned = true;
102 self
103 }
104
105 #[must_use]
impl_sizes(mut self, min: u32, max: u32) -> Self106 pub const fn impl_sizes(mut self, min: u32, max: u32) -> Self {
107 self.0.impl_.min_access_size = min;
108 self.0.impl_.max_access_size = max;
109 self
110 }
111
112 #[must_use]
impl_unaligned(mut self) -> Self113 pub const fn impl_unaligned(mut self) -> Self {
114 self.0.impl_.unaligned = true;
115 self
116 }
117
118 #[must_use]
build(self) -> MemoryRegionOps<T>119 pub const fn build(self) -> MemoryRegionOps<T> {
120 MemoryRegionOps::<T>(self.0, PhantomData)
121 }
122
123 #[must_use]
new() -> Self124 pub const fn new() -> Self {
125 Self(bindings::MemoryRegionOps::ZERO, PhantomData)
126 }
127 }
128
// `Default` simply delegates to the const `new()` constructor, giving an
// all-zero builder; this keeps the two construction paths identical.
impl<T> Default for MemoryRegionOpsBuilder<T> {
    fn default() -> Self {
        Self::new()
    }
}
134
/// A safe wrapper around [`bindings::MemoryRegion`].
///
/// `#[repr(transparent)]` guarantees the same layout as the wrapped
/// `Opaque<bindings::MemoryRegion>`, so pointers can be cast between the
/// two representations (as `init_io` does via `as_mut_ptr`).
#[repr(transparent)]
#[derive(qemu_api_macros::Wrapper)]
pub struct MemoryRegion(Opaque<bindings::MemoryRegion>);

// SAFETY: NOTE(review): presumably sound because all mutation goes through
// QEMU's C memory API, which serializes accesses (e.g. under the BQL) —
// confirm against the C side; the wrapper itself holds only an Opaque cell.
unsafe impl Send for MemoryRegion {}
unsafe impl Sync for MemoryRegion {}
142
impl MemoryRegion {
    /// Type-erased core of [`Self::init_io`]: registers `ops` and `owner`
    /// (also used as the callbacks' opaque pointer) with the C function
    /// `memory_region_init_io`.
    ///
    /// # Safety
    ///
    /// `slot` must point to a `MemoryRegion` that is valid to initialize,
    /// `owner` must be a valid QOM object pointer, and `ops` must contain
    /// callbacks whose erased owner type matches `owner` (guaranteed by the
    /// typed `init_io` wrapper below).
    //
    // inline to ensure that it is not included in tests, which only
    // link to hwcore and qom. FIXME: inlining is actually the opposite
    // of what we want, since this is the type-erased version of the
    // init_io function below. Look into splitting the qemu_api crate.
    #[inline(always)]
    unsafe fn do_init_io(
        slot: *mut bindings::MemoryRegion,
        owner: *mut Object,
        ops: &'static bindings::MemoryRegionOps,
        name: &'static str,
        size: u64,
    ) {
        // SAFETY: forwards the caller's contract to the C API. The CString
        // temporary outlives the call; NOTE(review): presumably
        // memory_region_init_io copies the name rather than retaining the
        // pointer — confirm against the C implementation.
        unsafe {
            // Panics if `name` contains an interior NUL byte.
            let cstr = CString::new(name).unwrap();
            memory_region_init_io(
                slot,
                owner.cast::<bindings::Object>(),
                ops,
                owner.cast::<c_void>(), // the owner doubles as the callbacks' opaque pointer
                cstr.as_ptr(),
                size,
            );
        }
    }

    /// Initialize `self` as an I/O memory region of `size` bytes named
    /// `name`, owned by `owner`, dispatching guest accesses to the
    /// callbacks stored in `ops`.
    ///
    /// The `T` parameter ties the callbacks' erased owner type to the
    /// actual `owner` pointer, which is what makes this wrapper safe to
    /// expose.
    pub fn init_io<T: IsA<Object>>(
        &mut self,
        owner: *mut T,
        ops: &'static MemoryRegionOps<T>,
        name: &'static str,
        size: u64,
    ) {
        // SAFETY: `self` is a live MemoryRegion (repr(transparent) over the
        // C struct); `ops` was built for owner type T, matching `owner`.
        // NOTE(review): `owner` is a raw pointer, so its validity is
        // ultimately the caller's responsibility — consider documenting or
        // encoding that requirement.
        unsafe {
            Self::do_init_io(
                self.0.as_mut_ptr(),
                owner.cast::<Object>(),
                &ops.0,
                name,
                size,
            );
        }
    }
}
187
// SAFETY: MemoryRegion is repr(transparent) over the C MemoryRegion struct,
// and TYPE_NAME matches the QOM type it wraps.
unsafe impl ObjectType for MemoryRegion {
    type Class = bindings::MemoryRegionClass;
    const TYPE_NAME: &'static CStr =
        // SAFETY: TYPE_MEMORY_REGION is a NUL-terminated byte string
        // generated from the C headers.
        unsafe { CStr::from_bytes_with_nul_unchecked(bindings::TYPE_MEMORY_REGION) };
}
qom_isa!(MemoryRegion: Object);
194
/// A special `MemTxAttrs` constant, used to indicate that no memory
/// attributes are specified.
///
/// Bus masters which don't specify any attributes will get this,
/// which has all attribute bits clear except the topmost one
/// (so that we can distinguish "all attributes deliberately clear"
/// from "didn't specify" if necessary).
pub const MEMTXATTRS_UNSPECIFIED: MemTxAttrs = MemTxAttrs {
    unspecified: true,
    // every other attribute bit stays zero
    ..Zeroable::ZERO
};
206