// Copyright 2024, Linaro Limited
// Author(s): Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
// SPDX-License-Identifier: GPL-2.0-or-later

use core::ptr::{addr_of, addr_of_mut, NonNull};
use std::{
    ffi::CStr,
    os::raw::{c_int, c_uchar, c_uint, c_void},
};

use qemu_api::{
    bindings::{self, *},
    definitions::ObjectImpl,
};

use crate::{
    memory_ops::PL011_OPS,
    registers::{self, Interrupt},
    RegisterOffset,
};

/// Integer Baud Rate Divider, `UARTIBRD`
const IBRD_MASK: u32 = 0xffff;

/// Fractional Baud Rate Divider, `UARTFBRD`
const FBRD_MASK: u32 = 0x3f;

const DATA_BREAK: u32 = 1 << 10;

/// QEMU sourced constant.
pub const PL011_FIFO_DEPTH: usize = 16_usize;

#[derive(Clone, Copy, Debug)]
enum DeviceId {
    #[allow(dead_code)]
    Arm = 0,
    Luminary,
}

impl std::ops::Index<hwaddr> for DeviceId {
    type Output = c_uchar;

    fn index(&self, idx: hwaddr) -> &Self::Output {
        match self {
            Self::Arm => &Self::PL011_ID_ARM[idx as usize],
            Self::Luminary => &Self::PL011_ID_LUMINARY[idx as usize],
        }
    }
}

impl DeviceId {
    const PL011_ID_ARM: [c_uchar; 8] = [0x11, 0x10, 0x14, 0x00, 0x0d, 0xf0, 0x05, 0xb1];
    const PL011_ID_LUMINARY: [c_uchar; 8] = [0x11, 0x00, 0x18, 0x01, 0x0d, 0xf0, 0x05, 0xb1];
}

#[repr(C)]
#[derive(Debug, qemu_api_macros::Object)]
/// PL011 Device Model in QEMU
pub struct PL011State {
    pub parent_obj: SysBusDevice,
    pub iomem: MemoryRegion,
    #[doc(alias = "fr")]
    pub flags: registers::Flags,
    #[doc(alias = "lcr")]
    pub line_control: registers::LineControl,
    #[doc(alias = "rsr")]
    pub receive_status_error_clear: registers::ReceiveStatusErrorClear,
    #[doc(alias = "cr")]
    pub control: registers::Control,
    pub dmacr: u32,
    pub int_enabled: u32,
    pub int_level: u32,
    pub read_fifo: [u32; PL011_FIFO_DEPTH],
    pub ilpr: u32,
    pub ibrd: u32,
    pub fbrd: u32,
    pub ifl: u32,
    pub read_pos: usize,
    pub read_count: usize,
    pub read_trigger: usize,
    #[doc(alias = "chr")]
    pub char_backend: CharBackend,
    /// QEMU interrupts
    ///
    /// ```text
    /// * sysbus MMIO region 0: device registers
    /// * sysbus IRQ 0: `UARTINTR` (combined interrupt line)
    /// * sysbus IRQ 1: `UARTRXINTR` (receive FIFO interrupt line)
    /// * sysbus IRQ 2: `UARTTXINTR` (transmit FIFO interrupt line)
    /// * sysbus IRQ 3: `UARTRTINTR` (receive timeout interrupt line)
    /// * sysbus IRQ 4: `UARTMSINTR` (modem status interrupt line)
    /// * sysbus IRQ 5: `UARTEINTR` (error interrupt line)
    /// ```
    #[doc(alias = "irq")]
    pub interrupts: [qemu_irq; 6usize],
    #[doc(alias = "clk")]
    pub clock: NonNull<Clock>,
    #[doc(alias = "migrate_clk")]
    pub migrate_clock: bool,
    /// The byte string that identifies the device.
    device_id: DeviceId,
}

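// Editorial sketch, not part of the original model: `read()` and `put_fifo()`
// below index `read_fifo` with `(pos + count) & (depth - 1)`, which is only
// correct while the FIFO depth stays a power of two (as the comment in
// `fifo_depth()` notes). This anonymous const makes that assumption explicit
// at compile time.
const _: () = assert!(PL011_FIFO_DEPTH.is_power_of_two());
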
impl ObjectImpl for PL011State {
    type Class = PL011Class;
    const TYPE_INFO: qemu_api::bindings::TypeInfo = qemu_api::type_info! { Self };
    const TYPE_NAME: &'static CStr = crate::TYPE_PL011;
    const PARENT_TYPE_NAME: Option<&'static CStr> = Some(TYPE_SYS_BUS_DEVICE);
    const ABSTRACT: bool = false;
    const INSTANCE_INIT: Option<unsafe extern "C" fn(obj: *mut Object)> = Some(pl011_init);
    const INSTANCE_POST_INIT: Option<unsafe extern "C" fn(obj: *mut Object)> = None;
    const INSTANCE_FINALIZE: Option<unsafe extern "C" fn(obj: *mut Object)> = None;
}

#[repr(C)]
pub struct PL011Class {
    _inner: [u8; 0],
}

impl qemu_api::definitions::Class for PL011Class {
    const CLASS_INIT: Option<unsafe extern "C" fn(klass: *mut ObjectClass, data: *mut c_void)> =
        Some(crate::device_class::pl011_class_init);
    const CLASS_BASE_INIT: Option<
        unsafe extern "C" fn(klass: *mut ObjectClass, data: *mut c_void),
    > = None;
}

impl PL011State {
    /// Initializes a pre-allocated, uninitialized instance of `PL011State`.
    ///
    /// # Safety
    ///
    /// `self` must point to a correctly sized and aligned location for the
    /// `PL011State` type. It must not be called more than once on the same
    /// location/instance. All its fields are expected to hold uninitialized
    /// values with the sole exception of `parent_obj`.
    unsafe fn init(&mut self) {
        const CLK_NAME: &CStr = c"clk";

        let dev = addr_of_mut!(*self).cast::<DeviceState>();
        // SAFETY:
        //
        // self and self.iomem are guaranteed to be valid at this point since callers
        // must make sure the `self` reference is valid.
        unsafe {
            memory_region_init_io(
                addr_of_mut!(self.iomem),
                addr_of_mut!(*self).cast::<Object>(),
                &PL011_OPS,
                addr_of_mut!(*self).cast::<c_void>(),
                Self::TYPE_INFO.name,
                0x1000,
            );
            let sbd = addr_of_mut!(*self).cast::<SysBusDevice>();
            sysbus_init_mmio(sbd, addr_of_mut!(self.iomem));
            for irq in self.interrupts.iter_mut() {
                sysbus_init_irq(sbd, irq);
            }
        }
        // SAFETY:
        //
        // self.clock is not initialized at this point; but since `NonNull<_>` is Copy,
        // we can overwrite the undefined value without side effects. This is
        // safe since all PL011State instances are created by QOM code which
        // calls this function to initialize the fields; therefore no code is
        // able to access an invalid self.clock value.
        unsafe {
            self.clock = NonNull::new(qdev_init_clock_in(
                dev,
                CLK_NAME.as_ptr(),
                None, /* pl011_clock_update */
                addr_of_mut!(*self).cast::<c_void>(),
                ClockEvent::ClockUpdate.0,
            ))
            .unwrap();
        }
    }

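    /// Handles a guest read from the register at `offset`.
    ///
    /// Returns `ControlFlow::Break(value)` when `value` can be returned to the
    /// guest as-is, and `ControlFlow::Continue(value)` when the caller
    /// (assumed here to be the MMIO glue in `memory_ops`) must additionally
    /// call `qemu_chr_fe_accept_input()` because a receive-FIFO slot was just
    /// freed.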
    pub fn read(&mut self, offset: hwaddr, _size: c_uint) -> std::ops::ControlFlow<u64, u64> {
        use RegisterOffset::*;

        std::ops::ControlFlow::Break(match RegisterOffset::try_from(offset) {
            Err(v) if (0x3f8..0x400).contains(&(v >> 2)) => {
                u64::from(self.device_id[(offset - 0xfe0) >> 2])
            }
            Err(_) => {
                // qemu_log_mask(LOG_GUEST_ERROR, "pl011_read: Bad offset 0x%x\n", (int)offset);
                0
            }
            Ok(DR) => {
                self.flags.set_receive_fifo_full(false);
                let c = self.read_fifo[self.read_pos];
                if self.read_count > 0 {
                    self.read_count -= 1;
                    self.read_pos = (self.read_pos + 1) & (self.fifo_depth() - 1);
                }
                if self.read_count == 0 {
                    self.flags.set_receive_fifo_empty(true);
                }
                if self.read_count + 1 == self.read_trigger {
                    self.int_level &= !registers::INT_RX;
                }
                // Update error bits: the error flags live in bits [11:8] of the FIFO word.
                self.receive_status_error_clear = c.to_be_bytes()[2].into();
                self.update();
                // Must call qemu_chr_fe_accept_input, so return Continue:
                return std::ops::ControlFlow::Continue(c.into());
            }
            Ok(RSR) => u8::from(self.receive_status_error_clear).into(),
            Ok(FR) => u16::from(self.flags).into(),
            Ok(FBRD) => self.fbrd.into(),
            Ok(ILPR) => self.ilpr.into(),
            Ok(IBRD) => self.ibrd.into(),
            Ok(LCR_H) => u16::from(self.line_control).into(),
            Ok(CR) => {
                // We exercise our self-control.
                u16::from(self.control).into()
            }
            Ok(FLS) => self.ifl.into(),
            Ok(IMSC) => self.int_enabled.into(),
            Ok(RIS) => self.int_level.into(),
            Ok(MIS) => u64::from(self.int_level & self.int_enabled),
            Ok(ICR) => {
                // "The UARTICR Register is the interrupt clear register and is write-only"
                // Source: ARM DDI 0183G 3.3.13 Interrupt Clear Register, UARTICR
                0
            }
            Ok(DMACR) => self.dmacr.into(),
        })
    }

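    /// Handles a guest write of `value` to the register at `offset`.
    ///
    /// Only the low 32 bits of `value` are used; writes to read-only
    /// registers (`FR`, `RIS`, `MIS`) and to unknown offsets are ignored.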
    pub fn write(&mut self, offset: hwaddr, value: u64) {
        // eprintln!("write offset {offset} value {value}");
        use RegisterOffset::*;
        let value: u32 = value as u32;
        match RegisterOffset::try_from(offset) {
            Err(_bad_offset) => {
                eprintln!("write bad offset {offset} value {value}");
            }
            Ok(DR) => {
                // ??? Check if transmitter is enabled.
                let ch: u8 = value as u8;
                // XXX this blocks entire thread. Rewrite to use
                // qemu_chr_fe_write and background I/O callbacks

                // SAFETY: self.char_backend is a valid CharBackend instance after it's been
                // initialized in realize().
                unsafe {
                    qemu_chr_fe_write_all(addr_of_mut!(self.char_backend), &ch, 1);
                }
                self.loopback_tx(value);
                self.int_level |= registers::INT_TX;
                self.update();
            }
            Ok(RSR) => {
                self.receive_status_error_clear = 0.into();
            }
            Ok(FR) => {
                // flag writes are ignored
            }
            Ok(ILPR) => {
                self.ilpr = value;
            }
            Ok(IBRD) => {
                self.ibrd = value;
            }
            Ok(FBRD) => {
                self.fbrd = value;
            }
            Ok(LCR_H) => {
                let value = value as u16;
                let new_val: registers::LineControl = value.into();
                // Reset the FIFO state on FIFO enable or disable
                if bool::from(self.line_control.fifos_enabled())
                    ^ bool::from(new_val.fifos_enabled())
                {
                    self.reset_fifo();
                }
                if self.line_control.send_break() ^ new_val.send_break() {
                    let mut break_enable: c_int = new_val.send_break().into();
                    // SAFETY: self.char_backend is a valid CharBackend instance after it's been
                    // initialized in realize().
                    unsafe {
                        qemu_chr_fe_ioctl(
                            addr_of_mut!(self.char_backend),
                            CHR_IOCTL_SERIAL_SET_BREAK as i32,
                            addr_of_mut!(break_enable).cast::<c_void>(),
                        );
                    }
                    self.loopback_break(break_enable > 0);
                }
                self.line_control = new_val;
                self.set_read_trigger();
            }
            Ok(CR) => {
                // ??? Need to implement the enable bit.
                let value = value as u16;
                self.control = value.into();
                self.loopback_mdmctrl();
            }
            Ok(FLS) => {
                self.ifl = value;
                self.set_read_trigger();
            }
            Ok(IMSC) => {
                self.int_enabled = value;
                self.update();
            }
            Ok(RIS) => {}
            Ok(MIS) => {}
            Ok(ICR) => {
                self.int_level &= !value;
                self.update();
            }
            Ok(DMACR) => {
                self.dmacr = value;
                if value & 3 > 0 {
                    // qemu_log_mask(LOG_UNIMP, "pl011: DMA not implemented\n");
                    eprintln!("pl011: DMA not implemented");
                }
            }
        }
    }

    #[inline]
    fn loopback_tx(&mut self, value: u32) {
        if !self.loopback_enabled() {
            return;
        }

        // Caveat:
        //
        // In real hardware, TX loopback happens at the serial-bit level;
        // the RX logic then reassembles the bits back into bytes and places
        // them into the RX FIFO. That is, loopback happens after the TX FIFO.
        //
        // Because the real hardware TX FIFO is drained over time at the
        // frame rate governed by the configured serial format, some loopback
        // bytes in the TX FIFO may still be able to get into the RX FIFO,
        // which could be full at times while being drained at software pace.
        //
        // In such a scenario, the RX draining pace is the major factor
        // deciding which loopback bytes get into the RX FIFO, unless
        // hardware flow-control is enabled.
        //
        // For simplicity, none of the above is emulated.
        self.put_fifo(value);
    }

    fn loopback_mdmctrl(&mut self) {
        if !self.loopback_enabled() {
            return;
        }

        /*
         * Loopback software-driven modem control outputs to modem status inputs:
         *   FR.RI  <= CR.Out2
         *   FR.DCD <= CR.Out1
         *   FR.CTS <= CR.RTS
         *   FR.DSR <= CR.DTR
         *
         * The loopback happens immediately even if this call is triggered
         * by setting only CR.LBE.
         *
         * CTS/RTS updates due to enabled hardware flow controls are not
         * dealt with here.
         */

        self.flags.set_ring_indicator(self.control.out_2());
        self.flags.set_data_carrier_detect(self.control.out_1());
        self.flags.set_clear_to_send(self.control.request_to_send());
        self.flags
            .set_data_set_ready(self.control.data_transmit_ready());

        // Change interrupts based on updated FR
        let mut il = self.int_level;

        il &= !Interrupt::MS;

        if self.flags.data_set_ready() {
            il |= Interrupt::DSR as u32;
        }
        if self.flags.data_carrier_detect() {
            il |= Interrupt::DCD as u32;
        }
        if self.flags.clear_to_send() {
            il |= Interrupt::CTS as u32;
        }
        if self.flags.ring_indicator() {
            il |= Interrupt::RI as u32;
        }
        self.int_level = il;
        self.update();
    }

    fn loopback_break(&mut self, enable: bool) {
        if enable {
            self.loopback_tx(DATA_BREAK);
        }
    }

    fn set_read_trigger(&mut self) {
        self.read_trigger = 1;
    }

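    /// Hooks the character-backend callbacks (`pl011_can_receive`,
    /// `pl011_receive` and `pl011_event`, defined near the end of this file)
    /// up to `char_backend`; assumed to be called from the device's QOM
    /// realize path (see `device_class`).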
    pub fn realize(&mut self) {
        // SAFETY: self.char_backend has the correct size and alignment for a
        // CharBackend object, and its callbacks are of the correct types.
        unsafe {
            qemu_chr_fe_set_handlers(
                addr_of_mut!(self.char_backend),
                Some(pl011_can_receive),
                Some(pl011_receive),
                Some(pl011_event),
                None,
                addr_of_mut!(*self).cast::<c_void>(),
                core::ptr::null_mut(),
                true,
            );
        }
    }

    pub fn reset(&mut self) {
        self.line_control.reset();
        self.receive_status_error_clear.reset();
        self.dmacr = 0;
        self.int_enabled = 0;
        self.int_level = 0;
        self.ilpr = 0;
        self.ibrd = 0;
        self.fbrd = 0;
        self.read_trigger = 1;
        self.ifl = 0x12;
        self.control.reset();
        self.flags = 0.into();
        self.reset_fifo();
    }

    pub fn reset_fifo(&mut self) {
        self.read_count = 0;
        self.read_pos = 0;

        /* Reset FIFO flags */
        self.flags.reset();
    }

    pub fn can_receive(&self) -> bool {
        // trace_pl011_can_receive(s->lcr, s->read_count, r);
        self.read_count < self.fifo_depth()
    }

    pub fn event(&mut self, event: QEMUChrEvent) {
        if event == bindings::QEMUChrEvent::CHR_EVENT_BREAK && !self.fifo_enabled() {
            self.put_fifo(DATA_BREAK);
            self.receive_status_error_clear.set_break_error(true);
        }
    }

    #[inline]
    pub fn fifo_enabled(&self) -> bool {
        matches!(self.line_control.fifos_enabled(), registers::Mode::FIFO)
    }

    #[inline]
    pub fn loopback_enabled(&self) -> bool {
        self.control.enable_loopback()
    }

    #[inline]
    pub fn fifo_depth(&self) -> usize {
        // Note: FIFO depth is expected to be power-of-2
        if self.fifo_enabled() {
            return PL011_FIFO_DEPTH;
        }
        1
    }

    pub fn put_fifo(&mut self, value: c_uint) {
        let depth = self.fifo_depth();
        assert!(depth > 0);
        let slot = (self.read_pos + self.read_count) & (depth - 1);
        self.read_fifo[slot] = value;
        self.read_count += 1;
        self.flags.set_receive_fifo_empty(false);
        if self.read_count == depth {
            self.flags.set_receive_fifo_full(true);
        }

        if self.read_count == self.read_trigger {
            self.int_level |= registers::INT_RX;
            self.update();
        }
    }

    pub fn update(&self) {
        let flags = self.int_level & self.int_enabled;
        for (irq, i) in self.interrupts.iter().zip(IRQMASK) {
            // SAFETY: self.interrupts have been initialized in init().
            unsafe { qemu_set_irq(*irq, i32::from(flags & i != 0)) };
        }
    }

    pub fn post_load(&mut self, _version_id: u32) -> Result<(), ()> {
        /* Sanity-check input state */
        if self.read_pos >= self.read_fifo.len() || self.read_count > self.read_fifo.len() {
            return Err(());
        }

        if !self.fifo_enabled() && self.read_count > 0 && self.read_pos > 0 {
            // Older versions of PL011 didn't ensure that the single
            // character in the FIFO in FIFO-disabled mode is in
            // element 0 of the array; convert to follow the current
            // code's assumptions.
            self.read_fifo[0] = self.read_fifo[self.read_pos];
            self.read_pos = 0;
        }

        self.ibrd &= IBRD_MASK;
        self.fbrd &= FBRD_MASK;

        Ok(())
    }
}

/// Which bits in the interrupt status matter for each outbound IRQ line?
pub const IRQMASK: [u32; 6] = [
    /* combined IRQ */
    Interrupt::E
        | Interrupt::MS
        | Interrupt::RT as u32
        | Interrupt::TX as u32
        | Interrupt::RX as u32,
    Interrupt::RX as u32,
    Interrupt::TX as u32,
    Interrupt::RT as u32,
    Interrupt::MS,
    Interrupt::E,
];

/// # Safety
///
/// We expect the FFI user of this function to pass a valid pointer that has
/// the same size as [`PL011State`]. We also expect the device to be
/// readable/writeable from one thread at any time.
pub unsafe extern "C" fn pl011_can_receive(opaque: *mut c_void) -> c_int {
    unsafe {
        debug_assert!(!opaque.is_null());
        let state = NonNull::new_unchecked(opaque.cast::<PL011State>());
        state.as_ref().can_receive().into()
    }
}

/// # Safety
///
/// We expect the FFI user of this function to pass a valid pointer that has
/// the same size as [`PL011State`]. We also expect the device to be
/// readable/writeable from one thread at any time.
///
/// The buffer and size arguments must also be valid.
pub unsafe extern "C" fn pl011_receive(opaque: *mut c_void, buf: *const u8, size: c_int) {
    unsafe {
        debug_assert!(!opaque.is_null());
        let mut state = NonNull::new_unchecked(opaque.cast::<PL011State>());
        if state.as_ref().loopback_enabled() {
            return;
        }
        if size > 0 {
            debug_assert!(!buf.is_null());
            state.as_mut().put_fifo(c_uint::from(buf.read_volatile()))
        }
    }
}

/// # Safety
///
/// We expect the FFI user of this function to pass a valid pointer that has
/// the same size as [`PL011State`]. We also expect the device to be
/// readable/writeable from one thread at any time.
pub unsafe extern "C" fn pl011_event(opaque: *mut c_void, event: QEMUChrEvent) {
    unsafe {
        debug_assert!(!opaque.is_null());
        let mut state = NonNull::new_unchecked(opaque.cast::<PL011State>());
        state.as_mut().event(event)
    }
}

/// # Safety
///
/// We expect the FFI user of this function to pass a valid pointer for `chr`.
#[no_mangle]
pub unsafe extern "C" fn pl011_create(
    addr: u64,
    irq: qemu_irq,
    chr: *mut Chardev,
) -> *mut DeviceState {
    unsafe {
        let dev: *mut DeviceState = qdev_new(PL011State::TYPE_INFO.name);
        let sysbus: *mut SysBusDevice = dev.cast::<SysBusDevice>();

        qdev_prop_set_chr(dev, c"chardev".as_ptr(), chr);
        sysbus_realize_and_unref(sysbus, addr_of!(error_fatal) as *mut *mut Error);
        sysbus_mmio_map(sysbus, 0, addr);
        sysbus_connect_irq(sysbus, 0, irq);
        dev
    }
}

/// # Safety
///
/// We expect the FFI user of this function to pass a valid pointer that has
/// the same size as [`PL011State`]. We also expect the device to be
/// readable/writeable from one thread at any time.
pub unsafe extern "C" fn pl011_init(obj: *mut Object) {
    unsafe {
        debug_assert!(!obj.is_null());
        let mut state = NonNull::new_unchecked(obj.cast::<PL011State>());
        state.as_mut().init();
    }
}

#[repr(C)]
#[derive(Debug, qemu_api_macros::Object)]
/// PL011 Luminary device model.
pub struct PL011Luminary {
    parent_obj: PL011State,
}

#[repr(C)]
pub struct PL011LuminaryClass {
    _inner: [u8; 0],
}

/// Initializes a pre-allocated, uninitialized instance of `PL011Luminary`.
///
/// # Safety
///
/// We expect the FFI user of this function to pass a valid pointer that has
/// the same size as [`PL011Luminary`]. We also expect the device to be
/// readable/writeable from one thread at any time.
pub unsafe extern "C" fn pl011_luminary_init(obj: *mut Object) {
    unsafe {
        debug_assert!(!obj.is_null());
        let mut state = NonNull::new_unchecked(obj.cast::<PL011Luminary>());
        let state = state.as_mut();
        state.parent_obj.device_id = DeviceId::Luminary;
    }
}

impl qemu_api::definitions::Class for PL011LuminaryClass {
    const CLASS_INIT: Option<unsafe extern "C" fn(klass: *mut ObjectClass, data: *mut c_void)> =
        None;
    const CLASS_BASE_INIT: Option<
        unsafe extern "C" fn(klass: *mut ObjectClass, data: *mut c_void),
    > = None;
}

impl ObjectImpl for PL011Luminary {
    type Class = PL011LuminaryClass;
    const TYPE_INFO: qemu_api::bindings::TypeInfo = qemu_api::type_info! { Self };
    const TYPE_NAME: &'static CStr = crate::TYPE_PL011_LUMINARY;
    const PARENT_TYPE_NAME: Option<&'static CStr> = Some(crate::TYPE_PL011);
    const ABSTRACT: bool = false;
    const INSTANCE_INIT: Option<unsafe extern "C" fn(obj: *mut Object)> = Some(pl011_luminary_init);
    const INSTANCE_POST_INIT: Option<unsafe extern "C" fn(obj: *mut Object)> = None;
    const INSTANCE_FINALIZE: Option<unsafe extern "C" fn(obj: *mut Object)> = None;
}
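
// Editorial, illustrative tests: these exercise only the pure, host-side
// arithmetic defined in this file (no QOM objects are constructed), so they
// are a sketch of the invariants the model relies on rather than part of the
// device model proper.
#[cfg(test)]
mod tests {
    use super::{DeviceId, IRQMASK, PL011_FIFO_DEPTH};

    /// The combined `UARTINTR` mask (`IRQMASK[0]`) covers every bit reported
    /// on the dedicated RX/TX/RT/MS/E lines (`IRQMASK[1..]`).
    #[test]
    fn combined_irq_mask_covers_dedicated_lines() {
        let union = IRQMASK[1..].iter().fold(0u32, |acc, m| acc | m);
        assert_eq!(IRQMASK[0], union);
    }

    /// The ARM identification bytes exposed at offsets 0xfe0..=0xffc match
    /// the PL011 TRM values (e.g. UARTPeriphID0 is 0x11, UARTPCellID3 is
    /// 0xb1).
    #[test]
    fn arm_device_id_bytes() {
        assert_eq!(DeviceId::Arm[0u64], 0x11);
        assert_eq!(DeviceId::Arm[7u64], 0xb1);
    }

    /// `put_fifo()` and `read()` rely on power-of-two masking for the ring
    /// buffer indices; spot-check the wrap-around arithmetic they use.
    #[test]
    fn fifo_index_wraps_around() {
        assert!(PL011_FIFO_DEPTH.is_power_of_two());
        let (read_pos, read_count) = (14usize, 3usize);
        // The next free slot wraps from 17 back to 1 when the depth is 16.
        assert_eq!((read_pos + read_count) & (PL011_FIFO_DEPTH - 1), 1);
    }
}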