/*
 * QEMU Parallel PORT emulation
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 * Copyright (c) 2007 Marko Kohtala
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/module.h"
#include "chardev/char-parallel.h"
#include "hw/acpi/acpi_aml_interface.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "migration/vmstate.h"
#include "hw/char/parallel-isa.h"
#include "hw/char/parallel.h"
#include "sysemu/reset.h"
#include "sysemu/sysemu.h"
#include "trace.h"
#include "qom/object.h"

//#define DEBUG_PARALLEL

#ifdef DEBUG_PARALLEL
#define pdebug(fmt, ...) printf("pp: " fmt, ## __VA_ARGS__)
#else
#define pdebug(fmt, ...) ((void)0)
#endif

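/*
 * Standard parallel port register layout, as offsets from the base I/O
 * address: data (+0), status (+1), control (+2) and, on EPP-capable ports,
 * the EPP address and data registers (+3 and +4).
 */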
#define PARA_REG_DATA 0
#define PARA_REG_STS 1
#define PARA_REG_CTR 2
#define PARA_REG_EPP_ADDR 3
#define PARA_REG_EPP_DATA 4

/*
 * These are the definitions for the Printer Status Register
 */
#define PARA_STS_BUSY   0x80    /* Busy complement */
#define PARA_STS_ACK    0x40    /* Acknowledge */
#define PARA_STS_PAPER  0x20    /* Out of paper */
#define PARA_STS_ONLINE 0x10    /* Online */
#define PARA_STS_ERROR  0x08    /* Error complement */
#define PARA_STS_TMOUT  0x01    /* EPP timeout */

/*
 * These are the definitions for the Printer Control Register
 */
#define PARA_CTR_DIR    0x20    /* Direction (1=read, 0=write) */
#define PARA_CTR_INTEN  0x10    /* IRQ Enable */
#define PARA_CTR_SELECT 0x08    /* Select In complement */
#define PARA_CTR_INIT   0x04    /* Initialize Printer complement */
#define PARA_CTR_AUTOLF 0x02    /* Auto linefeed complement */
#define PARA_CTR_STROBE 0x01    /* Strobe complement */

#define PARA_CTR_SIGNAL (PARA_CTR_SELECT|PARA_CTR_INIT|PARA_CTR_AUTOLF|PARA_CTR_STROBE)

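/* Drive the ISA IRQ line to match the device's pending-interrupt flag. */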
static void parallel_update_irq(ParallelState *s)
{
    if (s->irq_pending)
        qemu_irq_raise(s->irq);
    else
        qemu_irq_lower(s->irq);
}

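/*
 * "Software" register emulation, used when the chardev backend does not
 * support the host parallel-port ioctls: data and control are emulated in
 * the device state, and strobed bytes are sent out with
 * qemu_chr_fe_write_all().
 */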
static void
parallel_ioport_write_sw(void *opaque, uint32_t addr, uint32_t val)
{
    ParallelState *s = opaque;

    addr &= 7;
    trace_parallel_ioport_write("SW", addr, val);
    switch(addr) {
    case PARA_REG_DATA:
        s->dataw = val;
        parallel_update_irq(s);
        break;
    case PARA_REG_CTR:
        val |= 0xc0;
        if ((val & PARA_CTR_INIT) == 0 ) {
            s->status = PARA_STS_BUSY;
            s->status |= PARA_STS_ACK;
            s->status |= PARA_STS_ONLINE;
            s->status |= PARA_STS_ERROR;
        }
        else if (val & PARA_CTR_SELECT) {
            if (val & PARA_CTR_STROBE) {
                s->status &= ~PARA_STS_BUSY;
                if ((s->control & PARA_CTR_STROBE) == 0)
                    /* XXX this blocks entire thread. Rewrite to use
                     * qemu_chr_fe_write and background I/O callbacks */
                    qemu_chr_fe_write_all(&s->chr, &s->dataw, 1);
            } else {
                if (s->control & PARA_CTR_INTEN) {
                    s->irq_pending = 1;
                }
            }
        }
        parallel_update_irq(s);
        s->control = val;
        break;
    }
}

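/*
 * "Hardware" register emulation, used when the chardev backend is a host
 * parallel port: register accesses are passed through to the host port with
 * the CHR_IOCTL_PP_* ioctls, and redundant writes are skipped.
 */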
static void parallel_ioport_write_hw(void *opaque, uint32_t addr, uint32_t val)
{
    ParallelState *s = opaque;
    uint8_t parm = val;
    int dir;

    /* Sometimes programs do several writes for timing purposes on old
       HW. Take care not to waste time on writes that do nothing. */

    s->last_read_offset = ~0U;

    addr &= 7;
    trace_parallel_ioport_write("HW", addr, val);
    switch(addr) {
    case PARA_REG_DATA:
        if (s->dataw == val)
            return;
        pdebug("wd%02x\n", val);
        qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_WRITE_DATA, &parm);
        s->dataw = val;
        break;
    case PARA_REG_STS:
        pdebug("ws%02x\n", val);
        if (val & PARA_STS_TMOUT)
            s->epp_timeout = 0;
        break;
    case PARA_REG_CTR:
        val |= 0xc0;
        if (s->control == val)
            return;
        pdebug("wc%02x\n", val);

        if ((val & PARA_CTR_DIR) != (s->control & PARA_CTR_DIR)) {
            if (val & PARA_CTR_DIR) {
                dir = 1;
            } else {
                dir = 0;
            }
            qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_DATA_DIR, &dir);
            parm &= ~PARA_CTR_DIR;
        }

        qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_WRITE_CONTROL, &parm);
        s->control = val;
        break;
    case PARA_REG_EPP_ADDR:
        if ((s->control & (PARA_CTR_DIR|PARA_CTR_SIGNAL)) != PARA_CTR_INIT)
            /* Controls not correct for EPP address cycle, so do nothing */
            pdebug("wa%02x s\n", val);
        else {
            struct ParallelIOArg ioarg = { .buffer = &parm, .count = 1 };
            if (qemu_chr_fe_ioctl(&s->chr,
                                  CHR_IOCTL_PP_EPP_WRITE_ADDR, &ioarg)) {
                s->epp_timeout = 1;
                pdebug("wa%02x t\n", val);
            }
            else
                pdebug("wa%02x\n", val);
        }
        break;
    case PARA_REG_EPP_DATA:
        if ((s->control & (PARA_CTR_DIR|PARA_CTR_SIGNAL)) != PARA_CTR_INIT)
            /* Controls not correct for EPP data cycle, so do nothing */
            pdebug("we%02x s\n", val);
        else {
            struct ParallelIOArg ioarg = { .buffer = &parm, .count = 1 };
            if (qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_EPP_WRITE, &ioarg)) {
                s->epp_timeout = 1;
                pdebug("we%02x t\n", val);
            }
            else
                pdebug("we%02x\n", val);
        }
        break;
    }
}

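/* 16-bit EPP data write: forward the value to the host port as a
 * little-endian word. */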
static void
parallel_ioport_eppdata_write_hw2(void *opaque, uint32_t addr, uint32_t val)
{
    ParallelState *s = opaque;
    uint16_t eppdata = cpu_to_le16(val);
    int err;
    struct ParallelIOArg ioarg = {
        .buffer = &eppdata, .count = sizeof(eppdata)
    };

    trace_parallel_ioport_write("EPP", addr, val);
    if ((s->control & (PARA_CTR_DIR|PARA_CTR_SIGNAL)) != PARA_CTR_INIT) {
        /* Controls not correct for EPP data cycle, so do nothing */
        pdebug("we%04x s\n", val);
        return;
    }
    err = qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_EPP_WRITE, &ioarg);
    if (err) {
        s->epp_timeout = 1;
        pdebug("we%04x t\n", val);
    }
    else
        pdebug("we%04x\n", val);
}

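/* 32-bit EPP data write: same as above, but with a little-endian longword. */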
static void
parallel_ioport_eppdata_write_hw4(void *opaque, uint32_t addr, uint32_t val)
{
    ParallelState *s = opaque;
    uint32_t eppdata = cpu_to_le32(val);
    int err;
    struct ParallelIOArg ioarg = {
        .buffer = &eppdata, .count = sizeof(eppdata)
    };

    trace_parallel_ioport_write("EPP", addr, val);
    if ((s->control & (PARA_CTR_DIR|PARA_CTR_SIGNAL)) != PARA_CTR_INIT) {
        /* Controls not correct for EPP data cycle, so do nothing */
        pdebug("we%08x s\n", val);
        return;
    }
    err = qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_EPP_WRITE, &ioarg);
    if (err) {
        s->epp_timeout = 1;
        pdebug("we%08x t\n", val);
    }
    else
        pdebug("we%08x\n", val);
}

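/*
 * "Software" register reads: the status register fakes a minimal BUSY/ACK
 * handshake so that a polling guest printer driver sees its bytes consumed.
 */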
static uint32_t parallel_ioport_read_sw(void *opaque, uint32_t addr)
{
    ParallelState *s = opaque;
    uint32_t ret = 0xff;

    addr &= 7;
    switch(addr) {
    case PARA_REG_DATA:
        if (s->control & PARA_CTR_DIR)
            ret = s->datar;
        else
            ret = s->dataw;
        break;
    case PARA_REG_STS:
        ret = s->status;
        s->irq_pending = 0;
        if ((s->status & PARA_STS_BUSY) == 0 && (s->control & PARA_CTR_STROBE) == 0) {
            /* XXX Fixme: wait 5 microseconds */
            if (s->status & PARA_STS_ACK)
                s->status &= ~PARA_STS_ACK;
            else {
                /* XXX Fixme: wait 5 microseconds */
                s->status |= PARA_STS_ACK;
                s->status |= PARA_STS_BUSY;
            }
        }
        parallel_update_irq(s);
        break;
    case PARA_REG_CTR:
        ret = s->control;
        break;
    }
    trace_parallel_ioport_read("SW", addr, ret);
    return ret;
}

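/*
 * "Hardware" register reads: values come from the host port via the
 * CHR_IOCTL_PP_READ_* ioctls; last_read_offset only throttles debug output
 * for repeated reads of the same register.
 */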
static uint32_t parallel_ioport_read_hw(void *opaque, uint32_t addr)
{
    ParallelState *s = opaque;
    uint8_t ret = 0xff;
    addr &= 7;
    switch(addr) {
    case PARA_REG_DATA:
        qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_READ_DATA, &ret);
        if (s->last_read_offset != addr || s->datar != ret)
            pdebug("rd%02x\n", ret);
        s->datar = ret;
        break;
    case PARA_REG_STS:
        qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_READ_STATUS, &ret);
        ret &= ~PARA_STS_TMOUT;
        if (s->epp_timeout)
            ret |= PARA_STS_TMOUT;
        if (s->last_read_offset != addr || s->status != ret)
            pdebug("rs%02x\n", ret);
        s->status = ret;
        break;
    case PARA_REG_CTR:
        /* s->control has some bits fixed to 1. It is zero only when
           it has not yet been written to. */
        if (s->control == 0) {
            qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_READ_CONTROL, &ret);
            if (s->last_read_offset != addr)
                pdebug("rc%02x\n", ret);
            s->control = ret;
        }
        else {
            ret = s->control;
            if (s->last_read_offset != addr)
                pdebug("rc%02x\n", ret);
        }
        break;
    case PARA_REG_EPP_ADDR:
        if ((s->control & (PARA_CTR_DIR | PARA_CTR_SIGNAL)) !=
            (PARA_CTR_DIR | PARA_CTR_INIT))
            /* Controls not correct for EPP addr cycle, so do nothing */
            pdebug("ra%02x s\n", ret);
        else {
            struct ParallelIOArg ioarg = { .buffer = &ret, .count = 1 };
            if (qemu_chr_fe_ioctl(&s->chr,
                                  CHR_IOCTL_PP_EPP_READ_ADDR, &ioarg)) {
                s->epp_timeout = 1;
                pdebug("ra%02x t\n", ret);
            }
            else
                pdebug("ra%02x\n", ret);
        }
        break;
    case PARA_REG_EPP_DATA:
        if ((s->control & (PARA_CTR_DIR | PARA_CTR_SIGNAL)) !=
            (PARA_CTR_DIR | PARA_CTR_INIT))
            /* Controls not correct for EPP data cycle, so do nothing */
            pdebug("re%02x s\n", ret);
        else {
            struct ParallelIOArg ioarg = { .buffer = &ret, .count = 1 };
            if (qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_EPP_READ, &ioarg)) {
                s->epp_timeout = 1;
                pdebug("re%02x t\n", ret);
            }
            else
                pdebug("re%02x\n", ret);
        }
        break;
    }
    trace_parallel_ioport_read("HW", addr, ret);
    s->last_read_offset = addr;
    return ret;
}

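/* 16- and 32-bit EPP data reads, the read-side counterparts of the EPP
 * write helpers above. */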
static uint32_t
parallel_ioport_eppdata_read_hw2(void *opaque, uint32_t addr)
{
    ParallelState *s = opaque;
    uint32_t ret;
    uint16_t eppdata = ~0;
    int err;
    struct ParallelIOArg ioarg = {
        .buffer = &eppdata, .count = sizeof(eppdata)
    };
    if ((s->control & (PARA_CTR_DIR|PARA_CTR_SIGNAL)) != (PARA_CTR_DIR|PARA_CTR_INIT)) {
        /* Controls not correct for EPP data cycle, so do nothing */
        pdebug("re%04x s\n", eppdata);
        return eppdata;
    }
    err = qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_EPP_READ, &ioarg);
    ret = le16_to_cpu(eppdata);

    if (err) {
        s->epp_timeout = 1;
        pdebug("re%04x t\n", ret);
    }
    else
        pdebug("re%04x\n", ret);
    trace_parallel_ioport_read("EPP", addr, ret);
    return ret;
}

static uint32_t
parallel_ioport_eppdata_read_hw4(void *opaque, uint32_t addr)
{
    ParallelState *s = opaque;
    uint32_t ret;
    uint32_t eppdata = ~0U;
    int err;
    struct ParallelIOArg ioarg = {
        .buffer = &eppdata, .count = sizeof(eppdata)
    };
    if ((s->control & (PARA_CTR_DIR|PARA_CTR_SIGNAL)) != (PARA_CTR_DIR|PARA_CTR_INIT)) {
        /* Controls not correct for EPP data cycle, so do nothing */
        pdebug("re%08x s\n", eppdata);
        return eppdata;
    }
    err = qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_EPP_READ, &ioarg);
    ret = le32_to_cpu(eppdata);

    if (err) {
        s->epp_timeout = 1;
        pdebug("re%08x t\n", ret);
    }
    else
        pdebug("re%08x\n", ret);
    trace_parallel_ioport_read("EPP", addr, ret);
    return ret;
}

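/* The ECP register block (base + 0x400) is not implemented: writes are
 * ignored and reads return 0xff. */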
static void parallel_ioport_ecp_write(void *opaque, uint32_t addr, uint32_t val)
{
    trace_parallel_ioport_write("ECP", addr & 7, val);
    pdebug("wecp%d=%02x\n", addr & 7, val);
}

static uint32_t parallel_ioport_ecp_read(void *opaque, uint32_t addr)
{
    uint8_t ret = 0xff;

    trace_parallel_ioport_read("ECP", addr & 7, ret);
    pdebug("recp%d:%02x\n", addr & 7, ret);
    return ret;
}

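/* Restore the power-on register defaults; registered as a system reset
 * handler when the device is realized. */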
static void parallel_reset(void *opaque)
{
    ParallelState *s = opaque;

    s->datar = ~0;
    s->dataw = ~0;
    s->status = PARA_STS_BUSY;
    s->status |= PARA_STS_ACK;
    s->status |= PARA_STS_ONLINE;
    s->status |= PARA_STS_ERROR;
    s->status |= PARA_STS_TMOUT;
    s->control = PARA_CTR_SELECT;
    s->control |= PARA_CTR_INIT;
    s->control |= 0xc0;
    s->irq_pending = 0;
    s->hw_driver = 0;
    s->epp_timeout = 0;
    s->last_read_offset = ~0U;
}

static const int isa_parallel_io[MAX_PARALLEL_PORTS] = { 0x378, 0x278, 0x3bc };

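/*
 * I/O port maps: each entry is { offset, length, access size }. The "hw"
 * map adds the 16/32-bit EPP data accesses at base + 4 and the ECP stubs at
 * base + 0x400; the "sw" map covers only the basic byte-wide registers.
 */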
static const MemoryRegionPortio isa_parallel_portio_hw_list[] = {
    { 0, 8, 1,
      .read = parallel_ioport_read_hw,
      .write = parallel_ioport_write_hw },
    { 4, 1, 2,
      .read = parallel_ioport_eppdata_read_hw2,
      .write = parallel_ioport_eppdata_write_hw2 },
    { 4, 1, 4,
      .read = parallel_ioport_eppdata_read_hw4,
      .write = parallel_ioport_eppdata_write_hw4 },
    { 0x400, 8, 1,
      .read = parallel_ioport_ecp_read,
      .write = parallel_ioport_ecp_write },
    PORTIO_END_OF_LIST(),
};

static const MemoryRegionPortio isa_parallel_portio_sw_list[] = {
    { 0, 8, 1,
      .read = parallel_ioport_read_sw,
      .write = parallel_ioport_write_sw },
    PORTIO_END_OF_LIST(),
};


static const VMStateDescription vmstate_parallel_isa = {
    .name = "parallel_isa",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8(state.dataw, ISAParallelState),
        VMSTATE_UINT8(state.datar, ISAParallelState),
        VMSTATE_UINT8(state.status, ISAParallelState),
        VMSTATE_UINT8(state.control, ISAParallelState),
        VMSTATE_INT32(state.irq_pending, ISAParallelState),
        VMSTATE_INT32(state.epp_timeout, ISAParallelState),
        VMSTATE_END_OF_LIST()
    }
};

static int parallel_can_receive(void *opaque)
{
    return 1;
}

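/*
 * Realize the ISA device: pick default index/iobase, probe whether the
 * chardev backend supports the host parallel-port ioctls (hw_driver), and
 * register the matching I/O port list.
 */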
static void parallel_isa_realizefn(DeviceState *dev, Error **errp)
{
    static int index;
    ISADevice *isadev = ISA_DEVICE(dev);
    ISAParallelState *isa = ISA_PARALLEL(dev);
    ParallelState *s = &isa->state;
    int base;
    uint8_t dummy;

    if (!qemu_chr_fe_backend_connected(&s->chr)) {
        error_setg(errp, "Can't create parallel device, empty char device");
        return;
    }

    if (isa->index == -1) {
        isa->index = index;
    }
    if (isa->index >= MAX_PARALLEL_PORTS) {
        error_setg(errp, "Max. supported number of parallel ports is %d.",
                   MAX_PARALLEL_PORTS);
        return;
    }
    if (isa->iobase == -1) {
        isa->iobase = isa_parallel_io[isa->index];
    }
    index++;

    base = isa->iobase;
    s->irq = isa_get_irq(isadev, isa->isairq);
    qemu_register_reset(parallel_reset, s);

    qemu_chr_fe_set_handlers(&s->chr, parallel_can_receive, NULL,
                             NULL, NULL, s, NULL, true);
    if (qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_READ_STATUS, &dummy) == 0) {
        s->hw_driver = 1;
        s->status = dummy;
    }

    isa_register_portio_list(isadev, &isa->portio_list, base,
                             (s->hw_driver
                              ? &isa_parallel_portio_hw_list[0]
                              : &isa_parallel_portio_sw_list[0]),
                             s, "parallel");
}

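/* Generate the ACPI description of the port: an LPTx device with EISA id
 * PNP0400, the configured I/O range and the configured IRQ. */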
static void parallel_isa_build_aml(AcpiDevAmlIf *adev, Aml *scope)
{
    ISAParallelState *isa = ISA_PARALLEL(adev);
    Aml *dev;
    Aml *crs;

    crs = aml_resource_template();
    aml_append(crs, aml_io(AML_DECODE16, isa->iobase, isa->iobase, 0x08, 0x08));
    aml_append(crs, aml_irq_no_flags(isa->isairq));

    dev = aml_device("LPT%d", isa->index + 1);
    aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0400")));
    aml_append(dev, aml_name_decl("_UID", aml_int(isa->index + 1)));
    aml_append(dev, aml_name_decl("_STA", aml_int(0xf)));
    aml_append(dev, aml_name_decl("_CRS", crs));

    aml_append(scope, dev);
}

/* Memory mapped interface */
static uint64_t parallel_mm_readfn(void *opaque, hwaddr addr, unsigned size)
{
    ParallelState *s = opaque;

    return parallel_ioport_read_sw(s, addr >> s->it_shift) &
        MAKE_64BIT_MASK(0, size * 8);
}

static void parallel_mm_writefn(void *opaque, hwaddr addr,
                                uint64_t value, unsigned size)
{
    ParallelState *s = opaque;

    parallel_ioport_write_sw(s, addr >> s->it_shift,
                             value & MAKE_64BIT_MASK(0, size * 8));
}

static const MemoryRegionOps parallel_mm_ops = {
    .read = parallel_mm_readfn,
    .write = parallel_mm_writefn,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Memory-mapped variant used by non-ISA boards; the registers are spaced
 * 1 << it_shift bytes apart and use the "software" emulation path. */
bool parallel_mm_init(MemoryRegion *address_space,
                      hwaddr base, int it_shift, qemu_irq irq,
                      Chardev *chr)
{
    ParallelState *s;

    s = g_new0(ParallelState, 1);
    s->irq = irq;
    qemu_chr_fe_init(&s->chr, chr, &error_abort);
    s->it_shift = it_shift;
    qemu_register_reset(parallel_reset, s);

    memory_region_init_io(&s->iomem, NULL, &parallel_mm_ops, s,
                          "parallel", 8 << it_shift);
    memory_region_add_subregion(address_space, base, &s->iomem);
    return true;
}
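
/*
 * Usage sketch (illustrative only; the base address and IRQ wiring are made
 * up, only parallel_mm_init() itself is defined here): a memory-mapped board
 * model could instantiate the port with something like
 *
 *     parallel_mm_init(get_system_memory(), 0x1f000000, 0,
 *                      some_board_irq, parallel_hds[0]);
 *
 * mapping the eight byte-wide registers starting at guest-physical
 * address 0x1f000000.
 */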

static Property parallel_isa_properties[] = {
    DEFINE_PROP_UINT32("index", ISAParallelState, index, -1),
    DEFINE_PROP_UINT32("iobase", ISAParallelState, iobase, -1),
    DEFINE_PROP_UINT32("irq", ISAParallelState, isairq, 7),
    DEFINE_PROP_CHR("chardev", ISAParallelState, state.chr),
    DEFINE_PROP_END_OF_LIST(),
};
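
/*
 * Typical command-line use (illustrative; "pp0" is an arbitrary chardev id
 * and the file backend is just one possible choice):
 *
 *     -chardev file,id=pp0,path=printer.out \
 *     -device isa-parallel,chardev=pp0,iobase=0x378,irq=7
 */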

static void parallel_isa_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AcpiDevAmlIfClass *adevc = ACPI_DEV_AML_IF_CLASS(klass);

    dc->realize = parallel_isa_realizefn;
    dc->vmsd = &vmstate_parallel_isa;
    adevc->build_dev_aml = parallel_isa_build_aml;
    device_class_set_props(dc, parallel_isa_properties);
    set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
}

static const TypeInfo parallel_isa_info = {
    .name = TYPE_ISA_PARALLEL,
    .parent = TYPE_ISA_DEVICE,
    .instance_size = sizeof(ISAParallelState),
    .class_init = parallel_isa_class_initfn,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_ACPI_DEV_AML_IF },
        { },
    },
};

static void parallel_register_types(void)
{
    type_register_static(&parallel_isa_info);
}

type_init(parallel_register_types)