/*
 * QEMU IDE Emulation: PCI Bus support.
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2006 Openedhand Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/hw.h"
#include "hw/pci/pci.h"
#include "migration/vmstate.h"
#include "sysemu/dma.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/ide/pci.h"
#include "trace.h"

#define BMDMA_PAGE_SIZE 4096

#define BM_MIGRATION_COMPAT_STATUS_BITS \
        (IDE_RETRY_DMA | IDE_RETRY_PIO | \
        IDE_RETRY_READ | IDE_RETRY_FLUSH)

/*
 * Control block of a channel: only offset 2 is decoded, where a byte
 * read returns the alternate status register and a byte write hits the
 * device control register.  Everything else reads as all ones and is
 * ignored on write.
 */
static uint64_t pci_ide_cmd_read(void *opaque, hwaddr addr, unsigned size)
{
    IDEBus *bus = opaque;

    if (addr != 2 || size != 1) {
        return ((uint64_t)1 << (size * 8)) - 1;
    }
    return ide_status_read(bus, addr + 2);
}

static void pci_ide_cmd_write(void *opaque, hwaddr addr,
                              uint64_t data, unsigned size)
{
    IDEBus *bus = opaque;

    if (addr != 2 || size != 1) {
        return;
    }
    ide_cmd_write(bus, addr + 2, data);
}

const MemoryRegionOps pci_ide_cmd_le_ops = {
    .read = pci_ide_cmd_read,
    .write = pci_ide_cmd_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

/*
 * Command block of a channel: byte accesses reach the eight task-file
 * registers through ide_ioport_read/write; 16- and 32-bit accesses are
 * only valid at offset 0, the PIO data port.  Anything else reads as
 * all ones and is discarded on write.
 */
static uint64_t pci_ide_data_read(void *opaque, hwaddr addr, unsigned size)
{
    IDEBus *bus = opaque;

    if (size == 1) {
        return ide_ioport_read(bus, addr);
    } else if (addr == 0) {
        if (size == 2) {
            return ide_data_readw(bus, addr);
        } else {
            return ide_data_readl(bus, addr);
        }
    }
    return ((uint64_t)1 << (size * 8)) - 1;
}

static void pci_ide_data_write(void *opaque, hwaddr addr,
                               uint64_t data, unsigned size)
{
    IDEBus *bus = opaque;

    if (size == 1) {
        ide_ioport_write(bus, addr, data);
    } else if (addr == 0) {
        if (size == 2) {
            ide_data_writew(bus, addr, data);
        } else {
            ide_data_writel(bus, addr, data);
        }
    }
}

const MemoryRegionOps pci_ide_data_le_ops = {
    .read = pci_ide_data_read,
    .write = pci_ide_data_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
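
/*
 * Illustrative wiring (a sketch, not code from this file): a concrete
 * controller model maps the two accessor sets above into its I/O BARs
 * much as hw/ide/via.c does.  The field and region names below are
 * hypothetical; the sizes follow the PCI IDE layout of 8 bytes of
 * command block and 4 bytes of control block per channel i:
 *
 *     memory_region_init_io(&d->data_bar[i], OBJECT(d), &pci_ide_data_le_ops,
 *                           &d->bus[i], "ide-data", 8);
 *     pci_register_bar(PCI_DEVICE(d), i * 2, PCI_BASE_ADDRESS_SPACE_IO,
 *                      &d->data_bar[i]);
 *     memory_region_init_io(&d->cmd_bar[i], OBJECT(d), &pci_ide_cmd_le_ops,
 *                           &d->bus[i], "ide-cmd", 4);
 *     pci_register_bar(PCI_DEVICE(d), i * 2 + 1, PCI_BASE_ADDRESS_SPACE_IO,
 *                      &d->cmd_bar[i]);
 */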

/*
 * Store the completion callback for the transfer.  If the guest has
 * already set BM_CMD_START the engine is live and the callback is
 * kicked at once; otherwise bmdma_cmd_writeb() fires it when the start
 * bit is set.
 */
static void bmdma_start_dma(IDEDMA *dma, IDEState *s,
                            BlockCompletionFunc *dma_cb)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    bm->dma_cb = dma_cb;
    bm->cur_prd_last = 0;
    bm->cur_prd_addr = 0;
    bm->cur_prd_len = 0;

    if (bm->status & BM_STATUS_DMAING) {
        bm->dma_cb(bmdma_active_if(bm), 0);
    }
}

/**
 * Prepare an sglist based on available PRDs.
 * @limit: How many bytes to prepare total.
 *
 * Returns the number of bytes prepared, -1 on error.
 * IDEState.io_buffer_size will contain the number of bytes described
 * by the PRDs, whether or not we added them to the sglist.
 */
static int32_t bmdma_prepare_buf(IDEDMA *dma, int32_t limit)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
    IDEState *s = bmdma_active_if(bm);
    PCIDevice *pci_dev = PCI_DEVICE(bm->pci_dev);
    struct {
        uint32_t addr;
        uint32_t size;
    } prd;
    int l, len;

    pci_dma_sglist_init(&s->sg, pci_dev,
                        s->nsector / (BMDMA_PAGE_SIZE / 512) + 1);
    s->io_buffer_size = 0;
    for (;;) {
        if (bm->cur_prd_len == 0) {
            /* end of table (with a fail safe of one page) */
            if (bm->cur_prd_last ||
                (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE) {
                return s->sg.size;
            }
            pci_dma_read(pci_dev, bm->cur_addr, &prd, 8);
            bm->cur_addr += 8;
            prd.addr = le32_to_cpu(prd.addr);
            prd.size = le32_to_cpu(prd.size);
            len = prd.size & 0xfffe;
            if (len == 0) {
                len = 0x10000;
            }
            bm->cur_prd_len = len;
            bm->cur_prd_addr = prd.addr;
            bm->cur_prd_last = (prd.size & 0x80000000);
        }
        l = bm->cur_prd_len;
        if (l > 0) {
            uint64_t sg_len;

            /* Don't add extra bytes to the SGList; consume any remaining
             * PRDs from the guest, but ignore them. */
            sg_len = MIN(limit - s->sg.size, bm->cur_prd_len);
            if (sg_len) {
                qemu_sglist_add(&s->sg, bm->cur_prd_addr, sg_len);
            }

            bm->cur_prd_addr += l;
            bm->cur_prd_len -= l;
            s->io_buffer_size += l;
        }
    }

    /* Not reached: the loop above only terminates via return. */
    qemu_sglist_destroy(&s->sg);
    s->io_buffer_size = 0;
    return -1;
}
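
/*
 * For reference, each physical region descriptor (PRD) consumed above is
 * a pair of little-endian dwords in guest memory:
 *
 *     dword 0:            physical base address of the chunk
 *     dword 1, bits 15:0: byte count, bit 0 masked off; 0 means 64 KiB
 *     dword 1, bit 31:    end-of-table marker
 *
 * A minimal guest-built table might look like this (illustrative only;
 * buf0/buf1 stand for guest-physical buffer addresses):
 *
 *     uint32_t prd_table[4] = {
 *         cpu_to_le32(buf0), cpu_to_le32(4096),              // 4 KiB chunk
 *         cpu_to_le32(buf1), cpu_to_le32(512 | 0x80000000),  // last chunk
 *     };
 */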

/*
 * Copy between the PRD-described guest memory and s->io_buffer.
 * Returns 1 once the whole buffer has been transferred, 0 if the PRD
 * table ended first (the transfer is left incomplete).
 */
static int bmdma_rw_buf(IDEDMA *dma, int is_write)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
    IDEState *s = bmdma_active_if(bm);
    PCIDevice *pci_dev = PCI_DEVICE(bm->pci_dev);
    struct {
        uint32_t addr;
        uint32_t size;
    } prd;
    int l, len;

    for (;;) {
        l = s->io_buffer_size - s->io_buffer_index;
        if (l <= 0) {
            break;
        }
        if (bm->cur_prd_len == 0) {
            /* end of table (with a fail safe of one page) */
            if (bm->cur_prd_last ||
                (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE) {
                return 0;
            }
            pci_dma_read(pci_dev, bm->cur_addr, &prd, 8);
            bm->cur_addr += 8;
            prd.addr = le32_to_cpu(prd.addr);
            prd.size = le32_to_cpu(prd.size);
            len = prd.size & 0xfffe;
            if (len == 0) {
                len = 0x10000;
            }
            bm->cur_prd_len = len;
            bm->cur_prd_addr = prd.addr;
            bm->cur_prd_last = (prd.size & 0x80000000);
        }
        if (l > bm->cur_prd_len) {
            l = bm->cur_prd_len;
        }
        if (l > 0) {
            if (is_write) {
                pci_dma_write(pci_dev, bm->cur_prd_addr,
                              s->io_buffer + s->io_buffer_index, l);
            } else {
                pci_dma_read(pci_dev, bm->cur_prd_addr,
                             s->io_buffer + s->io_buffer_index, l);
            }
            bm->cur_prd_addr += l;
            bm->cur_prd_len -= l;
            s->io_buffer_index += l;
        }
    }
    return 1;
}

/*
 * Drop the completion callback; "more" keeps BM_STATUS_DMAING set when
 * a further chunk of the same request is still expected.
 */
static void bmdma_set_inactive(IDEDMA *dma, bool more)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    bm->dma_cb = NULL;
    if (more) {
        bm->status |= BM_STATUS_DMAING;
    } else {
        bm->status &= ~BM_STATUS_DMAING;
    }
}

/* Rewind to the start of the PRD table for a restarted request. */
static void bmdma_restart_dma(IDEDMA *dma)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    bm->cur_addr = bm->addr;
}

static void bmdma_cancel(BMDMAState *bm)
{
    if (bm->status & BM_STATUS_DMAING) {
        /* cancel DMA request */
        bmdma_set_inactive(&bm->dma, false);
    }
}

static void bmdma_reset(IDEDMA *dma)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    trace_bmdma_reset();
    bmdma_cancel(bm);
    bm->cmd = 0;
    bm->status = 0;
    bm->addr = 0;
    bm->cur_addr = 0;
    bm->cur_prd_last = 0;
    bm->cur_prd_addr = 0;
    bm->cur_prd_len = 0;
}

/* Interposed IRQ handler: latch BM_STATUS_INT on a rising edge, then
 * forward the level to the original bus IRQ (see bmdma_init). */
static void bmdma_irq(void *opaque, int n, int level)
{
    BMDMAState *bm = opaque;

    if (!level) {
        /* pass a falling edge through without latching the status bit */
        qemu_set_irq(bm->irq, level);
        return;
    }

    bm->status |= BM_STATUS_INT;

    /* trigger the real irq */
    qemu_set_irq(bm->irq, level);
}

void bmdma_cmd_writeb(BMDMAState *bm, uint32_t val)
{
    trace_bmdma_cmd_writeb(val);

    /* Ignore writes to SSBM (Start/Stop Bus Master) if the bit is unchanged */
    if ((val & BM_CMD_START) != (bm->cmd & BM_CMD_START)) {
        if (!(val & BM_CMD_START)) {
            ide_cancel_dma_sync(idebus_active_if(bm->bus));
            bm->status &= ~BM_STATUS_DMAING;
        } else {
            bm->cur_addr = bm->addr;
            if (!(bm->status & BM_STATUS_DMAING)) {
                bm->status |= BM_STATUS_DMAING;
                /* start dma transfer if possible */
                if (bm->dma_cb) {
                    bm->dma_cb(bmdma_active_if(bm), 0);
                }
            }
        }
    }

    /* Only the start (bit 0) and read/write control (bit 3) bits stick */
    bm->cmd = val & 0x09;
}
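
/*
 * For context, a guest driver drives the engine with the usual bus-master
 * IDE sequence (illustrative pseudocode, outX(port, value) order; offsets
 * are relative to the channel's BMDMA register block):
 *
 *     outl(bmdma_base + 4, prd_table_phys);       // PRD pointer (see below)
 *     outb(bmdma_base + 2, 0x06);                 // clear ERROR/INT (R/WC bits)
 *     outb(bmdma_base + 0, 0x08);                 // bit 3: device-to-memory
 *     // ... program the ATA taskfile and issue e.g. READ DMA ...
 *     outb(bmdma_base + 0, 0x08 | BM_CMD_START);  // lands in the function above
 */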

static uint64_t bmdma_addr_read(void *opaque, hwaddr addr,
                                unsigned width)
{
    BMDMAState *bm = opaque;
    uint32_t mask = (1ULL << (width * 8)) - 1;
    uint64_t data;

    data = (bm->addr >> (addr * 8)) & mask;
    trace_bmdma_addr_read(data);
    return data;
}

static void bmdma_addr_write(void *opaque, hwaddr addr,
                             uint64_t data, unsigned width)
{
    BMDMAState *bm = opaque;
    int shift = addr * 8;
    uint32_t mask = (1ULL << (width * 8)) - 1;

    trace_bmdma_addr_write(data);
    /* Replace only the byte lanes covered by this access, ... */
    bm->addr &= ~(mask << shift);
    /* ... keeping the PRD table pointer dword-aligned. */
    bm->addr |= ((data & mask) << shift) & ~3;
}

MemoryRegionOps bmdma_addr_ioport_ops = {
    .read = bmdma_addr_read,
    .write = bmdma_addr_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
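
/*
 * Worked example of the lane arithmetic above: a guest writing the PRD
 * pointer 0x12341001 as two 16-bit accesses sees
 *
 *     offset 0, data 0x1001: shift = 0,  bm->addr = 0x00001000
 *     offset 2, data 0x1234: shift = 16, bm->addr = 0x12341000
 *
 * The "& ~3" only bites in the lowest lane and silently drops the two
 * address bits that would misalign the table.
 */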

static bool ide_bmdma_current_needed(void *opaque)
{
    BMDMAState *bm = opaque;

    return (bm->cur_prd_len != 0);
}

static bool ide_bmdma_status_needed(void *opaque)
{
    BMDMAState *bm = opaque;

    /* Older versions abused some bits in the status register for internal
     * error state. If any of these bits are set, we must add a subsection to
     * transfer the real status register. */
    uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS;

    return ((bm->status & abused_bits) != 0);
}

static int ide_bmdma_pre_save(void *opaque)
{
    BMDMAState *bm = opaque;
    uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS;

    if (!(bm->status & BM_STATUS_DMAING) && bm->dma_cb) {
        bm->bus->error_status =
            ide_dma_cmd_to_retry(bmdma_active_if(bm)->dma_cmd);
    }
    bm->migration_retry_unit = bm->bus->retry_unit;
    bm->migration_retry_sector_num = bm->bus->retry_sector_num;
    bm->migration_retry_nsector = bm->bus->retry_nsector;
    bm->migration_compat_status =
        (bm->status & ~abused_bits) | (bm->bus->error_status & abused_bits);

    return 0;
}

/* This function accesses bm->bus->error_status which is loaded only after
 * BMDMA itself. This is why the function is called from ide_pci_post_load
 * instead of being registered with VMState where it would run too early. */
static int ide_bmdma_post_load(void *opaque, int version_id)
{
    BMDMAState *bm = opaque;
    uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS;

    if (bm->status == 0) {
        bm->status = bm->migration_compat_status & ~abused_bits;
        bm->bus->error_status |= bm->migration_compat_status & abused_bits;
    }
    if (bm->bus->error_status) {
        bm->bus->retry_sector_num = bm->migration_retry_sector_num;
        bm->bus->retry_nsector = bm->migration_retry_nsector;
        bm->bus->retry_unit = bm->migration_retry_unit;
    }

    return 0;
}
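
/*
 * Illustrative round trip, assuming the IDE_RETRY_* encoding from
 * hw/ide/internal.h (e.g. IDE_RETRY_DMA == 0x08): with a DMA retry
 * pending, status == BM_STATUS_DMAING (0x01) and error_status == 0x08,
 * so pre_save packs migration_compat_status = 0x09.  An old destination
 * loads 0x09 straight into its status register and retries from there;
 * a new destination reaches ide_bmdma_post_load with status still 0 and
 * unpacks 0x09 back into status = 0x01 and error_status |= 0x08.
 */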

static const VMStateDescription vmstate_bmdma_current = {
    .name = "ide bmdma_current",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = ide_bmdma_current_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(cur_addr, BMDMAState),
        VMSTATE_UINT32(cur_prd_last, BMDMAState),
        VMSTATE_UINT32(cur_prd_addr, BMDMAState),
        VMSTATE_UINT32(cur_prd_len, BMDMAState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_bmdma_status = {
    .name = "ide bmdma/status",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = ide_bmdma_status_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(status, BMDMAState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_bmdma = {
    .name = "ide bmdma",
    .version_id = 3,
    .minimum_version_id = 0,
    .pre_save = ide_bmdma_pre_save,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(cmd, BMDMAState),
        VMSTATE_UINT8(migration_compat_status, BMDMAState),
        VMSTATE_UINT32(addr, BMDMAState),
        VMSTATE_INT64(migration_retry_sector_num, BMDMAState),
        VMSTATE_UINT32(migration_retry_nsector, BMDMAState),
        VMSTATE_UINT8(migration_retry_unit, BMDMAState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_bmdma_current,
        &vmstate_bmdma_status,
        NULL
    }
};

static int ide_pci_post_load(void *opaque, int version_id)
{
    PCIIDEState *d = opaque;
    int i;

    for (i = 0; i < 2; i++) {
        /* current versions always store 0/1, but older versions stored
           bigger values; we only need the last bit */
        d->bmdma[i].migration_retry_unit &= 1;
        ide_bmdma_post_load(&d->bmdma[i], -1);
    }

    return 0;
}

const VMStateDescription vmstate_ide_pci = {
    .name = "ide",
    .version_id = 3,
    .minimum_version_id = 0,
    .post_load = ide_pci_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, PCIIDEState),
        VMSTATE_STRUCT_ARRAY(bmdma, PCIIDEState, 2, 0,
                             vmstate_bmdma, BMDMAState),
        VMSTATE_IDE_BUS_ARRAY(bus, PCIIDEState, 2),
        VMSTATE_IDE_DRIVES(bus[0].ifs, PCIIDEState),
        VMSTATE_IDE_DRIVES(bus[1].ifs, PCIIDEState),
        VMSTATE_END_OF_LIST()
    }
};

/* Attach up to four drives: table entries 0/1 go to bus 0 as master and
 * slave, entries 2/3 to bus 1. NULL entries are skipped. */
void pci_ide_create_devs(PCIDevice *dev, DriveInfo **hd_table)
{
    PCIIDEState *d = PCI_IDE(dev);
    static const int bus[4]  = { 0, 0, 1, 1 };
    static const int unit[4] = { 0, 1, 0, 1 };
    int i;

    for (i = 0; i < 4; i++) {
        if (hd_table[i] == NULL) {
            continue;
        }
        ide_create_drive(d->bus + bus[i], unit[i], hd_table[i]);
    }
}
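
/*
 * Typical board-side usage, a sketch modelled on pc_piix.c (the device
 * name and devfn depend on the machine):
 *
 *     DriveInfo *hd[4];
 *     PCIDevice *dev;
 *
 *     ide_drive_get(hd, ARRAY_SIZE(hd));
 *     dev = pci_create_simple(pci_bus, devfn, "piix3-ide");
 *     pci_ide_create_devs(dev, hd);
 */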

static const struct IDEDMAOps bmdma_ops = {
    .start_dma = bmdma_start_dma,
    .prepare_buf = bmdma_prepare_buf,
    .rw_buf = bmdma_rw_buf,
    .restart_dma = bmdma_restart_dma,
    .set_inactive = bmdma_set_inactive,
    .reset = bmdma_reset,
};

/* Hook the BMDMA engine up to @bus: install the DMA ops and interpose
 * bmdma_irq() on the bus IRQ so BM_STATUS_INT gets latched. Idempotent. */
void bmdma_init(IDEBus *bus, BMDMAState *bm, PCIIDEState *d)
{
    if (bus->dma == &bm->dma) {
        return;
    }

    bm->dma.ops = &bmdma_ops;
    bus->dma = &bm->dma;
    bm->irq = bus->irq;
    bus->irq = qemu_allocate_irq(bmdma_irq, bm, 0);
    bm->pci_dev = d;
}
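
/*
 * A controller model typically calls bmdma_init() once per channel after
 * creating its IDE buses, in the style of hw/ide/piix.c (sketch; IRQ
 * wiring is controller-specific):
 *
 *     for (i = 0; i < 2; i++) {
 *         ide_bus_new(&d->bus[i], sizeof(d->bus[i]), DEVICE(d), i, 2);
 *         ide_init2(&d->bus[i], irq[i]);
 *         bmdma_init(&d->bus[i], &d->bmdma[i], d);
 *         d->bmdma[i].bus = &d->bus[i];
 *         ide_register_restart_cb(&d->bus[i]);
 *     }
 */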

static const TypeInfo pci_ide_type_info = {
    .name = TYPE_PCI_IDE,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(PCIIDEState),
    .abstract = true,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { },
    },
};

static void pci_ide_register_types(void)
{
    type_register_static(&pci_ide_type_info);
}

type_init(pci_ide_register_types)