/*
 * QEMU IDE Emulation: PCI Bus support.
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2006 Openedhand Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <hw/hw.h>
#include <hw/i386/pc.h>
#include <hw/pci/pci.h>
#include <hw/isa/isa.h>
#include "sysemu/block-backend.h"
#include "sysemu/dma.h"
#include "qemu/error-report.h"
#include <hw/ide/pci.h>

#define BMDMA_PAGE_SIZE 4096

#define BM_MIGRATION_COMPAT_STATUS_BITS \
        (IDE_RETRY_DMA | IDE_RETRY_PIO | \
         IDE_RETRY_READ | IDE_RETRY_FLUSH)

static void bmdma_start_dma(IDEDMA *dma, IDEState *s,
                            BlockCompletionFunc *dma_cb)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    bm->dma_cb = dma_cb;
    bm->cur_prd_last = 0;
    bm->cur_prd_addr = 0;
    bm->cur_prd_len = 0;

    if (bm->status & BM_STATUS_DMAING) {
        bm->dma_cb(bmdma_active_if(bm), 0);
    }
}

/**
 * Prepare an sglist based on available PRDs.
 * @limit: How many bytes to prepare total.
 *
 * Returns the number of bytes prepared, -1 on error.
 * IDEState.io_buffer_size will contain the number of bytes described
 * by the PRDs, whether or not we added them to the sglist.
 */
static int32_t bmdma_prepare_buf(IDEDMA *dma, int32_t limit)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
    IDEState *s = bmdma_active_if(bm);
    PCIDevice *pci_dev = PCI_DEVICE(bm->pci_dev);
    struct {
        uint32_t addr;
        uint32_t size;
    } prd;
    int l, len;

    pci_dma_sglist_init(&s->sg, pci_dev,
                        s->nsector / (BMDMA_PAGE_SIZE / 512) + 1);
    s->io_buffer_size = 0;
    for(;;) {
        if (bm->cur_prd_len == 0) {
            /* end of table (with a fail safe of one page) */
            if (bm->cur_prd_last ||
                (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE) {
                return s->sg.size;
            }
            pci_dma_read(pci_dev, bm->cur_addr, &prd, 8);
            bm->cur_addr += 8;
            prd.addr = le32_to_cpu(prd.addr);
            prd.size = le32_to_cpu(prd.size);
            len = prd.size & 0xfffe;
            if (len == 0)
                len = 0x10000;
            bm->cur_prd_len = len;
            bm->cur_prd_addr = prd.addr;
            bm->cur_prd_last = (prd.size & 0x80000000);
        }
        l = bm->cur_prd_len;
        if (l > 0) {
            uint64_t sg_len;

            /* Don't add extra bytes to the SGList; consume any remaining
             * PRDs from the guest, but ignore them. */
            sg_len = MIN(limit - s->sg.size, bm->cur_prd_len);
            if (sg_len) {
                qemu_sglist_add(&s->sg, bm->cur_prd_addr, sg_len);
            }

            bm->cur_prd_addr += l;
            bm->cur_prd_len -= l;
            s->io_buffer_size += l;
        }
    }

    qemu_sglist_destroy(&s->sg);
    s->io_buffer_size = 0;
    return -1;
}
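
/*
 * Both bmdma_prepare_buf() above and bmdma_rw_buf() below walk the
 * guest's Physical Region Descriptor table, whose base address the
 * guest programs through the register emulated by bmdma_addr_write()
 * further down.  Each PRD entry is 8 bytes, little endian:
 *
 *   dword 0:              physical base address of the buffer
 *   dword 1, bits 0..15:  byte count (bit 0 ignored; 0 means 64 KiB)
 *   dword 1, bit 31:      end-of-table flag
 *
 * An illustrative (hypothetical) table a guest might build for a
 * 4.5 KiB transfer split across two buffers:
 *
 *     { .addr = 0x00100000, .size = 0x00001000 }   4 KiB chunk
 *     { .addr = 0x00200000, .size = 0x80000200 }   512 bytes, last entry
 */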

/* return 0 if buffer completed */
static int bmdma_rw_buf(IDEDMA *dma, int is_write)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
    IDEState *s = bmdma_active_if(bm);
    PCIDevice *pci_dev = PCI_DEVICE(bm->pci_dev);
    struct {
        uint32_t addr;
        uint32_t size;
    } prd;
    int l, len;

    for(;;) {
        l = s->io_buffer_size - s->io_buffer_index;
        if (l <= 0)
            break;
        if (bm->cur_prd_len == 0) {
            /* end of table (with a fail safe of one page) */
            if (bm->cur_prd_last ||
                (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE)
                return 0;
            pci_dma_read(pci_dev, bm->cur_addr, &prd, 8);
            bm->cur_addr += 8;
            prd.addr = le32_to_cpu(prd.addr);
            prd.size = le32_to_cpu(prd.size);
            len = prd.size & 0xfffe;
            if (len == 0)
                len = 0x10000;
            bm->cur_prd_len = len;
            bm->cur_prd_addr = prd.addr;
            bm->cur_prd_last = (prd.size & 0x80000000);
        }
        if (l > bm->cur_prd_len)
            l = bm->cur_prd_len;
        if (l > 0) {
            if (is_write) {
                pci_dma_write(pci_dev, bm->cur_prd_addr,
                              s->io_buffer + s->io_buffer_index, l);
            } else {
                pci_dma_read(pci_dev, bm->cur_prd_addr,
                             s->io_buffer + s->io_buffer_index, l);
            }
            bm->cur_prd_addr += l;
            bm->cur_prd_len -= l;
            s->io_buffer_index += l;
        }
    }
    return 1;
}

static void bmdma_set_inactive(IDEDMA *dma, bool more)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    bm->dma_cb = NULL;
    if (more) {
        bm->status |= BM_STATUS_DMAING;
    } else {
        bm->status &= ~BM_STATUS_DMAING;
    }
}

static void bmdma_restart_dma(IDEDMA *dma)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    bm->cur_addr = bm->addr;
}

static void bmdma_cancel(BMDMAState *bm)
{
    if (bm->status & BM_STATUS_DMAING) {
        /* cancel DMA request */
        bmdma_set_inactive(&bm->dma, false);
    }
}

static void bmdma_reset(IDEDMA *dma)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

#ifdef DEBUG_IDE
    printf("ide: dma_reset\n");
#endif
    bmdma_cancel(bm);
    bm->cmd = 0;
    bm->status = 0;
    bm->addr = 0;
    bm->cur_addr = 0;
    bm->cur_prd_last = 0;
    bm->cur_prd_addr = 0;
    bm->cur_prd_len = 0;
}

static void bmdma_irq(void *opaque, int n, int level)
{
    BMDMAState *bm = opaque;

    if (!level) {
        /* pass through lower */
        qemu_set_irq(bm->irq, level);
        return;
    }

    bm->status |= BM_STATUS_INT;

    /* trigger the real irq */
    qemu_set_irq(bm->irq, level);
}
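
/*
 * Guest writes to the Bus Master IDE Command register land in
 * bmdma_cmd_writeb() below.  Per the SFF-8038i programming interface,
 * only two bits of that register are writable, which is why the stored
 * value is masked with 0x09:
 *
 *   bit 0 (BM_CMD_START): start/stop the bus master engine ("SSBM")
 *   bit 3 (BM_CMD_READ):  transfer direction; when set, the engine
 *                         writes guest memory (an IDE read command)
 *
 * The Status register mirrors this with bit 0 = engine active
 * (BM_STATUS_DMAING) and bit 2 = interrupt pending (BM_STATUS_INT),
 * both managed elsewhere in this file.
 */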

void bmdma_cmd_writeb(BMDMAState *bm, uint32_t val)
{
#ifdef DEBUG_IDE
    printf("%s: 0x%08x\n", __func__, val);
#endif

    /* Ignore writes to SSBM if it keeps the old value */
    if ((val & BM_CMD_START) != (bm->cmd & BM_CMD_START)) {
        if (!(val & BM_CMD_START)) {
            /*
             * We can't cancel Scatter Gather DMA in the middle of the
             * operation or a partial (not full) DMA transfer would reach
             * the storage, so we wait for completion instead (we behave
             * as if the DMA had completed by the time the guest tried
             * to cancel it via bmdma_cmd_writeb with BM_CMD_START not
             * set).
             *
             * In the future we'll be able to safely cancel the I/O if
             * the whole DMA operation is submitted to disk with a
             * single aio operation using preadv/pwritev.
             */
            if (bm->bus->dma->aiocb) {
                blk_drain_all();
                assert(bm->bus->dma->aiocb == NULL);
            }
            bm->status &= ~BM_STATUS_DMAING;
        } else {
            bm->cur_addr = bm->addr;
            if (!(bm->status & BM_STATUS_DMAING)) {
                bm->status |= BM_STATUS_DMAING;
                /* start dma transfer if possible */
                if (bm->dma_cb)
                    bm->dma_cb(bmdma_active_if(bm), 0);
            }
        }
    }

    bm->cmd = val & 0x09;
}

static uint64_t bmdma_addr_read(void *opaque, hwaddr addr,
                                unsigned width)
{
    BMDMAState *bm = opaque;
    uint32_t mask = (1ULL << (width * 8)) - 1;
    uint64_t data;

    data = (bm->addr >> (addr * 8)) & mask;
#ifdef DEBUG_IDE
    printf("%s: 0x%08x\n", __func__, (unsigned)data);
#endif
    return data;
}

static void bmdma_addr_write(void *opaque, hwaddr addr,
                             uint64_t data, unsigned width)
{
    BMDMAState *bm = opaque;
    int shift = addr * 8;
    uint32_t mask = (1ULL << (width * 8)) - 1;

#ifdef DEBUG_IDE
    printf("%s: 0x%08x\n", __func__, (unsigned)data);
#endif
    bm->addr &= ~(mask << shift);
    bm->addr |= ((data & mask) << shift) & ~3;
}

MemoryRegionOps bmdma_addr_ioport_ops = {
    .read = bmdma_addr_read,
    .write = bmdma_addr_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
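
/*
 * Example of how the partial-width accessors above compose (values are
 * illustrative): starting from bm->addr == 0, a guest that programs the
 * 32-bit PRD table address 0x1234abcd with two 16-bit writes does
 *
 *     bmdma_addr_write(bm, 0, 0xabcd, 2);   addr -> 0x0000abcc
 *     bmdma_addr_write(bm, 2, 0x1234, 2);   addr -> 0x1234abcc
 *
 * The trailing "& ~3" in bmdma_addr_write() keeps the low two bits
 * clear, since the PRD table must be dword-aligned.
 */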

static bool ide_bmdma_current_needed(void *opaque)
{
    BMDMAState *bm = opaque;

    return (bm->cur_prd_len != 0);
}

static bool ide_bmdma_status_needed(void *opaque)
{
    BMDMAState *bm = opaque;

    /* Older versions abused some bits in the status register for internal
     * error state. If any of these bits are set, we must add a subsection to
     * transfer the real status register */
    uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS;

    return ((bm->status & abused_bits) != 0);
}

static void ide_bmdma_pre_save(void *opaque)
{
    BMDMAState *bm = opaque;
    uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS;

    bm->migration_retry_unit = bm->bus->retry_unit;
    bm->migration_retry_sector_num = bm->bus->retry_sector_num;
    bm->migration_retry_nsector = bm->bus->retry_nsector;
    bm->migration_compat_status =
        (bm->status & ~abused_bits) | (bm->bus->error_status & abused_bits);
}

/* This function accesses bm->bus->error_status which is loaded only after
 * BMDMA itself. This is why the function is called from ide_pci_post_load
 * instead of being registered with VMState where it would run too early. */
static int ide_bmdma_post_load(void *opaque, int version_id)
{
    BMDMAState *bm = opaque;
    uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS;

    if (bm->status == 0) {
        bm->status = bm->migration_compat_status & ~abused_bits;
        bm->bus->error_status |= bm->migration_compat_status & abused_bits;
    }
    if (bm->bus->error_status) {
        bm->bus->retry_sector_num = bm->migration_retry_sector_num;
        bm->bus->retry_nsector = bm->migration_retry_nsector;
        bm->bus->retry_unit = bm->migration_retry_unit;
    }

    return 0;
}

static const VMStateDescription vmstate_bmdma_current = {
    .name = "ide bmdma_current",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = ide_bmdma_current_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(cur_addr, BMDMAState),
        VMSTATE_UINT32(cur_prd_last, BMDMAState),
        VMSTATE_UINT32(cur_prd_addr, BMDMAState),
        VMSTATE_UINT32(cur_prd_len, BMDMAState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_bmdma_status = {
    .name = "ide bmdma/status",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = ide_bmdma_status_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(status, BMDMAState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_bmdma = {
    .name = "ide bmdma",
    .version_id = 3,
    .minimum_version_id = 0,
    .pre_save = ide_bmdma_pre_save,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(cmd, BMDMAState),
        VMSTATE_UINT8(migration_compat_status, BMDMAState),
        VMSTATE_UINT32(addr, BMDMAState),
        VMSTATE_INT64(migration_retry_sector_num, BMDMAState),
        VMSTATE_UINT32(migration_retry_nsector, BMDMAState),
        VMSTATE_UINT8(migration_retry_unit, BMDMAState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_bmdma_current,
        &vmstate_bmdma_status,
        NULL
    }
};
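
/*
 * A note on how the compat machinery above plays out on the wire
 * (descriptive sketch): the main "ide bmdma" section keeps the slot that
 * used to carry the status register, now loaded into
 * migration_compat_status, while the real status travels in the
 * "ide bmdma/status" subsection, which, like all VMState subsections, is
 * only transmitted when its .needed callback returns true.  A stream
 * from an older QEMU carries no subsection, so bm->status stays 0 on
 * load, and ide_bmdma_post_load() splits the compat byte: architected
 * bits go back into bm->status and the abused retry bits (e.g.
 * IDE_RETRY_DMA) are ORed into bus->error_status, where current code
 * expects them.
 */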

static int ide_pci_post_load(void *opaque, int version_id)
{
    PCIIDEState *d = opaque;
    int i;

    for (i = 0; i < 2; i++) {
        /* current versions always store 0/1, but older versions
           stored bigger values; we only need the last bit */
        d->bmdma[i].migration_retry_unit &= 1;
        ide_bmdma_post_load(&d->bmdma[i], -1);
    }

    return 0;
}

const VMStateDescription vmstate_ide_pci = {
    .name = "ide",
    .version_id = 3,
    .minimum_version_id = 0,
    .post_load = ide_pci_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, PCIIDEState),
        VMSTATE_STRUCT_ARRAY(bmdma, PCIIDEState, 2, 0,
                             vmstate_bmdma, BMDMAState),
        VMSTATE_IDE_BUS_ARRAY(bus, PCIIDEState, 2),
        VMSTATE_IDE_DRIVES(bus[0].ifs, PCIIDEState),
        VMSTATE_IDE_DRIVES(bus[1].ifs, PCIIDEState),
        VMSTATE_END_OF_LIST()
    }
};

void pci_ide_create_devs(PCIDevice *dev, DriveInfo **hd_table)
{
    PCIIDEState *d = PCI_IDE(dev);
    static const int bus[4]  = { 0, 0, 1, 1 };
    static const int unit[4] = { 0, 1, 0, 1 };
    int i;

    for (i = 0; i < 4; i++) {
        if (hd_table[i] == NULL)
            continue;
        ide_create_drive(d->bus + bus[i], unit[i], hd_table[i]);
    }
}

static const struct IDEDMAOps bmdma_ops = {
    .start_dma = bmdma_start_dma,
    .prepare_buf = bmdma_prepare_buf,
    .rw_buf = bmdma_rw_buf,
    .restart_dma = bmdma_restart_dma,
    .set_inactive = bmdma_set_inactive,
    .reset = bmdma_reset,
};

void bmdma_init(IDEBus *bus, BMDMAState *bm, PCIIDEState *d)
{
    if (bus->dma == &bm->dma) {
        return;
    }

    bm->dma.ops = &bmdma_ops;
    bus->dma = &bm->dma;
    bm->irq = bus->irq;
    bus->irq = qemu_allocate_irq(bmdma_irq, bm, 0);
    bm->pci_dev = d;
}

static const TypeInfo pci_ide_type_info = {
    .name = TYPE_PCI_IDE,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(PCIIDEState),
    .abstract = true,
};

static void pci_ide_register_types(void)
{
    type_register_static(&pci_ide_type_info);
}

type_init(pci_ide_register_types)
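
/*
 * Usage sketch (hypothetical caller, not part of this file): a concrete
 * controller model built on TYPE_PCI_IDE is expected to wire each IDE
 * bus to its DMA engine and then attach the configured drives, roughly:
 *
 *     bmdma_init(&d->bus[0], &d->bmdma[0], d);
 *     bmdma_init(&d->bus[1], &d->bmdma[1], d);
 *     pci_ide_create_devs(PCI_DEVICE(d), hd_table);
 *
 * Note that bmdma_init() interposes on the bus IRQ: it saves the
 * original line in bm->irq and replaces bus->irq with bmdma_irq(), so
 * BM_STATUS_INT can be latched before the interrupt is forwarded.
 */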