/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include "drmP.h"
#include "vmwgfx_drv.h"

#include "ttm/ttm_placement.h"

#include "svga_overlay.h"
#include "svga_escape.h"

#define VMW_MAX_NUM_STREAMS 1

struct vmw_stream {
        struct vmw_dma_buffer *buf;
        bool claimed;
        bool paused;
        struct drm_vmw_control_stream_arg saved;
};

/**
 * Overlay control
 */
struct vmw_overlay {
        /*
         * Each stream is a single overlay. In Xv these are called ports.
         */
        struct mutex mutex;
        struct vmw_stream stream[VMW_MAX_NUM_STREAMS];
};

static inline struct vmw_overlay *vmw_overlay(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        return dev_priv ? dev_priv->overlay_priv : NULL;
}

struct vmw_escape_header {
        uint32_t cmd;
        SVGAFifoCmdEscape body;
};

struct vmw_escape_video_flush {
        struct vmw_escape_header escape;
        SVGAEscapeVideoFlush flush;
};

static inline void fill_escape(struct vmw_escape_header *header,
                               uint32_t size)
{
        header->cmd = SVGA_CMD_ESCAPE;
        header->body.nsid = SVGA_ESCAPE_NSID_VMWARE;
        header->body.size = size;
}

static inline void fill_flush(struct vmw_escape_video_flush *cmd,
                              uint32_t stream_id)
{
        fill_escape(&cmd->escape, sizeof(cmd->flush));
        cmd->flush.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_FLUSH;
        cmd->flush.streamId = stream_id;
}
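
/*
 * All overlay commands are sent to the device as SVGA escapes:
 * fill_escape() wraps the payload in an SVGA_CMD_ESCAPE header carrying
 * the VMware namespace id, and fill_flush() builds the
 * SVGA_ESCAPE_VMWARE_VIDEO_FLUSH escape that is appended to every put and
 * stop command below, presumably so that the device picks up the new
 * stream register values in one go.
 */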

/**
 * Pin or unpin a buffer in vram.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to pin or unpin.
 * @pin: Pin buffer in vram if true.
 * @interruptible: Use interruptible wait.
 *
 * Takes the current master's ttm lock in read mode.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
                                  struct vmw_dma_buffer *buf,
                                  bool pin, bool interruptible)
{
        struct ttm_buffer_object *bo = &buf->base;
        struct ttm_bo_global *glob = bo->glob;
        struct ttm_placement *overlay_placement = &vmw_vram_placement;
        int ret;

        ret = ttm_read_lock(&dev_priv->active_master->lock, interruptible);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
        if (unlikely(ret != 0))
                goto err;

        if (buf->gmr_bound) {
                vmw_gmr_unbind(dev_priv, buf->gmr_id);
                spin_lock(&glob->lru_lock);
                ida_remove(&dev_priv->gmr_ida, buf->gmr_id);
                spin_unlock(&glob->lru_lock);
                buf->gmr_bound = false;
        }

        if (pin)
                overlay_placement = &vmw_vram_ne_placement;

        ret = ttm_bo_validate(bo, overlay_placement, interruptible, false);

        ttm_bo_unreserve(bo);

err:
        ttm_read_unlock(&dev_priv->active_master->lock);

        return ret;
}

/**
 * Send a put command to the hw.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_overlay_send_put(struct vmw_private *dev_priv,
                                struct vmw_dma_buffer *buf,
                                struct drm_vmw_control_stream_arg *arg,
                                bool interruptible)
{
        struct {
                struct vmw_escape_header escape;
                struct {
                        struct {
                                uint32_t cmdType;
                                uint32_t streamId;
                        } header;
                        struct {
                                uint32_t registerId;
                                uint32_t value;
                        } items[SVGA_VIDEO_PITCH_3 + 1];
                } body;
                struct vmw_escape_video_flush flush;
        } *cmds;
        uint32_t offset;
        int i, ret;

        for (;;) {
                cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
                if (cmds)
                        break;

                ret = vmw_fallback_wait(dev_priv, false, true, 0,
                                        interruptible, 3*HZ);
                if (interruptible && ret == -ERESTARTSYS)
                        return ret;
                else
                        BUG_ON(ret != 0);
        }

        fill_escape(&cmds->escape, sizeof(cmds->body));
        cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
        cmds->body.header.streamId = arg->stream_id;

        for (i = 0; i <= SVGA_VIDEO_PITCH_3; i++)
                cmds->body.items[i].registerId = i;

        offset = buf->base.offset + arg->offset;

        cmds->body.items[SVGA_VIDEO_ENABLED].value = true;
        cmds->body.items[SVGA_VIDEO_FLAGS].value = arg->flags;
        cmds->body.items[SVGA_VIDEO_DATA_OFFSET].value = offset;
        cmds->body.items[SVGA_VIDEO_FORMAT].value = arg->format;
        cmds->body.items[SVGA_VIDEO_COLORKEY].value = arg->color_key;
        cmds->body.items[SVGA_VIDEO_SIZE].value = arg->size;
        cmds->body.items[SVGA_VIDEO_WIDTH].value = arg->width;
        cmds->body.items[SVGA_VIDEO_HEIGHT].value = arg->height;
        cmds->body.items[SVGA_VIDEO_SRC_X].value = arg->src.x;
        cmds->body.items[SVGA_VIDEO_SRC_Y].value = arg->src.y;
        cmds->body.items[SVGA_VIDEO_SRC_WIDTH].value = arg->src.w;
        cmds->body.items[SVGA_VIDEO_SRC_HEIGHT].value = arg->src.h;
        cmds->body.items[SVGA_VIDEO_DST_X].value = arg->dst.x;
        cmds->body.items[SVGA_VIDEO_DST_Y].value = arg->dst.y;
        cmds->body.items[SVGA_VIDEO_DST_WIDTH].value = arg->dst.w;
        cmds->body.items[SVGA_VIDEO_DST_HEIGHT].value = arg->dst.h;
        cmds->body.items[SVGA_VIDEO_PITCH_1].value = arg->pitch[0];
        cmds->body.items[SVGA_VIDEO_PITCH_2].value = arg->pitch[1];
        cmds->body.items[SVGA_VIDEO_PITCH_3].value = arg->pitch[2];

        fill_flush(&cmds->flush, arg->stream_id);

        vmw_fifo_commit(dev_priv, sizeof(*cmds));

        return 0;
}
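
/*
 * Note that SVGA_VIDEO_DATA_OFFSET above is programmed with the buffer's
 * current placement offset (buf->base.offset + arg->offset), so the buffer
 * must not move while the stream is enabled. This is why callers pin the
 * buffer into vram with a no-evict placement before sending a put, and
 * only drop that pin again when the stream is stopped or paused.
 */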

/**
 * Send a stop command to the hw.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
                                 uint32_t stream_id,
                                 bool interruptible)
{
        struct {
                struct vmw_escape_header escape;
                SVGAEscapeVideoSetRegs body;
                struct vmw_escape_video_flush flush;
        } *cmds;
        int ret;

        for (;;) {
                cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
                if (cmds)
                        break;

                ret = vmw_fallback_wait(dev_priv, false, true, 0,
                                        interruptible, 3*HZ);
                if (interruptible && ret == -ERESTARTSYS)
                        return ret;
                else
                        BUG_ON(ret != 0);
        }

        fill_escape(&cmds->escape, sizeof(cmds->body));
        cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
        cmds->body.header.streamId = stream_id;
        cmds->body.items[0].registerId = SVGA_VIDEO_ENABLED;
        cmds->body.items[0].value = false;
        fill_flush(&cmds->flush, stream_id);

        vmw_fifo_commit(dev_priv, sizeof(*cmds));

        return 0;
}

/**
 * Stop or pause a stream.
 *
 * If the stream is paused, the no-evict flag is removed from the buffer
 * but it is left in vram. This allows, for instance, mode_set to evict it
 * should it need to.
 *
 * The caller must hold the overlay lock.
 *
 * @stream_id: which stream to stop/pause.
 * @pause: true to pause, false to stop completely.
 */
static int vmw_overlay_stop(struct vmw_private *dev_priv,
                            uint32_t stream_id, bool pause,
                            bool interruptible)
{
        struct vmw_overlay *overlay = dev_priv->overlay_priv;
        struct vmw_stream *stream = &overlay->stream[stream_id];
        int ret;

        /* With no buffer attached the stream is already completely stopped. */
        if (!stream->buf)
                return 0;

        /* If the stream is paused this has already been done. */
        if (!stream->paused) {
                ret = vmw_overlay_send_stop(dev_priv, stream_id,
                                            interruptible);
                if (ret)
                        return ret;

                /* We just remove the NO_EVICT flag, so no -ENOMEM. */
                ret = vmw_dmabuf_pin_in_vram(dev_priv, stream->buf, false,
                                             interruptible);
                if (interruptible && ret == -ERESTARTSYS)
                        return ret;
                else
                        BUG_ON(ret != 0);
        }

        if (!pause) {
                vmw_dmabuf_unreference(&stream->buf);
                stream->paused = false;
        } else {
                stream->paused = true;
        }

        return 0;
}
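
/*
 * Stream bookkeeping: a stream with no buffer attached is completely
 * stopped, a stream with a buffer and paused == false is live, and a
 * stream with a buffer and paused == true keeps its last control
 * arguments in 'saved' so that it can be restarted later (see
 * vmw_overlay_resume_all()). vmw_overlay_update_stream() and
 * vmw_overlay_stop() implement the transitions between these states.
 */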

/**
 * Update a stream and send any put or stop fifo commands needed.
 *
 * The caller must hold the overlay lock.
 *
 * Returns
 * -ENOMEM if the buffer doesn't fit in vram.
 * -ERESTARTSYS if interrupted.
 */
static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
                                     struct vmw_dma_buffer *buf,
                                     struct drm_vmw_control_stream_arg *arg,
                                     bool interruptible)
{
        struct vmw_overlay *overlay = dev_priv->overlay_priv;
        struct vmw_stream *stream = &overlay->stream[arg->stream_id];
        int ret = 0;

        if (!buf)
                return -EINVAL;

        DRM_DEBUG(" %s: old %p, new %p, %spaused\n", __func__,
                  stream->buf, buf, stream->paused ? "" : "not ");

        if (stream->buf != buf) {
                ret = vmw_overlay_stop(dev_priv, arg->stream_id,
                                       false, interruptible);
                if (ret)
                        return ret;
        } else if (!stream->paused) {
                /* If the buffers match and the stream isn't paused, just
                 * send the put command; there is no need to do anything else.
                 */
                ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
                if (ret == 0)
                        stream->saved = *arg;
                else
                        BUG_ON(!interruptible);

                return ret;
        }

        /* We don't start the old stream if we are interrupted.
         * Might return -ENOMEM if it can't fit the buffer in vram.
         */
        ret = vmw_dmabuf_pin_in_vram(dev_priv, buf, true, interruptible);
        if (ret)
                return ret;

        ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
        if (ret) {
                /* This one needs to happen no matter what. We only remove
                 * the NO_EVICT flag so this is safe from -ENOMEM.
                 */
                BUG_ON(vmw_dmabuf_pin_in_vram(dev_priv, buf, false, false) != 0);
                return ret;
        }

        if (stream->buf != buf)
                stream->buf = vmw_dmabuf_reference(buf);
        stream->saved = *arg;

        return 0;
}

/**
 * Stop all streams.
 *
 * Used by the fb code when starting.
 *
 * Takes the overlay lock.
 */
int vmw_overlay_stop_all(struct vmw_private *dev_priv)
{
        struct vmw_overlay *overlay = dev_priv->overlay_priv;
        int i, ret;

        if (!overlay)
                return 0;

        mutex_lock(&overlay->mutex);

        for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
                struct vmw_stream *stream = &overlay->stream[i];
                if (!stream->buf)
                        continue;

                ret = vmw_overlay_stop(dev_priv, i, false, false);
                WARN_ON(ret != 0);
        }

        mutex_unlock(&overlay->mutex);

        return 0;
}

/**
 * Try to resume all paused streams.
 *
 * Used by the kms code after moving a new scanout buffer to vram.
 *
 * Takes the overlay lock.
 */
int vmw_overlay_resume_all(struct vmw_private *dev_priv)
{
        struct vmw_overlay *overlay = dev_priv->overlay_priv;
        int i, ret;

        if (!overlay)
                return 0;

        mutex_lock(&overlay->mutex);

        for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
                struct vmw_stream *stream = &overlay->stream[i];
                if (!stream->paused)
                        continue;

                ret = vmw_overlay_update_stream(dev_priv, stream->buf,
                                                &stream->saved, false);
                if (ret != 0)
                        DRM_INFO("%s: *warning* failed to resume stream %i\n",
                                 __func__, i);
        }

        mutex_unlock(&overlay->mutex);

        return 0;
}

/**
 * Pause all active streams.
 *
 * Used by the kms code when moving a new scanout buffer to vram.
 *
 * Takes the overlay lock.
 */
int vmw_overlay_pause_all(struct vmw_private *dev_priv)
{
        struct vmw_overlay *overlay = dev_priv->overlay_priv;
        int i, ret;

        if (!overlay)
                return 0;

        mutex_lock(&overlay->mutex);

        for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
                if (overlay->stream[i].paused)
                        DRM_INFO("%s: *warning* stream %i already paused\n",
                                 __func__, i);
                ret = vmw_overlay_stop(dev_priv, i, true, false);
                WARN_ON(ret != 0);
        }

        mutex_unlock(&overlay->mutex);

        return 0;
}
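
/*
 * vmw_overlay_ioctl - control stream ioctl handler.
 *
 * Looks up the user-space stream handle, then either stops the stream
 * (arg->enabled == 0) or looks up the dma buffer handle and (re)programs
 * the overlay with the new arguments. Takes the overlay lock.
 *
 * A rough sketch of the expected user-space call, for illustration only;
 * variable names are placeholders and the defines live in vmwgfx_drm.h:
 *
 *	struct drm_vmw_control_stream_arg arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.stream_id = stream;		(claimed stream handle)
 *	arg.enabled = 1;
 *	arg.handle = handle;		(dma buffer holding the frame)
 *	(fill in format, size, src/dst rects and pitches)
 *	drmCommandWrite(fd, DRM_VMW_CONTROL_STREAM, &arg, sizeof(arg));
 */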

int vmw_overlay_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_overlay *overlay = dev_priv->overlay_priv;
        struct drm_vmw_control_stream_arg *arg =
            (struct drm_vmw_control_stream_arg *)data;
        struct vmw_dma_buffer *buf;
        struct vmw_resource *res;
        int ret;

        if (!overlay)
                return -ENOSYS;

        ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res);
        if (ret)
                return ret;

        mutex_lock(&overlay->mutex);

        if (!arg->enabled) {
                ret = vmw_overlay_stop(dev_priv, arg->stream_id, false, true);
                goto out_unlock;
        }

        ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf);
        if (ret)
                goto out_unlock;

        ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);

        vmw_dmabuf_unreference(&buf);

out_unlock:
        mutex_unlock(&overlay->mutex);
        vmw_resource_unreference(&res);

        return ret;
}

int vmw_overlay_num_overlays(struct vmw_private *dev_priv)
{
        if (!dev_priv->overlay_priv)
                return 0;

        return VMW_MAX_NUM_STREAMS;
}

int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv)
{
        struct vmw_overlay *overlay = dev_priv->overlay_priv;
        int i, k;

        if (!overlay)
                return 0;

        mutex_lock(&overlay->mutex);

        for (i = 0, k = 0; i < VMW_MAX_NUM_STREAMS; i++)
                if (!overlay->stream[i].claimed)
                        k++;

        mutex_unlock(&overlay->mutex);

        return k;
}
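
/*
 * vmw_overlay_claim() hands out a free stream slot and marks it as
 * claimed, vmw_overlay_unref() stops the stream and releases the slot
 * again. The index written to *out is the same stream id that indexes
 * the overlay->stream array and is programmed into the fifo commands
 * above.
 */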
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out)
{
        struct vmw_overlay *overlay = dev_priv->overlay_priv;
        int i;

        if (!overlay)
                return -ENOSYS;

        mutex_lock(&overlay->mutex);

        for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {

                if (overlay->stream[i].claimed)
                        continue;

                overlay->stream[i].claimed = true;
                *out = i;
                mutex_unlock(&overlay->mutex);
                return 0;
        }

        mutex_unlock(&overlay->mutex);
        return -ESRCH;
}

int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id)
{
        struct vmw_overlay *overlay = dev_priv->overlay_priv;

        BUG_ON(stream_id >= VMW_MAX_NUM_STREAMS);

        if (!overlay)
                return -ENOSYS;

        mutex_lock(&overlay->mutex);

        WARN_ON(!overlay->stream[stream_id].claimed);
        vmw_overlay_stop(dev_priv, stream_id, false, false);
        overlay->stream[stream_id].claimed = false;

        mutex_unlock(&overlay->mutex);
        return 0;
}

int vmw_overlay_init(struct vmw_private *dev_priv)
{
        struct vmw_overlay *overlay;
        int i;

        if (dev_priv->overlay_priv)
                return -EINVAL;

        if (!(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_VIDEO) ||
            !(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_ESCAPE)) {
                DRM_INFO("hardware doesn't support overlays\n");
                return -ENOSYS;
        }

        overlay = kmalloc(sizeof(*overlay), GFP_KERNEL);
        if (!overlay)
                return -ENOMEM;

        memset(overlay, 0, sizeof(*overlay));
        mutex_init(&overlay->mutex);
        for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
                overlay->stream[i].buf = NULL;
                overlay->stream[i].paused = false;
                overlay->stream[i].claimed = false;
        }

        dev_priv->overlay_priv = overlay;

        return 0;
}

int vmw_overlay_close(struct vmw_private *dev_priv)
{
        struct vmw_overlay *overlay = dev_priv->overlay_priv;
        bool forgotten_buffer = false;
        int i;

        if (!overlay)
                return -ENOSYS;

        for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
                if (overlay->stream[i].buf) {
                        forgotten_buffer = true;
                        vmw_overlay_stop(dev_priv, i, false, false);
                }
        }

        WARN_ON(forgotten_buffer);

        dev_priv->overlay_priv = NULL;
        kfree(overlay);

        return 0;
}