// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 * Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem.h>

#include <linux/of_device.h>

#include <xen/platform_pci.h>
#include <xen/xen.h>
#include <xen/xenbus.h>

#include <xen/interface/io/displif.h>

#include "xen_drm_front.h"
#include "xen_drm_front_cfg.h"
#include "xen_drm_front_evtchnl.h"
#include "xen_drm_front_gem.h"
#include "xen_drm_front_kms.h"
#include "xen_drm_front_shbuf.h"

struct xen_drm_front_dbuf {
	struct list_head list;
	u64 dbuf_cookie;
	u64 fb_cookie;
	struct xen_drm_front_shbuf *shbuf;
};

static int dbuf_add_to_list(struct xen_drm_front_info *front_info,
			    struct xen_drm_front_shbuf *shbuf, u64 dbuf_cookie)
{
	struct xen_drm_front_dbuf *dbuf;

	dbuf = kzalloc(sizeof(*dbuf), GFP_KERNEL);
	if (!dbuf)
		return -ENOMEM;

	dbuf->dbuf_cookie = dbuf_cookie;
	dbuf->shbuf = shbuf;
	list_add(&dbuf->list, &front_info->dbuf_list);
	return 0;
}

static struct xen_drm_front_dbuf *dbuf_get(struct list_head *dbuf_list,
					   u64 dbuf_cookie)
{
	struct xen_drm_front_dbuf *buf, *q;

	list_for_each_entry_safe(buf, q, dbuf_list, list)
		if (buf->dbuf_cookie == dbuf_cookie)
			return buf;

	return NULL;
}

static void dbuf_flush_fb(struct list_head *dbuf_list, u64 fb_cookie)
{
	struct xen_drm_front_dbuf *buf, *q;

	list_for_each_entry_safe(buf, q, dbuf_list, list)
		if (buf->fb_cookie == fb_cookie)
			xen_drm_front_shbuf_flush(buf->shbuf);
}

static void dbuf_free(struct list_head *dbuf_list, u64 dbuf_cookie)
{
	struct xen_drm_front_dbuf *buf, *q;

	list_for_each_entry_safe(buf, q, dbuf_list, list)
		if (buf->dbuf_cookie == dbuf_cookie) {
			list_del(&buf->list);
			xen_drm_front_shbuf_unmap(buf->shbuf);
			xen_drm_front_shbuf_free(buf->shbuf);
			kfree(buf);
			break;
		}
}

static void dbuf_free_all(struct list_head *dbuf_list)
{
	struct xen_drm_front_dbuf *buf, *q;

	list_for_each_entry_safe(buf, q, dbuf_list, list) {
		list_del(&buf->list);
		xen_drm_front_shbuf_unmap(buf->shbuf);
		xen_drm_front_shbuf_free(buf->shbuf);
		kfree(buf);
	}
}

static struct xendispl_req *
be_prepare_req(struct xen_drm_front_evtchnl *evtchnl, u8 operation)
{
	struct xendispl_req *req;

	req = RING_GET_REQUEST(&evtchnl->u.req.ring,
			       evtchnl->u.req.ring.req_prod_pvt);
	req->operation = operation;
	req->id = evtchnl->evt_next_id++;
	evtchnl->evt_id = req->id;
	return req;
}

static int be_stream_do_io(struct xen_drm_front_evtchnl *evtchnl,
			   struct xendispl_req *req)
{
	reinit_completion(&evtchnl->u.req.completion);
	if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
		return -EIO;

	xen_drm_front_evtchnl_flush(evtchnl);
	return 0;
}

static int be_stream_wait_io(struct xen_drm_front_evtchnl *evtchnl)
{
	if (wait_for_completion_timeout(&evtchnl->u.req.completion,
			msecs_to_jiffies(XEN_DRM_FRONT_WAIT_BACK_MS)) <= 0)
		return -ETIMEDOUT;

	return evtchnl->u.req.resp_status;
}
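
/*
 * All requests to the backend below follow the same pattern, summarized
 * here for the reader (this is a description of the existing code in this
 * file, not additional functionality):
 *
 *	mutex_lock(&evtchnl->u.req.req_io_lock);
 *
 *	spin_lock_irqsave(&front_info->io_lock, flags);
 *	req = be_prepare_req(evtchnl, XENDISPL_OP_...);
 *	... fill in req->op.<operation> fields ...
 *	ret = be_stream_do_io(evtchnl, req);
 *	spin_unlock_irqrestore(&front_info->io_lock, flags);
 *
 *	if (ret == 0)
 *		ret = be_stream_wait_io(evtchnl);
 *
 *	mutex_unlock(&evtchnl->u.req.req_io_lock);
 *
 * req_io_lock serializes requests on an event channel, io_lock protects
 * the shared ring while a request is put on it, and be_stream_wait_io()
 * waits for the backend's response with a XEN_DRM_FRONT_WAIT_BACK_MS
 * timeout.
 */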

int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline,
			   u32 x, u32 y, u32 width, u32 height,
			   u32 bpp, u64 fb_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xen_drm_front_info *front_info;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	front_info = pipeline->drm_info->front_info;
	evtchnl = &front_info->evt_pairs[pipeline->index].req;
	if (unlikely(!evtchnl))
		return -EIO;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_SET_CONFIG);
	req->op.set_config.x = x;
	req->op.set_config.y = y;
	req->op.set_config.width = width;
	req->op.set_config.height = height;
	req->op.set_config.bpp = bpp;
	req->op.set_config.fb_cookie = fb_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
			      u64 dbuf_cookie, u32 width, u32 height,
			      u32 bpp, u64 size, struct page **pages)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xen_drm_front_shbuf *shbuf;
	struct xendispl_req *req;
	struct xen_drm_front_shbuf_cfg buf_cfg;
	unsigned long flags;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	memset(&buf_cfg, 0, sizeof(buf_cfg));
	buf_cfg.xb_dev = front_info->xb_dev;
	buf_cfg.pages = pages;
	buf_cfg.size = size;
	buf_cfg.be_alloc = front_info->cfg.be_alloc;

	shbuf = xen_drm_front_shbuf_alloc(&buf_cfg);
	if (IS_ERR(shbuf))
		return PTR_ERR(shbuf);

	ret = dbuf_add_to_list(front_info, shbuf, dbuf_cookie);
	if (ret < 0) {
		xen_drm_front_shbuf_free(shbuf);
		return ret;
	}

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_CREATE);
	req->op.dbuf_create.gref_directory =
			xen_drm_front_shbuf_get_dir_start(shbuf);
	req->op.dbuf_create.buffer_sz = size;
	req->op.dbuf_create.dbuf_cookie = dbuf_cookie;
	req->op.dbuf_create.width = width;
	req->op.dbuf_create.height = height;
	req->op.dbuf_create.bpp = bpp;
	if (buf_cfg.be_alloc)
		req->op.dbuf_create.flags |= XENDISPL_DBUF_FLG_REQ_ALLOC;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret < 0)
		goto fail;

	ret = be_stream_wait_io(evtchnl);
	if (ret < 0)
		goto fail;

	ret = xen_drm_front_shbuf_map(shbuf);
	if (ret < 0)
		goto fail;

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return 0;

fail:
	mutex_unlock(&evtchnl->u.req.req_io_lock);
	dbuf_free(&front_info->dbuf_list, dbuf_cookie);
	return ret;
}
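
/*
 * Note on buffer allocation (a summary of the existing logic): when
 * front_info->cfg.be_alloc is set, the request above carries
 * XENDISPL_DBUF_FLG_REQ_ALLOC, i.e. the backend is asked to allocate the
 * display buffer, and xen_drm_front_shbuf_map() then makes that buffer
 * usable on the frontend side. Otherwise the buffer is backed by the GEM
 * object's pages, which are shared with the backend via the grant
 * references published through gref_directory.
 */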

static int xen_drm_front_dbuf_destroy(struct xen_drm_front_info *front_info,
				      u64 dbuf_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xendispl_req *req;
	unsigned long flags;
	bool be_alloc;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	be_alloc = front_info->cfg.be_alloc;

	/*
	 * For a backend allocated buffer release references now, so the
	 * backend can free the buffer.
	 */
	if (be_alloc)
		dbuf_free(&front_info->dbuf_list, dbuf_cookie);

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_DESTROY);
	req->op.dbuf_destroy.dbuf_cookie = dbuf_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	/*
	 * Do this regardless of communication status with the backend:
	 * if we cannot remove remote resources, remove what we can locally.
	 */
	if (!be_alloc)
		dbuf_free(&front_info->dbuf_list, dbuf_cookie);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

int xen_drm_front_fb_attach(struct xen_drm_front_info *front_info,
			    u64 dbuf_cookie, u64 fb_cookie, u32 width,
			    u32 height, u32 pixel_format)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xen_drm_front_dbuf *buf;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	buf = dbuf_get(&front_info->dbuf_list, dbuf_cookie);
	if (!buf)
		return -EINVAL;

	buf->fb_cookie = fb_cookie;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_FB_ATTACH);
	req->op.fb_attach.dbuf_cookie = dbuf_cookie;
	req->op.fb_attach.fb_cookie = fb_cookie;
	req->op.fb_attach.width = width;
	req->op.fb_attach.height = height;
	req->op.fb_attach.pixel_format = pixel_format;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

int xen_drm_front_fb_detach(struct xen_drm_front_info *front_info,
			    u64 fb_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_FB_DETACH);
	req->op.fb_detach.fb_cookie = fb_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

int xen_drm_front_page_flip(struct xen_drm_front_info *front_info,
			    int conn_idx, u64 fb_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	if (unlikely(conn_idx >= front_info->num_evt_pairs))
		return -EINVAL;

	dbuf_flush_fb(&front_info->dbuf_list, fb_cookie);
	evtchnl = &front_info->evt_pairs[conn_idx].req;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_PG_FLIP);
	req->op.pg_flip.fb_cookie = fb_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}
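
/*
 * Event channel usage (descriptive note): display buffer and framebuffer
 * management requests go over the dedicated
 * evt_pairs[GENERIC_OP_EVT_CHNL] request channel, while
 * xen_drm_front_mode_set() and xen_drm_front_page_flip() use the
 * per-connector request channel selected by the pipeline/connector index.
 */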

void xen_drm_front_on_frame_done(struct xen_drm_front_info *front_info,
				 int conn_idx, u64 fb_cookie)
{
	struct xen_drm_front_drm_info *drm_info = front_info->drm_info;

	if (unlikely(conn_idx >= front_info->cfg.num_connectors))
		return;

	xen_drm_front_kms_on_frame_done(&drm_info->pipeline[conn_idx],
					fb_cookie);
}

static int xen_drm_drv_dumb_create(struct drm_file *filp,
				   struct drm_device *dev,
				   struct drm_mode_create_dumb *args)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct drm_gem_object *obj;
	int ret;

	/*
	 * Dumb creation is a two-stage process: first we create a fully
	 * constructed GEM object which is communicated to the backend, and
	 * only after that we can create GEM's handle. This is done this way
	 * because of possible races: once a handle is created it becomes
	 * immediately visible to user-space, which could then try to access
	 * an object that has no pages yet, etc.
	 * For details also see drm_gem_handle_create().
	 */
	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	args->size = args->pitch * args->height;

	obj = xen_drm_front_gem_create(dev, args->size);
	if (IS_ERR_OR_NULL(obj)) {
		ret = PTR_ERR(obj);
		goto fail;
	}

	ret = xen_drm_front_dbuf_create(drm_info->front_info,
					xen_drm_front_dbuf_to_cookie(obj),
					args->width, args->height, args->bpp,
					args->size,
					xen_drm_front_gem_get_pages(obj));
	if (ret)
		goto fail_backend;

	/* This is the tail of GEM object creation */
	ret = drm_gem_handle_create(filp, obj, &args->handle);
	if (ret)
		goto fail_handle;

	/* Drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(obj);
	return 0;

fail_handle:
	xen_drm_front_dbuf_destroy(drm_info->front_info,
				   xen_drm_front_dbuf_to_cookie(obj));
fail_backend:
	/* drop reference from allocate */
	drm_gem_object_put_unlocked(obj);
fail:
	DRM_ERROR("Failed to create dumb buffer: %d\n", ret);
	return ret;
}

static void xen_drm_drv_free_object_unlocked(struct drm_gem_object *obj)
{
	struct xen_drm_front_drm_info *drm_info = obj->dev->dev_private;
	int idx;

	if (drm_dev_enter(obj->dev, &idx)) {
		xen_drm_front_dbuf_destroy(drm_info->front_info,
					   xen_drm_front_dbuf_to_cookie(obj));
		drm_dev_exit(idx);
	} else {
		dbuf_free(&drm_info->front_info->dbuf_list,
			  xen_drm_front_dbuf_to_cookie(obj));
	}

	xen_drm_front_gem_free_object_unlocked(obj);
}

static void xen_drm_drv_release(struct drm_device *dev)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_drm_front_info *front_info = drm_info->front_info;

	xen_drm_front_kms_fini(drm_info);

	drm_atomic_helper_shutdown(dev);
	drm_mode_config_cleanup(dev);

	drm_dev_fini(dev);
	kfree(dev);

	if (front_info->cfg.be_alloc)
		xenbus_switch_state(front_info->xb_dev,
				    XenbusStateInitialising);

	kfree(drm_info);
}

static const struct file_operations xen_drm_dev_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = drm_compat_ioctl,
#endif
	.poll = drm_poll,
	.read = drm_read,
	.llseek = no_llseek,
	.mmap = xen_drm_front_gem_mmap,
};
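
/*
 * Standard DRM file operations; only .mmap is driver specific and is
 * provided by the GEM part of this driver (xen_drm_front_gem_mmap()), so
 * that user-space mappings are backed by the display buffer pages.
 */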

static const struct vm_operations_struct xen_drm_drv_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static struct drm_driver xen_drm_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET |
			   DRIVER_PRIME | DRIVER_ATOMIC,
	.release = xen_drm_drv_release,
	.gem_vm_ops = &xen_drm_drv_vm_ops,
	.gem_free_object_unlocked = xen_drm_drv_free_object_unlocked,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_import = drm_gem_prime_import,
	.gem_prime_export = drm_gem_prime_export,
	.gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table,
	.gem_prime_get_sg_table = xen_drm_front_gem_get_sg_table,
	.gem_prime_vmap = xen_drm_front_gem_prime_vmap,
	.gem_prime_vunmap = xen_drm_front_gem_prime_vunmap,
	.gem_prime_mmap = xen_drm_front_gem_prime_mmap,
	.dumb_create = xen_drm_drv_dumb_create,
	.fops = &xen_drm_dev_fops,
	.name = "xendrm-du",
	.desc = "Xen PV DRM Display Unit",
	.date = "20180221",
	.major = 1,
	.minor = 0,
};

static int xen_drm_drv_init(struct xen_drm_front_info *front_info)
{
	struct device *dev = &front_info->xb_dev->dev;
	struct xen_drm_front_drm_info *drm_info;
	struct drm_device *drm_dev;
	int ret;

	DRM_INFO("Creating %s\n", xen_drm_driver.desc);

	drm_info = kzalloc(sizeof(*drm_info), GFP_KERNEL);
	if (!drm_info) {
		ret = -ENOMEM;
		goto fail;
	}

	drm_info->front_info = front_info;
	front_info->drm_info = drm_info;

	drm_dev = drm_dev_alloc(&xen_drm_driver, dev);
	if (IS_ERR(drm_dev)) {
		ret = PTR_ERR(drm_dev);
		goto fail;
	}

	drm_info->drm_dev = drm_dev;

	drm_dev->dev_private = drm_info;

	ret = xen_drm_front_kms_init(drm_info);
	if (ret) {
		DRM_ERROR("Failed to initialize DRM/KMS, ret %d\n", ret);
		goto fail_modeset;
	}

	ret = drm_dev_register(drm_dev, 0);
	if (ret)
		goto fail_register;

	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
		 xen_drm_driver.name, xen_drm_driver.major,
		 xen_drm_driver.minor, xen_drm_driver.patchlevel,
		 xen_drm_driver.date, drm_dev->primary->index);

	return 0;

fail_register:
	drm_dev_unregister(drm_dev);
fail_modeset:
	drm_kms_helper_poll_fini(drm_dev);
	drm_mode_config_cleanup(drm_dev);
fail:
	kfree(drm_info);
	return ret;
}

static void xen_drm_drv_fini(struct xen_drm_front_info *front_info)
{
	struct xen_drm_front_drm_info *drm_info = front_info->drm_info;
	struct drm_device *dev;

	if (!drm_info)
		return;

	dev = drm_info->drm_dev;
	if (!dev)
		return;

	/* Nothing to do if device is already unplugged */
	if (drm_dev_is_unplugged(dev))
		return;

	drm_kms_helper_poll_fini(dev);
	drm_dev_unplug(dev);

	front_info->drm_info = NULL;

	xen_drm_front_evtchnl_free_all(front_info);
	dbuf_free_all(&front_info->dbuf_list);

	/*
	 * If we are not using backend allocated buffers, then tell the
	 * backend we are ready to (re)initialize. Otherwise, wait for
	 * drm_driver.release.
	 */
	if (!front_info->cfg.be_alloc)
		xenbus_switch_state(front_info->xb_dev,
				    XenbusStateInitialising);
}
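
/*
 * Teardown happens in two steps (a descriptive note): xen_drm_drv_fini()
 * above runs on backend disconnect or driver removal and unplugs the DRM
 * device, frees the event channels and the display buffer list, while the
 * final cleanup, including the switch back to XenbusStateInitialising for
 * backend allocated buffers, is done in xen_drm_drv_release() once the
 * last reference to the DRM device is dropped.
 */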

static int displback_initwait(struct xen_drm_front_info *front_info)
{
	struct xen_drm_front_cfg *cfg = &front_info->cfg;
	int ret;

	cfg->front_info = front_info;
	ret = xen_drm_front_cfg_card(front_info, cfg);
	if (ret < 0)
		return ret;

	DRM_INFO("Have %d connector(s)\n", cfg->num_connectors);
	/* Create event channels for all connectors and publish */
	ret = xen_drm_front_evtchnl_create_all(front_info);
	if (ret < 0)
		return ret;

	return xen_drm_front_evtchnl_publish_all(front_info);
}

static int displback_connect(struct xen_drm_front_info *front_info)
{
	xen_drm_front_evtchnl_set_state(front_info, EVTCHNL_STATE_CONNECTED);
	return xen_drm_drv_init(front_info);
}

static void displback_disconnect(struct xen_drm_front_info *front_info)
{
	if (!front_info->drm_info)
		return;

	/* Tell the backend to wait until we release the DRM driver. */
	xenbus_switch_state(front_info->xb_dev, XenbusStateReconfiguring);

	xen_drm_drv_fini(front_info);
}

static void displback_changed(struct xenbus_device *xb_dev,
			      enum xenbus_state backend_state)
{
	struct xen_drm_front_info *front_info = dev_get_drvdata(&xb_dev->dev);
	int ret;

	DRM_DEBUG("Backend state is %s, front is %s\n",
		  xenbus_strstate(backend_state),
		  xenbus_strstate(xb_dev->state));

	switch (backend_state) {
	case XenbusStateReconfiguring:
		/* fall through */
	case XenbusStateReconfigured:
		/* fall through */
	case XenbusStateInitialised:
		break;

	case XenbusStateInitialising:
		if (xb_dev->state == XenbusStateReconfiguring)
			break;

		/* recovering after an unexpected backend closure */
		displback_disconnect(front_info);
		break;

	case XenbusStateInitWait:
		if (xb_dev->state == XenbusStateReconfiguring)
			break;

		/* recovering after an unexpected backend closure */
		displback_disconnect(front_info);
		if (xb_dev->state != XenbusStateInitialising)
			break;

		ret = displback_initwait(front_info);
		if (ret < 0)
			xenbus_dev_fatal(xb_dev, ret, "initializing frontend");
		else
			xenbus_switch_state(xb_dev, XenbusStateInitialised);
		break;

	case XenbusStateConnected:
		if (xb_dev->state != XenbusStateInitialised)
			break;

		ret = displback_connect(front_info);
		if (ret < 0) {
			displback_disconnect(front_info);
			xenbus_dev_fatal(xb_dev, ret, "connecting backend");
		} else {
			xenbus_switch_state(xb_dev, XenbusStateConnected);
		}
		break;

	case XenbusStateClosing:
		/*
		 * In this state the backend starts freeing resources, so let
		 * it go into the closed state so that we can also remove ours.
		 */
		break;

	case XenbusStateUnknown:
		/* fall through */
	case XenbusStateClosed:
		if (xb_dev->state == XenbusStateClosed)
			break;

		displback_disconnect(front_info);
		break;
	}
}
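
/*
 * Summary of the handshake implemented by displback_changed() above:
 * backend XenbusStateInitWait triggers displback_initwait() (read the
 * configuration, create and publish event channels) and moves the
 * frontend to XenbusStateInitialised; backend XenbusStateConnected then
 * triggers displback_connect(), which registers the DRM device and moves
 * the frontend to XenbusStateConnected. An unexpected backend closure
 * ends up in displback_disconnect(), which switches the frontend to
 * XenbusStateReconfiguring and tears the DRM device down.
 */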

static int xen_drv_probe(struct xenbus_device *xb_dev,
			 const struct xenbus_device_id *id)
{
	struct xen_drm_front_info *front_info;
	struct device *dev = &xb_dev->dev;
	int ret;

	/*
	 * The device is not spawned from a device tree, so
	 * arch_setup_dma_ops() is not called, thus leaving the device with
	 * dummy DMA ops.
	 * This makes the device return an error on PRIME buffer import,
	 * which is not correct: to fix this, call of_dma_configure() with
	 * a NULL node to set default DMA ops.
	 */
	dev->coherent_dma_mask = DMA_BIT_MASK(32);
	ret = of_dma_configure(dev, NULL, true);
	if (ret < 0) {
		DRM_ERROR("Cannot setup DMA ops, ret %d\n", ret);
		return ret;
	}

	front_info = devm_kzalloc(&xb_dev->dev,
				  sizeof(*front_info), GFP_KERNEL);
	if (!front_info)
		return -ENOMEM;

	front_info->xb_dev = xb_dev;
	spin_lock_init(&front_info->io_lock);
	INIT_LIST_HEAD(&front_info->dbuf_list);
	dev_set_drvdata(&xb_dev->dev, front_info);

	return xenbus_switch_state(xb_dev, XenbusStateInitialising);
}

static int xen_drv_remove(struct xenbus_device *dev)
{
	struct xen_drm_front_info *front_info = dev_get_drvdata(&dev->dev);
	int to = 100;

	xenbus_switch_state(dev, XenbusStateClosing);

	/*
	 * On driver removal it is disconnected from XenBus,
	 * so no backend state change events come via the .otherend_changed
	 * callback. This prevents us from exiting gracefully, e.g.
	 * signaling the backend to free event channels, waiting for its
	 * state to change to XenbusStateClosed and cleaning up at our end.
	 * Normally, when the front driver is removed, the backend will
	 * eventually go into the XenbusStateInitWait state.
	 *
	 * Workaround: read the backend's state manually and wait with a
	 * timeout.
	 */
	while ((xenbus_read_unsigned(front_info->xb_dev->otherend, "state",
				     XenbusStateUnknown) != XenbusStateInitWait) &&
	       --to)
		msleep(10);

	if (!to) {
		unsigned int state;

		state = xenbus_read_unsigned(front_info->xb_dev->otherend,
					     "state", XenbusStateUnknown);
		DRM_ERROR("Backend state is %s while removing driver\n",
			  xenbus_strstate(state));
	}

	xen_drm_drv_fini(front_info);
	xenbus_frontend_closed(dev);
	return 0;
}

static const struct xenbus_device_id xen_driver_ids[] = {
	{ XENDISPL_DRIVER_NAME },
	{ "" }
};

static struct xenbus_driver xen_driver = {
	.ids = xen_driver_ids,
	.probe = xen_drv_probe,
	.remove = xen_drv_remove,
	.otherend_changed = displback_changed,
};

static int __init xen_drv_init(void)
{
	/* At the moment we only support the case of XEN_PAGE_SIZE == PAGE_SIZE */
	if (XEN_PAGE_SIZE != PAGE_SIZE) {
		DRM_ERROR(XENDISPL_DRIVER_NAME ": different kernel and Xen page sizes are not supported: XEN_PAGE_SIZE (%lu) != PAGE_SIZE (%lu)\n",
			  XEN_PAGE_SIZE, PAGE_SIZE);
		return -ENODEV;
	}

	if (!xen_domain())
		return -ENODEV;

	if (!xen_has_pv_devices())
		return -ENODEV;

	DRM_INFO("Registering XEN PV " XENDISPL_DRIVER_NAME "\n");
	return xenbus_register_frontend(&xen_driver);
}

static void __exit xen_drv_fini(void)
{
	DRM_INFO("Unregistering XEN PV " XENDISPL_DRIVER_NAME "\n");
	xenbus_unregister_driver(&xen_driver);
}

module_init(xen_drv_init);
module_exit(xen_drv_fini);

MODULE_DESCRIPTION("Xen para-virtualized display device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:" XENDISPL_DRIVER_NAME);