// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2016 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/objtool.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/cc_platform.h>

#include <asm/hypervisor.h>
#include <drm/drm_ioctl.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_msg_x86.h"
#include "vmwgfx_msg_arm64.h"
#include "vmwgfx_mksstat.h"

#define MESSAGE_STATUS_SUCCESS  0x0001
#define MESSAGE_STATUS_DORECV   0x0002
#define MESSAGE_STATUS_CPT      0x0010
#define MESSAGE_STATUS_HB       0x0080

#define RPCI_PROTOCOL_NUM       0x49435052
#define GUESTMSG_FLAG_COOKIE    0x80000000

#define RETRIES                 3

#define VMW_HYPERVISOR_MAGIC    0x564D5868

#define VMW_PORT_CMD_MSG        30
#define VMW_PORT_CMD_HB_MSG     0
#define VMW_PORT_CMD_OPEN_CHANNEL  (MSG_TYPE_OPEN << 16 | VMW_PORT_CMD_MSG)
#define VMW_PORT_CMD_CLOSE_CHANNEL (MSG_TYPE_CLOSE << 16 | VMW_PORT_CMD_MSG)
#define VMW_PORT_CMD_SENDSIZE   (MSG_TYPE_SENDSIZE << 16 | VMW_PORT_CMD_MSG)
#define VMW_PORT_CMD_RECVSIZE   (MSG_TYPE_RECVSIZE << 16 | VMW_PORT_CMD_MSG)
#define VMW_PORT_CMD_RECVSTATUS (MSG_TYPE_RECVSTATUS << 16 | VMW_PORT_CMD_MSG)

#define VMW_PORT_CMD_MKS_GUEST_STATS   85
#define VMW_PORT_CMD_MKSGS_RESET       (0 << 16 | VMW_PORT_CMD_MKS_GUEST_STATS)
#define VMW_PORT_CMD_MKSGS_ADD_PPN     (1 << 16 | VMW_PORT_CMD_MKS_GUEST_STATS)
#define VMW_PORT_CMD_MKSGS_REMOVE_PPN  (2 << 16 | VMW_PORT_CMD_MKS_GUEST_STATS)

#define HIGH_WORD(X) ((X & 0xFFFF0000) >> 16)

#define MAX_USER_MSG_LENGTH PAGE_SIZE

static u32 vmw_msg_enabled = 1;

enum rpc_msg_type {
	MSG_TYPE_OPEN,
	MSG_TYPE_SENDSIZE,
	MSG_TYPE_SENDPAYLOAD,
	MSG_TYPE_RECVSIZE,
	MSG_TYPE_RECVPAYLOAD,
	MSG_TYPE_RECVSTATUS,
	MSG_TYPE_CLOSE,
};

struct rpc_channel {
	u16 channel_id;
	u32 cookie_high;
	u32 cookie_low;
};

#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS)
/* Kernel mksGuestStats counter names and descriptions; same order as enum mksstat_kern_stats_t */
static const char* const mksstat_kern_name_desc[MKSSTAT_KERN_COUNT][2] =
{
	{ "vmw_execbuf_ioctl", "vmw_execbuf_ioctl" },
	{ "vmw_cotable_resize", "vmw_cotable_resize" },
};
#endif
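/*
 * The helpers below implement the guest side of the VMware GuestRPC
 * ("backdoor") protocol. A complete exchange, as driven by e.g.
 * vmw_host_get_guestinfo() further down, follows this rough sketch:
 *
 *	struct rpc_channel channel;
 *
 *	vmw_open_channel(&channel, RPCI_PROTOCOL_NUM);
 *	vmw_send_msg(&channel, "info-get GuestInfo.svga.gl3");
 *	vmw_recv_msg(&channel, &reply, &reply_len);
 *	vmw_close_channel(&channel);
 *
 * Each step is a VMW_PORT (or VMW_PORT_HB_*) hypercall against
 * VMW_HYPERVISOR_MAGIC; the channel id and the two cookies returned by
 * the open call identify the channel on all subsequent calls.
 */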
/**
 * vmw_open_channel: Opens a new RPC channel to the host.
 *
 * @channel: RPC channel
 * @protocol: GuestRPC protocol to open, e.g. RPCI_PROTOCOL_NUM
 *
 * Returns: 0 on success
 */
static int vmw_open_channel(struct rpc_channel *channel, unsigned int protocol)
{
	unsigned long eax, ebx, ecx, edx, si = 0, di = 0;

	VMW_PORT(VMW_PORT_CMD_OPEN_CHANNEL,
		(protocol | GUESTMSG_FLAG_COOKIE), si, di,
		0,
		VMW_HYPERVISOR_MAGIC,
		eax, ebx, ecx, edx, si, di);

	if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
		return -EINVAL;

	channel->channel_id = HIGH_WORD(edx);
	channel->cookie_high = si;
	channel->cookie_low = di;

	return 0;
}

/**
 * vmw_close_channel
 *
 * @channel: RPC channel
 *
 * Returns: 0 on success
 */
static int vmw_close_channel(struct rpc_channel *channel)
{
	unsigned long eax, ebx, ecx, edx, si, di;

	/* Set up additional parameters */
	si = channel->cookie_high;
	di = channel->cookie_low;

	VMW_PORT(VMW_PORT_CMD_CLOSE_CHANNEL,
		0, si, di,
		channel->channel_id << 16,
		VMW_HYPERVISOR_MAGIC,
		eax, ebx, ecx, edx, si, di);

	if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
		return -EINVAL;

	return 0;
}

/**
 * vmw_port_hb_out - Send the message payload either through the
 * high-bandwidth port if available, or through the backdoor otherwise.
 * @channel: The rpc channel.
 * @msg: NULL-terminated message.
 * @hb: Whether the high-bandwidth port is available.
 *
 * Return: The port status.
 */
static unsigned long vmw_port_hb_out(struct rpc_channel *channel,
				     const char *msg, bool hb)
{
	unsigned long si, di, eax, ebx, ecx, edx;
	unsigned long msg_len = strlen(msg);

	/* HB port can't access encrypted memory. */
	if (hb && !cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
		unsigned long bp = channel->cookie_high;
		u32 channel_id = (channel->channel_id << 16);

		si = (uintptr_t) msg;
		di = channel->cookie_low;

		VMW_PORT_HB_OUT(
			(MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
			msg_len, si, di,
			VMWARE_HYPERVISOR_HB | channel_id |
			VMWARE_HYPERVISOR_OUT,
			VMW_HYPERVISOR_MAGIC, bp,
			eax, ebx, ecx, edx, si, di);

		return ebx;
	}

	/* HB port not available. Send the message 4 bytes at a time. */
	ecx = MESSAGE_STATUS_SUCCESS << 16;
	while (msg_len && (HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS)) {
		unsigned int bytes = min_t(size_t, msg_len, 4);
		unsigned long word = 0;

		memcpy(&word, msg, bytes);
		msg_len -= bytes;
		msg += bytes;
		si = channel->cookie_high;
		di = channel->cookie_low;

		VMW_PORT(VMW_PORT_CMD_MSG | (MSG_TYPE_SENDPAYLOAD << 16),
			 word, si, di,
			 channel->channel_id << 16,
			 VMW_HYPERVISOR_MAGIC,
			 eax, ebx, ecx, edx, si, di);
	}

	return ecx;
}
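/*
 * Worked example for the backdoor fallback above: a 10-byte payload
 * goes out in three VMW_PORT calls carrying 4, 4 and 2 bytes, each
 * memcpy()'d into the low bytes of the 'word' register argument. The
 * host reports status in the high word of %ecx after every call, so
 * the loop stops early if a transfer fails.
 */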
/**
 * vmw_port_hb_in - Receive the message payload either through the
 * high-bandwidth port if available, or through the backdoor otherwise.
 * @channel: The rpc channel.
 * @reply: Pointer to buffer holding reply.
 * @reply_len: Length of the reply.
 * @hb: Whether the high-bandwidth port is available.
 *
 * Return: The port status.
 */
static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply,
				    unsigned long reply_len, bool hb)
{
	unsigned long si, di, eax, ebx, ecx, edx;

	/* HB port can't access encrypted memory */
	if (hb && !cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
		unsigned long bp = channel->cookie_low;
		u32 channel_id = (channel->channel_id << 16);

		si = channel->cookie_high;
		di = (uintptr_t) reply;

		VMW_PORT_HB_IN(
			(MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
			reply_len, si, di,
			VMWARE_HYPERVISOR_HB | channel_id,
			VMW_HYPERVISOR_MAGIC, bp,
			eax, ebx, ecx, edx, si, di);

		return ebx;
	}

	/* HB port not available. Retrieve the message 4 bytes at a time. */
	ecx = MESSAGE_STATUS_SUCCESS << 16;
	while (reply_len) {
		unsigned int bytes = min_t(unsigned long, reply_len, 4);

		si = channel->cookie_high;
		di = channel->cookie_low;

		VMW_PORT(VMW_PORT_CMD_MSG | (MSG_TYPE_RECVPAYLOAD << 16),
			 MESSAGE_STATUS_SUCCESS, si, di,
			 channel->channel_id << 16,
			 VMW_HYPERVISOR_MAGIC,
			 eax, ebx, ecx, edx, si, di);

		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
			break;

		memcpy(reply, &ebx, bytes);
		reply_len -= bytes;
		reply += bytes;
	}

	return ecx;
}

/**
 * vmw_send_msg: Sends a message to the host
 *
 * @channel: RPC channel
 * @msg: NULL-terminated string
 *
 * Returns: 0 on success
 */
static int vmw_send_msg(struct rpc_channel *channel, const char *msg)
{
	unsigned long eax, ebx, ecx, edx, si, di;
	size_t msg_len = strlen(msg);
	int retries = 0;

	while (retries < RETRIES) {
		retries++;

		/* Set up additional parameters */
		si = channel->cookie_high;
		di = channel->cookie_low;

		VMW_PORT(VMW_PORT_CMD_SENDSIZE,
			 msg_len, si, di,
			 channel->channel_id << 16,
			 VMW_HYPERVISOR_MAGIC,
			 eax, ebx, ecx, edx, si, di);

		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
			/* Expected success. Give up. */
			return -EINVAL;
		}

		/* Send msg */
		ebx = vmw_port_hb_out(channel, msg,
				      !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));

		if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) != 0) {
			return 0;
		} else if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) {
			/* A checkpoint occurred. Retry. */
			continue;
		} else {
			break;
		}
	}

	return -EINVAL;
}
STACK_FRAME_NON_STANDARD(vmw_send_msg);
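/*
 * Note on the RETRIES loops here and in vmw_recv_msg() below: if the VM
 * is checkpointed (snapshotted or migrated) in the middle of an RPC,
 * the host flags MESSAGE_STATUS_CPT and the partially transferred
 * message must be resent from scratch; any other failure is treated as
 * fatal after at most RETRIES attempts.
 */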
/**
 * vmw_recv_msg: Receives a message from the host
 *
 * Note: It is the caller's responsibility to call kfree() on msg.
 *
 * @channel: channel opened by vmw_open_channel
 * @msg: [OUT] message received from the host
 * @msg_len: message length
 *
 * Returns: 0 on success, negative error code on failure
 */
static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
			size_t *msg_len)
{
	unsigned long eax, ebx, ecx, edx, si, di;
	char *reply;
	size_t reply_len;
	int retries = 0;

	*msg_len = 0;
	*msg = NULL;

	while (retries < RETRIES) {
		retries++;

		/* Set up additional parameters */
		si = channel->cookie_high;
		di = channel->cookie_low;

		VMW_PORT(VMW_PORT_CMD_RECVSIZE,
			 0, si, di,
			 channel->channel_id << 16,
			 VMW_HYPERVISOR_MAGIC,
			 eax, ebx, ecx, edx, si, di);

		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
			DRM_ERROR("Failed to get reply size for host message.\n");
			return -EINVAL;
		}

		/* No reply available. This is okay. */
		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_DORECV) == 0)
			return 0;

		reply_len = ebx;
		reply = kzalloc(reply_len + 1, GFP_KERNEL);
		if (!reply) {
			DRM_ERROR("Cannot allocate memory for host message reply.\n");
			return -ENOMEM;
		}

		/* Receive buffer */
		ebx = vmw_port_hb_in(channel, reply, reply_len,
				     !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));
		if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) == 0) {
			kfree(reply);
			reply = NULL;
			if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) {
				/* A checkpoint occurred. Retry. */
				continue;
			}

			return -EINVAL;
		}

		reply[reply_len] = '\0';

		/* Ack buffer */
		si = channel->cookie_high;
		di = channel->cookie_low;

		VMW_PORT(VMW_PORT_CMD_RECVSTATUS,
			 MESSAGE_STATUS_SUCCESS, si, di,
			 channel->channel_id << 16,
			 VMW_HYPERVISOR_MAGIC,
			 eax, ebx, ecx, edx, si, di);

		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
			kfree(reply);
			reply = NULL;
			if ((HIGH_WORD(ecx) & MESSAGE_STATUS_CPT) != 0) {
				/* A checkpoint occurred. Retry. */
				continue;
			}

			return -EINVAL;
		}

		break;
	}

	if (!reply)
		return -EINVAL;

	*msg_len = reply_len;
	*msg = reply;

	return 0;
}
STACK_FRAME_NON_STANDARD(vmw_recv_msg);

/**
 * vmw_host_get_guestinfo: Gets a GuestInfo parameter
 *
 * Gets the value of a GuestInfo.* parameter. The value returned will be in
 * a string, and it is up to the caller to post-process.
 *
 * @guest_info_param: Parameter to get, e.g. GuestInfo.svga.gl3
 * @buffer: If NULL, *length will contain the reply size.
 * @length: Size of @buffer. Set to size of reply upon return.
 *
 * Returns: 0 on success
 */
int vmw_host_get_guestinfo(const char *guest_info_param,
			   char *buffer, size_t *length)
{
	struct rpc_channel channel;
	char *msg, *reply = NULL;
	size_t reply_len = 0;

	if (!vmw_msg_enabled)
		return -ENODEV;

	if (!guest_info_param || !length)
		return -EINVAL;

	msg = kasprintf(GFP_KERNEL, "info-get %s", guest_info_param);
	if (!msg) {
		DRM_ERROR("Cannot allocate memory to get guest info \"%s\".",
			  guest_info_param);
		return -ENOMEM;
	}

	if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM))
		goto out_open;

	if (vmw_send_msg(&channel, msg) ||
	    vmw_recv_msg(&channel, (void *) &reply, &reply_len))
		goto out_msg;

	vmw_close_channel(&channel);
	if (buffer && reply && reply_len > 0) {
		/* Remove reply code, which are the first 2 characters of
		 * the reply
		 */
		reply_len = max(reply_len - 2, (size_t) 0);
		reply_len = min(reply_len, *length);

		if (reply_len > 0)
			memcpy(buffer, reply + 2, reply_len);
	}

	*length = reply_len;

	kfree(reply);
	kfree(msg);

	return 0;

out_msg:
	vmw_close_channel(&channel);
	kfree(reply);
out_open:
	*length = 0;
	kfree(msg);
	DRM_ERROR("Failed to get guest info \"%s\".", guest_info_param);

	return -EINVAL;
}
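/*
 * Minimal usage sketch for the above (the parameter name is just the
 * example from the kernel-doc; any GuestInfo.* key the host knows
 * about works the same way). Note the reply is not NUL-terminated by
 * this function, hence the length-bounded print:
 *
 *	char value[64];
 *	size_t len = sizeof(value);
 *
 *	if (!vmw_host_get_guestinfo("GuestInfo.svga.gl3", value, &len))
 *		DRM_INFO("gl3: %.*s\n", (int)len, value);
 */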
/**
 * vmw_host_printf: Sends a log message to the host
 *
 * @fmt: Regular printf format string and arguments
 *
 * Returns: 0 on success
 */
__printf(1, 2)
int vmw_host_printf(const char *fmt, ...)
{
	va_list ap;
	struct rpc_channel channel;
	char *msg;
	char *log;
	int ret = 0;

	if (!vmw_msg_enabled)
		return -ENODEV;

	if (!fmt)
		return ret;

	va_start(ap, fmt);
	log = kvasprintf(GFP_KERNEL, fmt, ap);
	va_end(ap);
	if (!log) {
		DRM_ERROR("Cannot allocate memory for the log message.\n");
		return -ENOMEM;
	}

	msg = kasprintf(GFP_KERNEL, "log %s", log);
	if (!msg) {
		DRM_ERROR("Cannot allocate memory for host log message.\n");
		kfree(log);
		return -ENOMEM;
	}

	if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM))
		goto out_open;

	if (vmw_send_msg(&channel, msg))
		goto out_msg;

	vmw_close_channel(&channel);
	kfree(msg);
	kfree(log);

	return 0;

out_msg:
	vmw_close_channel(&channel);
out_open:
	kfree(msg);
	kfree(log);
	DRM_ERROR("Failed to send host log message.\n");

	return -EINVAL;
}

/**
 * vmw_msg_ioctl: Sends and receives a message to/from the host on behalf of
 * user-space
 *
 * Sends a message from user-space to the host.
 * Can also receive a result from the host and return that to user-space.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 */
int vmw_msg_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	struct drm_vmw_msg_arg *arg =
		(struct drm_vmw_msg_arg *)data;
	struct rpc_channel channel;
	char *msg;
	int length;

	msg = kmalloc(MAX_USER_MSG_LENGTH, GFP_KERNEL);
	if (!msg) {
		DRM_ERROR("Cannot allocate memory for log message.\n");
		return -ENOMEM;
	}

	length = strncpy_from_user(msg, (void __user *)((unsigned long)arg->send),
				   MAX_USER_MSG_LENGTH);
	if (length < 0 || length >= MAX_USER_MSG_LENGTH) {
		DRM_ERROR("Userspace message access failure.\n");
		kfree(msg);
		return -EINVAL;
	}

	if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM)) {
		DRM_ERROR("Failed to open channel.\n");
		goto out_open;
	}

	if (vmw_send_msg(&channel, msg)) {
		DRM_ERROR("Failed to send message to host.\n");
		goto out_msg;
	}

	if (!arg->send_only) {
		char *reply = NULL;
		size_t reply_len = 0;

		if (vmw_recv_msg(&channel, (void *) &reply, &reply_len)) {
			DRM_ERROR("Failed to receive message from host.\n");
			goto out_msg;
		}
		if (reply && reply_len > 0) {
			if (copy_to_user((void __user *)((unsigned long)arg->receive),
					 reply, reply_len)) {
				DRM_ERROR("Failed to copy message to userspace.\n");
				kfree(reply);
				goto out_msg;
			}
			arg->receive_len = (__u32)reply_len;
		}
		kfree(reply);
	}

	vmw_close_channel(&channel);
	kfree(msg);

	return 0;

out_msg:
	vmw_close_channel(&channel);
out_open:
	kfree(msg);

	return -EINVAL;
}
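/*
 * Rough userspace view of this ioctl (a sketch, not authoritative
 * interface documentation -- the layout of struct drm_vmw_msg_arg is
 * defined in include/uapi/drm/vmwgfx_drm.h):
 *
 *	struct drm_vmw_msg_arg arg = {};
 *
 *	arg.send = (uintptr_t)"log hello from the guest";
 *	arg.send_only = 1;
 *	drmCommandWriteRead(fd, DRM_VMW_MSG, &arg, sizeof(arg));
 *
 * With send_only == 0 and arg.receive pointing at a buffer, the host's
 * reply is copied back out and its length returned in arg.receive_len.
 */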
/**
 * reset_ppn_array: Resets a PPN64 array to INVALID_PPN64 content
 *
 * @arr: Array to reset.
 * @size: Array length.
 */
static inline void reset_ppn_array(PPN64 *arr, size_t size)
{
	size_t i;

	BUG_ON(!arr || size == 0);

	for (i = 0; i < size; ++i)
		arr[i] = INVALID_PPN64;
}

/**
 * hypervisor_ppn_reset_all: Removes all mksGuestStat instance descriptors from
 * the hypervisor. All related pages should be subsequently unpinned or freed.
 */
static inline void hypervisor_ppn_reset_all(void)
{
	unsigned long eax, ebx, ecx, edx, si = 0, di = 0;

	VMW_PORT(VMW_PORT_CMD_MKSGS_RESET,
		 0, si, di,
		 0,
		 VMW_HYPERVISOR_MAGIC,
		 eax, ebx, ecx, edx, si, di);
}

/**
 * hypervisor_ppn_add: Adds a single mksGuestStat instance descriptor to the
 * hypervisor. Any related userspace pages should be pinned in advance.
 *
 * @pfn: Physical page number of the instance descriptor
 */
static inline void hypervisor_ppn_add(PPN64 pfn)
{
	unsigned long eax, ebx, ecx, edx, si = 0, di = 0;

	VMW_PORT(VMW_PORT_CMD_MKSGS_ADD_PPN,
		 (unsigned long)pfn, si, di,
		 0,
		 VMW_HYPERVISOR_MAGIC,
		 eax, ebx, ecx, edx, si, di);
}

/**
 * hypervisor_ppn_remove: Removes a single mksGuestStat instance descriptor from
 * the hypervisor. All related pages should be subsequently unpinned or freed.
 *
 * @pfn: Physical page number of the instance descriptor
 */
static inline void hypervisor_ppn_remove(PPN64 pfn)
{
	unsigned long eax, ebx, ecx, edx, si = 0, di = 0;

	VMW_PORT(VMW_PORT_CMD_MKSGS_REMOVE_PPN,
		 (unsigned long)pfn, si, di,
		 0,
		 VMW_HYPERVISOR_MAGIC,
		 eax, ebx, ecx, edx, si, di);
}

#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS)

/* Order of the total number of pages used for kernel-internal mksGuestStat; at least 2 */
#define MKSSTAT_KERNEL_PAGES_ORDER 2
/* Header to the text description of mksGuestStat instance descriptor */
#define MKSSTAT_KERNEL_DESCRIPTION "vmwgfx"
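/*
 * Both mksstat_init_record*() helpers below fill one MKSGuestStatInfoEntry
 * and append the record's name and description to a shared string blob as
 * consecutive NUL-terminated strings, e.g.:
 *
 *	"vmw_execbuf_ioctl\0vmw_execbuf_ioctl\0vmw_cotable_resize\0..."
 *	 ^name.s            ^description.s     ^next record's name.s
 *
 * The returned pointer is the new end of the blob, ready for the next
 * record, which is why the calls in mksstat_init_kern_id() chain their
 * pstrs_acc argument.
 */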
/**
 * mksstat_init_record: Initializes an MKSGuestStatCounter-based record
 * for the respective mksGuestStat index.
 *
 * @stat_idx: Index of the MKSGuestStatCounter-based mksGuestStat record.
 * @pstat: Pointer to array of MKSGuestStatCounterTime.
 * @pinfo: Pointer to array of MKSGuestStatInfoEntry.
 * @pstrs: Pointer to current end of the name/description sequence.
 * Return: Pointer to the new end of the names/description sequence.
 */
static inline char *mksstat_init_record(mksstat_kern_stats_t stat_idx,
	MKSGuestStatCounterTime *pstat, MKSGuestStatInfoEntry *pinfo, char *pstrs)
{
	char *const pstrd = pstrs + strlen(mksstat_kern_name_desc[stat_idx][0]) + 1;
	strcpy(pstrs, mksstat_kern_name_desc[stat_idx][0]);
	strcpy(pstrd, mksstat_kern_name_desc[stat_idx][1]);

	pinfo[stat_idx].name.s = pstrs;
	pinfo[stat_idx].description.s = pstrd;
	pinfo[stat_idx].flags = MKS_GUEST_STAT_FLAG_NONE;
	pinfo[stat_idx].stat.counter = (MKSGuestStatCounter *)&pstat[stat_idx];

	return pstrd + strlen(mksstat_kern_name_desc[stat_idx][1]) + 1;
}

/**
 * mksstat_init_record_time: Initializes an MKSGuestStatCounterTime-based record
 * for the respective mksGuestStat index.
 *
 * @stat_idx: Index of the MKSGuestStatCounterTime-based mksGuestStat record.
 * @pstat: Pointer to array of MKSGuestStatCounterTime.
 * @pinfo: Pointer to array of MKSGuestStatInfoEntry.
 * @pstrs: Pointer to current end of the name/description sequence.
 * Return: Pointer to the new end of the names/description sequence.
 */
static inline char *mksstat_init_record_time(mksstat_kern_stats_t stat_idx,
	MKSGuestStatCounterTime *pstat, MKSGuestStatInfoEntry *pinfo, char *pstrs)
{
	char *const pstrd = pstrs + strlen(mksstat_kern_name_desc[stat_idx][0]) + 1;
	strcpy(pstrs, mksstat_kern_name_desc[stat_idx][0]);
	strcpy(pstrd, mksstat_kern_name_desc[stat_idx][1]);

	pinfo[stat_idx].name.s = pstrs;
	pinfo[stat_idx].description.s = pstrd;
	pinfo[stat_idx].flags = MKS_GUEST_STAT_FLAG_TIME;
	pinfo[stat_idx].stat.counterTime = &pstat[stat_idx];

	return pstrd + strlen(mksstat_kern_name_desc[stat_idx][1]) + 1;
}

/**
 * mksstat_init_kern_id: Creates a single mksGuestStat instance descriptor and
 * kernel-internal counters. Adds PFN mapping to the hypervisor.
 *
 * Create a single mksGuestStat instance descriptor and corresponding structures
 * for all kernel-internal counters. The corresponding PFNs are mapped with the
 * hypervisor.
 *
 * @ppage: Output pointer to page containing the instance descriptor.
 * Return: Zero on success, negative error code on error.
 */
static int mksstat_init_kern_id(struct page **ppage)
{
	MKSGuestStatInstanceDescriptor *pdesc;
	MKSGuestStatCounterTime *pstat;
	MKSGuestStatInfoEntry *pinfo;
	char *pstrs, *pstrs_acc;

	/* Allocate pages for the kernel-internal instance descriptor */
	struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, MKSSTAT_KERNEL_PAGES_ORDER);

	if (!page)
		return -ENOMEM;

	pdesc = page_address(page);
	pstat = vmw_mksstat_get_kern_pstat(pdesc);
	pinfo = vmw_mksstat_get_kern_pinfo(pdesc);
	pstrs = vmw_mksstat_get_kern_pstrs(pdesc);

	/* Set up all kernel-internal counters and corresponding structures */
	pstrs_acc = pstrs;
	pstrs_acc = mksstat_init_record_time(MKSSTAT_KERN_EXECBUF, pstat, pinfo, pstrs_acc);
	pstrs_acc = mksstat_init_record_time(MKSSTAT_KERN_COTABLE_RESIZE, pstat, pinfo, pstrs_acc);

	/* Add new counters above, in their order of appearance in mksstat_kern_stats_t */

	BUG_ON(pstrs_acc - pstrs > PAGE_SIZE);

	/* Set up the kernel-internal instance descriptor */
	pdesc->reservedMBZ = 0;
	pdesc->statStartVA = (uintptr_t)pstat;
	pdesc->strsStartVA = (uintptr_t)pstrs;
	pdesc->statLength = sizeof(*pstat) * MKSSTAT_KERN_COUNT;
	pdesc->infoLength = sizeof(*pinfo) * MKSSTAT_KERN_COUNT;
	pdesc->strsLength = pstrs_acc - pstrs;
	snprintf(pdesc->description, ARRAY_SIZE(pdesc->description) - 1, "%s pid=%d",
		 MKSSTAT_KERNEL_DESCRIPTION, current->pid);

	pdesc->statPPNs[0] = page_to_pfn(virt_to_page(pstat));
	reset_ppn_array(pdesc->statPPNs + 1, ARRAY_SIZE(pdesc->statPPNs) - 1);

	pdesc->infoPPNs[0] = page_to_pfn(virt_to_page(pinfo));
	reset_ppn_array(pdesc->infoPPNs + 1, ARRAY_SIZE(pdesc->infoPPNs) - 1);

	pdesc->strsPPNs[0] = page_to_pfn(virt_to_page(pstrs));
	reset_ppn_array(pdesc->strsPPNs + 1, ARRAY_SIZE(pdesc->strsPPNs) - 1);

	*ppage = page;

	hypervisor_ppn_add((PPN64)page_to_pfn(page));

	return 0;
}
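/*
 * Sketch of the allocation carved up above (the authoritative offsets
 * are the vmw_mksstat_get_kern_*() helpers in vmwgfx_mksstat.h; this
 * assumes the current one-page-per-region layout, which is why
 * MKSSTAT_KERNEL_PAGES_ORDER must cover the descriptor plus the three
 * regions):
 *
 *	page 0: MKSGuestStatInstanceDescriptor (pdesc)
 *	page 1: MKSGuestStatCounterTime array  (pstat)
 *	page 2: MKSGuestStatInfoEntry array    (pinfo)
 *	page 3: name/description string blob   (pstrs)
 */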
/**
 * vmw_mksstat_get_kern_slot: Acquires a slot for a single kernel-internal
 * mksGuestStat instance descriptor.
 *
 * Find the slot holding this pid's kernel-internal mksGuestStat instance
 * descriptor. If none is present yet, reserve a free slot and set up a new
 * kernel-internal mksGuestStat instance descriptor for it.
 *
 * @pid: Process for which a slot is sought.
 * @dev_priv: Identifies the drm private device.
 * Return: Non-negative slot on success, negative error code on error.
 */
int vmw_mksstat_get_kern_slot(pid_t pid, struct vmw_private *dev_priv)
{
	const size_t base = (u32)hash_32(pid, MKSSTAT_CAPACITY_LOG2);
	size_t i;

	for (i = 0; i < ARRAY_SIZE(dev_priv->mksstat_kern_pids); ++i) {
		const size_t slot = (i + base) % ARRAY_SIZE(dev_priv->mksstat_kern_pids);

		/* Check if an instance descriptor for this pid is already present */
		if (pid == (pid_t)atomic_read(&dev_priv->mksstat_kern_pids[slot]))
			return (int)slot;

		/* Set up a new instance descriptor for this pid */
		if (!atomic_cmpxchg(&dev_priv->mksstat_kern_pids[slot], 0, MKSSTAT_PID_RESERVED)) {
			const int ret = mksstat_init_kern_id(&dev_priv->mksstat_kern_pages[slot]);

			if (!ret) {
				/* Reset top-timer tracking for this slot */
				dev_priv->mksstat_kern_top_timer[slot] = MKSSTAT_KERN_COUNT;

				atomic_set(&dev_priv->mksstat_kern_pids[slot], pid);
				return (int)slot;
			}

			atomic_set(&dev_priv->mksstat_kern_pids[slot], 0);
			return ret;
		}
	}

	return -ENOSPC;
}

#endif

/**
 * vmw_mksstat_cleanup_descriptor: Frees a single userspace-originating
 * mksGuestStat instance-descriptor page and unpins all related user pages.
 *
 * Unpin all user pages related to this instance descriptor and free
 * the instance-descriptor page itself.
 *
 * @page: Page of the instance descriptor.
 */
static void vmw_mksstat_cleanup_descriptor(struct page *page)
{
	MKSGuestStatInstanceDescriptor *pdesc = page_address(page);
	size_t i;

	for (i = 0; i < ARRAY_SIZE(pdesc->statPPNs) && pdesc->statPPNs[i] != INVALID_PPN64; ++i)
		unpin_user_page(pfn_to_page(pdesc->statPPNs[i]));

	for (i = 0; i < ARRAY_SIZE(pdesc->infoPPNs) && pdesc->infoPPNs[i] != INVALID_PPN64; ++i)
		unpin_user_page(pfn_to_page(pdesc->infoPPNs[i]));

	for (i = 0; i < ARRAY_SIZE(pdesc->strsPPNs) && pdesc->strsPPNs[i] != INVALID_PPN64; ++i)
		unpin_user_page(pfn_to_page(pdesc->strsPPNs[i]));

	__free_page(page);
}
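/*
 * Slot ownership in the mksstat_{kern,user}_pids arrays follows a small
 * lock-free state machine, relied upon by vmw_mksstat_get_kern_slot()
 * above and by vmw_mksstat_remove_all() and the add/remove ioctls below:
 * 0 means the slot is free, MKSSTAT_PID_RESERVED means it is being set
 * up or torn down, and any other value is the owning pid. All
 * transitions go through atomic_cmpxchg(), so the loser of a race
 * either moves on to another slot or reports -EAGAIN.
 */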
/**
 * vmw_mksstat_remove_all: Resets all mksGuestStat instance descriptors
 * from the hypervisor.
 *
 * Discard all hypervisor PFN mappings, containing active mksGuestStat instance
 * descriptors, unpin the related userspace pages and free the related kernel pages.
 *
 * @dev_priv: Identifies the drm private device.
 * Return: Zero on success, negative error code on error.
 */
int vmw_mksstat_remove_all(struct vmw_private *dev_priv)
{
	int ret = 0;
	size_t i;

	/* Discard all PFN mappings with the hypervisor */
	hypervisor_ppn_reset_all();

	/* Discard all userspace-originating instance descriptors and unpin all related pages */
	for (i = 0; i < ARRAY_SIZE(dev_priv->mksstat_user_pids); ++i) {
		const pid_t pid0 = (pid_t)atomic_read(&dev_priv->mksstat_user_pids[i]);

		if (!pid0)
			continue;

		if (pid0 != MKSSTAT_PID_RESERVED) {
			const pid_t pid1 = atomic_cmpxchg(&dev_priv->mksstat_user_pids[i], pid0, MKSSTAT_PID_RESERVED);

			if (!pid1)
				continue;

			if (pid1 == pid0) {
				struct page *const page = dev_priv->mksstat_user_pages[i];

				BUG_ON(!page);

				dev_priv->mksstat_user_pages[i] = NULL;
				atomic_set(&dev_priv->mksstat_user_pids[i], 0);

				vmw_mksstat_cleanup_descriptor(page);
				continue;
			}
		}

		ret = -EAGAIN;
	}

#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS)
	/* Discard all kernel-internal instance descriptors and free all related pages */
	for (i = 0; i < ARRAY_SIZE(dev_priv->mksstat_kern_pids); ++i) {
		const pid_t pid0 = (pid_t)atomic_read(&dev_priv->mksstat_kern_pids[i]);

		if (!pid0)
			continue;

		if (pid0 != MKSSTAT_PID_RESERVED) {
			const pid_t pid1 = atomic_cmpxchg(&dev_priv->mksstat_kern_pids[i], pid0, MKSSTAT_PID_RESERVED);

			if (!pid1)
				continue;

			if (pid1 == pid0) {
				struct page *const page = dev_priv->mksstat_kern_pages[i];

				BUG_ON(!page);

				dev_priv->mksstat_kern_pages[i] = NULL;
				atomic_set(&dev_priv->mksstat_kern_pids[i], 0);

				__free_pages(page, MKSSTAT_KERNEL_PAGES_ORDER);
				continue;
			}
		}

		ret = -EAGAIN;
	}

#endif
	return ret;
}

/**
 * vmw_mksstat_reset_ioctl: Resets all mksGuestStat instance descriptors
 * from the hypervisor.
 *
 * Discard all hypervisor PFN mappings, containing active mksGuestStat instance
 * descriptors, unpin the related userspace pages and free the related kernel pages.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller; unused.
 * Return: Zero on success, negative error code on error.
 */
int vmw_mksstat_reset_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct vmw_private *const dev_priv = vmw_priv(dev);
	return vmw_mksstat_remove_all(dev_priv);
}
/**
 * vmw_mksstat_add_ioctl: Creates a single userspace-originating mksGuestStat
 * instance descriptor and registers that with the hypervisor.
 *
 * Create a hypervisor PFN mapping, containing a single mksGuestStat instance
 * descriptor and pin the corresponding userspace pages.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller; unused.
 * Return: Zero on success, negative error code on error.
 */
int vmw_mksstat_add_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_vmw_mksstat_add_arg *arg =
		(struct drm_vmw_mksstat_add_arg *) data;

	struct vmw_private *const dev_priv = vmw_priv(dev);

	const size_t num_pages_stat = PFN_UP(arg->stat_len);
	const size_t num_pages_info = PFN_UP(arg->info_len);
	const size_t num_pages_strs = PFN_UP(arg->strs_len);
	long desc_len;
	long nr_pinned_stat;
	long nr_pinned_info;
	long nr_pinned_strs;
	MKSGuestStatInstanceDescriptor *pdesc;
	struct page *page = NULL;
	struct page **pages_stat = NULL;
	struct page **pages_info = NULL;
	struct page **pages_strs = NULL;
	size_t i, slot;
	int ret_err = -ENOMEM;

	arg->id = -1;

	if (!arg->stat || !arg->info || !arg->strs)
		return -EINVAL;

	if (!arg->stat_len || !arg->info_len || !arg->strs_len)
		return -EINVAL;

	if (!arg->description)
		return -EINVAL;

	if (num_pages_stat > ARRAY_SIZE(pdesc->statPPNs) ||
	    num_pages_info > ARRAY_SIZE(pdesc->infoPPNs) ||
	    num_pages_strs > ARRAY_SIZE(pdesc->strsPPNs))
		return -EINVAL;

	/* Find an available slot in the mksGuestStats user array and reserve it */
	for (slot = 0; slot < ARRAY_SIZE(dev_priv->mksstat_user_pids); ++slot)
		if (!atomic_cmpxchg(&dev_priv->mksstat_user_pids[slot], 0, MKSSTAT_PID_RESERVED))
			break;

	if (slot == ARRAY_SIZE(dev_priv->mksstat_user_pids))
		return -ENOSPC;

	BUG_ON(dev_priv->mksstat_user_pages[slot]);

	/* Allocate statically-sized temp arrays for pages -- too big to keep in frame */
	pages_stat = (struct page **)kmalloc_array(
		ARRAY_SIZE(pdesc->statPPNs) +
		ARRAY_SIZE(pdesc->infoPPNs) +
		ARRAY_SIZE(pdesc->strsPPNs), sizeof(*pages_stat), GFP_KERNEL);

	if (!pages_stat)
		goto err_nomem;

	pages_info = pages_stat + ARRAY_SIZE(pdesc->statPPNs);
	pages_strs = pages_info + ARRAY_SIZE(pdesc->infoPPNs);

	/* Allocate a page for the instance descriptor */
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (!page)
		goto err_nomem;

	/* Set up the instance descriptor */
	pdesc = page_address(page);

	pdesc->reservedMBZ = 0;
	pdesc->statStartVA = arg->stat;
	pdesc->strsStartVA = arg->strs;
	pdesc->statLength = arg->stat_len;
	pdesc->infoLength = arg->info_len;
	pdesc->strsLength = arg->strs_len;
	desc_len = strncpy_from_user(pdesc->description, u64_to_user_ptr(arg->description),
				     ARRAY_SIZE(pdesc->description) - 1);

	if (desc_len < 0) {
		ret_err = -EFAULT;
		goto err_nomem;
	}

	reset_ppn_array(pdesc->statPPNs, ARRAY_SIZE(pdesc->statPPNs));
	reset_ppn_array(pdesc->infoPPNs, ARRAY_SIZE(pdesc->infoPPNs));
	reset_ppn_array(pdesc->strsPPNs, ARRAY_SIZE(pdesc->strsPPNs));

	/* Pin mksGuestStat user pages and store those in the instance descriptor */
	nr_pinned_stat = pin_user_pages(arg->stat, num_pages_stat, FOLL_LONGTERM, pages_stat, NULL);
	if (num_pages_stat != nr_pinned_stat)
		goto err_pin_stat;

	for (i = 0; i < num_pages_stat; ++i)
		pdesc->statPPNs[i] = page_to_pfn(pages_stat[i]);

	nr_pinned_info = pin_user_pages(arg->info, num_pages_info, FOLL_LONGTERM, pages_info, NULL);
	if (num_pages_info != nr_pinned_info)
		goto err_pin_info;

	for (i = 0; i < num_pages_info; ++i)
		pdesc->infoPPNs[i] = page_to_pfn(pages_info[i]);

	nr_pinned_strs = pin_user_pages(arg->strs, num_pages_strs, FOLL_LONGTERM, pages_strs, NULL);
	if (num_pages_strs != nr_pinned_strs)
		goto err_pin_strs;

	for (i = 0; i < num_pages_strs; ++i)
		pdesc->strsPPNs[i] = page_to_pfn(pages_strs[i]);

	/* Send the descriptor to the host via a hypervisor call. The mksGuestStat
	   pages will remain in use until the user requests a matching remove stats
	   or a stats reset occurs. */
	hypervisor_ppn_add((PPN64)page_to_pfn(page));

	dev_priv->mksstat_user_pages[slot] = page;
	atomic_set(&dev_priv->mksstat_user_pids[slot], task_pgrp_vnr(current));

	arg->id = slot;

	DRM_DEV_INFO(dev->dev, "pid=%d arg.description='%.*s' id=%zu\n", current->pid, (int)desc_len, pdesc->description, slot);

	kfree(pages_stat);
	return 0;

err_pin_strs:
	if (nr_pinned_strs > 0)
		unpin_user_pages(pages_strs, nr_pinned_strs);

err_pin_info:
	if (nr_pinned_info > 0)
		unpin_user_pages(pages_info, nr_pinned_info);

err_pin_stat:
	if (nr_pinned_stat > 0)
		unpin_user_pages(pages_stat, nr_pinned_stat);

err_nomem:
	atomic_set(&dev_priv->mksstat_user_pids[slot], 0);
	if (page)
		__free_page(page);
	kfree(pages_stat);

	return ret_err;
}
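/*
 * Note on pinning: FOLL_LONGTERM is used above because the host may
 * read the stat/info/strs pages for the whole lifetime of the instance
 * descriptor, not just for the duration of the ioctl. The matching
 * unpin_user_page() calls happen in vmw_mksstat_cleanup_descriptor(),
 * reached via vmw_mksstat_remove_ioctl() below or a stats reset.
 */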
/**
 * vmw_mksstat_remove_ioctl: Removes a single userspace-originating mksGuestStat
 * instance descriptor from the hypervisor.
 *
 * Discard a hypervisor PFN mapping, containing a single mksGuestStat instance
 * descriptor and unpin the corresponding userspace pages.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller; unused.
 * Return: Zero on success, negative error code on error.
 */
int vmw_mksstat_remove_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_vmw_mksstat_remove_arg *arg =
		(struct drm_vmw_mksstat_remove_arg *) data;

	struct vmw_private *const dev_priv = vmw_priv(dev);

	const size_t slot = arg->id;
	pid_t pgid, pid;

	if (slot >= ARRAY_SIZE(dev_priv->mksstat_user_pids))
		return -EINVAL;

	DRM_DEV_INFO(dev->dev, "pid=%d arg.id=%zu\n", current->pid, slot);

	pgid = task_pgrp_vnr(current);
	pid = atomic_cmpxchg(&dev_priv->mksstat_user_pids[slot], pgid, MKSSTAT_PID_RESERVED);

	if (!pid)
		return 0;

	if (pid == pgid) {
		struct page *const page = dev_priv->mksstat_user_pages[slot];

		BUG_ON(!page);

		dev_priv->mksstat_user_pages[slot] = NULL;
		atomic_set(&dev_priv->mksstat_user_pids[slot], 0);

		hypervisor_ppn_remove((PPN64)page_to_pfn(page));

		vmw_mksstat_cleanup_descriptor(page);
		return 0;
	}

	return -EAGAIN;
}