v6.13.7
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2016 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/objtool.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/cc_platform.h>

#include <asm/hypervisor.h>
#include <drm/drm_ioctl.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_msg_x86.h"
#include "vmwgfx_msg_arm64.h"
#include "vmwgfx_mksstat.h"

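/*
 * Message status flags, reported by the host in the high word of the
 * returned status register (see the HIGH_WORD() checks below).
 */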
#define MESSAGE_STATUS_SUCCESS  0x0001
#define MESSAGE_STATUS_DORECV   0x0002
#define MESSAGE_STATUS_CPT      0x0010
#define MESSAGE_STATUS_HB       0x0080

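/*
 * RPCI channel protocol number: 0x49435052 is the ASCII string "RPCI"
 * read as a little-endian 32-bit value.
 */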
#define RPCI_PROTOCOL_NUM       0x49435052
#define GUESTMSG_FLAG_COOKIE    0x80000000

#define RETRIES                 3

#define VMW_PORT_CMD_MSG        30
#define VMW_PORT_CMD_HB_MSG     0
#define VMW_PORT_CMD_OPEN_CHANNEL  (MSG_TYPE_OPEN << 16 | VMW_PORT_CMD_MSG)
#define VMW_PORT_CMD_CLOSE_CHANNEL (MSG_TYPE_CLOSE << 16 | VMW_PORT_CMD_MSG)
#define VMW_PORT_CMD_SENDSIZE   (MSG_TYPE_SENDSIZE << 16 | VMW_PORT_CMD_MSG)
#define VMW_PORT_CMD_RECVSIZE   (MSG_TYPE_RECVSIZE << 16 | VMW_PORT_CMD_MSG)
#define VMW_PORT_CMD_RECVSTATUS (MSG_TYPE_RECVSTATUS << 16 | VMW_PORT_CMD_MSG)

#define VMW_PORT_CMD_MKS_GUEST_STATS   85
#define VMW_PORT_CMD_MKSGS_RESET       (0 << 16 | VMW_PORT_CMD_MKS_GUEST_STATS)
#define VMW_PORT_CMD_MKSGS_ADD_PPN     (1 << 16 | VMW_PORT_CMD_MKS_GUEST_STATS)
#define VMW_PORT_CMD_MKSGS_REMOVE_PPN  (2 << 16 | VMW_PORT_CMD_MKS_GUEST_STATS)

#define HIGH_WORD(X) ((X & 0xFFFF0000) >> 16)

#define MAX_USER_MSG_LENGTH	PAGE_SIZE

static u32 vmw_msg_enabled = 1;

enum rpc_msg_type {
	MSG_TYPE_OPEN,
	MSG_TYPE_SENDSIZE,
	MSG_TYPE_SENDPAYLOAD,
	MSG_TYPE_RECVSIZE,
	MSG_TYPE_RECVPAYLOAD,
	MSG_TYPE_RECVSTATUS,
	MSG_TYPE_CLOSE,
};

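/*
 * Per-connection RPC channel state. The channel id and the cookie pair
 * are handed out by the host in vmw_open_channel() and must be passed
 * back on every subsequent port call on that channel.
 */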
struct rpc_channel {
	u16 channel_id;
	u32 cookie_high;
	u32 cookie_low;
};

#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS)
/* Kernel mksGuestStats counter names and descriptions; same order as enum mksstat_kern_stats_t */
static const char* const mksstat_kern_name_desc[MKSSTAT_KERN_COUNT][2] =
{
	{ "vmw_execbuf_ioctl", "vmw_execbuf_ioctl" },
	{ "vmw_cotable_resize", "vmw_cotable_resize" },
};
#endif

/**
 * vmw_open_channel: Opens an RPC channel to the host
 *
 * @channel: RPC channel
 * @protocol: Protocol to open the channel with, e.g. RPCI_PROTOCOL_NUM
 *
 * Returns: 0 on success
 */
static int vmw_open_channel(struct rpc_channel *channel, unsigned int protocol)
{
	u32 ecx, edx, esi, edi;

	vmware_hypercall6(VMW_PORT_CMD_OPEN_CHANNEL,
			  (protocol | GUESTMSG_FLAG_COOKIE), 0,
			  &ecx, &edx, &esi, &edi);

	if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
		return -EINVAL;

	channel->channel_id  = HIGH_WORD(edx);
	channel->cookie_high = esi;
	channel->cookie_low  = edi;

	return 0;
}



/**
 * vmw_close_channel: Closes an RPC channel to the host
 *
 * @channel: RPC channel
 *
 * Returns: 0 on success
 */
static int vmw_close_channel(struct rpc_channel *channel)
{
	u32 ecx;

	vmware_hypercall5(VMW_PORT_CMD_CLOSE_CHANNEL,
			  0, channel->channel_id << 16,
			  channel->cookie_high,
			  channel->cookie_low,
			  &ecx);

	if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
		return -EINVAL;

	return 0;
}

/**
 * vmw_port_hb_out - Send the message payload either through the
 * high-bandwidth port if available, or through the backdoor otherwise.
 * @channel: The rpc channel.
 * @msg: NULL-terminated message.
 * @hb: Whether the high-bandwidth port is available.
 *
 * Return: The port status.
 */
static unsigned long vmw_port_hb_out(struct rpc_channel *channel,
				     const char *msg, bool hb)
{
	u32 ebx, ecx;
	unsigned long msg_len = strlen(msg);

	/* HB port can't access encrypted memory. */
	if (hb && !cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
		vmware_hypercall_hb_out(
			(MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
			msg_len,
			channel->channel_id << 16,
			(uintptr_t) msg, channel->cookie_low,
			channel->cookie_high,
			&ebx);

		return ebx;
	}

	/* HB port not available. Send the message 4 bytes at a time. */
	ecx = MESSAGE_STATUS_SUCCESS << 16;
	while (msg_len && (HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS)) {
		unsigned int bytes = min_t(size_t, msg_len, 4);
		unsigned long word = 0;

		memcpy(&word, msg, bytes);
		msg_len -= bytes;
		msg += bytes;

		vmware_hypercall5(VMW_PORT_CMD_MSG |
				  (MSG_TYPE_SENDPAYLOAD << 16),
				  word, channel->channel_id << 16,
				  channel->cookie_high,
				  channel->cookie_low,
				  &ecx);
	}

	return ecx;
}

/**
 * vmw_port_hb_in - Receive the message payload either through the
 * high-bandwidth port if available, or through the backdoor otherwise.
 * @channel: The rpc channel.
 * @reply: Pointer to buffer holding reply.
 * @reply_len: Length of the reply.
 * @hb: Whether the high-bandwidth port is available.
 *
 * Return: The port status.
 */
static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply,
				    unsigned long reply_len, bool hb)
{
	u32 ebx, ecx, edx;

	/* HB port can't access encrypted memory */
	if (hb && !cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
		vmware_hypercall_hb_in(
			(MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
			reply_len,
			channel->channel_id << 16,
			channel->cookie_high,
			(uintptr_t) reply, channel->cookie_low,
			&ebx);

		return ebx;
	}

	/* HB port not available. Retrieve the message 4 bytes at a time. */
	ecx = MESSAGE_STATUS_SUCCESS << 16;
	while (reply_len) {
		unsigned int bytes = min_t(unsigned long, reply_len, 4);

		vmware_hypercall7(VMW_PORT_CMD_MSG |
				  (MSG_TYPE_RECVPAYLOAD << 16),
				  MESSAGE_STATUS_SUCCESS,
				  channel->channel_id << 16,
				  channel->cookie_high,
				  channel->cookie_low,
				  &ebx, &ecx, &edx);

		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
			break;

		memcpy(reply, &ebx, bytes);
		reply_len -= bytes;
		reply += bytes;
	}

	return ecx;
}


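/*
 * RPCI send sequence: announce the payload size with SENDSIZE, then push
 * the payload through vmw_port_hb_out(). A MESSAGE_STATUS_CPT status means
 * a VM checkpoint raced with the transfer, in which case the whole send is
 * retried (up to RETRIES times).
 */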
/**
 * vmw_send_msg: Sends a message to the host
 *
 * @channel: RPC channel
 * @msg: NULL terminated string
 *
 * Returns: 0 on success
 */
static int vmw_send_msg(struct rpc_channel *channel, const char *msg)
{
	u32 ebx, ecx;
	size_t msg_len = strlen(msg);
	int retries = 0;

	while (retries < RETRIES) {
		retries++;

		vmware_hypercall5(VMW_PORT_CMD_SENDSIZE,
				  msg_len, channel->channel_id << 16,
				  channel->cookie_high,
				  channel->cookie_low,
				  &ecx);

		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
			/* Expected success. Give up. */
			return -EINVAL;
		}

		/* Send msg */
		ebx = vmw_port_hb_out(channel, msg,
				      !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));

		if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) != 0) {
			return 0;
		} else if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) {
			/* A checkpoint occurred. Retry. */
			continue;
		} else {
			break;
		}
	}

	return -EINVAL;
}
STACK_FRAME_NON_STANDARD(vmw_send_msg);


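/*
 * RPCI receive sequence: query the pending reply size with RECVSIZE, pull
 * the payload through vmw_port_hb_in(), then acknowledge it with
 * RECVSTATUS. As on the send side, MESSAGE_STATUS_CPT triggers a retry of
 * the whole exchange.
 */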
/**
 * vmw_recv_msg: Receives a message from the host
 *
 * Note:  It is the caller's responsibility to call kfree() on msg.
 *
 * @channel:  channel opened by vmw_open_channel
 * @msg:  [OUT] message received from the host
 * @msg_len: message length
 *
 * Returns: 0 on success, negative error code on error
 */
static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
			size_t *msg_len)
{
	u32 ebx, ecx, edx;
	char *reply;
	size_t reply_len;
	int retries = 0;


	*msg_len = 0;
	*msg = NULL;

	while (retries < RETRIES) {
		retries++;

		vmware_hypercall7(VMW_PORT_CMD_RECVSIZE,
				  0, channel->channel_id << 16,
				  channel->cookie_high,
				  channel->cookie_low,
				  &ebx, &ecx, &edx);

		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
			DRM_ERROR("Failed to get reply size for host message.\n");
			return -EINVAL;
		}

		/* No reply available.  This is okay. */
		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_DORECV) == 0)
			return 0;

		reply_len = ebx;
		reply     = kzalloc(reply_len + 1, GFP_KERNEL);
		if (!reply) {
			DRM_ERROR("Cannot allocate memory for host message reply.\n");
			return -ENOMEM;
		}


		/* Receive buffer */
		ebx = vmw_port_hb_in(channel, reply, reply_len,
				     !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));
		if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) == 0) {
			kfree(reply);
			reply = NULL;
			if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) {
				/* A checkpoint occurred. Retry. */
				continue;
			}

			return -EINVAL;
		}

		reply[reply_len] = '\0';

		vmware_hypercall5(VMW_PORT_CMD_RECVSTATUS,
				  MESSAGE_STATUS_SUCCESS,
				  channel->channel_id << 16,
				  channel->cookie_high,
				  channel->cookie_low,
				  &ecx);

		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
			kfree(reply);
			reply = NULL;
			if ((HIGH_WORD(ecx) & MESSAGE_STATUS_CPT) != 0) {
				/* A checkpoint occurred. Retry. */
				continue;
			}

			return -EINVAL;
		}

		break;
	}

	if (!reply)
		return -EINVAL;

	*msg_len = reply_len;
	*msg     = reply;

	return 0;
}
STACK_FRAME_NON_STANDARD(vmw_recv_msg);


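/*
 * The parameter is fetched with an "info-get <param>" RPC. The host reply
 * starts with a two-character reply code, which is stripped before the
 * value is copied to the caller's buffer.
 */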
/**
 * vmw_host_get_guestinfo: Gets a GuestInfo parameter
 *
 * Gets the value of a GuestInfo.* parameter. The value returned will be in
 * a string, and it is up to the caller to post-process.
 *
 * @guest_info_param: Parameter to get, e.g. GuestInfo.svga.gl3
 * @buffer: If NULL, *length will contain the reply size upon return.
 * @length: Size of the buffer. Set to size of reply upon return.
 *
 * Returns: 0 on success
 */
int vmw_host_get_guestinfo(const char *guest_info_param,
			   char *buffer, size_t *length)
{
	struct rpc_channel channel;
	char *msg, *reply = NULL;
	size_t reply_len = 0;

	if (!vmw_msg_enabled)
		return -ENODEV;

	if (!guest_info_param || !length)
		return -EINVAL;

	msg = kasprintf(GFP_KERNEL, "info-get %s", guest_info_param);
	if (!msg) {
		DRM_ERROR("Cannot allocate memory to get guest info \"%s\".",
			  guest_info_param);
		return -ENOMEM;
	}

	if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM))
		goto out_open;

	if (vmw_send_msg(&channel, msg) ||
	    vmw_recv_msg(&channel, (void *) &reply, &reply_len))
		goto out_msg;

	vmw_close_channel(&channel);
	if (buffer && reply && reply_len > 0) {
		/* Remove the reply code, which is the first 2 characters of
		 * the reply
		 */
		reply_len = reply_len > 2 ? reply_len - 2 : 0;
		reply_len = min(reply_len, *length);

		if (reply_len > 0)
			memcpy(buffer, reply + 2, reply_len);
	}

	*length = reply_len;

	kfree(reply);
	kfree(msg);

	return 0;

out_msg:
	vmw_close_channel(&channel);
	kfree(reply);
out_open:
	*length = 0;
	kfree(msg);
	DRM_ERROR("Failed to get guest info \"%s\".", guest_info_param);

	return -EINVAL;
}


/**
 * vmw_host_printf: Sends a log message to the host
 *
 * @fmt: Regular printf format string and arguments
 *
 * Returns: 0 on success
 */
__printf(1, 2)
int vmw_host_printf(const char *fmt, ...)
{
	va_list ap;
	struct rpc_channel channel;
	char *msg;
	char *log;
	int ret = 0;

	if (!vmw_msg_enabled)
		return -ENODEV;

	if (!fmt)
		return ret;

	va_start(ap, fmt);
	log = kvasprintf(GFP_KERNEL, fmt, ap);
	va_end(ap);
	if (!log) {
		DRM_ERROR("Cannot allocate memory for the log message.\n");
		return -ENOMEM;
	}

	msg = kasprintf(GFP_KERNEL, "log %s", log);
	if (!msg) {
		DRM_ERROR("Cannot allocate memory for host log message.\n");
		kfree(log);
		return -ENOMEM;
	}

	if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM))
		goto out_open;

	if (vmw_send_msg(&channel, msg))
		goto out_msg;

	vmw_close_channel(&channel);
	kfree(msg);
	kfree(log);

	return 0;

out_msg:
	vmw_close_channel(&channel);
out_open:
	kfree(msg);
	kfree(log);
	DRM_ERROR("Failed to send host log message.\n");

	return -EINVAL;
}


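/*
 * Userspace usage sketch (illustrative only, assuming a libdrm-style
 * wrapper and the drm_vmw_msg_arg layout used below):
 *
 *	struct drm_vmw_msg_arg arg = {
 *		.send = (__u64)(uintptr_t)"log hello from the guest",
 *		.send_only = 1,
 *	};
 *	drmCommandWriteRead(fd, DRM_VMW_MSG, &arg, sizeof(arg));
 */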
/**
 * vmw_msg_ioctl: Sends and receives a message to/from the host on behalf
 * of user-space
 *
 * Sends a message from user-space to the host.
 * Can also receive a result from the host and return that to user-space.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 */

int vmw_msg_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	struct drm_vmw_msg_arg *arg =
			(struct drm_vmw_msg_arg *)data;
	struct rpc_channel channel;
	char *msg;
	int length;

	msg = kmalloc(MAX_USER_MSG_LENGTH, GFP_KERNEL);
	if (!msg) {
		DRM_ERROR("Cannot allocate memory for log message.\n");
		return -ENOMEM;
	}

	length = strncpy_from_user(msg, (void __user *)((unsigned long)arg->send),
				   MAX_USER_MSG_LENGTH);
	if (length < 0 || length >= MAX_USER_MSG_LENGTH) {
		DRM_ERROR("Userspace message access failure.\n");
		kfree(msg);
		return -EINVAL;
	}


	if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM)) {
		DRM_ERROR("Failed to open channel.\n");
		goto out_open;
	}

	if (vmw_send_msg(&channel, msg)) {
		DRM_ERROR("Failed to send message to host.\n");
		goto out_msg;
	}

	if (!arg->send_only) {
		char *reply = NULL;
		size_t reply_len = 0;

		if (vmw_recv_msg(&channel, (void *) &reply, &reply_len)) {
			DRM_ERROR("Failed to receive message from host.\n");
			goto out_msg;
		}
		if (reply && reply_len > 0) {
			if (copy_to_user((void __user *)((unsigned long)arg->receive),
					 reply, reply_len)) {
				DRM_ERROR("Failed to copy message to userspace.\n");
				kfree(reply);
				goto out_msg;
			}
			arg->receive_len = (__u32)reply_len;
		}
		kfree(reply);
	}

	vmw_close_channel(&channel);
	kfree(msg);

	return 0;

out_msg:
	vmw_close_channel(&channel);
out_open:
	kfree(msg);

	return -EINVAL;
}

/**
 * reset_ppn_array: Resets a PPN64 array to INVALID_PPN64 content
 *
 * @arr: Array to reset.
 * @size: Array length.
 */
static inline void reset_ppn_array(PPN64 *arr, size_t size)
{
	size_t i;

	BUG_ON(!arr || size == 0);

	for (i = 0; i < size; ++i)
		arr[i] = INVALID_PPN64;
}

/**
 * hypervisor_ppn_reset_all: Removes all mksGuestStat instance descriptors from
 * the hypervisor. All related pages should be subsequently unpinned or freed.
 */
static inline void hypervisor_ppn_reset_all(void)
{
	vmware_hypercall1(VMW_PORT_CMD_MKSGS_RESET, 0);
}

/**
 * hypervisor_ppn_add: Adds a single mksGuestStat instance descriptor to the
 * hypervisor. Any related userspace pages should be pinned in advance.
 *
 * @pfn: Physical page number of the instance descriptor
 */
static inline void hypervisor_ppn_add(PPN64 pfn)
{
	vmware_hypercall1(VMW_PORT_CMD_MKSGS_ADD_PPN, (unsigned long)pfn);
}

/**
 * hypervisor_ppn_remove: Removes a single mksGuestStat instance descriptor from
 * the hypervisor. All related pages should be subsequently unpinned or freed.
 *
 * @pfn: Physical page number of the instance descriptor
 */
static inline void hypervisor_ppn_remove(PPN64 pfn)
{
	vmware_hypercall1(VMW_PORT_CMD_MKSGS_REMOVE_PPN, (unsigned long)pfn);
}

#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS)

/* Order of the total number of pages used for kernel-internal mksGuestStat; at least 2 */
#define MKSSTAT_KERNEL_PAGES_ORDER 2
/* Header to the text description of mksGuestStat instance descriptor */
#define MKSSTAT_KERNEL_DESCRIPTION "vmwgfx"

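/*
 * The counter name/description strings are packed back to back in the
 * strings area; each mksstat_init_record_time() call appends one
 * NUL-terminated name/description pair and returns the new end of the
 * sequence.
 */
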
/**
 * mksstat_init_record_time: Initializes an MKSGuestStatCounterTime-based record
 * for the respective mksGuestStat index.
 *
 * @stat_idx: Index of the MKSGuestStatCounterTime-based mksGuestStat record.
 * @pstat: Pointer to array of MKSGuestStatCounterTime.
 * @pinfo: Pointer to array of MKSGuestStatInfoEntry.
 * @pstrs: Pointer to current end of the name/description sequence.
 * Return: Pointer to the new end of the names/description sequence.
 */

static inline char *mksstat_init_record_time(mksstat_kern_stats_t stat_idx,
	MKSGuestStatCounterTime *pstat, MKSGuestStatInfoEntry *pinfo, char *pstrs)
{
	char *const pstrd = pstrs + strlen(mksstat_kern_name_desc[stat_idx][0]) + 1;
	strcpy(pstrs, mksstat_kern_name_desc[stat_idx][0]);
	strcpy(pstrd, mksstat_kern_name_desc[stat_idx][1]);

	pinfo[stat_idx].name.s = pstrs;
	pinfo[stat_idx].description.s = pstrd;
	pinfo[stat_idx].flags = MKS_GUEST_STAT_FLAG_TIME;
	pinfo[stat_idx].stat.counterTime = &pstat[stat_idx];

	return pstrd + strlen(mksstat_kern_name_desc[stat_idx][1]) + 1;
}

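/*
 * The instance descriptor, the counters, the info entries and the strings
 * share one physically contiguous allocation of 2^MKSSTAT_KERNEL_PAGES_ORDER
 * pages; the vmw_mksstat_get_kern_* helpers locate the sub-areas within it.
 */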
/**
 * mksstat_init_kern_id: Creates a single mksGuestStat instance descriptor and
 * kernel-internal counters. Adds PFN mapping to the hypervisor.
 *
 * Create a single mksGuestStat instance descriptor and corresponding structures
 * for all kernel-internal counters. The corresponding PFNs are mapped with the
 * hypervisor.
 *
 * @ppage: Output pointer to page containing the instance descriptor.
 * Return: Zero on success, negative error code on error.
 */

static int mksstat_init_kern_id(struct page **ppage)
{
	MKSGuestStatInstanceDescriptor *pdesc;
	MKSGuestStatCounterTime *pstat;
	MKSGuestStatInfoEntry *pinfo;
	char *pstrs, *pstrs_acc;

	/* Allocate pages for the kernel-internal instance descriptor */
	struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, MKSSTAT_KERNEL_PAGES_ORDER);

	if (!page)
		return -ENOMEM;

	pdesc = page_address(page);
	pstat = vmw_mksstat_get_kern_pstat(pdesc);
	pinfo = vmw_mksstat_get_kern_pinfo(pdesc);
	pstrs = vmw_mksstat_get_kern_pstrs(pdesc);

	/* Set up all kernel-internal counters and corresponding structures */
	pstrs_acc = pstrs;
	pstrs_acc = mksstat_init_record_time(MKSSTAT_KERN_EXECBUF, pstat, pinfo, pstrs_acc);
	pstrs_acc = mksstat_init_record_time(MKSSTAT_KERN_COTABLE_RESIZE, pstat, pinfo, pstrs_acc);

	/* Add new counters above, in their order of appearance in mksstat_kern_stats_t */

	BUG_ON(pstrs_acc - pstrs > PAGE_SIZE);

	/* Set up the kernel-internal instance descriptor */
	pdesc->reservedMBZ = 0;
	pdesc->statStartVA = (uintptr_t)pstat;
	pdesc->strsStartVA = (uintptr_t)pstrs;
	pdesc->statLength = sizeof(*pstat) * MKSSTAT_KERN_COUNT;
	pdesc->infoLength = sizeof(*pinfo) * MKSSTAT_KERN_COUNT;
	pdesc->strsLength = pstrs_acc - pstrs;
	snprintf(pdesc->description, ARRAY_SIZE(pdesc->description) - 1, "%s pid=%d",
		MKSSTAT_KERNEL_DESCRIPTION, current->pid);

	pdesc->statPPNs[0] = page_to_pfn(virt_to_page(pstat));
	reset_ppn_array(pdesc->statPPNs + 1, ARRAY_SIZE(pdesc->statPPNs) - 1);

	pdesc->infoPPNs[0] = page_to_pfn(virt_to_page(pinfo));
	reset_ppn_array(pdesc->infoPPNs + 1, ARRAY_SIZE(pdesc->infoPPNs) - 1);

	pdesc->strsPPNs[0] = page_to_pfn(virt_to_page(pstrs));
	reset_ppn_array(pdesc->strsPPNs + 1, ARRAY_SIZE(pdesc->strsPPNs) - 1);

	*ppage = page;

	hypervisor_ppn_add((PPN64)page_to_pfn(page));

	return 0;
}

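/*
 * Slot lookup is open addressing: probing starts at hash_32(pid) and walks
 * the pid array linearly. An empty slot is claimed by cmpxchg-ing in
 * MKSSTAT_PID_RESERVED before the descriptor is built, so concurrent
 * callers cannot claim the same slot.
 */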
/**
 * vmw_mksstat_get_kern_slot: Acquires a slot for a single kernel-internal
 * mksGuestStat instance descriptor.
 *
 * Find a slot for a single kernel-internal mksGuestStat instance descriptor.
 * If none is already present, allocate a new one and set up a kernel-internal
 * mksGuestStat instance descriptor for it.
 *
 * @pid: Process for which a slot is sought.
 * @dev_priv: Identifies the drm private device.
 * Return: Non-negative slot on success, negative error code on error.
 */

int vmw_mksstat_get_kern_slot(pid_t pid, struct vmw_private *dev_priv)
{
	const size_t base = (u32)hash_32(pid, MKSSTAT_CAPACITY_LOG2);
	size_t i;

	for (i = 0; i < ARRAY_SIZE(dev_priv->mksstat_kern_pids); ++i) {
		const size_t slot = (i + base) % ARRAY_SIZE(dev_priv->mksstat_kern_pids);

		/* Check if an instance descriptor for this pid is already present */
		if (pid == (pid_t)atomic_read(&dev_priv->mksstat_kern_pids[slot]))
			return (int)slot;

		/* Set up a new instance descriptor for this pid */
		if (!atomic_cmpxchg(&dev_priv->mksstat_kern_pids[slot], 0, MKSSTAT_PID_RESERVED)) {
			const int ret = mksstat_init_kern_id(&dev_priv->mksstat_kern_pages[slot]);

			if (!ret) {
				/* Reset top-timer tracking for this slot */
				dev_priv->mksstat_kern_top_timer[slot] = MKSSTAT_KERN_COUNT;

				atomic_set(&dev_priv->mksstat_kern_pids[slot], pid);
				return (int)slot;
			}

			atomic_set(&dev_priv->mksstat_kern_pids[slot], 0);
			return ret;
		}
	}

	return -ENOSPC;
}

#endif

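/*
 * The PPN arrays were initialized to INVALID_PPN64, so the first invalid
 * entry in each array marks the end of the pinned pages for that area.
 */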
/**
 * vmw_mksstat_cleanup_descriptor: Frees a single userspace-originating
 * mksGuestStat instance-descriptor page and unpins all related user pages.
 *
 * Unpin all user pages related to this instance descriptor and free
 * the instance-descriptor page itself.
 *
 * @page: Page of the instance descriptor.
 */

static void vmw_mksstat_cleanup_descriptor(struct page *page)
{
	MKSGuestStatInstanceDescriptor *pdesc = page_address(page);
	size_t i;

	for (i = 0; i < ARRAY_SIZE(pdesc->statPPNs) && pdesc->statPPNs[i] != INVALID_PPN64; ++i)
		unpin_user_page(pfn_to_page(pdesc->statPPNs[i]));

	for (i = 0; i < ARRAY_SIZE(pdesc->infoPPNs) && pdesc->infoPPNs[i] != INVALID_PPN64; ++i)
		unpin_user_page(pfn_to_page(pdesc->infoPPNs[i]));

	for (i = 0; i < ARRAY_SIZE(pdesc->strsPPNs) && pdesc->strsPPNs[i] != INVALID_PPN64; ++i)
		unpin_user_page(pfn_to_page(pdesc->strsPPNs[i]));

	__free_page(page);
}

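/*
 * Teardown uses the same cmpxchg protocol as the add paths: a slot is only
 * reclaimed if its pid is observed and then successfully swapped for
 * MKSSTAT_PID_RESERVED; a lost race is reported as -EAGAIN so the caller
 * may retry.
 */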
/**
 * vmw_mksstat_remove_all: Resets all mksGuestStat instance descriptors
 * from the hypervisor.
 *
 * Discard all hypervisor PFN mappings containing active mksGuestStat instance
 * descriptors, unpin the related userspace pages and free the related kernel pages.
 *
 * @dev_priv: Identifies the drm private device.
 * Return: Zero on success, negative error code on error.
 */

int vmw_mksstat_remove_all(struct vmw_private *dev_priv)
{
	int ret = 0;
	size_t i;

	/* Discard all PFN mappings with the hypervisor */
	hypervisor_ppn_reset_all();

	/* Discard all userspace-originating instance descriptors and unpin all related pages */
	for (i = 0; i < ARRAY_SIZE(dev_priv->mksstat_user_pids); ++i) {
		const pid_t pid0 = (pid_t)atomic_read(&dev_priv->mksstat_user_pids[i]);

		if (!pid0)
			continue;

		if (pid0 != MKSSTAT_PID_RESERVED) {
			const pid_t pid1 = atomic_cmpxchg(&dev_priv->mksstat_user_pids[i], pid0, MKSSTAT_PID_RESERVED);

			if (!pid1)
				continue;

			if (pid1 == pid0) {
				struct page *const page = dev_priv->mksstat_user_pages[i];

				BUG_ON(!page);

				dev_priv->mksstat_user_pages[i] = NULL;
				atomic_set(&dev_priv->mksstat_user_pids[i], 0);

				vmw_mksstat_cleanup_descriptor(page);
				continue;
			}
		}

		ret = -EAGAIN;
	}

#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS)
	/* Discard all kernel-internal instance descriptors and free all related pages */
	for (i = 0; i < ARRAY_SIZE(dev_priv->mksstat_kern_pids); ++i) {
		const pid_t pid0 = (pid_t)atomic_read(&dev_priv->mksstat_kern_pids[i]);

		if (!pid0)
			continue;

		if (pid0 != MKSSTAT_PID_RESERVED) {
			const pid_t pid1 = atomic_cmpxchg(&dev_priv->mksstat_kern_pids[i], pid0, MKSSTAT_PID_RESERVED);

			if (!pid1)
				continue;

			if (pid1 == pid0) {
				struct page *const page = dev_priv->mksstat_kern_pages[i];

				BUG_ON(!page);

				dev_priv->mksstat_kern_pages[i] = NULL;
				atomic_set(&dev_priv->mksstat_kern_pids[i], 0);

				__free_pages(page, MKSSTAT_KERNEL_PAGES_ORDER);
				continue;
			}
		}

		ret = -EAGAIN;
	}

#endif
	return ret;
}

/**
 * vmw_mksstat_reset_ioctl: Resets all mksGuestStat instance descriptors
 * from the hypervisor.
 *
 * Discard all hypervisor PFN mappings containing active mksGuestStat instance
 * descriptors, unpin the related userspace pages and free the related kernel pages.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller; unused.
 * Return: Zero on success, negative error code on error.
 */

int vmw_mksstat_reset_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *const dev_priv = vmw_priv(dev);
	return vmw_mksstat_remove_all(dev_priv);
}

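/*
 * The stat, info and string buffers below are userspace memory; they are
 * pinned with FOLL_LONGTERM so their PFNs remain valid for the host until
 * a matching remove or a stats reset occurs.
 */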
/**
 * vmw_mksstat_add_ioctl: Creates a single userspace-originating mksGuestStat
 * instance descriptor and registers that with the hypervisor.
 *
 * Create a hypervisor PFN mapping containing a single mksGuestStat instance
 * descriptor and pin the corresponding userspace pages.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller; unused.
 * Return: Zero on success, negative error code on error.
 */

int vmw_mksstat_add_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_vmw_mksstat_add_arg *arg =
		(struct drm_vmw_mksstat_add_arg *) data;

	struct vmw_private *const dev_priv = vmw_priv(dev);

	const size_t num_pages_stat = PFN_UP(arg->stat_len);
	const size_t num_pages_info = PFN_UP(arg->info_len);
	const size_t num_pages_strs = PFN_UP(arg->strs_len);
	long desc_len;
	long nr_pinned_stat;
	long nr_pinned_info;
	long nr_pinned_strs;
	MKSGuestStatInstanceDescriptor *pdesc;
	struct page *page = NULL;
	struct page **pages_stat = NULL;
	struct page **pages_info = NULL;
	struct page **pages_strs = NULL;
	size_t i, slot;
	int ret_err = -ENOMEM;

	arg->id = -1;

	if (!arg->stat || !arg->info || !arg->strs)
		return -EINVAL;

	if (!arg->stat_len || !arg->info_len || !arg->strs_len)
		return -EINVAL;

	if (!arg->description)
		return -EINVAL;

	if (num_pages_stat > ARRAY_SIZE(pdesc->statPPNs) ||
		num_pages_info > ARRAY_SIZE(pdesc->infoPPNs) ||
		num_pages_strs > ARRAY_SIZE(pdesc->strsPPNs))
		return -EINVAL;

	/* Find an available slot in the mksGuestStats user array and reserve it */
	for (slot = 0; slot < ARRAY_SIZE(dev_priv->mksstat_user_pids); ++slot)
		if (!atomic_cmpxchg(&dev_priv->mksstat_user_pids[slot], 0, MKSSTAT_PID_RESERVED))
			break;

	if (slot == ARRAY_SIZE(dev_priv->mksstat_user_pids))
		return -ENOSPC;

	BUG_ON(dev_priv->mksstat_user_pages[slot]);

	/* Allocate statically-sized temp arrays for pages -- too big to keep in frame */
	pages_stat = (struct page **)kmalloc_array(
		ARRAY_SIZE(pdesc->statPPNs) +
		ARRAY_SIZE(pdesc->infoPPNs) +
		ARRAY_SIZE(pdesc->strsPPNs), sizeof(*pages_stat), GFP_KERNEL);

	if (!pages_stat)
		goto err_nomem;

	pages_info = pages_stat + ARRAY_SIZE(pdesc->statPPNs);
	pages_strs = pages_info + ARRAY_SIZE(pdesc->infoPPNs);

	/* Allocate a page for the instance descriptor */
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (!page)
		goto err_nomem;

	/* Set up the instance descriptor */
	pdesc = page_address(page);

	pdesc->reservedMBZ = 0;
	pdesc->statStartVA = arg->stat;
	pdesc->strsStartVA = arg->strs;
	pdesc->statLength = arg->stat_len;
	pdesc->infoLength = arg->info_len;
	pdesc->strsLength = arg->strs_len;
	desc_len = strncpy_from_user(pdesc->description, u64_to_user_ptr(arg->description),
		ARRAY_SIZE(pdesc->description) - 1);

	if (desc_len < 0) {
		ret_err = -EFAULT;
		goto err_nomem;
	}

	reset_ppn_array(pdesc->statPPNs, ARRAY_SIZE(pdesc->statPPNs));
	reset_ppn_array(pdesc->infoPPNs, ARRAY_SIZE(pdesc->infoPPNs));
	reset_ppn_array(pdesc->strsPPNs, ARRAY_SIZE(pdesc->strsPPNs));

	/* Pin mksGuestStat user pages and store those in the instance descriptor */
	nr_pinned_stat = pin_user_pages_fast(arg->stat, num_pages_stat, FOLL_LONGTERM, pages_stat);
	if (num_pages_stat != nr_pinned_stat)
		goto err_pin_stat;

	for (i = 0; i < num_pages_stat; ++i)
		pdesc->statPPNs[i] = page_to_pfn(pages_stat[i]);

	nr_pinned_info = pin_user_pages_fast(arg->info, num_pages_info, FOLL_LONGTERM, pages_info);
	if (num_pages_info != nr_pinned_info)
		goto err_pin_info;

	for (i = 0; i < num_pages_info; ++i)
		pdesc->infoPPNs[i] = page_to_pfn(pages_info[i]);

	nr_pinned_strs = pin_user_pages_fast(arg->strs, num_pages_strs, FOLL_LONGTERM, pages_strs);
	if (num_pages_strs != nr_pinned_strs)
		goto err_pin_strs;

	for (i = 0; i < num_pages_strs; ++i)
		pdesc->strsPPNs[i] = page_to_pfn(pages_strs[i]);

	/*
	 * Send the descriptor to the host via a hypervisor call. The mksGuestStat
	 * pages will remain in use until the user requests a matching remove stats
	 * or a stats reset occurs.
	 */
	hypervisor_ppn_add((PPN64)page_to_pfn(page));

	dev_priv->mksstat_user_pages[slot] = page;
	atomic_set(&dev_priv->mksstat_user_pids[slot], task_pgrp_vnr(current));

	arg->id = slot;

	DRM_DEV_INFO(dev->dev, "pid=%d arg.description='%.*s' id=%zu\n", current->pid, (int)desc_len, pdesc->description, slot);

	kfree(pages_stat);
	return 0;

err_pin_strs:
	if (nr_pinned_strs > 0)
		unpin_user_pages(pages_strs, nr_pinned_strs);

err_pin_info:
	if (nr_pinned_info > 0)
		unpin_user_pages(pages_info, nr_pinned_info);

err_pin_stat:
	if (nr_pinned_stat > 0)
		unpin_user_pages(pages_stat, nr_pinned_stat);

err_nomem:
	atomic_set(&dev_priv->mksstat_user_pids[slot], 0);
	if (page)
		__free_page(page);
	kfree(pages_stat);

	return ret_err;
}

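/*
 * Only the process group that added an instance may remove it: the slot's
 * pid is cmpxchg-ed against the caller's pgid (task_pgrp_vnr()), matching
 * what vmw_mksstat_add_ioctl() stored.
 */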
/**
 * vmw_mksstat_remove_ioctl: Removes a single userspace-originating mksGuestStat
 * instance descriptor from the hypervisor.
 *
 * Discard a hypervisor PFN mapping containing a single mksGuestStat instance
 * descriptor and unpin the corresponding userspace pages.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller; unused.
 * Return: Zero on success, negative error code on error.
 */

int vmw_mksstat_remove_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_vmw_mksstat_remove_arg *arg =
		(struct drm_vmw_mksstat_remove_arg *) data;

	struct vmw_private *const dev_priv = vmw_priv(dev);

	const size_t slot = arg->id;
	pid_t pgid, pid;

	if (slot >= ARRAY_SIZE(dev_priv->mksstat_user_pids))
		return -EINVAL;

	DRM_DEV_INFO(dev->dev, "pid=%d arg.id=%zu\n", current->pid, slot);

	pgid = task_pgrp_vnr(current);
	pid = atomic_cmpxchg(&dev_priv->mksstat_user_pids[slot], pgid, MKSSTAT_PID_RESERVED);

	if (!pid)
		return 0;

	if (pid == pgid) {
		struct page *const page = dev_priv->mksstat_user_pages[slot];

		BUG_ON(!page);

		dev_priv->mksstat_user_pages[slot] = NULL;
		atomic_set(&dev_priv->mksstat_user_pids[slot], 0);

		hypervisor_ppn_remove((PPN64)page_to_pfn(page));

		vmw_mksstat_cleanup_descriptor(page);
		return 0;
	}

	return -EAGAIN;
}

/**
 * vmw_disable_backdoor: Disables all backdoor communication
 * with the hypervisor.
 */
void vmw_disable_backdoor(void)
{
	vmw_msg_enabled = 0;
}
v6.2
   1// SPDX-License-Identifier: GPL-2.0 OR MIT
   2/*
   3 * Copyright 2016 VMware, Inc., Palo Alto, CA., USA
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining a
   6 * copy of this software and associated documentation files (the
   7 * "Software"), to deal in the Software without restriction, including
   8 * without limitation the rights to use, copy, modify, merge, publish,
   9 * distribute, sub license, and/or sell copies of the Software, and to
  10 * permit persons to whom the Software is furnished to do so, subject to
  11 * the following conditions:
  12 *
  13 * The above copyright notice and this permission notice (including the
  14 * next paragraph) shall be included in all copies or substantial portions
  15 * of the Software.
  16 *
  17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  19 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  20 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  21 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  22 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  23 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  24 *
  25 */
  26
  27#include <linux/objtool.h>
  28#include <linux/kernel.h>
  29#include <linux/module.h>
  30#include <linux/slab.h>
  31#include <linux/cc_platform.h>
  32
  33#include <asm/hypervisor.h>
  34#include <drm/drm_ioctl.h>
  35
  36#include "vmwgfx_drv.h"
  37#include "vmwgfx_msg_x86.h"
  38#include "vmwgfx_msg_arm64.h"
  39#include "vmwgfx_mksstat.h"
  40
  41#define MESSAGE_STATUS_SUCCESS  0x0001
  42#define MESSAGE_STATUS_DORECV   0x0002
  43#define MESSAGE_STATUS_CPT      0x0010
  44#define MESSAGE_STATUS_HB       0x0080
  45
  46#define RPCI_PROTOCOL_NUM       0x49435052
  47#define GUESTMSG_FLAG_COOKIE    0x80000000
  48
  49#define RETRIES                 3
  50
  51#define VMW_HYPERVISOR_MAGIC    0x564D5868
  52
  53#define VMW_PORT_CMD_MSG        30
  54#define VMW_PORT_CMD_HB_MSG     0
  55#define VMW_PORT_CMD_OPEN_CHANNEL  (MSG_TYPE_OPEN << 16 | VMW_PORT_CMD_MSG)
  56#define VMW_PORT_CMD_CLOSE_CHANNEL (MSG_TYPE_CLOSE << 16 | VMW_PORT_CMD_MSG)
  57#define VMW_PORT_CMD_SENDSIZE   (MSG_TYPE_SENDSIZE << 16 | VMW_PORT_CMD_MSG)
  58#define VMW_PORT_CMD_RECVSIZE   (MSG_TYPE_RECVSIZE << 16 | VMW_PORT_CMD_MSG)
  59#define VMW_PORT_CMD_RECVSTATUS (MSG_TYPE_RECVSTATUS << 16 | VMW_PORT_CMD_MSG)
  60
  61#define VMW_PORT_CMD_MKS_GUEST_STATS   85
  62#define VMW_PORT_CMD_MKSGS_RESET       (0 << 16 | VMW_PORT_CMD_MKS_GUEST_STATS)
  63#define VMW_PORT_CMD_MKSGS_ADD_PPN     (1 << 16 | VMW_PORT_CMD_MKS_GUEST_STATS)
  64#define VMW_PORT_CMD_MKSGS_REMOVE_PPN  (2 << 16 | VMW_PORT_CMD_MKS_GUEST_STATS)
  65
  66#define HIGH_WORD(X) ((X & 0xFFFF0000) >> 16)
  67
  68#define MAX_USER_MSG_LENGTH	PAGE_SIZE
  69
  70static u32 vmw_msg_enabled = 1;
  71
  72enum rpc_msg_type {
  73	MSG_TYPE_OPEN,
  74	MSG_TYPE_SENDSIZE,
  75	MSG_TYPE_SENDPAYLOAD,
  76	MSG_TYPE_RECVSIZE,
  77	MSG_TYPE_RECVPAYLOAD,
  78	MSG_TYPE_RECVSTATUS,
  79	MSG_TYPE_CLOSE,
  80};
  81
  82struct rpc_channel {
  83	u16 channel_id;
  84	u32 cookie_high;
  85	u32 cookie_low;
  86};
  87
  88#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS)
  89/* Kernel mksGuestStats counter names and desciptions; same order as enum mksstat_kern_stats_t */
  90static const char* const mksstat_kern_name_desc[MKSSTAT_KERN_COUNT][2] =
  91{
  92	{ "vmw_execbuf_ioctl", "vmw_execbuf_ioctl" },
  93	{ "vmw_cotable_resize", "vmw_cotable_resize" },
  94};
  95#endif
  96
  97/**
  98 * vmw_open_channel
  99 *
 100 * @channel: RPC channel
 101 * @protocol:
 102 *
 103 * Returns: 0 on success
 104 */
 105static int vmw_open_channel(struct rpc_channel *channel, unsigned int protocol)
 106{
 107	unsigned long eax, ebx, ecx, edx, si = 0, di = 0;
 108
 109	VMW_PORT(VMW_PORT_CMD_OPEN_CHANNEL,
 110		(protocol | GUESTMSG_FLAG_COOKIE), si, di,
 111		0,
 112		VMW_HYPERVISOR_MAGIC,
 113		eax, ebx, ecx, edx, si, di);
 114
 115	if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
 116		return -EINVAL;
 117
 118	channel->channel_id  = HIGH_WORD(edx);
 119	channel->cookie_high = si;
 120	channel->cookie_low  = di;
 121
 122	return 0;
 123}
 124
 125
 126
 127/**
 128 * vmw_close_channel
 129 *
 130 * @channel: RPC channel
 131 *
 132 * Returns: 0 on success
 133 */
 134static int vmw_close_channel(struct rpc_channel *channel)
 135{
 136	unsigned long eax, ebx, ecx, edx, si, di;
 137
 138	/* Set up additional parameters */
 139	si  = channel->cookie_high;
 140	di  = channel->cookie_low;
 141
 142	VMW_PORT(VMW_PORT_CMD_CLOSE_CHANNEL,
 143		0, si, di,
 144		channel->channel_id << 16,
 145		VMW_HYPERVISOR_MAGIC,
 146		eax, ebx, ecx, edx, si, di);
 147
 148	if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
 149		return -EINVAL;
 150
 151	return 0;
 152}
 153
 154/**
 155 * vmw_port_hb_out - Send the message payload either through the
 156 * high-bandwidth port if available, or through the backdoor otherwise.
 157 * @channel: The rpc channel.
 158 * @msg: NULL-terminated message.
 159 * @hb: Whether the high-bandwidth port is available.
 160 *
 161 * Return: The port status.
 162 */
 163static unsigned long vmw_port_hb_out(struct rpc_channel *channel,
 164				     const char *msg, bool hb)
 165{
 166	unsigned long si, di, eax, ebx, ecx, edx;
 167	unsigned long msg_len = strlen(msg);
 168
 169	/* HB port can't access encrypted memory. */
 170	if (hb && !cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
 171		unsigned long bp = channel->cookie_high;
 172		u32 channel_id = (channel->channel_id << 16);
 173
 174		si = (uintptr_t) msg;
 175		di = channel->cookie_low;
 176
 177		VMW_PORT_HB_OUT(
 178			(MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
 179			msg_len, si, di,
 180			VMWARE_HYPERVISOR_HB | channel_id |
 181			VMWARE_HYPERVISOR_OUT,
 182			VMW_HYPERVISOR_MAGIC, bp,
 183			eax, ebx, ecx, edx, si, di);
 184
 185		return ebx;
 186	}
 187
 188	/* HB port not available. Send the message 4 bytes at a time. */
 189	ecx = MESSAGE_STATUS_SUCCESS << 16;
 190	while (msg_len && (HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS)) {
 191		unsigned int bytes = min_t(size_t, msg_len, 4);
 192		unsigned long word = 0;
 193
 194		memcpy(&word, msg, bytes);
 195		msg_len -= bytes;
 196		msg += bytes;
 197		si = channel->cookie_high;
 198		di = channel->cookie_low;
 199
 200		VMW_PORT(VMW_PORT_CMD_MSG | (MSG_TYPE_SENDPAYLOAD << 16),
 201			 word, si, di,
 202			 channel->channel_id << 16,
 203			 VMW_HYPERVISOR_MAGIC,
 204			 eax, ebx, ecx, edx, si, di);
 
 205	}
 206
 207	return ecx;
 208}
 209
 210/**
 211 * vmw_port_hb_in - Receive the message payload either through the
 212 * high-bandwidth port if available, or through the backdoor otherwise.
 213 * @channel: The rpc channel.
 214 * @reply: Pointer to buffer holding reply.
 215 * @reply_len: Length of the reply.
 216 * @hb: Whether the high-bandwidth port is available.
 217 *
 218 * Return: The port status.
 219 */
 220static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply,
 221				    unsigned long reply_len, bool hb)
 222{
 223	unsigned long si, di, eax, ebx, ecx, edx;
 224
 225	/* HB port can't access encrypted memory */
 226	if (hb && !cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
 227		unsigned long bp = channel->cookie_low;
 228		u32 channel_id = (channel->channel_id << 16);
 229
 230		si = channel->cookie_high;
 231		di = (uintptr_t) reply;
 232
 233		VMW_PORT_HB_IN(
 234			(MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
 235			reply_len, si, di,
 236			VMWARE_HYPERVISOR_HB | channel_id,
 237			VMW_HYPERVISOR_MAGIC, bp,
 238			eax, ebx, ecx, edx, si, di);
 
 239
 240		return ebx;
 241	}
 242
 243	/* HB port not available. Retrieve the message 4 bytes at a time. */
 244	ecx = MESSAGE_STATUS_SUCCESS << 16;
 245	while (reply_len) {
 246		unsigned int bytes = min_t(unsigned long, reply_len, 4);
 247
 248		si = channel->cookie_high;
 249		di = channel->cookie_low;
 250
 251		VMW_PORT(VMW_PORT_CMD_MSG | (MSG_TYPE_RECVPAYLOAD << 16),
 252			 MESSAGE_STATUS_SUCCESS, si, di,
 253			 channel->channel_id << 16,
 254			 VMW_HYPERVISOR_MAGIC,
 255			 eax, ebx, ecx, edx, si, di);
 256
 257		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
 258			break;
 259
 260		memcpy(reply, &ebx, bytes);
 261		reply_len -= bytes;
 262		reply += bytes;
 263	}
 264
 265	return ecx;
 266}
 267
 268
 269/**
 270 * vmw_send_msg: Sends a message to the host
 271 *
 272 * @channel: RPC channel
 273 * @msg: NULL terminated string
 274 *
 275 * Returns: 0 on success
 276 */
 277static int vmw_send_msg(struct rpc_channel *channel, const char *msg)
 278{
 279	unsigned long eax, ebx, ecx, edx, si, di;
 280	size_t msg_len = strlen(msg);
 281	int retries = 0;
 282
 283	while (retries < RETRIES) {
 284		retries++;
 285
 286		/* Set up additional parameters */
 287		si  = channel->cookie_high;
 288		di  = channel->cookie_low;
 289
 290		VMW_PORT(VMW_PORT_CMD_SENDSIZE,
 291			msg_len, si, di,
 292			channel->channel_id << 16,
 293			VMW_HYPERVISOR_MAGIC,
 294			eax, ebx, ecx, edx, si, di);
 295
 296		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
 297			/* Expected success. Give up. */
 298			return -EINVAL;
 299		}
 300
 301		/* Send msg */
 302		ebx = vmw_port_hb_out(channel, msg,
 303				      !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));
 304
 305		if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) != 0) {
 306			return 0;
 307		} else if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) {
 308			/* A checkpoint occurred. Retry. */
 309			continue;
 310		} else {
 311			break;
 312		}
 313	}
 314
 315	return -EINVAL;
 316}
 317STACK_FRAME_NON_STANDARD(vmw_send_msg);
 318
 319
 320/**
 321 * vmw_recv_msg: Receives a message from the host
 322 *
 323 * Note:  It is the caller's responsibility to call kfree() on msg.
 324 *
 325 * @channel:  channel opened by vmw_open_channel
 326 * @msg:  [OUT] message received from the host
 327 * @msg_len: message length
 328 */
 329static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
 330			size_t *msg_len)
 331{
 332	unsigned long eax, ebx, ecx, edx, si, di;
 333	char *reply;
 334	size_t reply_len;
 335	int retries = 0;
 336
 337
 338	*msg_len = 0;
 339	*msg = NULL;
 340
 341	while (retries < RETRIES) {
 342		retries++;
 343
 344		/* Set up additional parameters */
 345		si  = channel->cookie_high;
 346		di  = channel->cookie_low;
 347
 348		VMW_PORT(VMW_PORT_CMD_RECVSIZE,
 349			0, si, di,
 350			channel->channel_id << 16,
 351			VMW_HYPERVISOR_MAGIC,
 352			eax, ebx, ecx, edx, si, di);
 353
 354		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
 355			DRM_ERROR("Failed to get reply size for host message.\n");
 356			return -EINVAL;
 357		}
 358
 359		/* No reply available.  This is okay. */
 360		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_DORECV) == 0)
 361			return 0;
 362
 363		reply_len = ebx;
 364		reply     = kzalloc(reply_len + 1, GFP_KERNEL);
 365		if (!reply) {
 366			DRM_ERROR("Cannot allocate memory for host message reply.\n");
 367			return -ENOMEM;
 368		}
 369
 370
 371		/* Receive buffer */
 372		ebx = vmw_port_hb_in(channel, reply, reply_len,
 373				     !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));
 374		if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) == 0) {
 375			kfree(reply);
 376			reply = NULL;
 377			if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) {
 378				/* A checkpoint occurred. Retry. */
 379				continue;
 380			}
 381
 382			return -EINVAL;
 383		}
 384
 385		reply[reply_len] = '\0';
 386
 387
 388		/* Ack buffer */
 389		si  = channel->cookie_high;
 390		di  = channel->cookie_low;
 391
 392		VMW_PORT(VMW_PORT_CMD_RECVSTATUS,
 393			MESSAGE_STATUS_SUCCESS, si, di,
 394			channel->channel_id << 16,
 395			VMW_HYPERVISOR_MAGIC,
 396			eax, ebx, ecx, edx, si, di);
 397
 398		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
 399			kfree(reply);
 400			reply = NULL;
 401			if ((HIGH_WORD(ecx) & MESSAGE_STATUS_CPT) != 0) {
 402				/* A checkpoint occurred. Retry. */
 403				continue;
 404			}
 405
 406			return -EINVAL;
 407		}
 408
 409		break;
 410	}
 411
 412	if (!reply)
 413		return -EINVAL;
 414
 415	*msg_len = reply_len;
 416	*msg     = reply;
 417
 418	return 0;
 419}
 420STACK_FRAME_NON_STANDARD(vmw_recv_msg);
 421
 422
 423/**
 424 * vmw_host_get_guestinfo: Gets a GuestInfo parameter
 425 *
 426 * Gets the value of a  GuestInfo.* parameter.  The value returned will be in
 427 * a string, and it is up to the caller to post-process.
 428 *
 429 * @guest_info_param:  Parameter to get, e.g. GuestInfo.svga.gl3
 430 * @buffer: if NULL, *reply_len will contain reply size.
 431 * @length: size of the reply_buf.  Set to size of reply upon return
 432 *
 433 * Returns: 0 on success
 434 */
 435int vmw_host_get_guestinfo(const char *guest_info_param,
 436			   char *buffer, size_t *length)
 437{
 438	struct rpc_channel channel;
 439	char *msg, *reply = NULL;
 440	size_t reply_len = 0;
 441
 442	if (!vmw_msg_enabled)
 443		return -ENODEV;
 444
 445	if (!guest_info_param || !length)
 446		return -EINVAL;
 447
 448	msg = kasprintf(GFP_KERNEL, "info-get %s", guest_info_param);
 449	if (!msg) {
 450		DRM_ERROR("Cannot allocate memory to get guest info \"%s\".",
 451			  guest_info_param);
 452		return -ENOMEM;
 453	}
 454
 455	if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM))
 456		goto out_open;
 457
 458	if (vmw_send_msg(&channel, msg) ||
 459	    vmw_recv_msg(&channel, (void *) &reply, &reply_len))
 460		goto out_msg;
 461
 462	vmw_close_channel(&channel);
 463	if (buffer && reply && reply_len > 0) {
 464		/* Remove reply code, which are the first 2 characters of
 465		 * the reply
 466		 */
 467		reply_len = max(reply_len - 2, (size_t) 0);
 468		reply_len = min(reply_len, *length);
 469
 470		if (reply_len > 0)
 471			memcpy(buffer, reply + 2, reply_len);
 472	}
 473
 474	*length = reply_len;
 475
 476	kfree(reply);
 477	kfree(msg);
 478
 479	return 0;
 480
 481out_msg:
 482	vmw_close_channel(&channel);
 483	kfree(reply);
 484out_open:
 485	*length = 0;
 486	kfree(msg);
 487	DRM_ERROR("Failed to get guest info \"%s\".", guest_info_param);
 488
 489	return -EINVAL;
 490}
 491
 492
 493/**
 494 * vmw_host_printf: Sends a log message to the host
 495 *
 496 * @fmt: Regular printf format string and arguments
 497 *
 498 * Returns: 0 on success
 499 */
 500__printf(1, 2)
 501int vmw_host_printf(const char *fmt, ...)
 502{
 503	va_list ap;
 504	struct rpc_channel channel;
 505	char *msg;
 506	char *log;
 507	int ret = 0;
 508
 509	if (!vmw_msg_enabled)
 510		return -ENODEV;
 511
 512	if (!fmt)
 513		return ret;
 514
 515	va_start(ap, fmt);
 516	log = kvasprintf(GFP_KERNEL, fmt, ap);
 517	va_end(ap);
 518	if (!log) {
 519		DRM_ERROR("Cannot allocate memory for the log message.\n");
 520		return -ENOMEM;
 521	}
 522
 523	msg = kasprintf(GFP_KERNEL, "log %s", log);
 524	if (!msg) {
 525		DRM_ERROR("Cannot allocate memory for host log message.\n");
 526		kfree(log);
 527		return -ENOMEM;
 528	}
 529
 530	if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM))
 531		goto out_open;
 532
 533	if (vmw_send_msg(&channel, msg))
 534		goto out_msg;
 535
 536	vmw_close_channel(&channel);
 537	kfree(msg);
 538	kfree(log);
 539
 540	return 0;
 541
 542out_msg:
 543	vmw_close_channel(&channel);
 544out_open:
 545	kfree(msg);
 546	kfree(log);
 547	DRM_ERROR("Failed to send host log message.\n");
 548
 549	return -EINVAL;
 550}
 551
 552
 553/**
 554 * vmw_msg_ioctl: Sends and receveives a message to/from host from/to user-space
 555 *
 556 * Sends a message from user-space to host.
 557 * Can also receive a result from host and return that to user-space.
 558 *
 559 * @dev: Identifies the drm device.
 560 * @data: Pointer to the ioctl argument.
 561 * @file_priv: Identifies the caller.
 562 * Return: Zero on success, negative error code on error.
 563 */
 564
 565int vmw_msg_ioctl(struct drm_device *dev, void *data,
 566		  struct drm_file *file_priv)
 567{
 568	struct drm_vmw_msg_arg *arg =
 569			(struct drm_vmw_msg_arg *)data;
 570	struct rpc_channel channel;
 571	char *msg;
 572	int length;
 573
 574	msg = kmalloc(MAX_USER_MSG_LENGTH, GFP_KERNEL);
 575	if (!msg) {
 576		DRM_ERROR("Cannot allocate memory for log message.\n");
 577		return -ENOMEM;
 578	}
 579
 580	length = strncpy_from_user(msg, (void __user *)((unsigned long)arg->send),
 581				   MAX_USER_MSG_LENGTH);
 582	if (length < 0 || length >= MAX_USER_MSG_LENGTH) {
 583		DRM_ERROR("Userspace message access failure.\n");
 584		kfree(msg);
 585		return -EINVAL;
 586	}
 587
 588
 589	if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM)) {
 590		DRM_ERROR("Failed to open channel.\n");
 591		goto out_open;
 592	}
 593
 594	if (vmw_send_msg(&channel, msg)) {
 595		DRM_ERROR("Failed to send message to host.\n");
 596		goto out_msg;
 597	}
 598
 599	if (!arg->send_only) {
 600		char *reply = NULL;
 601		size_t reply_len = 0;
 602
 603		if (vmw_recv_msg(&channel, (void *) &reply, &reply_len)) {
 604			DRM_ERROR("Failed to receive message from host.\n");
 605			goto out_msg;
 606		}
 607		if (reply && reply_len > 0) {
 608			if (copy_to_user((void __user *)((unsigned long)arg->receive),
 609					 reply, reply_len)) {
 610				DRM_ERROR("Failed to copy message to userspace.\n");
 611				kfree(reply);
 612				goto out_msg;
 613			}
 614			arg->receive_len = (__u32)reply_len;
 615		}
 616		kfree(reply);
 617	}
 618
 619	vmw_close_channel(&channel);
 620	kfree(msg);
 621
 622	return 0;
 623
 624out_msg:
 625	vmw_close_channel(&channel);
 626out_open:
 627	kfree(msg);
 628
 629	return -EINVAL;
 630}
 631
 632/**
 633 * reset_ppn_array: Resets a PPN64 array to INVALID_PPN64 content
 634 *
 635 * @arr: Array to reset.
 636 * @size: Array length.
 637 */
 638static inline void reset_ppn_array(PPN64 *arr, size_t size)
 639{
 640	size_t i;
 641
 642	BUG_ON(!arr || size == 0);
 643
 644	for (i = 0; i < size; ++i)
 645		arr[i] = INVALID_PPN64;
 646}
 647
 648/**
 649 * hypervisor_ppn_reset_all: Removes all mksGuestStat instance descriptors from
 650 * the hypervisor. All related pages should be subsequently unpinned or freed.
 651 *
 652 */
 653static inline void hypervisor_ppn_reset_all(void)
 654{
 655	unsigned long eax, ebx, ecx, edx, si = 0, di = 0;
 656
 657	VMW_PORT(VMW_PORT_CMD_MKSGS_RESET,
 658		0, si, di,
 659		0,
 660		VMW_HYPERVISOR_MAGIC,
 661		eax, ebx, ecx, edx, si, di);
 662}
 663
 664/**
 665 * hypervisor_ppn_add: Adds a single mksGuestStat instance descriptor to the
 666 * hypervisor. Any related userspace pages should be pinned in advance.
 667 *
 668 * @pfn: Physical page number of the instance descriptor
 669 */
 670static inline void hypervisor_ppn_add(PPN64 pfn)
 671{
 672	unsigned long eax, ebx, ecx, edx, si = 0, di = 0;
 673
 674	VMW_PORT(VMW_PORT_CMD_MKSGS_ADD_PPN,
 675		(unsigned long)pfn, si, di,
 676		0,
 677		VMW_HYPERVISOR_MAGIC,
 678		eax, ebx, ecx, edx, si, di);
 679}
 680
 681/**
 682 * hypervisor_ppn_remove: Removes a single mksGuestStat instance descriptor from
 683 * the hypervisor. All related pages should be subsequently unpinned or freed.
 684 *
 685 * @pfn: Physical page number of the instance descriptor
 686 */
 687static inline void hypervisor_ppn_remove(PPN64 pfn)
 688{
 689	unsigned long eax, ebx, ecx, edx, si = 0, di = 0;
 690
 691	VMW_PORT(VMW_PORT_CMD_MKSGS_REMOVE_PPN,
 692		(unsigned long)pfn, si, di,
 693		0,
 694		VMW_HYPERVISOR_MAGIC,
 695		eax, ebx, ecx, edx, si, di);
 696}
 697
 698#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS)
 699
 700/* Order of the total number of pages used for kernel-internal mksGuestStat; at least 2 */
 701#define MKSSTAT_KERNEL_PAGES_ORDER 2
 702/* Header to the text description of mksGuestStat instance descriptor */
 703#define MKSSTAT_KERNEL_DESCRIPTION "vmwgfx"
 704
/**
 * mksstat_init_record: Initializes an MKSGuestStatCounter-based record
 * for the respective mksGuestStat index.
 *
 * @stat_idx: Index of the MKSGuestStatCounter-based mksGuestStat record.
 * @pstat: Pointer to array of MKSGuestStatCounterTime.
 * @pinfo: Pointer to array of MKSGuestStatInfoEntry.
 * @pstrs: Pointer to current end of the name/description sequence.
 * Return: Pointer to the new end of the name/description sequence.
 */
static inline char *mksstat_init_record(mksstat_kern_stats_t stat_idx,
	MKSGuestStatCounterTime *pstat, MKSGuestStatInfoEntry *pinfo, char *pstrs)
{
	char *const pstrd = pstrs + strlen(mksstat_kern_name_desc[stat_idx][0]) + 1;

	strcpy(pstrs, mksstat_kern_name_desc[stat_idx][0]);
	strcpy(pstrd, mksstat_kern_name_desc[stat_idx][1]);

	pinfo[stat_idx].name.s = pstrs;
	pinfo[stat_idx].description.s = pstrd;
	pinfo[stat_idx].flags = MKS_GUEST_STAT_FLAG_NONE;
	pinfo[stat_idx].stat.counter = (MKSGuestStatCounter *)&pstat[stat_idx];

	return pstrd + strlen(mksstat_kern_name_desc[stat_idx][1]) + 1;
}

/**
 * mksstat_init_record_time: Initializes an MKSGuestStatCounterTime-based record
 * for the respective mksGuestStat index.
 *
 * @stat_idx: Index of the MKSGuestStatCounterTime-based mksGuestStat record.
 * @pstat: Pointer to array of MKSGuestStatCounterTime.
 * @pinfo: Pointer to array of MKSGuestStatInfoEntry.
 * @pstrs: Pointer to current end of the name/description sequence.
 * Return: Pointer to the new end of the name/description sequence.
 */
static inline char *mksstat_init_record_time(mksstat_kern_stats_t stat_idx,
	MKSGuestStatCounterTime *pstat, MKSGuestStatInfoEntry *pinfo, char *pstrs)
{
	char *const pstrd = pstrs + strlen(mksstat_kern_name_desc[stat_idx][0]) + 1;

	strcpy(pstrs, mksstat_kern_name_desc[stat_idx][0]);
	strcpy(pstrd, mksstat_kern_name_desc[stat_idx][1]);

	pinfo[stat_idx].name.s = pstrs;
	pinfo[stat_idx].description.s = pstrd;
	pinfo[stat_idx].flags = MKS_GUEST_STAT_FLAG_TIME;
	pinfo[stat_idx].stat.counterTime = &pstat[stat_idx];

	return pstrd + strlen(mksstat_kern_name_desc[stat_idx][1]) + 1;
}

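/*
 * The two helpers above build a packed sequence of NUL-terminated
 * name/description pairs ("name0" etc. are placeholders, not real counter
 * names):
 *
 *	"name0\0description0\0name1\0description1\0" ...
 *
 * Each call consumes one pair and returns the next free position, which is
 * why mksstat_init_kern_id() below threads pstrs_acc through successive
 * calls.
 */
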
/**
 * mksstat_init_kern_id: Creates a single mksGuestStat instance descriptor and
 * kernel-internal counters. Adds PFN mapping to the hypervisor.
 *
 * Create a single mksGuestStat instance descriptor and corresponding structures
 * for all kernel-internal counters. The corresponding PFNs are mapped with the
 * hypervisor.
 *
 * @ppage: Output pointer to page containing the instance descriptor.
 * Return: Zero on success, negative error code on error.
 */
static int mksstat_init_kern_id(struct page **ppage)
{
	MKSGuestStatInstanceDescriptor *pdesc;
	MKSGuestStatCounterTime *pstat;
	MKSGuestStatInfoEntry *pinfo;
	char *pstrs, *pstrs_acc;

	/* Allocate pages for the kernel-internal instance descriptor */
	struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, MKSSTAT_KERNEL_PAGES_ORDER);

	if (!page)
		return -ENOMEM;

	pdesc = page_address(page);
	pstat = vmw_mksstat_get_kern_pstat(pdesc);
	pinfo = vmw_mksstat_get_kern_pinfo(pdesc);
	pstrs = vmw_mksstat_get_kern_pstrs(pdesc);

	/* Set up all kernel-internal counters and corresponding structures */
	pstrs_acc = pstrs;
	pstrs_acc = mksstat_init_record_time(MKSSTAT_KERN_EXECBUF, pstat, pinfo, pstrs_acc);
	pstrs_acc = mksstat_init_record_time(MKSSTAT_KERN_COTABLE_RESIZE, pstat, pinfo, pstrs_acc);

	/* Add new counters above, in their order of appearance in mksstat_kern_stats_t */

	BUG_ON(pstrs_acc - pstrs > PAGE_SIZE);

	/* Set up the kernel-internal instance descriptor */
	pdesc->reservedMBZ = 0;
	pdesc->statStartVA = (uintptr_t)pstat;
	pdesc->strsStartVA = (uintptr_t)pstrs;
	pdesc->statLength = sizeof(*pstat) * MKSSTAT_KERN_COUNT;
	pdesc->infoLength = sizeof(*pinfo) * MKSSTAT_KERN_COUNT;
	pdesc->strsLength = pstrs_acc - pstrs;
	snprintf(pdesc->description, ARRAY_SIZE(pdesc->description) - 1, "%s pid=%d",
		MKSSTAT_KERNEL_DESCRIPTION, current->pid);

	pdesc->statPPNs[0] = page_to_pfn(virt_to_page(pstat));
	reset_ppn_array(pdesc->statPPNs + 1, ARRAY_SIZE(pdesc->statPPNs) - 1);

	pdesc->infoPPNs[0] = page_to_pfn(virt_to_page(pinfo));
	reset_ppn_array(pdesc->infoPPNs + 1, ARRAY_SIZE(pdesc->infoPPNs) - 1);

	pdesc->strsPPNs[0] = page_to_pfn(virt_to_page(pstrs));
	reset_ppn_array(pdesc->strsPPNs + 1, ARRAY_SIZE(pdesc->strsPPNs) - 1);

	*ppage = page;

	hypervisor_ppn_add((PPN64)page_to_pfn(page));

	return 0;
}

/**
 * vmw_mksstat_get_kern_slot: Acquires a slot for a single kernel-internal
 * mksGuestStat instance descriptor.
 *
 * Find a slot for a single kernel-internal mksGuestStat instance descriptor.
 * If none is present yet, allocate a new one and set up a kernel-internal
 * mksGuestStat instance descriptor for it.
 *
 * @pid: Process for which a slot is sought.
 * @dev_priv: Identifies the drm private device.
 * Return: Non-negative slot on success, negative error code on error.
 */
int vmw_mksstat_get_kern_slot(pid_t pid, struct vmw_private *dev_priv)
{
	const size_t base = (u32)hash_32(pid, MKSSTAT_CAPACITY_LOG2);
	size_t i;

	for (i = 0; i < ARRAY_SIZE(dev_priv->mksstat_kern_pids); ++i) {
		const size_t slot = (i + base) % ARRAY_SIZE(dev_priv->mksstat_kern_pids);

		/* Check if an instance descriptor for this pid is already present */
		if (pid == (pid_t)atomic_read(&dev_priv->mksstat_kern_pids[slot]))
			return (int)slot;

		/* Set up a new instance descriptor for this pid */
		if (!atomic_cmpxchg(&dev_priv->mksstat_kern_pids[slot], 0, MKSSTAT_PID_RESERVED)) {
			const int ret = mksstat_init_kern_id(&dev_priv->mksstat_kern_pages[slot]);

			if (!ret) {
				/* Reset top-timer tracking for this slot */
				dev_priv->mksstat_kern_top_timer[slot] = MKSSTAT_KERN_COUNT;

				atomic_set(&dev_priv->mksstat_kern_pids[slot], pid);
				return (int)slot;
			}

			atomic_set(&dev_priv->mksstat_kern_pids[slot], 0);
			return ret;
		}
	}

	return -ENOSPC;
}
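
/*
 * Illustrative only: one way a kernel counter could be bumped once a slot
 * is acquired. This is a sketch that assumes the page layout established by
 * mksstat_init_kern_id() and an atomic64_t count inside MKSGuestStatCounter;
 * the real instrumentation in this driver goes through the MKS_STAT_TIME_*
 * helpers in vmwgfx_mksstat.h.
 *
 *	const int slot = vmw_mksstat_get_kern_slot(current->pid, dev_priv);
 *
 *	if (slot >= 0) {
 *		MKSGuestStatCounterTime *pstat = vmw_mksstat_get_kern_pstat(
 *			page_address(dev_priv->mksstat_kern_pages[slot]));
 *
 *		atomic64_inc(&pstat[MKSSTAT_KERN_EXECBUF].counter.count);
 *	}
 */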

#endif

/**
 * vmw_mksstat_cleanup_descriptor: Frees a single userspace-originating
 * mksGuestStat instance-descriptor page and unpins all related user pages.
 *
 * Unpin all user pages related to this instance descriptor and free
 * the instance-descriptor page itself.
 *
 * @page: Page of the instance descriptor.
 */
static void vmw_mksstat_cleanup_descriptor(struct page *page)
{
	MKSGuestStatInstanceDescriptor *pdesc = page_address(page);
	size_t i;

	for (i = 0; i < ARRAY_SIZE(pdesc->statPPNs) && pdesc->statPPNs[i] != INVALID_PPN64; ++i)
		unpin_user_page(pfn_to_page(pdesc->statPPNs[i]));

	for (i = 0; i < ARRAY_SIZE(pdesc->infoPPNs) && pdesc->infoPPNs[i] != INVALID_PPN64; ++i)
		unpin_user_page(pfn_to_page(pdesc->infoPPNs[i]));

	for (i = 0; i < ARRAY_SIZE(pdesc->strsPPNs) && pdesc->strsPPNs[i] != INVALID_PPN64; ++i)
		unpin_user_page(pfn_to_page(pdesc->strsPPNs[i]));

	__free_page(page);
}

/**
 * vmw_mksstat_remove_all: Resets all mksGuestStat instance descriptors
 * from the hypervisor.
 *
 * Discard all hypervisor PFN mappings that contain active mksGuestStat
 * instance descriptors, unpin the related userspace pages and free the
 * related kernel pages.
 *
 * @dev_priv: Identifies the drm private device.
 * Return: Zero on success, negative error code on error.
 */
int vmw_mksstat_remove_all(struct vmw_private *dev_priv)
{
	int ret = 0;
	size_t i;

	/* Discard all PFN mappings with the hypervisor */
	hypervisor_ppn_reset_all();

	/* Discard all userspace-originating instance descriptors and unpin all related pages */
	for (i = 0; i < ARRAY_SIZE(dev_priv->mksstat_user_pids); ++i) {
		const pid_t pid0 = (pid_t)atomic_read(&dev_priv->mksstat_user_pids[i]);

		if (!pid0)
			continue;

		if (pid0 != MKSSTAT_PID_RESERVED) {
			const pid_t pid1 = atomic_cmpxchg(&dev_priv->mksstat_user_pids[i], pid0, MKSSTAT_PID_RESERVED);

			if (!pid1)
				continue;

			if (pid1 == pid0) {
				struct page *const page = dev_priv->mksstat_user_pages[i];

				BUG_ON(!page);

				dev_priv->mksstat_user_pages[i] = NULL;
				atomic_set(&dev_priv->mksstat_user_pids[i], 0);

				vmw_mksstat_cleanup_descriptor(page);
				continue;
			}
		}

		ret = -EAGAIN;
	}

#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS)
	/* Discard all kernel-internal instance descriptors and free all related pages */
	for (i = 0; i < ARRAY_SIZE(dev_priv->mksstat_kern_pids); ++i) {
		const pid_t pid0 = (pid_t)atomic_read(&dev_priv->mksstat_kern_pids[i]);

		if (!pid0)
			continue;

		if (pid0 != MKSSTAT_PID_RESERVED) {
			const pid_t pid1 = atomic_cmpxchg(&dev_priv->mksstat_kern_pids[i], pid0, MKSSTAT_PID_RESERVED);

			if (!pid1)
				continue;

			if (pid1 == pid0) {
				struct page *const page = dev_priv->mksstat_kern_pages[i];

				BUG_ON(!page);

				dev_priv->mksstat_kern_pages[i] = NULL;
				atomic_set(&dev_priv->mksstat_kern_pids[i], 0);

				__free_pages(page, MKSSTAT_KERNEL_PAGES_ORDER);
				continue;
			}
		}

		ret = -EAGAIN;
	}

#endif
	return ret;
}
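
/*
 * Illustrative only: a caller that must guarantee teardown could retry on
 * -EAGAIN, since the error indicates a racing reservation rather than a
 * permanent failure (a sketch, not taken from this driver):
 *
 *	while (vmw_mksstat_remove_all(dev_priv) == -EAGAIN)
 *		cond_resched();
 */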

/**
 * vmw_mksstat_reset_ioctl: Resets all mksGuestStat instance descriptors
 * from the hypervisor.
 *
 * Discard all hypervisor PFN mappings that contain active mksGuestStat
 * instance descriptors, unpin the related userspace pages and free the
 * related kernel pages.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller; unused.
 * Return: Zero on success, negative error code on error.
 */
int vmw_mksstat_reset_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *const dev_priv = vmw_priv(dev);

	return vmw_mksstat_remove_all(dev_priv);
}

/**
 * vmw_mksstat_add_ioctl: Creates a single userspace-originating mksGuestStat
 * instance descriptor and registers that with the hypervisor.
 *
 * Create a hypervisor PFN mapping that contains a single mksGuestStat
 * instance descriptor and pin the corresponding userspace pages.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller; unused.
 * Return: Zero on success, negative error code on error.
 */
int vmw_mksstat_add_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_vmw_mksstat_add_arg *arg =
		(struct drm_vmw_mksstat_add_arg *) data;

	struct vmw_private *const dev_priv = vmw_priv(dev);

	const size_t num_pages_stat = PFN_UP(arg->stat_len);
	const size_t num_pages_info = PFN_UP(arg->info_len);
	const size_t num_pages_strs = PFN_UP(arg->strs_len);
	long desc_len;
	long nr_pinned_stat;
	long nr_pinned_info;
	long nr_pinned_strs;
	MKSGuestStatInstanceDescriptor *pdesc;
	struct page *page = NULL;
	struct page **pages_stat = NULL;
	struct page **pages_info = NULL;
	struct page **pages_strs = NULL;
	size_t i, slot;
	int ret_err = -ENOMEM;

	arg->id = -1;

	if (!arg->stat || !arg->info || !arg->strs)
		return -EINVAL;

	if (!arg->stat_len || !arg->info_len || !arg->strs_len)
		return -EINVAL;

	if (!arg->description)
		return -EINVAL;

	if (num_pages_stat > ARRAY_SIZE(pdesc->statPPNs) ||
		num_pages_info > ARRAY_SIZE(pdesc->infoPPNs) ||
		num_pages_strs > ARRAY_SIZE(pdesc->strsPPNs))
		return -EINVAL;

	/* Find an available slot in the mksGuestStats user array and reserve it */
	for (slot = 0; slot < ARRAY_SIZE(dev_priv->mksstat_user_pids); ++slot)
		if (!atomic_cmpxchg(&dev_priv->mksstat_user_pids[slot], 0, MKSSTAT_PID_RESERVED))
			break;

	if (slot == ARRAY_SIZE(dev_priv->mksstat_user_pids))
		return -ENOSPC;

	BUG_ON(dev_priv->mksstat_user_pages[slot]);

	/* Allocate statically-sized temp arrays for pages -- too big to keep in frame */
	pages_stat = (struct page **)kmalloc_array(
		ARRAY_SIZE(pdesc->statPPNs) +
		ARRAY_SIZE(pdesc->infoPPNs) +
		ARRAY_SIZE(pdesc->strsPPNs), sizeof(*pages_stat), GFP_KERNEL);

	if (!pages_stat)
		goto err_nomem;

	pages_info = pages_stat + ARRAY_SIZE(pdesc->statPPNs);
	pages_strs = pages_info + ARRAY_SIZE(pdesc->infoPPNs);

	/* Allocate a page for the instance descriptor */
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (!page)
		goto err_nomem;

	/* Set up the instance descriptor */
	pdesc = page_address(page);

	pdesc->reservedMBZ = 0;
	pdesc->statStartVA = arg->stat;
	pdesc->strsStartVA = arg->strs;
	pdesc->statLength = arg->stat_len;
	pdesc->infoLength = arg->info_len;
	pdesc->strsLength = arg->strs_len;
	desc_len = strncpy_from_user(pdesc->description, u64_to_user_ptr(arg->description),
		ARRAY_SIZE(pdesc->description) - 1);

	if (desc_len < 0) {
		ret_err = -EFAULT;
		goto err_nomem;
	}

	reset_ppn_array(pdesc->statPPNs, ARRAY_SIZE(pdesc->statPPNs));
	reset_ppn_array(pdesc->infoPPNs, ARRAY_SIZE(pdesc->infoPPNs));
	reset_ppn_array(pdesc->strsPPNs, ARRAY_SIZE(pdesc->strsPPNs));

	/* Pin mksGuestStat user pages and store those in the instance descriptor */
	nr_pinned_stat = pin_user_pages_fast(arg->stat, num_pages_stat, FOLL_LONGTERM, pages_stat);
	if (num_pages_stat != nr_pinned_stat)
		goto err_pin_stat;

	for (i = 0; i < num_pages_stat; ++i)
		pdesc->statPPNs[i] = page_to_pfn(pages_stat[i]);

	nr_pinned_info = pin_user_pages_fast(arg->info, num_pages_info, FOLL_LONGTERM, pages_info);
	if (num_pages_info != nr_pinned_info)
		goto err_pin_info;

	for (i = 0; i < num_pages_info; ++i)
		pdesc->infoPPNs[i] = page_to_pfn(pages_info[i]);

	nr_pinned_strs = pin_user_pages_fast(arg->strs, num_pages_strs, FOLL_LONGTERM, pages_strs);
	if (num_pages_strs != nr_pinned_strs)
		goto err_pin_strs;

	for (i = 0; i < num_pages_strs; ++i)
		pdesc->strsPPNs[i] = page_to_pfn(pages_strs[i]);

	/*
	 * Send the descriptor to the host via a hypervisor call. The mksGuestStat
	 * pages will remain in use until the user requests a matching remove stats
	 * or a stats reset occurs.
	 */
	hypervisor_ppn_add((PPN64)page_to_pfn(page));

	dev_priv->mksstat_user_pages[slot] = page;
	atomic_set(&dev_priv->mksstat_user_pids[slot], task_pgrp_vnr(current));

	arg->id = slot;

	DRM_DEV_INFO(dev->dev, "pid=%d arg.description='%.*s' id=%zu\n", current->pid, (int)desc_len, pdesc->description, slot);

	kfree(pages_stat);
	return 0;

err_pin_strs:
	if (nr_pinned_strs > 0)
		unpin_user_pages(pages_strs, nr_pinned_strs);

err_pin_info:
	if (nr_pinned_info > 0)
		unpin_user_pages(pages_info, nr_pinned_info);

err_pin_stat:
	if (nr_pinned_stat > 0)
		unpin_user_pages(pages_stat, nr_pinned_stat);

err_nomem:
	atomic_set(&dev_priv->mksstat_user_pids[slot], 0);
	if (page)
		__free_page(page);
	kfree(pages_stat);

	return ret_err;
}
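
/*
 * Illustrative userspace sequence for the add/remove pair, as a sketch using
 * libdrm's drmCommandWriteRead(); buffer allocation and error handling are
 * omitted, and the field names are those of struct drm_vmw_mksstat_add_arg
 * as used above:
 *
 *	struct drm_vmw_mksstat_add_arg arg = {
 *		.stat = (__u64)(uintptr_t)stats, .stat_len = stats_size,
 *		.info = (__u64)(uintptr_t)infos, .info_len = infos_size,
 *		.strs = (__u64)(uintptr_t)strs,  .strs_len = strs_size,
 *		.description = (__u64)(uintptr_t)"my-app",
 *	};
 *
 *	if (!drmCommandWriteRead(fd, DRM_VMW_MKSSTAT_ADD, &arg, sizeof(arg))) {
 *		struct drm_vmw_mksstat_remove_arg rm = { .id = arg.id };
 *
 *		drmCommandWrite(fd, DRM_VMW_MKSSTAT_REMOVE, &rm, sizeof(rm));
 *	}
 */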

/**
 * vmw_mksstat_remove_ioctl: Removes a single userspace-originating mksGuestStat
 * instance descriptor from the hypervisor.
 *
 * Discard a hypervisor PFN mapping that contains a single mksGuestStat
 * instance descriptor and unpin the corresponding userspace pages.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller; unused.
 * Return: Zero on success, negative error code on error.
 */
int vmw_mksstat_remove_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_vmw_mksstat_remove_arg *arg =
		(struct drm_vmw_mksstat_remove_arg *) data;

	struct vmw_private *const dev_priv = vmw_priv(dev);

	const size_t slot = arg->id;
	pid_t pgid, pid;

	if (slot >= ARRAY_SIZE(dev_priv->mksstat_user_pids))
		return -EINVAL;

	DRM_DEV_INFO(dev->dev, "pid=%d arg.id=%zu\n", current->pid, slot);

	pgid = task_pgrp_vnr(current);
	pid = atomic_cmpxchg(&dev_priv->mksstat_user_pids[slot], pgid, MKSSTAT_PID_RESERVED);

	if (!pid)
		return 0;

	if (pid == pgid) {
		struct page *const page = dev_priv->mksstat_user_pages[slot];

		BUG_ON(!page);

		dev_priv->mksstat_user_pages[slot] = NULL;
		atomic_set(&dev_priv->mksstat_user_pids[slot], 0);

		hypervisor_ppn_remove((PPN64)page_to_pfn(page));

		vmw_mksstat_cleanup_descriptor(page);
		return 0;
	}

	return -EAGAIN;
}