v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Common Ultravisor functions and initialization
   4 *
   5 * Copyright IBM Corp. 2019, 2024
   6 */
   7#define KMSG_COMPONENT "prot_virt"
   8#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
   9
  10#include <linux/kernel.h>
  11#include <linux/types.h>
  12#include <linux/sizes.h>
  13#include <linux/bitmap.h>
  14#include <linux/memblock.h>
  15#include <linux/pagemap.h>
  16#include <linux/swap.h>
  17#include <linux/pagewalk.h>
  18#include <asm/facility.h>
  19#include <asm/sections.h>
  20#include <asm/uv.h>
  21
  22#if !IS_ENABLED(CONFIG_KVM)
  23unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
  24{
  25	return 0;
  26}
  27
  28int gmap_fault(struct gmap *gmap, unsigned long gaddr,
  29	       unsigned int fault_flags)
  30{
  31	return 0;
  32}
  33#endif
  34
  35/* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */
  36int __bootdata_preserved(prot_virt_guest);
  37EXPORT_SYMBOL(prot_virt_guest);
  38
  39/*
   40 * uv_info contains both host and guest information, but it is currently
   41 * only expected to be used from modules, either by the KVM module or by
   42 * any PV guest module.
  43 *
  44 * The kernel itself will write these values once in uv_query_info()
  45 * and then make some of them readable via a sysfs interface.
  46 */
  47struct uv_info __bootdata_preserved(uv_info);
  48EXPORT_SYMBOL(uv_info);
  49
  50int __bootdata_preserved(prot_virt_host);
  51EXPORT_SYMBOL(prot_virt_host);
  52
  53static int __init uv_init(phys_addr_t stor_base, unsigned long stor_len)
  54{
  55	struct uv_cb_init uvcb = {
  56		.header.cmd = UVC_CMD_INIT_UV,
  57		.header.len = sizeof(uvcb),
  58		.stor_origin = stor_base,
  59		.stor_len = stor_len,
  60	};
  61
  62	if (uv_call(0, (uint64_t)&uvcb)) {
   63		pr_err("Ultravisor init failed with rc: 0x%x rrc: 0x%x\n",
  64		       uvcb.header.rc, uvcb.header.rrc);
  65		return -1;
  66	}
  67	return 0;
  68}
  69
  70void __init setup_uv(void)
  71{
  72	void *uv_stor_base;
  73
  74	if (!is_prot_virt_host())
  75		return;
  76
  77	uv_stor_base = memblock_alloc_try_nid(
  78		uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
  79		MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
  80	if (!uv_stor_base) {
  81		pr_warn("Failed to reserve %lu bytes for ultravisor base storage\n",
  82			uv_info.uv_base_stor_len);
  83		goto fail;
  84	}
  85
  86	if (uv_init(__pa(uv_stor_base), uv_info.uv_base_stor_len)) {
  87		memblock_free(uv_stor_base, uv_info.uv_base_stor_len);
  88		goto fail;
  89	}
  90
  91	pr_info("Reserving %luMB as ultravisor base storage\n",
  92		uv_info.uv_base_stor_len >> 20);
  93	return;
  94fail:
   95	pr_info("Disabling support for protected virtualization\n");
  96	prot_virt_host = 0;
  97}
  98
  99/*
 100 * Requests the Ultravisor to pin the page in the shared state. This will
 101 * cause an intercept when the guest attempts to unshare the pinned page.
 102 */
 103int uv_pin_shared(unsigned long paddr)
 104{
 105	struct uv_cb_cfs uvcb = {
 106		.header.cmd = UVC_CMD_PIN_PAGE_SHARED,
 107		.header.len = sizeof(uvcb),
 108		.paddr = paddr,
 109	};
 110
 111	if (uv_call(0, (u64)&uvcb))
 112		return -EINVAL;
 113	return 0;
 114}
 115EXPORT_SYMBOL_GPL(uv_pin_shared);
 116
 117/*
 118 * Requests the Ultravisor to destroy a guest page and make it
 119 * accessible to the host. The destroy clears the page instead of
 120 * exporting.
 121 *
 122 * @paddr: Absolute host address of page to be destroyed
 123 */
 124static int uv_destroy(unsigned long paddr)
 125{
 126	struct uv_cb_cfs uvcb = {
 127		.header.cmd = UVC_CMD_DESTR_SEC_STOR,
 128		.header.len = sizeof(uvcb),
 129		.paddr = paddr
 130	};
 131
 132	if (uv_call(0, (u64)&uvcb)) {
 133		/*
  134		 * Older firmware uses 107/d as an indication of a non-secure
 135		 * page. Let us emulate the newer variant (no-op).
 136		 */
 137		if (uvcb.header.rc == 0x107 && uvcb.header.rrc == 0xd)
 138			return 0;
 139		return -EINVAL;
 140	}
 141	return 0;
 142}
 143
 144/*
 145 * The caller must already hold a reference to the folio
 146 */
 147int uv_destroy_folio(struct folio *folio)
 148{
 149	int rc;
 150
 151	/* See gmap_make_secure(): large folios cannot be secure */
 152	if (unlikely(folio_test_large(folio)))
 153		return 0;
 154
 155	folio_get(folio);
 156	rc = uv_destroy(folio_to_phys(folio));
 157	if (!rc)
 158		clear_bit(PG_arch_1, &folio->flags);
 159	folio_put(folio);
 160	return rc;
 161}
 162
 163/*
 164 * The present PTE still indirectly holds a folio reference through the mapping.
 165 */
 166int uv_destroy_pte(pte_t pte)
 167{
 168	VM_WARN_ON(!pte_present(pte));
 169	return uv_destroy_folio(pfn_folio(pte_pfn(pte)));
 170}
 171
 172/*
 173 * Requests the Ultravisor to encrypt a guest page and make it
 174 * accessible to the host for paging (export).
 175 *
 176 * @paddr: Absolute host address of page to be exported
 177 */
 178static int uv_convert_from_secure(unsigned long paddr)
 179{
 180	struct uv_cb_cfs uvcb = {
 181		.header.cmd = UVC_CMD_CONV_FROM_SEC_STOR,
 182		.header.len = sizeof(uvcb),
 183		.paddr = paddr
 184	};
 185
 186	if (uv_call(0, (u64)&uvcb))
 187		return -EINVAL;
 188	return 0;
 189}
 190
 191/*
 192 * The caller must already hold a reference to the folio.
 193 */
 194static int uv_convert_from_secure_folio(struct folio *folio)
 195{
 196	int rc;
 197
 198	/* See gmap_make_secure(): large folios cannot be secure */
 199	if (unlikely(folio_test_large(folio)))
 200		return 0;
 201
 202	folio_get(folio);
 203	rc = uv_convert_from_secure(folio_to_phys(folio));
 204	if (!rc)
 205		clear_bit(PG_arch_1, &folio->flags);
 206	folio_put(folio);
 207	return rc;
 208}
 209
 210/*
 211 * The present PTE still indirectly holds a folio reference through the mapping.
 212 */
 213int uv_convert_from_secure_pte(pte_t pte)
 214{
 215	VM_WARN_ON(!pte_present(pte));
 216	return uv_convert_from_secure_folio(pfn_folio(pte_pfn(pte)));
 217}
 218
 219/*
 220 * Calculate the expected ref_count for a folio that would otherwise have no
 221 * further pins. This was cribbed from similar functions in other places in
  222 * the kernel, but with some slight modifications. We know that a secure
  223 * folio cannot be a large folio, for example.
 224 */
 225static int expected_folio_refs(struct folio *folio)
 226{
 227	int res;
 228
 229	res = folio_mapcount(folio);
 230	if (folio_test_swapcache(folio)) {
 231		res++;
 232	} else if (folio_mapping(folio)) {
 233		res++;
 234		if (folio->private)
 235			res++;
 236	}
 237	return res;
 238}
 239
 240static int make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb)
 241{
 242	int expected, cc = 0;
 243
 244	if (folio_test_writeback(folio))
 245		return -EAGAIN;
 246	expected = expected_folio_refs(folio);
 247	if (!folio_ref_freeze(folio, expected))
 248		return -EBUSY;
 249	set_bit(PG_arch_1, &folio->flags);
 250	/*
 251	 * If the UVC does not succeed or fail immediately, we don't want to
 252	 * loop for long, or we might get stall notifications.
 253	 * On the other hand, this is a complex scenario and we are holding a lot of
 254	 * locks, so we can't easily sleep and reschedule. We try only once,
 255	 * and if the UVC returned busy or partial completion, we return
 256	 * -EAGAIN and we let the callers deal with it.
 257	 */
 258	cc = __uv_call(0, (u64)uvcb);
 259	folio_ref_unfreeze(folio, expected);
 260	/*
 261	 * Return -ENXIO if the folio was not mapped, -EINVAL for other errors.
 262	 * If busy or partially completed, return -EAGAIN.
 263	 */
 264	if (cc == UVC_CC_OK)
 265		return 0;
 266	else if (cc == UVC_CC_BUSY || cc == UVC_CC_PARTIAL)
 267		return -EAGAIN;
 268	return uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
 269}
 270
 271/**
 272 * should_export_before_import - Determine whether an export is needed
 273 * before an import-like operation
 274 * @uvcb: the Ultravisor control block of the UVC to be performed
 275 * @mm: the mm of the process
 276 *
 277 * Returns whether an export is needed before every import-like operation.
 278 * This is needed for shared pages, which don't trigger a secure storage
 279 * exception when accessed from a different guest.
 280 *
 281 * Although considered as one, the Unpin Page UVC is not an actual import,
 282 * so it is not affected.
 283 *
 284 * No export is needed also when there is only one protected VM, because the
 285 * page cannot belong to the wrong VM in that case (there is no "other VM"
 286 * it can belong to).
 287 *
 288 * Return: true if an export is needed before every import, otherwise false.
 289 */
 290static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_struct *mm)
 291{
 292	/*
 293	 * The misc feature indicates, among other things, that importing a
 294	 * shared page from a different protected VM will automatically also
 295	 * transfer its ownership.
 296	 */
 297	if (uv_has_feature(BIT_UV_FEAT_MISC))
 298		return false;
 299	if (uvcb->cmd == UVC_CMD_UNPIN_PAGE_SHARED)
 300		return false;
 301	return atomic_read(&mm->context.protected_count) > 1;
 302}
 303
 304/*
 305 * Drain LRU caches: the local one on first invocation and the ones of all
 306 * CPUs on successive invocations. Returns "true" on the first invocation.
 307 */
 308static bool drain_lru(bool *drain_lru_called)
 309{
 310	/*
 311	 * If we have tried a local drain and the folio refcount
 312	 * still does not match our expected safe value, try with a
 313	 * system wide drain. This is needed if the pagevecs holding
 314	 * the page are on a different CPU.
 315	 */
 316	if (*drain_lru_called) {
 317		lru_add_drain_all();
 318		/* We give up here, don't retry immediately. */
 319		return false;
 320	}
 321	/*
 322	 * We are here if the folio refcount does not match the
 323	 * expected safe value. The main culprits are usually
 324	 * pagevecs. With lru_add_drain() we drain the pagevecs
 325	 * on the local CPU so that hopefully the refcount will
 326	 * reach the expected safe value.
 327	 */
 328	lru_add_drain();
 329	*drain_lru_called = true;
 330	/* The caller should try again immediately */
 331	return true;
 332}
 333
 334/*
 335 * Requests the Ultravisor to make a page accessible to a guest.
 336 * If it's brought in the first time, it will be cleared. If
 337 * it has been exported before, it will be decrypted and integrity
 338 * checked.
 339 */
 340int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
 341{
 342	struct vm_area_struct *vma;
 343	bool drain_lru_called = false;
 344	spinlock_t *ptelock;
 345	unsigned long uaddr;
 346	struct folio *folio;
 347	pte_t *ptep;
 348	int rc;
 349
 350again:
 351	rc = -EFAULT;
 352	mmap_read_lock(gmap->mm);
 353
 354	uaddr = __gmap_translate(gmap, gaddr);
 355	if (IS_ERR_VALUE(uaddr))
 356		goto out;
 357	vma = vma_lookup(gmap->mm, uaddr);
 358	if (!vma)
 359		goto out;
 360	/*
 361	 * Secure pages cannot be huge and userspace should not combine both.
 362	 * In case userspace does it anyway this will result in an -EFAULT for
  363 * the unpack. The guest thus never reaches secure mode. If
  364 * userspace plays dirty tricks by mapping huge pages later
  365 * on, this will result in a segmentation fault.
 366	 */
 367	if (is_vm_hugetlb_page(vma))
 368		goto out;
 369
 370	rc = -ENXIO;
 371	ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
 372	if (!ptep)
 373		goto out;
 374	if (pte_present(*ptep) && !(pte_val(*ptep) & _PAGE_INVALID) && pte_write(*ptep)) {
 375		folio = page_folio(pte_page(*ptep));
 376		rc = -EAGAIN;
 377		if (folio_test_large(folio)) {
 378			rc = -E2BIG;
 379		} else if (folio_trylock(folio)) {
 380			if (should_export_before_import(uvcb, gmap->mm))
 381				uv_convert_from_secure(PFN_PHYS(folio_pfn(folio)));
 382			rc = make_folio_secure(folio, uvcb);
 383			folio_unlock(folio);
 384		}
 385
 386		/*
 387		 * Once we drop the PTL, the folio may get unmapped and
 388		 * freed immediately. We need a temporary reference.
 389		 */
 390		if (rc == -EAGAIN || rc == -E2BIG)
 391			folio_get(folio);
 392	}
 393	pte_unmap_unlock(ptep, ptelock);
 394out:
 395	mmap_read_unlock(gmap->mm);
 396
 397	switch (rc) {
 398	case -E2BIG:
 399		folio_lock(folio);
 400		rc = split_folio(folio);
 401		folio_unlock(folio);
 402		folio_put(folio);
 403
 404		switch (rc) {
 405		case 0:
 406			/* Splitting succeeded, try again immediately. */
 407			goto again;
 408		case -EAGAIN:
 409			/* Additional folio references. */
 410			if (drain_lru(&drain_lru_called))
 411				goto again;
 412			return -EAGAIN;
 413		case -EBUSY:
 414			/* Unexpected race. */
 415			return -EAGAIN;
 416		}
 417		WARN_ON_ONCE(1);
 418		return -ENXIO;
 419	case -EAGAIN:
 420		/*
 421		 * If we are here because the UVC returned busy or partial
 422		 * completion, this is just a useless check, but it is safe.
 423		 */
 424		folio_wait_writeback(folio);
 425		folio_put(folio);
 426		return -EAGAIN;
 427	case -EBUSY:
 428		/* Additional folio references. */
 429		if (drain_lru(&drain_lru_called))
 430			goto again;
 431		return -EAGAIN;
 432	case -ENXIO:
 433		if (gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE))
 434			return -EFAULT;
 435		return -EAGAIN;
 436	}
 437	return rc;
 438}
 439EXPORT_SYMBOL_GPL(gmap_make_secure);
 440
 441int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
 442{
 443	struct uv_cb_cts uvcb = {
 444		.header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
 445		.header.len = sizeof(uvcb),
 446		.guest_handle = gmap->guest_handle,
 447		.gaddr = gaddr,
 448	};
 449
 450	return gmap_make_secure(gmap, gaddr, &uvcb);
 451}
 452EXPORT_SYMBOL_GPL(gmap_convert_to_secure);
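/*
 * Editor's sketch (not part of this file): gmap_make_secure() and
 * gmap_convert_to_secure() return -EAGAIN for transient conditions, so
 * callers are expected to retry. A hypothetical caller loop, assuming it
 * runs in a context that may sleep between attempts, could look like this:
 */
static int convert_to_secure_retry(struct gmap *gmap, unsigned long gaddr)
{
	int rc;

	do {
		rc = gmap_convert_to_secure(gmap, gaddr);
		if (rc == -EAGAIN)
			cond_resched();	/* back off before retrying */
	} while (rc == -EAGAIN);
	return rc;
}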
 453
 454/**
 455 * gmap_destroy_page - Destroy a guest page.
 456 * @gmap: the gmap of the guest
 457 * @gaddr: the guest address to destroy
 458 *
 459 * An attempt will be made to destroy the given guest page. If the attempt
 460 * fails, an attempt is made to export the page. If both attempts fail, an
 461 * appropriate error is returned.
 462 */
 463int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr)
 464{
 465	struct vm_area_struct *vma;
 466	struct folio_walk fw;
 467	unsigned long uaddr;
 468	struct folio *folio;
 469	int rc;
 470
 471	rc = -EFAULT;
 472	mmap_read_lock(gmap->mm);
 473
 474	uaddr = __gmap_translate(gmap, gaddr);
 475	if (IS_ERR_VALUE(uaddr))
 476		goto out;
 477	vma = vma_lookup(gmap->mm, uaddr);
 478	if (!vma)
 479		goto out;
 480	/*
 481	 * Huge pages should not be able to become secure
 482	 */
 483	if (is_vm_hugetlb_page(vma))
 484		goto out;
 485
 486	rc = 0;
 487	folio = folio_walk_start(&fw, vma, uaddr, 0);
 488	if (!folio)
 489		goto out;
 490	/*
 491	 * See gmap_make_secure(): large folios cannot be secure. Small
 492	 * folio implies FW_LEVEL_PTE.
 493	 */
 494	if (folio_test_large(folio) || !pte_write(fw.pte))
 495		goto out_walk_end;
 496	rc = uv_destroy_folio(folio);
 497	/*
 498	 * Fault handlers can race; it is possible that two CPUs will fault
 499	 * on the same secure page. One CPU can destroy the page, reboot,
 500	 * re-enter secure mode and import it, while the second CPU was
 501	 * stuck at the beginning of the handler. At some point the second
 502	 * CPU will be able to progress, and it will not be able to destroy
 503	 * the page. In that case we do not want to terminate the process,
 504	 * we instead try to export the page.
 505	 */
 506	if (rc)
 507		rc = uv_convert_from_secure_folio(folio);
 508out_walk_end:
 509	folio_walk_end(&fw, vma);
 510out:
 511	mmap_read_unlock(gmap->mm);
 512	return rc;
 513}
 514EXPORT_SYMBOL_GPL(gmap_destroy_page);
 515
 516/*
 517 * To be called with the folio locked or with an extra reference! This will
 518 * prevent gmap_make_secure from touching the folio concurrently. Having 2
 519 * parallel arch_make_folio_accessible is fine, as the UV calls will become a
 520 * no-op if the folio is already exported.
 521 */
 522int arch_make_folio_accessible(struct folio *folio)
 523{
 524	int rc = 0;
 525
 526	/* See gmap_make_secure(): large folios cannot be secure */
 527	if (unlikely(folio_test_large(folio)))
 528		return 0;
 529
 530	/*
 531	 * PG_arch_1 is used in 2 places:
 532	 * 1. for storage keys of hugetlb folios and KVM
 533	 * 2. As an indication that this small folio might be secure. This can
 534	 *    overindicate, e.g. we set the bit before calling
 535	 *    convert_to_secure.
  536	 * As secure pages are never large folios, both variants can co-exist.
 537	 */
 538	if (!test_bit(PG_arch_1, &folio->flags))
 539		return 0;
 540
 541	rc = uv_pin_shared(folio_to_phys(folio));
 542	if (!rc) {
 543		clear_bit(PG_arch_1, &folio->flags);
 544		return 0;
 545	}
 546
 547	rc = uv_convert_from_secure(folio_to_phys(folio));
 548	if (!rc) {
 549		clear_bit(PG_arch_1, &folio->flags);
 550		return 0;
 551	}
 552
 553	return rc;
 554}
 555EXPORT_SYMBOL_GPL(arch_make_folio_accessible);
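/*
 * Editor's sketch (not part of this file): per the comment above,
 * arch_make_folio_accessible() must be called with the folio locked or with
 * an extra reference held. A hypothetical caller taking the extra-reference
 * option might look like this:
 */
static int make_accessible_with_ref(struct folio *folio)
{
	int rc;

	folio_get(folio);	/* keep the folio stable across the UV calls */
	rc = arch_make_folio_accessible(folio);
	folio_put(folio);
	return rc;
}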
 556
 557static ssize_t uv_query_facilities(struct kobject *kobj,
 558				   struct kobj_attribute *attr, char *buf)
 559{
 560	return sysfs_emit(buf, "%lx\n%lx\n%lx\n%lx\n",
 561			  uv_info.inst_calls_list[0],
 562			  uv_info.inst_calls_list[1],
 563			  uv_info.inst_calls_list[2],
 564			  uv_info.inst_calls_list[3]);
 565}
 566
 567static struct kobj_attribute uv_query_facilities_attr =
 568	__ATTR(facilities, 0444, uv_query_facilities, NULL);
 569
 570static ssize_t uv_query_supp_se_hdr_ver(struct kobject *kobj,
 571					struct kobj_attribute *attr, char *buf)
 572{
 573	return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_ver);
 574}
 575
 576static struct kobj_attribute uv_query_supp_se_hdr_ver_attr =
 577	__ATTR(supp_se_hdr_ver, 0444, uv_query_supp_se_hdr_ver, NULL);
 578
 579static ssize_t uv_query_supp_se_hdr_pcf(struct kobject *kobj,
 580					struct kobj_attribute *attr, char *buf)
 581{
 582	return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_pcf);
 583}
 584
 585static struct kobj_attribute uv_query_supp_se_hdr_pcf_attr =
 586	__ATTR(supp_se_hdr_pcf, 0444, uv_query_supp_se_hdr_pcf, NULL);
 587
 588static ssize_t uv_query_dump_cpu_len(struct kobject *kobj,
 589				     struct kobj_attribute *attr, char *buf)
 590{
 591	return sysfs_emit(buf, "%lx\n", uv_info.guest_cpu_stor_len);
 592}
 593
 594static struct kobj_attribute uv_query_dump_cpu_len_attr =
 595	__ATTR(uv_query_dump_cpu_len, 0444, uv_query_dump_cpu_len, NULL);
 596
 597static ssize_t uv_query_dump_storage_state_len(struct kobject *kobj,
 598					       struct kobj_attribute *attr, char *buf)
 599{
 600	return sysfs_emit(buf, "%lx\n", uv_info.conf_dump_storage_state_len);
 601}
 602
 603static struct kobj_attribute uv_query_dump_storage_state_len_attr =
 604	__ATTR(dump_storage_state_len, 0444, uv_query_dump_storage_state_len, NULL);
 605
 606static ssize_t uv_query_dump_finalize_len(struct kobject *kobj,
 607					  struct kobj_attribute *attr, char *buf)
 608{
 609	return sysfs_emit(buf, "%lx\n", uv_info.conf_dump_finalize_len);
 610}
 611
 612static struct kobj_attribute uv_query_dump_finalize_len_attr =
 613	__ATTR(dump_finalize_len, 0444, uv_query_dump_finalize_len, NULL);
 614
 615static ssize_t uv_query_feature_indications(struct kobject *kobj,
 616					    struct kobj_attribute *attr, char *buf)
 617{
 618	return sysfs_emit(buf, "%lx\n", uv_info.uv_feature_indications);
 619}
 620
 621static struct kobj_attribute uv_query_feature_indications_attr =
 622	__ATTR(feature_indications, 0444, uv_query_feature_indications, NULL);
 623
 624static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
 625				       struct kobj_attribute *attr, char *buf)
 626{
 627	return sysfs_emit(buf, "%d\n", uv_info.max_guest_cpu_id + 1);
 628}
 629
 630static struct kobj_attribute uv_query_max_guest_cpus_attr =
 631	__ATTR(max_cpus, 0444, uv_query_max_guest_cpus, NULL);
 632
 633static ssize_t uv_query_max_guest_vms(struct kobject *kobj,
 634				      struct kobj_attribute *attr, char *buf)
 635{
 636	return sysfs_emit(buf, "%d\n", uv_info.max_num_sec_conf);
 637}
 638
 639static struct kobj_attribute uv_query_max_guest_vms_attr =
 640	__ATTR(max_guests, 0444, uv_query_max_guest_vms, NULL);
 641
 642static ssize_t uv_query_max_guest_addr(struct kobject *kobj,
 643				       struct kobj_attribute *attr, char *buf)
 644{
 645	return sysfs_emit(buf, "%lx\n", uv_info.max_sec_stor_addr);
 646}
 647
 648static struct kobj_attribute uv_query_max_guest_addr_attr =
 649	__ATTR(max_address, 0444, uv_query_max_guest_addr, NULL);
 650
 651static ssize_t uv_query_supp_att_req_hdr_ver(struct kobject *kobj,
 652					     struct kobj_attribute *attr, char *buf)
 653{
 654	return sysfs_emit(buf, "%lx\n", uv_info.supp_att_req_hdr_ver);
 655}
 656
 657static struct kobj_attribute uv_query_supp_att_req_hdr_ver_attr =
 658	__ATTR(supp_att_req_hdr_ver, 0444, uv_query_supp_att_req_hdr_ver, NULL);
 659
 660static ssize_t uv_query_supp_att_pflags(struct kobject *kobj,
 661					struct kobj_attribute *attr, char *buf)
 662{
 663	return sysfs_emit(buf, "%lx\n", uv_info.supp_att_pflags);
 664}
 665
 666static struct kobj_attribute uv_query_supp_att_pflags_attr =
 667	__ATTR(supp_att_pflags, 0444, uv_query_supp_att_pflags, NULL);
 668
 669static ssize_t uv_query_supp_add_secret_req_ver(struct kobject *kobj,
 670						struct kobj_attribute *attr, char *buf)
 671{
 672	return sysfs_emit(buf, "%lx\n", uv_info.supp_add_secret_req_ver);
 673}
 674
 675static struct kobj_attribute uv_query_supp_add_secret_req_ver_attr =
 676	__ATTR(supp_add_secret_req_ver, 0444, uv_query_supp_add_secret_req_ver, NULL);
 677
 678static ssize_t uv_query_supp_add_secret_pcf(struct kobject *kobj,
 679					    struct kobj_attribute *attr, char *buf)
 680{
 681	return sysfs_emit(buf, "%lx\n", uv_info.supp_add_secret_pcf);
 682}
 683
 684static struct kobj_attribute uv_query_supp_add_secret_pcf_attr =
 685	__ATTR(supp_add_secret_pcf, 0444, uv_query_supp_add_secret_pcf, NULL);
 686
 687static ssize_t uv_query_supp_secret_types(struct kobject *kobj,
 688					  struct kobj_attribute *attr, char *buf)
 689{
 690	return sysfs_emit(buf, "%lx\n", uv_info.supp_secret_types);
 691}
 692
 693static struct kobj_attribute uv_query_supp_secret_types_attr =
 694	__ATTR(supp_secret_types, 0444, uv_query_supp_secret_types, NULL);
 695
 696static ssize_t uv_query_max_secrets(struct kobject *kobj,
 697				    struct kobj_attribute *attr, char *buf)
 698{
 699	return sysfs_emit(buf, "%d\n",
 700			  uv_info.max_assoc_secrets + uv_info.max_retr_secrets);
 701}
 702
 703static struct kobj_attribute uv_query_max_secrets_attr =
 704	__ATTR(max_secrets, 0444, uv_query_max_secrets, NULL);
 705
 706static ssize_t uv_query_max_retr_secrets(struct kobject *kobj,
 707					 struct kobj_attribute *attr, char *buf)
 708{
 709	return sysfs_emit(buf, "%d\n", uv_info.max_retr_secrets);
 710}
 711
 712static struct kobj_attribute uv_query_max_retr_secrets_attr =
 713	__ATTR(max_retr_secrets, 0444, uv_query_max_retr_secrets, NULL);
 714
 715static ssize_t uv_query_max_assoc_secrets(struct kobject *kobj,
 716					  struct kobj_attribute *attr,
 717					  char *buf)
 718{
 719	return sysfs_emit(buf, "%d\n", uv_info.max_assoc_secrets);
 720}
 721
 722static struct kobj_attribute uv_query_max_assoc_secrets_attr =
 723	__ATTR(max_assoc_secrets, 0444, uv_query_max_assoc_secrets, NULL);
 724
 725static struct attribute *uv_query_attrs[] = {
 726	&uv_query_facilities_attr.attr,
 727	&uv_query_feature_indications_attr.attr,
 728	&uv_query_max_guest_cpus_attr.attr,
 729	&uv_query_max_guest_vms_attr.attr,
 730	&uv_query_max_guest_addr_attr.attr,
 731	&uv_query_supp_se_hdr_ver_attr.attr,
 732	&uv_query_supp_se_hdr_pcf_attr.attr,
 733	&uv_query_dump_storage_state_len_attr.attr,
 734	&uv_query_dump_finalize_len_attr.attr,
 735	&uv_query_dump_cpu_len_attr.attr,
 736	&uv_query_supp_att_req_hdr_ver_attr.attr,
 737	&uv_query_supp_att_pflags_attr.attr,
 738	&uv_query_supp_add_secret_req_ver_attr.attr,
 739	&uv_query_supp_add_secret_pcf_attr.attr,
 740	&uv_query_supp_secret_types_attr.attr,
 741	&uv_query_max_secrets_attr.attr,
 742	&uv_query_max_assoc_secrets_attr.attr,
 743	&uv_query_max_retr_secrets_attr.attr,
 744	NULL,
 745};
 746
 747static inline struct uv_cb_query_keys uv_query_keys(void)
 748{
 749	struct uv_cb_query_keys uvcb = {
 750		.header.cmd = UVC_CMD_QUERY_KEYS,
 751		.header.len = sizeof(uvcb)
 752	};
 753
 754	uv_call(0, (uint64_t)&uvcb);
 755	return uvcb;
 756}
 757
 758static inline ssize_t emit_hash(struct uv_key_hash *hash, char *buf, int at)
 759{
 760	return sysfs_emit_at(buf, at, "%016llx%016llx%016llx%016llx\n",
 761			    hash->dword[0], hash->dword[1], hash->dword[2], hash->dword[3]);
 762}
 763
 764static ssize_t uv_keys_host_key(struct kobject *kobj,
 765				struct kobj_attribute *attr, char *buf)
 766{
 767	struct uv_cb_query_keys uvcb = uv_query_keys();
 768
 769	return emit_hash(&uvcb.key_hashes[UVC_QUERY_KEYS_IDX_HK], buf, 0);
 770}
 771
 772static struct kobj_attribute uv_keys_host_key_attr =
 773	__ATTR(host_key, 0444, uv_keys_host_key, NULL);
 774
 775static ssize_t uv_keys_backup_host_key(struct kobject *kobj,
 776				       struct kobj_attribute *attr, char *buf)
 777{
 778	struct uv_cb_query_keys uvcb = uv_query_keys();
 779
 780	return emit_hash(&uvcb.key_hashes[UVC_QUERY_KEYS_IDX_BACK_HK], buf, 0);
 781}
 782
 783static struct kobj_attribute uv_keys_backup_host_key_attr =
 784	__ATTR(backup_host_key, 0444, uv_keys_backup_host_key, NULL);
 785
 786static ssize_t uv_keys_all(struct kobject *kobj,
 787			   struct kobj_attribute *attr, char *buf)
 788{
 789	struct uv_cb_query_keys uvcb = uv_query_keys();
 790	ssize_t len = 0;
 791	int i;
 792
 793	for (i = 0; i < ARRAY_SIZE(uvcb.key_hashes); i++)
 794		len += emit_hash(uvcb.key_hashes + i, buf, len);
 795
 796	return len;
 797}
 798
 799static struct kobj_attribute uv_keys_all_attr =
 800	__ATTR(all, 0444, uv_keys_all, NULL);
 801
 802static struct attribute_group uv_query_attr_group = {
 803	.attrs = uv_query_attrs,
 804};
 805
 806static struct attribute *uv_keys_attrs[] = {
 807	&uv_keys_host_key_attr.attr,
 808	&uv_keys_backup_host_key_attr.attr,
 809	&uv_keys_all_attr.attr,
 810	NULL,
 811};
 812
 813static struct attribute_group uv_keys_attr_group = {
 814	.attrs = uv_keys_attrs,
 815};
 816
 817static ssize_t uv_is_prot_virt_guest(struct kobject *kobj,
 818				     struct kobj_attribute *attr, char *buf)
 819{
 820	return sysfs_emit(buf, "%d\n", prot_virt_guest);
 821}
 822
 823static ssize_t uv_is_prot_virt_host(struct kobject *kobj,
 824				    struct kobj_attribute *attr, char *buf)
 825{
 826	return sysfs_emit(buf, "%d\n", prot_virt_host);
 827}
 828
 829static struct kobj_attribute uv_prot_virt_guest =
 830	__ATTR(prot_virt_guest, 0444, uv_is_prot_virt_guest, NULL);
 831
 832static struct kobj_attribute uv_prot_virt_host =
 833	__ATTR(prot_virt_host, 0444, uv_is_prot_virt_host, NULL);
 834
 835static const struct attribute *uv_prot_virt_attrs[] = {
 836	&uv_prot_virt_guest.attr,
 837	&uv_prot_virt_host.attr,
 838	NULL,
 839};
 840
 841static struct kset *uv_query_kset;
 842static struct kset *uv_keys_kset;
 843static struct kobject *uv_kobj;
 844
 845static int __init uv_sysfs_dir_init(const struct attribute_group *grp,
 846				    struct kset **uv_dir_kset, const char *name)
 847{
 848	struct kset *kset;
 849	int rc;
 850
 851	kset = kset_create_and_add(name, NULL, uv_kobj);
 852	if (!kset)
 853		return -ENOMEM;
 854	*uv_dir_kset = kset;
 855
 856	rc = sysfs_create_group(&kset->kobj, grp);
 857	if (rc)
 858		kset_unregister(kset);
 859	return rc;
 860}
 861
 862static int __init uv_sysfs_init(void)
 863{
 864	int rc = -ENOMEM;
 865
 866	if (!test_facility(158))
 867		return 0;
 868
 869	uv_kobj = kobject_create_and_add("uv", firmware_kobj);
 870	if (!uv_kobj)
 871		return -ENOMEM;
 872
 873	rc = sysfs_create_files(uv_kobj, uv_prot_virt_attrs);
 874	if (rc)
 875		goto out_kobj;
 876
 877	rc = uv_sysfs_dir_init(&uv_query_attr_group, &uv_query_kset, "query");
 878	if (rc)
 879		goto out_ind_files;
 880
 881	/* Get installed key hashes if available, ignore any errors */
 882	if (test_bit_inv(BIT_UVC_CMD_QUERY_KEYS, uv_info.inst_calls_list))
 883		uv_sysfs_dir_init(&uv_keys_attr_group, &uv_keys_kset, "keys");
 884
 885	return 0;
 886
 887out_ind_files:
 888	sysfs_remove_files(uv_kobj, uv_prot_virt_attrs);
 889out_kobj:
 890	kobject_del(uv_kobj);
 891	kobject_put(uv_kobj);
 892	return rc;
 893}
 894device_initcall(uv_sysfs_init);
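/*
 * Editor's sketch (not part of this file): the attributes registered above
 * appear under /sys/firmware/uv/. A hypothetical userspace reader for one
 * of the query attributes:
 */
#include <stdio.h>

int main(void)
{
	char line[64];
	FILE *f = fopen("/sys/firmware/uv/query/max_cpus", "r");

	if (!f)
		return 1;	/* no ultravisor support exposed */
	if (fgets(line, sizeof(line), f))
		printf("max guest cpus: %s", line);
	fclose(f);
	return 0;
}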
 895
 896/*
 897 * Find the secret with the secret_id in the provided list.
 898 *
 899 * Context: might sleep.
 900 */
 901static int find_secret_in_page(const u8 secret_id[UV_SECRET_ID_LEN],
 902			       const struct uv_secret_list *list,
 903			       struct uv_secret_list_item_hdr *secret)
 904{
 905	u16 i;
 906
 907	for (i = 0; i < list->total_num_secrets; i++) {
 908		if (memcmp(secret_id, list->secrets[i].id, UV_SECRET_ID_LEN) == 0) {
 909			*secret = list->secrets[i].hdr;
 910			return 0;
 911		}
 912	}
 913	return -ENOENT;
 914}
 915
 916/*
 917 * Do the actual search for `uv_get_secret_metadata`.
 918 *
 919 * Context: might sleep.
 920 */
 921static int find_secret(const u8 secret_id[UV_SECRET_ID_LEN],
 922		       struct uv_secret_list *list,
 923		       struct uv_secret_list_item_hdr *secret)
 924{
 925	u16 start_idx = 0;
 926	u16 list_rc;
 927	int ret;
 928
 929	do {
 930		uv_list_secrets(list, start_idx, &list_rc, NULL);
 931		if (list_rc != UVC_RC_EXECUTED && list_rc != UVC_RC_MORE_DATA) {
 932			if (list_rc == UVC_RC_INV_CMD)
 933				return -ENODEV;
 934			else
 935				return -EIO;
 936		}
 937		ret = find_secret_in_page(secret_id, list, secret);
 938		if (ret == 0)
 939			return ret;
 940		start_idx = list->next_secret_idx;
 941	} while (list_rc == UVC_RC_MORE_DATA && start_idx < list->next_secret_idx);
 942
 943	return -ENOENT;
 944}
 945
 946/**
 947 * uv_get_secret_metadata() - get secret metadata for a given secret id.
 948 * @secret_id: search pattern.
 949 * @secret: output data, containing the secret's metadata.
 950 *
 951 * Search for a secret with the given secret_id in the Ultravisor secret store.
 952 *
 953 * Context: might sleep.
 954 *
 955 * Return:
  956 * * %0		- Found entry; secret->idx and secret->type are valid.
  957 * * %ENOENT	- No entry found.
  958 * * %ENODEV	- Not supported: UV not available or command not available.
  959 * * %EIO	- Other unexpected UV error.
 960 */
 961int uv_get_secret_metadata(const u8 secret_id[UV_SECRET_ID_LEN],
 962			   struct uv_secret_list_item_hdr *secret)
 963{
 964	struct uv_secret_list *buf;
 965	int rc;
 966
 967	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
 968	if (!buf)
 969		return -ENOMEM;
 970	rc = find_secret(secret_id, buf, secret);
 971	kfree(buf);
 972	return rc;
 973}
 974EXPORT_SYMBOL_GPL(uv_get_secret_metadata);
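/*
 * Editor's sketch (not part of this file): a hypothetical lookup using the
 * export above; the hdr field names (index, type) are assumed from asm/uv.h.
 */
static int lookup_secret_example(const u8 id[UV_SECRET_ID_LEN], u16 *idx)
{
	struct uv_secret_list_item_hdr hdr;
	int rc;

	rc = uv_get_secret_metadata(id, &hdr);
	if (rc)
		return rc;	/* -ENOENT, -ENODEV, -EIO or -ENOMEM */
	*idx = hdr.index;
	return 0;
}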
 975
 976/**
 977 * uv_retrieve_secret() - get the secret value for the secret index.
 978 * @secret_idx: Secret index for which the secret should be retrieved.
 979 * @buf: Buffer to store retrieved secret.
 980 * @buf_size: Size of the buffer. The correct buffer size is reported as part of
 981 * the result from `uv_get_secret_metadata`.
 982 *
 983 * Calls the Retrieve Secret UVC and translates the UV return code into an errno.
 984 *
 985 * Context: might sleep.
 986 *
 987 * Return:
 988 * * %0		- Entry found; buffer contains a valid secret.
  989 * * %ENOENT	- No entry found or secret at the index is non-retrievable.
  990 * * %ENODEV	- Not supported: UV not available or command not available.
  991 * * %EINVAL	- Buffer too small for content.
  992 * * %EIO	- Other unexpected UV error.
 993 */
 994int uv_retrieve_secret(u16 secret_idx, u8 *buf, size_t buf_size)
 995{
 996	struct uv_cb_retr_secr uvcb = {
 997		.header.len = sizeof(uvcb),
 998		.header.cmd = UVC_CMD_RETR_SECRET,
 999		.secret_idx = secret_idx,
1000		.buf_addr = (u64)buf,
1001		.buf_size = buf_size,
1002	};
1003
1004	uv_call_sched(0, (u64)&uvcb);
1005
1006	switch (uvcb.header.rc) {
1007	case UVC_RC_EXECUTED:
1008		return 0;
1009	case UVC_RC_INV_CMD:
1010		return -ENODEV;
1011	case UVC_RC_RETR_SECR_STORE_EMPTY:
1012	case UVC_RC_RETR_SECR_INV_SECRET:
1013	case UVC_RC_RETR_SECR_INV_IDX:
1014		return -ENOENT;
1015	case UVC_RC_RETR_SECR_BUF_SMALL:
1016		return -EINVAL;
1017	default:
1018		return -EIO;
1019	}
1020}
1021EXPORT_SYMBOL_GPL(uv_retrieve_secret);
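/*
 * Editor's sketch (not part of this file): combining the two exports above,
 * a hypothetical helper retrieves a secret's value by its id. Buffer sizing
 * is simplified; a real caller would size buf from the secret's metadata.
 */
static int retrieve_secret_by_id(const u8 id[UV_SECRET_ID_LEN],
				 u8 *buf, size_t buf_size)
{
	struct uv_secret_list_item_hdr hdr;
	int rc;

	rc = uv_get_secret_metadata(id, &hdr);
	if (rc)
		return rc;
	return uv_retrieve_secret(hdr.index, buf, buf_size);
}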
v6.2
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Common Ultravisor functions and initialization
  4 *
  5 * Copyright IBM Corp. 2019, 2020
  6 */
  7#define KMSG_COMPONENT "prot_virt"
  8#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  9
 10#include <linux/kernel.h>
 11#include <linux/types.h>
 12#include <linux/sizes.h>
 13#include <linux/bitmap.h>
 14#include <linux/memblock.h>
 15#include <linux/pagemap.h>
 16#include <linux/swap.h>
 17#include <asm/facility.h>
 18#include <asm/sections.h>
 19#include <asm/uv.h>
 20
 21/* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */
 22#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
 23int __bootdata_preserved(prot_virt_guest);
 24#endif
 25
 26struct uv_info __bootdata_preserved(uv_info);
 27
 28#if IS_ENABLED(CONFIG_KVM)
 29int __bootdata_preserved(prot_virt_host);
 30EXPORT_SYMBOL(prot_virt_host);
 31EXPORT_SYMBOL(uv_info);
 32
 33static int __init uv_init(phys_addr_t stor_base, unsigned long stor_len)
 34{
 35	struct uv_cb_init uvcb = {
 36		.header.cmd = UVC_CMD_INIT_UV,
 37		.header.len = sizeof(uvcb),
 38		.stor_origin = stor_base,
 39		.stor_len = stor_len,
 40	};
 41
 42	if (uv_call(0, (uint64_t)&uvcb)) {
 43		pr_err("Ultravisor init failed with rc: 0x%x rrc: 0x%x\n",
 44		       uvcb.header.rc, uvcb.header.rrc);
 45		return -1;
 46	}
 47	return 0;
 48}
 49
 50void __init setup_uv(void)
 51{
 52	void *uv_stor_base;
 53
 54	if (!is_prot_virt_host())
 55		return;
 56
 57	uv_stor_base = memblock_alloc_try_nid(
 58		uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
 59		MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
 60	if (!uv_stor_base) {
 61		pr_warn("Failed to reserve %lu bytes for ultravisor base storage\n",
 62			uv_info.uv_base_stor_len);
 63		goto fail;
 64	}
 65
 66	if (uv_init(__pa(uv_stor_base), uv_info.uv_base_stor_len)) {
 67		memblock_free(uv_stor_base, uv_info.uv_base_stor_len);
 68		goto fail;
 69	}
 70
 71	pr_info("Reserving %luMB as ultravisor base storage\n",
 72		uv_info.uv_base_stor_len >> 20);
 73	return;
 74fail:
 75	pr_info("Disabling support for protected virtualization\n");
 76	prot_virt_host = 0;
 77}
 78
 79/*
 80 * Requests the Ultravisor to pin the page in the shared state. This will
 81 * cause an intercept when the guest attempts to unshare the pinned page.
 82 */
 83static int uv_pin_shared(unsigned long paddr)
 84{
 85	struct uv_cb_cfs uvcb = {
 86		.header.cmd = UVC_CMD_PIN_PAGE_SHARED,
 87		.header.len = sizeof(uvcb),
 88		.paddr = paddr,
 89	};
 90
 91	if (uv_call(0, (u64)&uvcb))
 92		return -EINVAL;
 93	return 0;
 94}
 95
 96/*
 97 * Requests the Ultravisor to destroy a guest page and make it
 98 * accessible to the host. The destroy clears the page instead of
 99 * exporting.
100 *
101 * @paddr: Absolute host address of page to be destroyed
102 */
103static int uv_destroy_page(unsigned long paddr)
104{
105	struct uv_cb_cfs uvcb = {
106		.header.cmd = UVC_CMD_DESTR_SEC_STOR,
107		.header.len = sizeof(uvcb),
108		.paddr = paddr
109	};
110
111	if (uv_call(0, (u64)&uvcb)) {
112		/*
113		 * Older firmware uses 107/d as an indication of a non-secure
114		 * page. Let us emulate the newer variant (no-op).
115		 */
116		if (uvcb.header.rc == 0x107 && uvcb.header.rrc == 0xd)
117			return 0;
118		return -EINVAL;
119	}
120	return 0;
121}
122
123/*
124 * The caller must already hold a reference to the page
125 */
126int uv_destroy_owned_page(unsigned long paddr)
127{
128	struct page *page = phys_to_page(paddr);
129	int rc;
130
131	get_page(page);
132	rc = uv_destroy_page(paddr);
133	if (!rc)
134		clear_bit(PG_arch_1, &page->flags);
135	put_page(page);
136	return rc;
137}
138
139/*
140 * Requests the Ultravisor to encrypt a guest page and make it
141 * accessible to the host for paging (export).
142 *
143 * @paddr: Absolute host address of page to be exported
144 */
145int uv_convert_from_secure(unsigned long paddr)
146{
147	struct uv_cb_cfs uvcb = {
148		.header.cmd = UVC_CMD_CONV_FROM_SEC_STOR,
149		.header.len = sizeof(uvcb),
150		.paddr = paddr
151	};
152
153	if (uv_call(0, (u64)&uvcb))
154		return -EINVAL;
155	return 0;
156}
157
158/*
159 * The caller must already hold a reference to the page
160 */
161int uv_convert_owned_from_secure(unsigned long paddr)
162{
163	struct page *page = phys_to_page(paddr);
164	int rc;
165
166	get_page(page);
167	rc = uv_convert_from_secure(paddr);
168	if (!rc)
169		clear_bit(PG_arch_1, &page->flags);
170	put_page(page);
171	return rc;
172}
173
174/*
175 * Calculate the expected ref_count for a page that would otherwise have no
176 * further pins. This was cribbed from similar functions in other places in
177 * the kernel, but with some slight modifications. We know that a secure
178 * page cannot be a huge page, for example.
179 */
180static int expected_page_refs(struct page *page)
181{
182	int res;
183
184	res = page_mapcount(page);
185	if (PageSwapCache(page)) {
186		res++;
187	} else if (page_mapping(page)) {
188		res++;
189		if (page_has_private(page))
190			res++;
191	}
192	return res;
193}
194
195static int make_secure_pte(pte_t *ptep, unsigned long addr,
196			   struct page *exp_page, struct uv_cb_header *uvcb)
197{
198	pte_t entry = READ_ONCE(*ptep);
199	struct page *page;
200	int expected, cc = 0;
201
202	if (!pte_present(entry))
203		return -ENXIO;
204	if (pte_val(entry) & _PAGE_INVALID)
205		return -ENXIO;
206
207	page = pte_page(entry);
208	if (page != exp_page)
209		return -ENXIO;
210	if (PageWriteback(page))
211		return -EAGAIN;
212	expected = expected_page_refs(page);
213	if (!page_ref_freeze(page, expected))
214		return -EBUSY;
215	set_bit(PG_arch_1, &page->flags);
216	/*
217	 * If the UVC does not succeed or fail immediately, we don't want to
218	 * loop for long, or we might get stall notifications.
219	 * On the other hand, this is a complex scenario and we are holding a lot of
220	 * locks, so we can't easily sleep and reschedule. We try only once,
221	 * and if the UVC returned busy or partial completion, we return
222	 * -EAGAIN and we let the callers deal with it.
223	 */
224	cc = __uv_call(0, (u64)uvcb);
225	page_ref_unfreeze(page, expected);
226	/*
227	 * Return -ENXIO if the page was not mapped, -EINVAL for other errors.
228	 * If busy or partially completed, return -EAGAIN.
229	 */
230	if (cc == UVC_CC_OK)
231		return 0;
232	else if (cc == UVC_CC_BUSY || cc == UVC_CC_PARTIAL)
233		return -EAGAIN;
234	return uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
235}
236
237/**
238 * should_export_before_import - Determine whether an export is needed
239 * before an import-like operation
240 * @uvcb: the Ultravisor control block of the UVC to be performed
241 * @mm: the mm of the process
242 *
243 * Returns whether an export is needed before every import-like operation.
244 * This is needed for shared pages, which don't trigger a secure storage
245 * exception when accessed from a different guest.
246 *
247 * Although considered as one, the Unpin Page UVC is not an actual import,
248 * so it is not affected.
249 *
250 * No export is needed also when there is only one protected VM, because the
251 * page cannot belong to the wrong VM in that case (there is no "other VM"
252 * it can belong to).
253 *
254 * Return: true if an export is needed before every import, otherwise false.
255 */
256static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_struct *mm)
257{
258	/*
259	 * The misc feature indicates, among other things, that importing a
260	 * shared page from a different protected VM will automatically also
261	 * transfer its ownership.
262	 */
263	if (test_bit_inv(BIT_UV_FEAT_MISC, &uv_info.uv_feature_indications))
264		return false;
265	if (uvcb->cmd == UVC_CMD_UNPIN_PAGE_SHARED)
266		return false;
267	return atomic_read(&mm->context.protected_count) > 1;
268}
269
270/*
271 * Requests the Ultravisor to make a page accessible to a guest.
272 * If it's brought in the first time, it will be cleared. If
273 * it has been exported before, it will be decrypted and integrity
274 * checked.
275 */
276int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
277{
278	struct vm_area_struct *vma;
279	bool local_drain = false;
280	spinlock_t *ptelock;
281	unsigned long uaddr;
282	struct page *page;
283	pte_t *ptep;
284	int rc;
285
286again:
287	rc = -EFAULT;
288	mmap_read_lock(gmap->mm);
289
290	uaddr = __gmap_translate(gmap, gaddr);
291	if (IS_ERR_VALUE(uaddr))
292		goto out;
293	vma = vma_lookup(gmap->mm, uaddr);
294	if (!vma)
295		goto out;
296	/*
297	 * Secure pages cannot be huge and userspace should not combine both.
298	 * In case userspace does it anyway this will result in an -EFAULT for
299 * the unpack. The guest thus never reaches secure mode. If
300 * userspace plays dirty tricks by mapping huge pages later
301 * on, this will result in a segmentation fault.
302	 */
303	if (is_vm_hugetlb_page(vma))
304		goto out;
305
306	rc = -ENXIO;
307	page = follow_page(vma, uaddr, FOLL_WRITE);
308	if (IS_ERR_OR_NULL(page))
309		goto out;
310
311	lock_page(page);
312	ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
313	if (should_export_before_import(uvcb, gmap->mm))
314		uv_convert_from_secure(page_to_phys(page));
315	rc = make_secure_pte(ptep, uaddr, page, uvcb);
316	pte_unmap_unlock(ptep, ptelock);
317	unlock_page(page);
318out:
319	mmap_read_unlock(gmap->mm);
320
321	if (rc == -EAGAIN) {
322		/*
323		 * If we are here because the UVC returned busy or partial
324		 * completion, this is just a useless check, but it is safe.
325		 */
326		wait_on_page_writeback(page);
327	} else if (rc == -EBUSY) {
328		/*
329		 * If we have tried a local drain and the page refcount
330		 * still does not match our expected safe value, try with a
331		 * system wide drain. This is needed if the pagevecs holding
332		 * the page are on a different CPU.
333		 */
334		if (local_drain) {
335			lru_add_drain_all();
336			/* We give up here, and let the caller try again */
337			return -EAGAIN;
338		}
339		/*
340		 * We are here if the page refcount does not match the
341		 * expected safe value. The main culprits are usually
342		 * pagevecs. With lru_add_drain() we drain the pagevecs
343		 * on the local CPU so that hopefully the refcount will
344		 * reach the expected safe value.
345		 */
346		lru_add_drain();
347		local_drain = true;
348		/* And now we try again immediately after draining */
349		goto again;
350	} else if (rc == -ENXIO) {
351		if (gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE))
352			return -EFAULT;
353		return -EAGAIN;
354	}
355	return rc;
356}
357EXPORT_SYMBOL_GPL(gmap_make_secure);
358
359int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
360{
361	struct uv_cb_cts uvcb = {
362		.header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
363		.header.len = sizeof(uvcb),
364		.guest_handle = gmap->guest_handle,
365		.gaddr = gaddr,
366	};
367
368	return gmap_make_secure(gmap, gaddr, &uvcb);
369}
370EXPORT_SYMBOL_GPL(gmap_convert_to_secure);
371
372/**
373 * gmap_destroy_page - Destroy a guest page.
374 * @gmap: the gmap of the guest
375 * @gaddr: the guest address to destroy
376 *
377 * An attempt will be made to destroy the given guest page. If the attempt
378 * fails, an attempt is made to export the page. If both attempts fail, an
379 * appropriate error is returned.
380 */
381int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr)
382{
383	struct vm_area_struct *vma;
384	unsigned long uaddr;
385	struct page *page;
386	int rc;
387
388	rc = -EFAULT;
389	mmap_read_lock(gmap->mm);
390
391	uaddr = __gmap_translate(gmap, gaddr);
392	if (IS_ERR_VALUE(uaddr))
393		goto out;
394	vma = vma_lookup(gmap->mm, uaddr);
395	if (!vma)
396		goto out;
397	/*
398	 * Huge pages should not be able to become secure
399	 */
400	if (is_vm_hugetlb_page(vma))
401		goto out;
402
403	rc = 0;
404	/* we take an extra reference here */
405	page = follow_page(vma, uaddr, FOLL_WRITE | FOLL_GET);
406	if (IS_ERR_OR_NULL(page))
407		goto out;
408	rc = uv_destroy_owned_page(page_to_phys(page));
409	/*
410	 * Fault handlers can race; it is possible that two CPUs will fault
411	 * on the same secure page. One CPU can destroy the page, reboot,
412	 * re-enter secure mode and import it, while the second CPU was
413	 * stuck at the beginning of the handler. At some point the second
414	 * CPU will be able to progress, and it will not be able to destroy
415	 * the page. In that case we do not want to terminate the process,
416	 * we instead try to export the page.
417	 */
418	if (rc)
419		rc = uv_convert_owned_from_secure(page_to_phys(page));
420	put_page(page);
421out:
422	mmap_read_unlock(gmap->mm);
423	return rc;
424}
425EXPORT_SYMBOL_GPL(gmap_destroy_page);
426
427/*
428 * To be called with the page locked or with an extra reference! This will
429 * prevent gmap_make_secure from touching the page concurrently. Having 2
430 * parallel make_page_accessible is fine, as the UV calls will become a
431 * no-op if the page is already exported.
432 */
433int arch_make_page_accessible(struct page *page)
434{
435	int rc = 0;
436
437	/* Hugepage cannot be protected, so nothing to do */
438	if (PageHuge(page))
439		return 0;
440
441	/*
442	 * PG_arch_1 is used in 3 places:
443	 * 1. for kernel page tables during early boot
444	 * 2. for storage keys of huge pages and KVM
445	 * 3. As an indication that this page might be secure. This can
446	 *    overindicate, e.g. we set the bit before calling
447	 *    convert_to_secure.
448	 * As secure pages are never huge, all 3 variants can co-exist.
449	 */
450	if (!test_bit(PG_arch_1, &page->flags))
451		return 0;
452
453	rc = uv_pin_shared(page_to_phys(page));
454	if (!rc) {
455		clear_bit(PG_arch_1, &page->flags);
456		return 0;
457	}
458
459	rc = uv_convert_from_secure(page_to_phys(page));
460	if (!rc) {
461		clear_bit(PG_arch_1, &page->flags);
462		return 0;
463	}
464
465	return rc;
466}
467EXPORT_SYMBOL_GPL(arch_make_page_accessible);
468
469#endif
470
471#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
472static ssize_t uv_query_facilities(struct kobject *kobj,
473				   struct kobj_attribute *attr, char *page)
474{
475	return scnprintf(page, PAGE_SIZE, "%lx\n%lx\n%lx\n%lx\n",
476			uv_info.inst_calls_list[0],
477			uv_info.inst_calls_list[1],
478			uv_info.inst_calls_list[2],
479			uv_info.inst_calls_list[3]);
480}
481
482static struct kobj_attribute uv_query_facilities_attr =
483	__ATTR(facilities, 0444, uv_query_facilities, NULL);
484
485static ssize_t uv_query_supp_se_hdr_ver(struct kobject *kobj,
486					struct kobj_attribute *attr, char *buf)
487{
488	return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_ver);
489}
490
491static struct kobj_attribute uv_query_supp_se_hdr_ver_attr =
492	__ATTR(supp_se_hdr_ver, 0444, uv_query_supp_se_hdr_ver, NULL);
493
494static ssize_t uv_query_supp_se_hdr_pcf(struct kobject *kobj,
495					struct kobj_attribute *attr, char *buf)
496{
497	return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_pcf);
498}
499
500static struct kobj_attribute uv_query_supp_se_hdr_pcf_attr =
501	__ATTR(supp_se_hdr_pcf, 0444, uv_query_supp_se_hdr_pcf, NULL);
502
503static ssize_t uv_query_dump_cpu_len(struct kobject *kobj,
504				     struct kobj_attribute *attr, char *page)
505{
506	return scnprintf(page, PAGE_SIZE, "%lx\n",
507			uv_info.guest_cpu_stor_len);
508}
509
510static struct kobj_attribute uv_query_dump_cpu_len_attr =
511	__ATTR(uv_query_dump_cpu_len, 0444, uv_query_dump_cpu_len, NULL);
512
513static ssize_t uv_query_dump_storage_state_len(struct kobject *kobj,
514					       struct kobj_attribute *attr, char *page)
515{
516	return scnprintf(page, PAGE_SIZE, "%lx\n",
517			uv_info.conf_dump_storage_state_len);
518}
519
520static struct kobj_attribute uv_query_dump_storage_state_len_attr =
521	__ATTR(dump_storage_state_len, 0444, uv_query_dump_storage_state_len, NULL);
522
523static ssize_t uv_query_dump_finalize_len(struct kobject *kobj,
524					  struct kobj_attribute *attr, char *page)
525{
526	return scnprintf(page, PAGE_SIZE, "%lx\n",
527			uv_info.conf_dump_finalize_len);
528}
529
530static struct kobj_attribute uv_query_dump_finalize_len_attr =
531	__ATTR(dump_finalize_len, 0444, uv_query_dump_finalize_len, NULL);
532
533static ssize_t uv_query_feature_indications(struct kobject *kobj,
534					    struct kobj_attribute *attr, char *buf)
535{
536	return sysfs_emit(buf, "%lx\n", uv_info.uv_feature_indications);
537}
538
539static struct kobj_attribute uv_query_feature_indications_attr =
540	__ATTR(feature_indications, 0444, uv_query_feature_indications, NULL);
541
542static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
543				       struct kobj_attribute *attr, char *page)
544{
545	return scnprintf(page, PAGE_SIZE, "%d\n",
546			uv_info.max_guest_cpu_id + 1);
547}
548
549static struct kobj_attribute uv_query_max_guest_cpus_attr =
550	__ATTR(max_cpus, 0444, uv_query_max_guest_cpus, NULL);
551
552static ssize_t uv_query_max_guest_vms(struct kobject *kobj,
553				      struct kobj_attribute *attr, char *page)
554{
555	return scnprintf(page, PAGE_SIZE, "%d\n",
556			uv_info.max_num_sec_conf);
557}
558
559static struct kobj_attribute uv_query_max_guest_vms_attr =
560	__ATTR(max_guests, 0444, uv_query_max_guest_vms, NULL);
561
562static ssize_t uv_query_max_guest_addr(struct kobject *kobj,
563				       struct kobj_attribute *attr, char *page)
564{
565	return scnprintf(page, PAGE_SIZE, "%lx\n",
566			uv_info.max_sec_stor_addr);
567}
568
569static struct kobj_attribute uv_query_max_guest_addr_attr =
570	__ATTR(max_address, 0444, uv_query_max_guest_addr, NULL);
571
572static ssize_t uv_query_supp_att_req_hdr_ver(struct kobject *kobj,
573					     struct kobj_attribute *attr, char *page)
574{
575	return scnprintf(page, PAGE_SIZE, "%lx\n", uv_info.supp_att_req_hdr_ver);
576}
577
578static struct kobj_attribute uv_query_supp_att_req_hdr_ver_attr =
579	__ATTR(supp_att_req_hdr_ver, 0444, uv_query_supp_att_req_hdr_ver, NULL);
580
581static ssize_t uv_query_supp_att_pflags(struct kobject *kobj,
582					struct kobj_attribute *attr, char *page)
583{
584	return scnprintf(page, PAGE_SIZE, "%lx\n", uv_info.supp_att_pflags);
585}
586
587static struct kobj_attribute uv_query_supp_att_pflags_attr =
588	__ATTR(supp_att_pflags, 0444, uv_query_supp_att_pflags, NULL);
589
590static struct attribute *uv_query_attrs[] = {
591	&uv_query_facilities_attr.attr,
592	&uv_query_feature_indications_attr.attr,
593	&uv_query_max_guest_cpus_attr.attr,
594	&uv_query_max_guest_vms_attr.attr,
595	&uv_query_max_guest_addr_attr.attr,
596	&uv_query_supp_se_hdr_ver_attr.attr,
597	&uv_query_supp_se_hdr_pcf_attr.attr,
598	&uv_query_dump_storage_state_len_attr.attr,
599	&uv_query_dump_finalize_len_attr.attr,
600	&uv_query_dump_cpu_len_attr.attr,
601	&uv_query_supp_att_req_hdr_ver_attr.attr,
602	&uv_query_supp_att_pflags_attr.attr,
603	NULL,
604};
605
606static struct attribute_group uv_query_attr_group = {
607	.attrs = uv_query_attrs,
608};
609
610static ssize_t uv_is_prot_virt_guest(struct kobject *kobj,
611				     struct kobj_attribute *attr, char *page)
612{
613	int val = 0;
614
615#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
616	val = prot_virt_guest;
617#endif
618	return scnprintf(page, PAGE_SIZE, "%d\n", val);
619}
620
621static ssize_t uv_is_prot_virt_host(struct kobject *kobj,
622				    struct kobj_attribute *attr, char *page)
623{
624	int val = 0;
625
626#if IS_ENABLED(CONFIG_KVM)
627	val = prot_virt_host;
628#endif
629
630	return scnprintf(page, PAGE_SIZE, "%d\n", val);
631}
632
633static struct kobj_attribute uv_prot_virt_guest =
634	__ATTR(prot_virt_guest, 0444, uv_is_prot_virt_guest, NULL);
635
636static struct kobj_attribute uv_prot_virt_host =
637	__ATTR(prot_virt_host, 0444, uv_is_prot_virt_host, NULL);
638
639static const struct attribute *uv_prot_virt_attrs[] = {
640	&uv_prot_virt_guest.attr,
641	&uv_prot_virt_host.attr,
642	NULL,
643};
644
645static struct kset *uv_query_kset;
646static struct kobject *uv_kobj;
647
648static int __init uv_info_init(void)
649{
650	int rc = -ENOMEM;
651
652	if (!test_facility(158))
653		return 0;
654
655	uv_kobj = kobject_create_and_add("uv", firmware_kobj);
656	if (!uv_kobj)
657		return -ENOMEM;
658
659	rc = sysfs_create_files(uv_kobj, uv_prot_virt_attrs);
660	if (rc)
661		goto out_kobj;
662
663	uv_query_kset = kset_create_and_add("query", NULL, uv_kobj);
664	if (!uv_query_kset) {
665		rc = -ENOMEM;
666		goto out_ind_files;
667	}
668
669	rc = sysfs_create_group(&uv_query_kset->kobj, &uv_query_attr_group);
670	if (!rc)
671		return 0;
672
673	kset_unregister(uv_query_kset);
674out_ind_files:
675	sysfs_remove_files(uv_kobj, uv_prot_virt_attrs);
676out_kobj:
677	kobject_del(uv_kobj);
678	kobject_put(uv_kobj);
679	return rc;
680}
681device_initcall(uv_info_init);
682#endif