v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Intel CPU Microcode Update Driver for Linux
   4 *
   5 * Copyright (C) 2000-2006 Tigran Aivazian <aivazian.tigran@gmail.com>
   6 *		 2006 Shaohua Li <shaohua.li@intel.com>
   7 *
   8 * Intel CPU microcode early update for Linux
   9 *
  10 * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
  11 *		      H Peter Anvin <hpa@zytor.com>
  12 */
  13
  14/*
  15 * This needs to be before all headers so that pr_debug in printk.h doesn't turn
  16 * printk calls into no_printk().
  17 *
  18 *#define DEBUG
  19 */
  20#define pr_fmt(fmt) "microcode: " fmt
  21
  22#include <linux/earlycpio.h>
  23#include <linux/firmware.h>
  24#include <linux/uaccess.h>
  25#include <linux/vmalloc.h>
  26#include <linux/initrd.h>
  27#include <linux/kernel.h>
  28#include <linux/slab.h>
  29#include <linux/cpu.h>
  30#include <linux/uio.h>
  31#include <linux/mm.h>
  32
  33#include <asm/microcode_intel.h>
  34#include <asm/intel-family.h>
  35#include <asm/processor.h>
  36#include <asm/tlbflush.h>
  37#include <asm/setup.h>
  38#include <asm/msr.h>
  39
  40static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";
  41
  42/* Current microcode patch used in early patching on the APs. */
  43static struct microcode_intel *intel_ucode_patch;
  44
  45/* last level cache size per core */
  46static int llc_size_per_core;
  47
  48static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,
  49					unsigned int s2, unsigned int p2)
  50{
  51	if (s1 != s2)
  52		return false;
  53
  54	/* Processor flags are either both 0 ... */
  55	if (!p1 && !p2)
  56		return true;
  57
  58	/* ... or they intersect. */
  59	return p1 & p2;
  60}
  61
  62/*
  63 * Returns 1 if a matching signature has been found, 0 otherwise.
  64 */
  65static int find_matching_signature(void *mc, unsigned int csig, int cpf)
  66{
  67	struct microcode_header_intel *mc_hdr = mc;
  68	struct extended_sigtable *ext_hdr;
  69	struct extended_signature *ext_sig;
  70	int i;
  71
  72	if (cpu_signatures_match(csig, cpf, mc_hdr->sig, mc_hdr->pf))
  73		return 1;
  74
  75	/* Look for ext. headers: */
  76	if (get_totalsize(mc_hdr) <= get_datasize(mc_hdr) + MC_HEADER_SIZE)
  77		return 0;
  78
  79	ext_hdr = mc + get_datasize(mc_hdr) + MC_HEADER_SIZE;
  80	ext_sig = (void *)ext_hdr + EXT_HEADER_SIZE;
  81
  82	for (i = 0; i < ext_hdr->count; i++) {
  83		if (cpu_signatures_match(csig, cpf, ext_sig->sig, ext_sig->pf))
  84			return 1;
  85		ext_sig++;
  86	}
  87	return 0;
  88}
  89
  90/*
  91 * Returns 1 if update has been found, 0 otherwise.
  92 */
  93static int has_newer_microcode(void *mc, unsigned int csig, int cpf, int new_rev)
  94{
  95	struct microcode_header_intel *mc_hdr = mc;
  96
  97	if (mc_hdr->rev <= new_rev)
  98		return 0;
  99
 100	return find_matching_signature(mc, csig, cpf);
 101}
 102
 103static struct ucode_patch *memdup_patch(void *data, unsigned int size)
 104{
 105	struct ucode_patch *p;
 106
 107	p = kzalloc(sizeof(struct ucode_patch), GFP_KERNEL);
 108	if (!p)
 109		return NULL;
 110
 111	p->data = kmemdup(data, size, GFP_KERNEL);
 112	if (!p->data) {
 113		kfree(p);
 114		return NULL;
 115	}
 116
 117	return p;
 118}
 119
 120static void save_microcode_patch(struct ucode_cpu_info *uci, void *data, unsigned int size)
 121{
 122	struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
 123	struct ucode_patch *iter, *tmp, *p = NULL;
 124	bool prev_found = false;
 125	unsigned int sig, pf;
 126
 127	mc_hdr = (struct microcode_header_intel *)data;
 128
 129	list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {
 130		mc_saved_hdr = (struct microcode_header_intel *)iter->data;
 131		sig	     = mc_saved_hdr->sig;
 132		pf	     = mc_saved_hdr->pf;
 133
 134		if (find_matching_signature(data, sig, pf)) {
 135			prev_found = true;
 136
 137			if (mc_hdr->rev <= mc_saved_hdr->rev)
 138				continue;
 139
 140			p = memdup_patch(data, size);
 141			if (!p)
 142				pr_err("Error allocating buffer %p\n", data);
 143			else {
 144				list_replace(&iter->plist, &p->plist);
 145				kfree(iter->data);
 146				kfree(iter);
 147			}
 148		}
 149	}
 150
 151	/*
 152	 * There weren't any previous patches found in the list cache; save the
 153	 * newly found one.
 154	 */
 155	if (!prev_found) {
 156		p = memdup_patch(data, size);
 157		if (!p)
 158			pr_err("Error allocating buffer for %p\n", data);
 159		else
 160			list_add_tail(&p->plist, &microcode_cache);
 161	}
 162
 163	if (!p)
 164		return;
 165
 166	if (!find_matching_signature(p->data, uci->cpu_sig.sig, uci->cpu_sig.pf))
 167		return;
 168
 169	/*
 170	 * Save for early loading. On 32-bit, that needs to be a physical
 171	 * address as the APs are running from physical addresses, before
 172	 * paging has been enabled.
 173	 */
 174	if (IS_ENABLED(CONFIG_X86_32))
 175		intel_ucode_patch = (struct microcode_intel *)__pa_nodebug(p->data);
 176	else
 177		intel_ucode_patch = p->data;
 178}
 179
 180static int microcode_sanity_check(void *mc, int print_err)
 181{
 182	unsigned long total_size, data_size, ext_table_size;
 183	struct microcode_header_intel *mc_header = mc;
 184	struct extended_sigtable *ext_header = NULL;
 185	u32 sum, orig_sum, ext_sigcount = 0, i;
 186	struct extended_signature *ext_sig;
 187
 188	total_size = get_totalsize(mc_header);
 189	data_size = get_datasize(mc_header);
 190
 191	if (data_size + MC_HEADER_SIZE > total_size) {
 192		if (print_err)
 193			pr_err("Error: bad microcode data file size.\n");
 194		return -EINVAL;
 195	}
 196
 197	if (mc_header->ldrver != 1 || mc_header->hdrver != 1) {
 198		if (print_err)
 199			pr_err("Error: invalid/unknown microcode update format.\n");
 200		return -EINVAL;
 201	}
 202
 203	ext_table_size = total_size - (MC_HEADER_SIZE + data_size);
 204	if (ext_table_size) {
 205		u32 ext_table_sum = 0;
 206		u32 *ext_tablep;
 207
 208		if ((ext_table_size < EXT_HEADER_SIZE)
 209		 || ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE)) {
 210			if (print_err)
 211				pr_err("Error: truncated extended signature table.\n");
 212			return -EINVAL;
 213		}
 214
 215		ext_header = mc + MC_HEADER_SIZE + data_size;
 216		if (ext_table_size != exttable_size(ext_header)) {
 217			if (print_err)
 218				pr_err("Error: extended signature table size mismatch.\n");
 219			return -EFAULT;
 220		}
 221
 222		ext_sigcount = ext_header->count;
 223
 224		/*
 225		 * Check extended table checksum: the sum of all dwords that
 226		 * comprise a valid table must be 0.
 227		 */
 228		ext_tablep = (u32 *)ext_header;
 229
 230		i = ext_table_size / sizeof(u32);
 231		while (i--)
 232			ext_table_sum += ext_tablep[i];
 233
 234		if (ext_table_sum) {
 235			if (print_err)
 236				pr_warn("Bad extended signature table checksum, aborting.\n");
 237			return -EINVAL;
 238		}
 239	}
 240
 241	/*
 242	 * Calculate the checksum of update data and header. The checksum of
 243	 * valid update data and header including the extended signature table
 244	 * must be 0.
 245	 */
 246	orig_sum = 0;
 247	i = (MC_HEADER_SIZE + data_size) / sizeof(u32);
 248	while (i--)
 249		orig_sum += ((u32 *)mc)[i];
 250
 251	if (orig_sum) {
 252		if (print_err)
 253			pr_err("Bad microcode data checksum, aborting.\n");
 254		return -EINVAL;
 255	}
 256
 257	if (!ext_table_size)
 258		return 0;
 259
 260	/*
 261	 * Check extended signature checksum: 0 => valid.
 262	 */
 263	for (i = 0; i < ext_sigcount; i++) {
 264		ext_sig = (void *)ext_header + EXT_HEADER_SIZE +
 265			  EXT_SIGNATURE_SIZE * i;
 266
 267		sum = (mc_header->sig + mc_header->pf + mc_header->cksum) -
 268		      (ext_sig->sig + ext_sig->pf + ext_sig->cksum);
 269		if (sum) {
 270			if (print_err)
 271				pr_err("Bad extended signature checksum, aborting.\n");
 272			return -EINVAL;
 273		}
 274	}
 275	return 0;
 276}
 277
 278/*
 279 * Get microcode matching the BSP's model. Only CPUs with the same model as
 280 * BSP can stay in the platform.
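 * If @save is true, each matching blob is also stashed in the patch cache
 * via save_microcode_patch(); otherwise only the newest matching patch is
 * returned to the caller.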
 281 */
 282static struct microcode_intel *
 283scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save)
 284{
 285	struct microcode_header_intel *mc_header;
 286	struct microcode_intel *patch = NULL;
 287	unsigned int mc_size;
 288
 289	while (size) {
 290		if (size < sizeof(struct microcode_header_intel))
 291			break;
 292
 293		mc_header = (struct microcode_header_intel *)data;
 294
 295		mc_size = get_totalsize(mc_header);
 296		if (!mc_size ||
 297		    mc_size > size ||
 298		    microcode_sanity_check(data, 0) < 0)
 299			break;
 300
 301		size -= mc_size;
 302
 303		if (!find_matching_signature(data, uci->cpu_sig.sig,
 304					     uci->cpu_sig.pf)) {
 305			data += mc_size;
 306			continue;
 307		}
 308
 309		if (save) {
 310			save_microcode_patch(uci, data, mc_size);
 311			goto next;
 312		}
 313
 314
 315		if (!patch) {
 316			if (!has_newer_microcode(data,
 317						 uci->cpu_sig.sig,
 318						 uci->cpu_sig.pf,
 319						 uci->cpu_sig.rev))
 320				goto next;
 321
 322		} else {
 323			struct microcode_header_intel *phdr = &patch->hdr;
 324
 325			if (!has_newer_microcode(data,
 326						 phdr->sig,
 327						 phdr->pf,
 328						 phdr->rev))
 329				goto next;
 330		}
 331
 332		/* We have a newer patch, save it. */
 333		patch = data;
 334
 335next:
 336		data += mc_size;
 337	}
 338
 339	if (size)
 340		return NULL;
 341
 342	return patch;
 343}
 344
 345static int collect_cpu_info_early(struct ucode_cpu_info *uci)
 346{
 347	unsigned int val[2];
 348	unsigned int family, model;
 349	struct cpu_signature csig = { 0 };
 350	unsigned int eax, ebx, ecx, edx;
 351
 352	memset(uci, 0, sizeof(*uci));
 353
 354	eax = 0x00000001;
 355	ecx = 0;
 356	native_cpuid(&eax, &ebx, &ecx, &edx);
 357	csig.sig = eax;
 358
 359	family = x86_family(eax);
 360	model  = x86_model(eax);
 361
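	/*
	 * Processor flags only exist on family 6, model 5 and later (or on
	 * any newer family); on older parts csig.pf stays 0, which
	 * cpu_signatures_match() accepts when the patch flags are 0 too.
	 */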
 362	if ((model >= 5) || (family > 6)) {
 363		/* get processor flags from MSR 0x17 */
 364		native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
 365		csig.pf = 1 << ((val[1] >> 18) & 7);
 366	}
 367
 368	csig.rev = intel_get_microcode_revision();
 369
 370	uci->cpu_sig = csig;
 371	uci->valid = 1;
 372
 373	return 0;
 374}
 375
 376static void show_saved_mc(void)
 377{
 378#ifdef DEBUG
 379	int i = 0, j;
 380	unsigned int sig, pf, rev, total_size, data_size, date;
 381	struct ucode_cpu_info uci;
 382	struct ucode_patch *p;
 383
 384	if (list_empty(&microcode_cache)) {
 385		pr_debug("no microcode data saved.\n");
 386		return;
 387	}
 388
 389	collect_cpu_info_early(&uci);
 390
 391	sig	= uci.cpu_sig.sig;
 392	pf	= uci.cpu_sig.pf;
 393	rev	= uci.cpu_sig.rev;
 394	pr_debug("CPU: sig=0x%x, pf=0x%x, rev=0x%x\n", sig, pf, rev);
 395
 396	list_for_each_entry(p, &microcode_cache, plist) {
 397		struct microcode_header_intel *mc_saved_header;
 398		struct extended_sigtable *ext_header;
 399		struct extended_signature *ext_sig;
 400		int ext_sigcount;
 401
 402		mc_saved_header = (struct microcode_header_intel *)p->data;
 403
 404		sig	= mc_saved_header->sig;
 405		pf	= mc_saved_header->pf;
 406		rev	= mc_saved_header->rev;
 407		date	= mc_saved_header->date;
 408
 409		total_size	= get_totalsize(mc_saved_header);
 410		data_size	= get_datasize(mc_saved_header);
 411
 412		pr_debug("mc_saved[%d]: sig=0x%x, pf=0x%x, rev=0x%x, total size=0x%x, date = %04x-%02x-%02x\n",
 413			 i++, sig, pf, rev, total_size,
 414			 date & 0xffff,
 415			 date >> 24,
 416			 (date >> 16) & 0xff);
 417
 418		/* Look for ext. headers: */
 419		if (total_size <= data_size + MC_HEADER_SIZE)
 420			continue;
 421
 422		ext_header = (void *)mc_saved_header + data_size + MC_HEADER_SIZE;
 423		ext_sigcount = ext_header->count;
 424		ext_sig = (void *)ext_header + EXT_HEADER_SIZE;
 425
 426		for (j = 0; j < ext_sigcount; j++) {
 427			sig = ext_sig->sig;
 428			pf = ext_sig->pf;
 429
 430			pr_debug("\tExtended[%d]: sig=0x%x, pf=0x%x\n",
 431				 j, sig, pf);
 432
 433			ext_sig++;
 434		}
 435	}
 436#endif
 437}
 438
 439/*
 440 * Save this microcode patch. It will be loaded early when a CPU is
 441 * hot-added or resumes.
 442 */
 443static void save_mc_for_early(struct ucode_cpu_info *uci, u8 *mc, unsigned int size)
 444{
 445	/* Synchronization during CPU hotplug. */
 446	static DEFINE_MUTEX(x86_cpu_microcode_mutex);
 447
 448	mutex_lock(&x86_cpu_microcode_mutex);
 449
 450	save_microcode_patch(uci, mc, size);
 451	show_saved_mc();
 452
 453	mutex_unlock(&x86_cpu_microcode_mutex);
 454}
 455
 456static bool load_builtin_intel_microcode(struct cpio_data *cp)
 457{
 458	unsigned int eax = 1, ebx, ecx = 0, edx;
 459	char name[30];
 460
 461	if (IS_ENABLED(CONFIG_X86_32))
 462		return false;
 463
 464	native_cpuid(&eax, &ebx, &ecx, &edx);
 465
 466	sprintf(name, "intel-ucode/%02x-%02x-%02x",
 467		      x86_family(eax), x86_model(eax), x86_stepping(eax));
 468
 469	return get_builtin_firmware(cp, name);
 470}
 471
 472/*
 473 * Print ucode update info.
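 * The header date field packs the month into bits 31:24, the day into
 * bits 23:16 and the year into bits 15:0 as BCD digits, which is why it
 * is printed with hex format specifiers below.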
 474 */
 475static void
 476print_ucode_info(struct ucode_cpu_info *uci, unsigned int date)
 477{
 478	pr_info_once("microcode updated early to revision 0x%x, date = %04x-%02x-%02x\n",
 479		     uci->cpu_sig.rev,
 480		     date & 0xffff,
 481		     date >> 24,
 482		     (date >> 16) & 0xff);
 483}
 484
 485#ifdef CONFIG_X86_32
 486
 487static int delay_ucode_info;
 488static int current_mc_date;
 489
 490/*
 491 * Print early updated ucode info after printk works. This is a delayed info dump.
 492 */
 493void show_ucode_info_early(void)
 494{
 495	struct ucode_cpu_info uci;
 496
 497	if (delay_ucode_info) {
 498		collect_cpu_info_early(&uci);
 499		print_ucode_info(&uci, current_mc_date);
 500		delay_ucode_info = 0;
 501	}
 502}
 503
 504/*
 505 * At this point, we cannot call printk() yet. Delay printing microcode info in
 506 * show_ucode_info_early() until printk() works.
 507 */
 508static void print_ucode(struct ucode_cpu_info *uci)
 509{
 510	struct microcode_intel *mc;
 511	int *delay_ucode_info_p;
 512	int *current_mc_date_p;
 513
 514	mc = uci->mc;
 515	if (!mc)
 516		return;
 517
 518	delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
 519	current_mc_date_p = (int *)__pa_nodebug(&current_mc_date);
 520
 521	*delay_ucode_info_p = 1;
 522	*current_mc_date_p = mc->hdr.date;
 523}
 524#else
 525
 526static inline void print_ucode(struct ucode_cpu_info *uci)
 527{
 528	struct microcode_intel *mc;
 529
 530	mc = uci->mc;
 531	if (!mc)
 532		return;
 533
 534	print_ucode_info(uci, mc->hdr.date);
 535}
 536#endif
 537
 538static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
 539{
 540	struct microcode_intel *mc;
 541	u32 rev;
 542
 543	mc = uci->mc;
 544	if (!mc)
 545		return 0;
 546
 547	/*
 548	 * Save us the MSR write below - which is a particularly expensive
 549	 * operation - when the other hyperthread has updated the microcode
 550	 * already.
 551	 */
 552	rev = intel_get_microcode_revision();
 553	if (rev >= mc->hdr.rev) {
 554		uci->cpu_sig.rev = rev;
 555		return UCODE_OK;
 556	}
 557
 558	/*
 559	 * Writeback and invalidate caches before updating microcode to avoid
 560	 * internal issues depending on what the microcode is updating.
 561	 */
 562	native_wbinvd();
 563
 564	/* write microcode via MSR 0x79 */
 565	native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
 566
 567	rev = intel_get_microcode_revision();
 568	if (rev != mc->hdr.rev)
 569		return -1;
 570
 571	uci->cpu_sig.rev = rev;
 572
 573	if (early)
 574		print_ucode(uci);
 575	else
 576		print_ucode_info(uci, mc->hdr.date);
 577
 578	return 0;
 579}
 580
 581int __init save_microcode_in_initrd_intel(void)
 582{
 583	struct ucode_cpu_info uci;
 584	struct cpio_data cp;
 585
 586	/*
 587	 * initrd is going away, clear patch ptr. We will scan the microcode one
 588	 * last time before jettisoning and save a patch, if found. Then we will
 589	 * update that pointer too, with a stable patch address to use when
 590	 * resuming the cores.
 591	 */
 592	intel_ucode_patch = NULL;
 593
 594	if (!load_builtin_intel_microcode(&cp))
 595		cp = find_microcode_in_initrd(ucode_path, false);
 596
 597	if (!(cp.data && cp.size))
 598		return 0;
 599
 600	collect_cpu_info_early(&uci);
 601
 602	scan_microcode(cp.data, cp.size, &uci, true);
 603
 604	show_saved_mc();
 605
 606	return 0;
 607}
 608
 609/*
 610 * Returns a pointer to the matching patch we found, or NULL if there is none.
 611 */
 612static struct microcode_intel *__load_ucode_intel(struct ucode_cpu_info *uci)
 613{
 614	static const char *path;
 615	struct cpio_data cp;
 616	bool use_pa;
 617
 618	if (IS_ENABLED(CONFIG_X86_32)) {
 619		path	  = (const char *)__pa_nodebug(ucode_path);
 620		use_pa	  = true;
 621	} else {
 622		path	  = ucode_path;
 623		use_pa	  = false;
 624	}
 625
 626	/* try built-in microcode first */
 627	if (!load_builtin_intel_microcode(&cp))
 628		cp = find_microcode_in_initrd(path, use_pa);
 629
 630	if (!(cp.data && cp.size))
 631		return NULL;
 632
 633	collect_cpu_info_early(uci);
 634
 635	return scan_microcode(cp.data, cp.size, uci, false);
 636}
 637
 638void __init load_ucode_intel_bsp(void)
 639{
 640	struct microcode_intel *patch;
 641	struct ucode_cpu_info uci;
 642
 643	patch = __load_ucode_intel(&uci);
 644	if (!patch)
 645		return;
 646
 647	uci.mc = patch;
 648
 649	apply_microcode_early(&uci, true);
 650}
 651
 652void load_ucode_intel_ap(void)
 653{
 654	struct microcode_intel *patch, **iup;
 655	struct ucode_cpu_info uci;
 656
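	/*
	 * On 32-bit the APs run this before paging is enabled, so the patch
	 * pointer has to be accessed through its physical address.
	 */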
 657	if (IS_ENABLED(CONFIG_X86_32))
 658		iup = (struct microcode_intel **) __pa_nodebug(&intel_ucode_patch);
 659	else
 660		iup = &intel_ucode_patch;
 661
 662reget:
 663	if (!*iup) {
 664		patch = __load_ucode_intel(&uci);
 665		if (!patch)
 666			return;
 667
 668		*iup = patch;
 669	}
 670
 671	uci.mc = *iup;
 672
 673	if (apply_microcode_early(&uci, true)) {
 674		/* Mixed-silicon system? Try to refetch the proper patch: */
 675		*iup = NULL;
 676
 677		goto reget;
 678	}
 679}
 680
 681static struct microcode_intel *find_patch(struct ucode_cpu_info *uci)
 682{
 683	struct microcode_header_intel *phdr;
 684	struct ucode_patch *iter, *tmp;
 685
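	/* Walk the patch cache for a newer revision that matches this CPU. */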
 686	list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {
 687
 688		phdr = (struct microcode_header_intel *)iter->data;
 689
 690		if (phdr->rev <= uci->cpu_sig.rev)
 691			continue;
 692
 693		if (!find_matching_signature(phdr,
 694					     uci->cpu_sig.sig,
 695					     uci->cpu_sig.pf))
 696			continue;
 697
 698		return iter->data;
 699	}
 700	return NULL;
 701}
 702
 703void reload_ucode_intel(void)
 704{
 705	struct microcode_intel *p;
 706	struct ucode_cpu_info uci;
 707
 708	collect_cpu_info_early(&uci);
 709
 710	p = find_patch(&uci);
 711	if (!p)
 712		return;
 713
 714	uci.mc = p;
 715
 716	apply_microcode_early(&uci, false);
 717}
 718
 719static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
 720{
 721	static struct cpu_signature prev;
 722	struct cpuinfo_x86 *c = &cpu_data(cpu_num);
 723	unsigned int val[2];
 724
 725	memset(csig, 0, sizeof(*csig));
 726
 727	csig->sig = cpuid_eax(0x00000001);
 728
 729	if ((c->x86_model >= 5) || (c->x86 > 6)) {
 730		/* get processor flags from MSR 0x17 */
 731		rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
 732		csig->pf = 1 << ((val[1] >> 18) & 7);
 733	}
 734
 735	csig->rev = c->microcode;
 736
 737	/* No extra locking on prev, races are harmless. */
 738	if (csig->sig != prev.sig || csig->pf != prev.pf || csig->rev != prev.rev) {
 739		pr_info("sig=0x%x, pf=0x%x, revision=0x%x\n",
 740			csig->sig, csig->pf, csig->rev);
 741		prev = *csig;
 742	}
 743
 744	return 0;
 745}
 746
 747static enum ucode_state apply_microcode_intel(int cpu)
 748{
 749	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 750	struct cpuinfo_x86 *c = &cpu_data(cpu);
 751	bool bsp = c->cpu_index == boot_cpu_data.cpu_index;
 752	struct microcode_intel *mc;
 753	enum ucode_state ret;
 754	static int prev_rev;
 755	u32 rev;
 756
 757	/* We should bind the task to the CPU */
 758	if (WARN_ON(raw_smp_processor_id() != cpu))
 759		return UCODE_ERROR;
 760
 761	/* Look for a newer patch in our cache: */
 762	mc = find_patch(uci);
 763	if (!mc) {
 764		mc = uci->mc;
 765		if (!mc)
 766			return UCODE_NFOUND;
 767	}
 768
 769	/*
 770	 * Save us the MSR write below - which is a particularly expensive
 771	 * operation - when the other hyperthread has updated the microcode
 772	 * already.
 773	 */
 774	rev = intel_get_microcode_revision();
 775	if (rev >= mc->hdr.rev) {
 776		ret = UCODE_OK;
 777		goto out;
 778	}
 779
 780	/*
 781	 * Writeback and invalidate caches before updating microcode to avoid
 782	 * internal issues depending on what the microcode is updating.
 783	 */
 784	native_wbinvd();
 785
 786	/* write microcode via MSR 0x79 */
 787	wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
 788
 789	rev = intel_get_microcode_revision();
 790
 791	if (rev != mc->hdr.rev) {
 792		pr_err("CPU%d update to revision 0x%x failed\n",
 793		       cpu, mc->hdr.rev);
 794		return UCODE_ERROR;
 795	}
 796
 797	if (bsp && rev != prev_rev) {
 798		pr_info("updated to revision 0x%x, date = %04x-%02x-%02x\n",
 799			rev,
 800			mc->hdr.date & 0xffff,
 801			mc->hdr.date >> 24,
 802			(mc->hdr.date >> 16) & 0xff);
 803		prev_rev = rev;
 804	}
 805
 806	ret = UCODE_UPDATED;
 807
 808out:
 809	uci->cpu_sig.rev = rev;
 810	c->microcode	 = rev;
 811
 812	/* Update boot_cpu_data's revision too, if we're on the BSP: */
 813	if (bsp)
 814		boot_cpu_data.microcode = rev;
 815
 816	return ret;
 817}
 818
 819static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter)
 820{
 821	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 822	unsigned int curr_mc_size = 0, new_mc_size = 0;
 823	enum ucode_state ret = UCODE_OK;
 824	int new_rev = uci->cpu_sig.rev;
 825	u8 *new_mc = NULL, *mc = NULL;
 826	unsigned int csig, cpf;
 827
 828	while (iov_iter_count(iter)) {
 829		struct microcode_header_intel mc_header;
 830		unsigned int mc_size, data_size;
 831		u8 *data;
 832
 833		if (!copy_from_iter_full(&mc_header, sizeof(mc_header), iter)) {
 834			pr_err("error! Truncated or inaccessible header in microcode data file\n");
 835			break;
 836		}
 837
 838		mc_size = get_totalsize(&mc_header);
 839		if (mc_size < sizeof(mc_header)) {
 840			pr_err("error! Bad data in microcode data file (totalsize too small)\n");
 841			break;
 842		}
 843		data_size = mc_size - sizeof(mc_header);
 844		if (data_size > iov_iter_count(iter)) {
 845			pr_err("error! Bad data in microcode data file (truncated file?)\n");
 846			break;
 847		}
 848
 849		/* For performance reasons, reuse mc area when possible */
 850		if (!mc || mc_size > curr_mc_size) {
 851			vfree(mc);
 852			mc = vmalloc(mc_size);
 853			if (!mc)
 854				break;
 855			curr_mc_size = mc_size;
 856		}
 857
 858		memcpy(mc, &mc_header, sizeof(mc_header));
 859		data = mc + sizeof(mc_header);
 860		if (!copy_from_iter_full(data, data_size, iter) ||
 861		    microcode_sanity_check(mc, 1) < 0) {
 862			break;
 863		}
 864
 865		csig = uci->cpu_sig.sig;
 866		cpf = uci->cpu_sig.pf;
 867		if (has_newer_microcode(mc, csig, cpf, new_rev)) {
 868			vfree(new_mc);
 869			new_rev = mc_header.rev;
 870			new_mc  = mc;
 871			new_mc_size = mc_size;
 872			mc = NULL;	/* trigger new vmalloc */
 873			ret = UCODE_NEW;
 874		}
 875	}
 876
 877	vfree(mc);
 878
 879	if (iov_iter_count(iter)) {
 880		vfree(new_mc);
 881		return UCODE_ERROR;
 882	}
 883
 884	if (!new_mc)
 885		return UCODE_NFOUND;
 886
 887	vfree(uci->mc);
 888	uci->mc = (struct microcode_intel *)new_mc;
 889
 890	/*
 891	 * If early loading microcode is supported, save this mc into
 892	 * permanent memory so it will be loaded early when a CPU is hot-added
 893	 * or resumes.
 894	 */
 895	save_mc_for_early(uci, new_mc, new_mc_size);
 896
 897	pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
 898		 cpu, new_rev, uci->cpu_sig.rev);
 899
 900	return ret;
 901}
 902
 903static bool is_blacklisted(unsigned int cpu)
 904{
 905	struct cpuinfo_x86 *c = &cpu_data(cpu);
 906
 907	/*
 908	 * Late loading on model 79 with microcode revision less than 0x0b000021
 909	 * and LLC size per core bigger than 2.5MB may result in a system hang.
 910	 * This behavior is documented in item BDF90, #334165 (Intel Xeon
 911	 * Processor E7-8800/4800 v4 Product Family).
 912	 */
 913	if (c->x86 == 6 &&
 914	    c->x86_model == INTEL_FAM6_BROADWELL_X &&
 915	    c->x86_stepping == 0x01 &&
 916	    llc_size_per_core > 2621440 &&
 917	    c->microcode < 0x0b000021) {
 918		pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
 919		pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
 920		return true;
 921	}
 922
 923	return false;
 924}
 925
 926static enum ucode_state request_microcode_fw(int cpu, struct device *device,
 927					     bool refresh_fw)
 928{
 929	struct cpuinfo_x86 *c = &cpu_data(cpu);
 930	const struct firmware *firmware;
 931	struct iov_iter iter;
 932	enum ucode_state ret;
 933	struct kvec kvec;
 934	char name[30];
 935
 936	if (is_blacklisted(cpu))
 937		return UCODE_NFOUND;
 938
 939	sprintf(name, "intel-ucode/%02x-%02x-%02x",
 940		c->x86, c->x86_model, c->x86_stepping);
 941
 942	if (request_firmware_direct(&firmware, name, device)) {
 943		pr_debug("data file %s load failed\n", name);
 944		return UCODE_NFOUND;
 945	}
 946
 947	kvec.iov_base = (void *)firmware->data;
 948	kvec.iov_len = firmware->size;
 949	iov_iter_kvec(&iter, WRITE, &kvec, 1, firmware->size);
 950	ret = generic_load_microcode(cpu, &iter);
 951
 952	release_firmware(firmware);
 953
 954	return ret;
 955}
 956
 957static enum ucode_state
 958request_microcode_user(int cpu, const void __user *buf, size_t size)
 959{
 960	struct iov_iter iter;
 961	struct iovec iov;
 962
 963	if (is_blacklisted(cpu))
 964		return UCODE_NFOUND;
 965
 966	iov.iov_base = (void __user *)buf;
 967	iov.iov_len = size;
 968	iov_iter_init(&iter, WRITE, &iov, 1, size);
 969
 970	return generic_load_microcode(cpu, &iter);
 971}
 972
 973static struct microcode_ops microcode_intel_ops = {
 974	.request_microcode_user		  = request_microcode_user,
 975	.request_microcode_fw             = request_microcode_fw,
 976	.collect_cpu_info                 = collect_cpu_info,
 977	.apply_microcode                  = apply_microcode_intel,
 978};
 979
 980static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
 981{
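	/* x86_cache_size is in KB; scale to bytes and split across the cores. */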
 982	u64 llc_size = c->x86_cache_size * 1024ULL;
 983
 984	do_div(llc_size, c->x86_max_cores);
 985
 986	return (int)llc_size;
 987}
 988
 989struct microcode_ops * __init init_intel_microcode(void)
 990{
 991	struct cpuinfo_x86 *c = &boot_cpu_data;
 992
 993	if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
 994	    cpu_has(c, X86_FEATURE_IA64)) {
 995		pr_err("Intel CPU family 0x%x not supported\n", c->x86);
 996		return NULL;
 997	}
 998
 999	llc_size_per_core = calc_llc_size_per_core(c);
1000
1001	return &microcode_intel_ops;
1002}
v6.2
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * Intel CPU Microcode Update Driver for Linux
  4 *
  5 * Copyright (C) 2000-2006 Tigran Aivazian <aivazian.tigran@gmail.com>
  6 *		 2006 Shaohua Li <shaohua.li@intel.com>
  7 *
  8 * Intel CPU microcode early update for Linux
  9 *
 10 * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
 11 *		      H Peter Anvin <hpa@zytor.com>
 12 */
 13
 14/*
 15 * This needs to be before all headers so that pr_debug in printk.h doesn't turn
 16 * printk calls into no_printk().
 17 *
 18 *#define DEBUG
 19 */
 20#define pr_fmt(fmt) "microcode: " fmt
 21
 22#include <linux/earlycpio.h>
 23#include <linux/firmware.h>
 24#include <linux/uaccess.h>
 25#include <linux/vmalloc.h>
 26#include <linux/initrd.h>
 27#include <linux/kernel.h>
 28#include <linux/slab.h>
 29#include <linux/cpu.h>
 30#include <linux/uio.h>
 31#include <linux/mm.h>
 32
 33#include <asm/microcode_intel.h>
 34#include <asm/intel-family.h>
 35#include <asm/processor.h>
 36#include <asm/tlbflush.h>
 37#include <asm/setup.h>
 38#include <asm/msr.h>
 39
 40static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";
 41
 42/* Current microcode patch used in early patching on the APs. */
 43static struct microcode_intel *intel_ucode_patch;
 44
 45/* last level cache size per core */
 46static int llc_size_per_core;
 47
 48/*
 49 * Returns 1 if a newer update has been found, 0 otherwise.
 50 */
 51static int has_newer_microcode(void *mc, unsigned int csig, int cpf, int new_rev)
 52{
 53	struct microcode_header_intel *mc_hdr = mc;
 54
 55	if (mc_hdr->rev <= new_rev)
 56		return 0;
 57
 58	return intel_find_matching_signature(mc, csig, cpf);
 59}
 60
 61static struct ucode_patch *memdup_patch(void *data, unsigned int size)
 62{
 63	struct ucode_patch *p;
 64
 65	p = kzalloc(sizeof(struct ucode_patch), GFP_KERNEL);
 66	if (!p)
 67		return NULL;
 68
 69	p->data = kmemdup(data, size, GFP_KERNEL);
 70	if (!p->data) {
 71		kfree(p);
 72		return NULL;
 73	}
 74
 75	return p;
 76}
 77
 78static void save_microcode_patch(struct ucode_cpu_info *uci, void *data, unsigned int size)
 79{
 80	struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
 81	struct ucode_patch *iter, *tmp, *p = NULL;
 82	bool prev_found = false;
 83	unsigned int sig, pf;
 84
 85	mc_hdr = (struct microcode_header_intel *)data;
 86
 87	list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {
 88		mc_saved_hdr = (struct microcode_header_intel *)iter->data;
 89		sig	     = mc_saved_hdr->sig;
 90		pf	     = mc_saved_hdr->pf;
 91
 92		if (intel_find_matching_signature(data, sig, pf)) {
 93			prev_found = true;
 94
 95			if (mc_hdr->rev <= mc_saved_hdr->rev)
 96				continue;
 97
 98			p = memdup_patch(data, size);
 99			if (!p)
100				pr_err("Error allocating buffer %p\n", data);
101			else {
102				list_replace(&iter->plist, &p->plist);
103				kfree(iter->data);
104				kfree(iter);
105			}
106		}
107	}
108
109	/*
110	 * There weren't any previous patches found in the list cache; save the
111	 * newly found one.
112	 */
113	if (!prev_found) {
114		p = memdup_patch(data, size);
115		if (!p)
116			pr_err("Error allocating buffer for %p\n", data);
117		else
118			list_add_tail(&p->plist, &microcode_cache);
119	}
120
121	if (!p)
122		return;
123
124	if (!intel_find_matching_signature(p->data, uci->cpu_sig.sig, uci->cpu_sig.pf))
125		return;
126
127	/*
128	 * Save for early loading. On 32-bit, that needs to be a physical
129	 * address as the APs are running from physical addresses, before
130	 * paging has been enabled.
131	 */
132	if (IS_ENABLED(CONFIG_X86_32))
133		intel_ucode_patch = (struct microcode_intel *)__pa_nodebug(p->data);
134	else
135		intel_ucode_patch = p->data;
136}
137
138/*
139 * Get microcode matching the BSP's model. Only CPUs with the same model as
140 * BSP can stay in the platform.
141 */
142static struct microcode_intel *
143scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save)
144{
145	struct microcode_header_intel *mc_header;
146	struct microcode_intel *patch = NULL;
147	unsigned int mc_size;
148
149	while (size) {
150		if (size < sizeof(struct microcode_header_intel))
151			break;
152
153		mc_header = (struct microcode_header_intel *)data;
154
155		mc_size = get_totalsize(mc_header);
156		if (!mc_size ||
157		    mc_size > size ||
158		    intel_microcode_sanity_check(data, false, MC_HEADER_TYPE_MICROCODE) < 0)
159			break;
160
161		size -= mc_size;
162
163		if (!intel_find_matching_signature(data, uci->cpu_sig.sig,
164						   uci->cpu_sig.pf)) {
165			data += mc_size;
166			continue;
167		}
168
169		if (save) {
170			save_microcode_patch(uci, data, mc_size);
171			goto next;
172		}
173
174
175		if (!patch) {
176			if (!has_newer_microcode(data,
177						 uci->cpu_sig.sig,
178						 uci->cpu_sig.pf,
179						 uci->cpu_sig.rev))
180				goto next;
181
182		} else {
183			struct microcode_header_intel *phdr = &patch->hdr;
184
185			if (!has_newer_microcode(data,
186						 phdr->sig,
187						 phdr->pf,
188						 phdr->rev))
189				goto next;
190		}
191
192		/* We have a newer patch, save it. */
193		patch = data;
194
195next:
196		data += mc_size;
197	}
198
199	if (size)
200		return NULL;
201
202	return patch;
203}
204
205static void show_saved_mc(void)
206{
207#ifdef DEBUG
208	int i = 0, j;
209	unsigned int sig, pf, rev, total_size, data_size, date;
210	struct ucode_cpu_info uci;
211	struct ucode_patch *p;
212
213	if (list_empty(&microcode_cache)) {
214		pr_debug("no microcode data saved.\n");
215		return;
216	}
217
218	intel_cpu_collect_info(&uci);
219
220	sig	= uci.cpu_sig.sig;
221	pf	= uci.cpu_sig.pf;
222	rev	= uci.cpu_sig.rev;
223	pr_debug("CPU: sig=0x%x, pf=0x%x, rev=0x%x\n", sig, pf, rev);
224
225	list_for_each_entry(p, &microcode_cache, plist) {
226		struct microcode_header_intel *mc_saved_header;
227		struct extended_sigtable *ext_header;
228		struct extended_signature *ext_sig;
229		int ext_sigcount;
230
231		mc_saved_header = (struct microcode_header_intel *)p->data;
232
233		sig	= mc_saved_header->sig;
234		pf	= mc_saved_header->pf;
235		rev	= mc_saved_header->rev;
236		date	= mc_saved_header->date;
237
238		total_size	= get_totalsize(mc_saved_header);
239		data_size	= get_datasize(mc_saved_header);
240
241		pr_debug("mc_saved[%d]: sig=0x%x, pf=0x%x, rev=0x%x, total size=0x%x, date = %04x-%02x-%02x\n",
242			 i++, sig, pf, rev, total_size,
243			 date & 0xffff,
244			 date >> 24,
245			 (date >> 16) & 0xff);
246
247		/* Look for ext. headers: */
248		if (total_size <= data_size + MC_HEADER_SIZE)
249			continue;
250
251		ext_header = (void *)mc_saved_header + data_size + MC_HEADER_SIZE;
252		ext_sigcount = ext_header->count;
253		ext_sig = (void *)ext_header + EXT_HEADER_SIZE;
254
255		for (j = 0; j < ext_sigcount; j++) {
256			sig = ext_sig->sig;
257			pf = ext_sig->pf;
258
259			pr_debug("\tExtended[%d]: sig=0x%x, pf=0x%x\n",
260				 j, sig, pf);
261
262			ext_sig++;
263		}
264	}
265#endif
266}
267
268/*
269 * Save this microcode patch. It will be loaded early when a CPU is
270 * hot-added or resumes.
271 */
272static void save_mc_for_early(struct ucode_cpu_info *uci, u8 *mc, unsigned int size)
273{
274	/* Synchronization during CPU hotplug. */
275	static DEFINE_MUTEX(x86_cpu_microcode_mutex);
276
277	mutex_lock(&x86_cpu_microcode_mutex);
278
279	save_microcode_patch(uci, mc, size);
280	show_saved_mc();
281
282	mutex_unlock(&x86_cpu_microcode_mutex);
283}
284
285static bool load_builtin_intel_microcode(struct cpio_data *cp)
286{
287	unsigned int eax = 1, ebx, ecx = 0, edx;
288	struct firmware fw;
289	char name[30];
290
291	if (IS_ENABLED(CONFIG_X86_32))
292		return false;
293
294	native_cpuid(&eax, &ebx, &ecx, &edx);
295
296	sprintf(name, "intel-ucode/%02x-%02x-%02x",
297		      x86_family(eax), x86_model(eax), x86_stepping(eax));
298
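	/*
	 * Only firmware objects linked into the kernel image are considered
	 * here; nothing is read from the filesystem or the initrd.
	 */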
299	if (firmware_request_builtin(&fw, name)) {
300		cp->size = fw.size;
301		cp->data = (void *)fw.data;
302		return true;
303	}
304
305	return false;
306}
307
308/*
309 * Print ucode update info.
310 */
311static void
312print_ucode_info(struct ucode_cpu_info *uci, unsigned int date)
313{
314	pr_info_once("microcode updated early to revision 0x%x, date = %04x-%02x-%02x\n",
315		     uci->cpu_sig.rev,
316		     date & 0xffff,
317		     date >> 24,
318		     (date >> 16) & 0xff);
319}
320
321#ifdef CONFIG_X86_32
322
323static int delay_ucode_info;
324static int current_mc_date;
325
326/*
327 * Print early updated ucode info after printk works. This is a delayed info dump.
328 */
329void show_ucode_info_early(void)
330{
331	struct ucode_cpu_info uci;
332
333	if (delay_ucode_info) {
334		intel_cpu_collect_info(&uci);
335		print_ucode_info(&uci, current_mc_date);
336		delay_ucode_info = 0;
337	}
338}
339
340/*
341 * At this point, we cannot call printk() yet. Delay printing microcode info in
342 * show_ucode_info_early() until printk() works.
343 */
344static void print_ucode(struct ucode_cpu_info *uci)
345{
346	struct microcode_intel *mc;
347	int *delay_ucode_info_p;
348	int *current_mc_date_p;
349
350	mc = uci->mc;
351	if (!mc)
352		return;
353
354	delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
355	current_mc_date_p = (int *)__pa_nodebug(&current_mc_date);
356
357	*delay_ucode_info_p = 1;
358	*current_mc_date_p = mc->hdr.date;
359}
360#else
361
362static inline void print_ucode(struct ucode_cpu_info *uci)
363{
364	struct microcode_intel *mc;
365
366	mc = uci->mc;
367	if (!mc)
368		return;
369
370	print_ucode_info(uci, mc->hdr.date);
371}
372#endif
373
374static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
375{
376	struct microcode_intel *mc;
377	u32 rev;
378
379	mc = uci->mc;
380	if (!mc)
381		return 0;
382
383	/*
384	 * Save us the MSR write below - which is a particularly expensive
385	 * operation - when the other hyperthread has updated the microcode
386	 * already.
387	 */
388	rev = intel_get_microcode_revision();
389	if (rev >= mc->hdr.rev) {
390		uci->cpu_sig.rev = rev;
391		return UCODE_OK;
392	}
393
394	/*
395	 * Writeback and invalidate caches before updating microcode to avoid
396	 * internal issues depending on what the microcode is updating.
397	 */
398	native_wbinvd();
399
400	/* write microcode via MSR 0x79 */
401	native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
402
403	rev = intel_get_microcode_revision();
404	if (rev != mc->hdr.rev)
405		return -1;
406
407	uci->cpu_sig.rev = rev;
408
409	if (early)
410		print_ucode(uci);
411	else
412		print_ucode_info(uci, mc->hdr.date);
413
414	return 0;
415}
416
417int __init save_microcode_in_initrd_intel(void)
418{
419	struct ucode_cpu_info uci;
420	struct cpio_data cp;
421
422	/*
423	 * initrd is going away, clear patch ptr. We will scan the microcode one
424	 * last time before jettisoning and save a patch, if found. Then we will
425	 * update that pointer too, with a stable patch address to use when
426	 * resuming the cores.
427	 */
428	intel_ucode_patch = NULL;
429
430	if (!load_builtin_intel_microcode(&cp))
431		cp = find_microcode_in_initrd(ucode_path, false);
432
433	if (!(cp.data && cp.size))
434		return 0;
435
436	intel_cpu_collect_info(&uci);
437
438	scan_microcode(cp.data, cp.size, &uci, true);
439
440	show_saved_mc();
441
442	return 0;
443}
444
445/*
446 * Returns a pointer to the matching patch we found, or NULL if there is none.
447 */
448static struct microcode_intel *__load_ucode_intel(struct ucode_cpu_info *uci)
449{
450	static const char *path;
451	struct cpio_data cp;
452	bool use_pa;
453
454	if (IS_ENABLED(CONFIG_X86_32)) {
455		path	  = (const char *)__pa_nodebug(ucode_path);
456		use_pa	  = true;
457	} else {
458		path	  = ucode_path;
459		use_pa	  = false;
460	}
461
462	/* try built-in microcode first */
463	if (!load_builtin_intel_microcode(&cp))
464		cp = find_microcode_in_initrd(path, use_pa);
465
466	if (!(cp.data && cp.size))
467		return NULL;
468
469	intel_cpu_collect_info(uci);
470
471	return scan_microcode(cp.data, cp.size, uci, false);
472}
473
474void __init load_ucode_intel_bsp(void)
475{
476	struct microcode_intel *patch;
477	struct ucode_cpu_info uci;
478
479	patch = __load_ucode_intel(&uci);
480	if (!patch)
481		return;
482
483	uci.mc = patch;
484
485	apply_microcode_early(&uci, true);
486}
487
488void load_ucode_intel_ap(void)
489{
490	struct microcode_intel *patch, **iup;
491	struct ucode_cpu_info uci;
492
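	/*
	 * On 32-bit the APs run this before paging is enabled, so the patch
	 * pointer has to be accessed through its physical address.
	 */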
493	if (IS_ENABLED(CONFIG_X86_32))
494		iup = (struct microcode_intel **) __pa_nodebug(&intel_ucode_patch);
495	else
496		iup = &intel_ucode_patch;
497
498	if (!*iup) {
499		patch = __load_ucode_intel(&uci);
500		if (!patch)
501			return;
502
503		*iup = patch;
504	}
505
506	uci.mc = *iup;
507
508	apply_microcode_early(&uci, true);
509}
510
511static struct microcode_intel *find_patch(struct ucode_cpu_info *uci)
512{
513	struct microcode_header_intel *phdr;
514	struct ucode_patch *iter, *tmp;
515
516	list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {
517
518		phdr = (struct microcode_header_intel *)iter->data;
519
520		if (phdr->rev <= uci->cpu_sig.rev)
521			continue;
522
523		if (!intel_find_matching_signature(phdr,
524						   uci->cpu_sig.sig,
525						   uci->cpu_sig.pf))
526			continue;
527
528		return iter->data;
529	}
530	return NULL;
531}
532
533void reload_ucode_intel(void)
534{
535	struct microcode_intel *p;
536	struct ucode_cpu_info uci;
537
538	intel_cpu_collect_info(&uci);
539
540	p = find_patch(&uci);
541	if (!p)
542		return;
543
544	uci.mc = p;
545
546	apply_microcode_early(&uci, false);
547}
548
549static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
550{
551	struct cpuinfo_x86 *c = &cpu_data(cpu_num);
552	unsigned int val[2];
553
554	memset(csig, 0, sizeof(*csig));
555
556	csig->sig = cpuid_eax(0x00000001);
557
558	if ((c->x86_model >= 5) || (c->x86 > 6)) {
559		/* get processor flags from MSR 0x17 */
560		rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
561		csig->pf = 1 << ((val[1] >> 18) & 7);
562	}
563
564	csig->rev = c->microcode;
565
566	return 0;
567}
568
569static enum ucode_state apply_microcode_intel(int cpu)
570{
571	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
572	struct cpuinfo_x86 *c = &cpu_data(cpu);
573	bool bsp = c->cpu_index == boot_cpu_data.cpu_index;
574	struct microcode_intel *mc;
575	enum ucode_state ret;
576	static int prev_rev;
577	u32 rev;
578
579	/* We should bind the task to the CPU */
580	if (WARN_ON(raw_smp_processor_id() != cpu))
581		return UCODE_ERROR;
582
583	/* Look for a newer patch in our cache: */
584	mc = find_patch(uci);
585	if (!mc) {
586		mc = uci->mc;
587		if (!mc)
588			return UCODE_NFOUND;
589	}
590
591	/*
592	 * Save us the MSR write below - which is a particularly expensive
593	 * operation - when the other hyperthread has updated the microcode
594	 * already.
595	 */
596	rev = intel_get_microcode_revision();
597	if (rev >= mc->hdr.rev) {
598		ret = UCODE_OK;
599		goto out;
600	}
601
602	/*
603	 * Writeback and invalidate caches before updating microcode to avoid
604	 * internal issues depending on what the microcode is updating.
605	 */
606	native_wbinvd();
607
608	/* write microcode via MSR 0x79 */
609	wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
610
611	rev = intel_get_microcode_revision();
612
613	if (rev != mc->hdr.rev) {
614		pr_err("CPU%d update to revision 0x%x failed\n",
615		       cpu, mc->hdr.rev);
616		return UCODE_ERROR;
617	}
618
619	if (bsp && rev != prev_rev) {
620		pr_info("updated to revision 0x%x, date = %04x-%02x-%02x\n",
621			rev,
622			mc->hdr.date & 0xffff,
623			mc->hdr.date >> 24,
624			(mc->hdr.date >> 16) & 0xff);
625		prev_rev = rev;
626	}
627
628	ret = UCODE_UPDATED;
629
630out:
631	uci->cpu_sig.rev = rev;
632	c->microcode	 = rev;
633
634	/* Update boot_cpu_data's revision too, if we're on the BSP: */
635	if (bsp)
636		boot_cpu_data.microcode = rev;
637
638	return ret;
639}
640
641static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter)
642{
643	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
644	unsigned int curr_mc_size = 0, new_mc_size = 0;
645	enum ucode_state ret = UCODE_OK;
646	int new_rev = uci->cpu_sig.rev;
647	u8 *new_mc = NULL, *mc = NULL;
648	unsigned int csig, cpf;
649
650	while (iov_iter_count(iter)) {
651		struct microcode_header_intel mc_header;
652		unsigned int mc_size, data_size;
653		u8 *data;
654
655		if (!copy_from_iter_full(&mc_header, sizeof(mc_header), iter)) {
656			pr_err("error! Truncated or inaccessible header in microcode data file\n");
657			break;
658		}
659
660		mc_size = get_totalsize(&mc_header);
661		if (mc_size < sizeof(mc_header)) {
662			pr_err("error! Bad data in microcode data file (totalsize too small)\n");
663			break;
664		}
665		data_size = mc_size - sizeof(mc_header);
666		if (data_size > iov_iter_count(iter)) {
667			pr_err("error! Bad data in microcode data file (truncated file?)\n");
668			break;
669		}
670
671		/* For performance reasons, reuse mc area when possible */
672		if (!mc || mc_size > curr_mc_size) {
673			vfree(mc);
674			mc = vmalloc(mc_size);
675			if (!mc)
676				break;
677			curr_mc_size = mc_size;
678		}
679
680		memcpy(mc, &mc_header, sizeof(mc_header));
681		data = mc + sizeof(mc_header);
682		if (!copy_from_iter_full(data, data_size, iter) ||
683		    intel_microcode_sanity_check(mc, true, MC_HEADER_TYPE_MICROCODE) < 0) {
684			break;
685		}
686
687		csig = uci->cpu_sig.sig;
688		cpf = uci->cpu_sig.pf;
689		if (has_newer_microcode(mc, csig, cpf, new_rev)) {
690			vfree(new_mc);
691			new_rev = mc_header.rev;
692			new_mc  = mc;
693			new_mc_size = mc_size;
694			mc = NULL;	/* trigger new vmalloc */
695			ret = UCODE_NEW;
696		}
697	}
698
699	vfree(mc);
700
701	if (iov_iter_count(iter)) {
702		vfree(new_mc);
703		return UCODE_ERROR;
704	}
705
706	if (!new_mc)
707		return UCODE_NFOUND;
708
709	vfree(uci->mc);
710	uci->mc = (struct microcode_intel *)new_mc;
711
712	/*
713	 * If early loading microcode is supported, save this mc into
714	 * permanent memory so it will be loaded early when a CPU is hot-added
715	 * or resumes.
716	 */
717	save_mc_for_early(uci, new_mc, new_mc_size);
718
719	pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
720		 cpu, new_rev, uci->cpu_sig.rev);
721
722	return ret;
723}
724
725static bool is_blacklisted(unsigned int cpu)
726{
727	struct cpuinfo_x86 *c = &cpu_data(cpu);
728
729	/*
730	 * Late loading on model 79 with microcode revision less than 0x0b000021
731	 * and LLC size per core bigger than 2.5MB may result in a system hang.
732	 * This behavior is documented in item BDF90, #334165 (Intel Xeon
733	 * Processor E7-8800/4800 v4 Product Family).
734	 */
735	if (c->x86 == 6 &&
736	    c->x86_model == INTEL_FAM6_BROADWELL_X &&
737	    c->x86_stepping == 0x01 &&
738	    llc_size_per_core > 2621440 &&
739	    c->microcode < 0x0b000021) {
740		pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
741		pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
742		return true;
743	}
744
745	return false;
746}
747
748static enum ucode_state request_microcode_fw(int cpu, struct device *device)
749{
750	struct cpuinfo_x86 *c = &cpu_data(cpu);
751	const struct firmware *firmware;
752	struct iov_iter iter;
753	enum ucode_state ret;
754	struct kvec kvec;
755	char name[30];
756
757	if (is_blacklisted(cpu))
758		return UCODE_NFOUND;
759
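	/* Microcode files are named intel-ucode/<family>-<model>-<stepping>, all in hex. */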
760	sprintf(name, "intel-ucode/%02x-%02x-%02x",
761		c->x86, c->x86_model, c->x86_stepping);
762
763	if (request_firmware_direct(&firmware, name, device)) {
764		pr_debug("data file %s load failed\n", name);
765		return UCODE_NFOUND;
766	}
767
768	kvec.iov_base = (void *)firmware->data;
769	kvec.iov_len = firmware->size;
770	iov_iter_kvec(&iter, ITER_SOURCE, &kvec, 1, firmware->size);
771	ret = generic_load_microcode(cpu, &iter);
772
773	release_firmware(firmware);
774
775	return ret;
776}
777
778static struct microcode_ops microcode_intel_ops = {
779	.request_microcode_fw             = request_microcode_fw,
780	.collect_cpu_info                 = collect_cpu_info,
781	.apply_microcode                  = apply_microcode_intel,
782};
783
784static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
785{
786	u64 llc_size = c->x86_cache_size * 1024ULL;
787
788	do_div(llc_size, c->x86_max_cores);
789
790	return (int)llc_size;
791}
792
793struct microcode_ops * __init init_intel_microcode(void)
794{
795	struct cpuinfo_x86 *c = &boot_cpu_data;
796
797	if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
798	    cpu_has(c, X86_FEATURE_IA64)) {
799		pr_err("Intel CPU family 0x%x not supported\n", c->x86);
800		return NULL;
801	}
802
803	llc_size_per_core = calc_llc_size_per_core(c);
804
805	return &microcode_intel_ops;
806}