v4.10.11
/*
 * CPU Microcode Update Driver for Linux
 *
 * Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
 *	      2006	Shaohua Li <shaohua.li@intel.com>
 *	      2013-2016	Borislav Petkov <bp@alien8.de>
 *
 * X86 CPU microcode early update for Linux:
 *
 *	Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
 *			   H Peter Anvin <hpa@zytor.com>
 *		  (C) 2015 Borislav Petkov <bp@alien8.de>
 *
 * This driver allows upgrading microcode on x86 processors.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "microcode: " fmt

#include <linux/platform_device.h>
#include <linux/syscore_ops.h>
#include <linux/miscdevice.h>
#include <linux/capability.h>
#include <linux/firmware.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
#include <linux/fs.h>
#include <linux/mm.h>

#include <asm/microcode_intel.h>
#include <asm/cpu_device_id.h>
#include <asm/microcode_amd.h>
#include <asm/perf_event.h>
#include <asm/microcode.h>
#include <asm/processor.h>
#include <asm/cmdline.h>
#include <asm/setup.h>

#define DRIVER_VERSION	"2.2"

static struct microcode_ops	*microcode_ops;
static bool dis_ucode_ldr = true;

bool initrd_gone;

LIST_HEAD(microcode_cache);

/*
 * Synchronization.
 *
 * All non cpu-hotplug-callback call sites use:
 *
 * - microcode_mutex to synchronize with each other;
 * - get/put_online_cpus() to synchronize with
 *   the cpu-hotplug-callback call sites.
 *
 * We guarantee that only a single cpu is being
 * updated at any particular moment of time.
 */
static DEFINE_MUTEX(microcode_mutex);

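/*
 * Illustrative sketch (editor's addition, not in the original file):
 * a late-load call site such as reload_store() further down follows
 * exactly this pattern:
 *
 *	get_online_cpus();
 *	mutex_lock(&microcode_mutex);
 *	...request and apply microcode on each online CPU...
 *	mutex_unlock(&microcode_mutex);
 *	put_online_cpus();
 */
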
struct ucode_cpu_info		ucode_cpu_info[NR_CPUS];

/*
 * Operations that are run on a target cpu:
 */

struct cpu_info_ctx {
	struct cpu_signature	*cpu_sig;
	int			err;
};

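/*
 * Editor's note (hedged, not in the original file): check_loader_disabled_bsp()
 * runs very early on the boot CPU. On 32-bit it executes before paging is
 * enabled, which is why the CONFIG_X86_32 branch translates every global
 * pointer through __pa_nodebug() to its physical address. The function also
 * honors the "dis_ucode_ldr" kernel command line parameter, which turns the
 * microcode loader off entirely.
 */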
static bool __init check_loader_disabled_bsp(void)
{
	static const char *__dis_opt_str = "dis_ucode_ldr";
	u32 a, b, c, d;

#ifdef CONFIG_X86_32
	const char *cmdline = (const char *)__pa_nodebug(boot_command_line);
	const char *option  = (const char *)__pa_nodebug(__dis_opt_str);
	bool *res = (bool *)__pa_nodebug(&dis_ucode_ldr);

#else /* CONFIG_X86_64 */
	const char *cmdline = boot_command_line;
	const char *option  = __dis_opt_str;
	bool *res = &dis_ucode_ldr;
#endif

	if (!have_cpuid_p())
		return *res;

	a = 1;
	c = 0;
	native_cpuid(&a, &b, &c, &d);

	/*
	 * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
	 * completely accurate as xen pv guests don't see that CPUID bit set but
	 * that's good enough as they don't land on the BSP path anyway.
	 */
	if (c & BIT(31))
		return *res;

	if (cmdline_find_option_bool(cmdline, option) <= 0)
		*res = false;

	return *res;
}

extern struct builtin_fw __start_builtin_fw[];
extern struct builtin_fw __end_builtin_fw[];

bool get_builtin_firmware(struct cpio_data *cd, const char *name)
{
#ifdef CONFIG_FW_LOADER
	struct builtin_fw *b_fw;

	for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
		if (!strcmp(name, b_fw->name)) {
			cd->size = b_fw->size;
			cd->data = b_fw->data;
			return true;
		}
	}
#endif
	return false;
}

void __init load_ucode_bsp(void)
{
	int vendor;
	unsigned int family;

	if (check_loader_disabled_bsp())
		return;

	vendor = x86_cpuid_vendor();
	family = x86_cpuid_family();

	switch (vendor) {
	case X86_VENDOR_INTEL:
		if (family >= 6)
			load_ucode_intel_bsp();
		break;
	case X86_VENDOR_AMD:
		if (family >= 0x10)
			load_ucode_amd_bsp(family);
		break;
	default:
		break;
	}
}

static bool check_loader_disabled_ap(void)
{
#ifdef CONFIG_X86_32
	return *((bool *)__pa_nodebug(&dis_ucode_ldr));
#else
	return dis_ucode_ldr;
#endif
}

void load_ucode_ap(void)
{
	int vendor, family;

	if (check_loader_disabled_ap())
		return;

	vendor = x86_cpuid_vendor();
	family = x86_cpuid_family();

	switch (vendor) {
	case X86_VENDOR_INTEL:
		if (family >= 6)
			load_ucode_intel_ap();
		break;
	case X86_VENDOR_AMD:
		if (family >= 0x10)
			load_ucode_amd_ap(family);
		break;
	default:
		break;
	}
}

static int __init save_microcode_in_initrd(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;
	int ret = -EINVAL;

	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		if (c->x86 >= 6)
			ret = save_microcode_in_initrd_intel();
		break;
	case X86_VENDOR_AMD:
		if (c->x86 >= 0x10)
			ret = save_microcode_in_initrd_amd(c->x86);
		break;
	default:
		break;
	}

	initrd_gone = true;

	return ret;
}

struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
{
#ifdef CONFIG_BLK_DEV_INITRD
	unsigned long start = 0;
	size_t size;

#ifdef CONFIG_X86_32
	struct boot_params *params;

	if (use_pa)
		params = (struct boot_params *)__pa_nodebug(&boot_params);
	else
		params = &boot_params;

	size = params->hdr.ramdisk_size;

	/*
	 * Set start only if we have an initrd image. We cannot use initrd_start
	 * because it is not set that early yet.
	 */
	if (size)
		start = params->hdr.ramdisk_image;

# else /* CONFIG_X86_64 */
	size  = (unsigned long)boot_params.ext_ramdisk_size << 32;
	size |= boot_params.hdr.ramdisk_size;

	if (size) {
		start  = (unsigned long)boot_params.ext_ramdisk_image << 32;
		start |= boot_params.hdr.ramdisk_image;

		start += PAGE_OFFSET;
	}
# endif

	/*
	 * Fixup the start address: after reserve_initrd() runs, initrd_start
	 * has the virtual address of the beginning of the initrd. It also
	 * possibly relocates the ramdisk. In either case, initrd_start contains
	 * the updated address so use that instead.
	 *
	 * initrd_gone is for the hotplug case where we've thrown out initrd
	 * already.
	 */
	if (!use_pa) {
		if (initrd_gone)
			return (struct cpio_data){ NULL, 0, "" };
		if (initrd_start)
			start = initrd_start;
	}

	return find_cpio_data(path, (void *)start, size, NULL);
#else /* !CONFIG_BLK_DEV_INITRD */
	return (struct cpio_data){ NULL, 0, "" };
#endif
}

void reload_early_microcode(void)
{
	int vendor, family;

	vendor = x86_cpuid_vendor();
	family = x86_cpuid_family();

	switch (vendor) {
	case X86_VENDOR_INTEL:
		if (family >= 6)
			reload_ucode_intel();
		break;
	case X86_VENDOR_AMD:
		if (family >= 0x10)
			reload_ucode_amd();
		break;
	default:
		break;
	}
}

static void collect_cpu_info_local(void *arg)
{
	struct cpu_info_ctx *ctx = arg;

	ctx->err = microcode_ops->collect_cpu_info(smp_processor_id(),
						   ctx->cpu_sig);
}

static int collect_cpu_info_on_target(int cpu, struct cpu_signature *cpu_sig)
{
	struct cpu_info_ctx ctx = { .cpu_sig = cpu_sig, .err = 0 };
	int ret;

	ret = smp_call_function_single(cpu, collect_cpu_info_local, &ctx, 1);
	if (!ret)
		ret = ctx.err;

	return ret;
}

static int collect_cpu_info(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	int ret;

	memset(uci, 0, sizeof(*uci));

	ret = collect_cpu_info_on_target(cpu, &uci->cpu_sig);
	if (!ret)
		uci->valid = 1;

	return ret;
}

struct apply_microcode_ctx {
	int err;
};

static void apply_microcode_local(void *arg)
{
	struct apply_microcode_ctx *ctx = arg;

	ctx->err = microcode_ops->apply_microcode(smp_processor_id());
}

static int apply_microcode_on_target(int cpu)
{
	struct apply_microcode_ctx ctx = { .err = 0 };
	int ret;

	ret = smp_call_function_single(cpu, apply_microcode_local, &ctx, 1);
	if (!ret)
		ret = ctx.err;

	return ret;
}

#ifdef CONFIG_MICROCODE_OLD_INTERFACE
static int do_microcode_update(const void __user *buf, size_t size)
{
	int error = 0;
	int cpu;

	for_each_online_cpu(cpu) {
		struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
		enum ucode_state ustate;

		if (!uci->valid)
			continue;

		ustate = microcode_ops->request_microcode_user(cpu, buf, size);
		if (ustate == UCODE_ERROR) {
			error = -1;
			break;
		} else if (ustate == UCODE_OK)
			apply_microcode_on_target(cpu);
	}

	return error;
}

static int microcode_open(struct inode *inode, struct file *file)
{
	return capable(CAP_SYS_RAWIO) ? nonseekable_open(inode, file) : -EPERM;
}

static ssize_t microcode_write(struct file *file, const char __user *buf,
			       size_t len, loff_t *ppos)
{
	ssize_t ret = -EINVAL;

	if ((len >> PAGE_SHIFT) > totalram_pages) {
		pr_err("too much data (max %ld pages)\n", totalram_pages);
		return ret;
	}

	get_online_cpus();
	mutex_lock(&microcode_mutex);

	if (do_microcode_update(buf, len) == 0)
		ret = (ssize_t)len;

	if (ret > 0)
		perf_check_microcode();

	mutex_unlock(&microcode_mutex);
	put_online_cpus();

	return ret;
}

static const struct file_operations microcode_fops = {
	.owner			= THIS_MODULE,
	.write			= microcode_write,
	.open			= microcode_open,
	.llseek		= no_llseek,
};

static struct miscdevice microcode_dev = {
	.minor			= MICROCODE_MINOR,
	.name			= "microcode",
	.nodename		= "cpu/microcode",
	.fops			= &microcode_fops,
};

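/*
 * Editor's note (illustrative, not in the original file): with
 * CONFIG_MICROCODE_OLD_INTERFACE, userspace writes a raw microcode image
 * into the character device registered above, for example:
 *
 *	dd if=microcode.bin of=/dev/cpu/microcode bs=1M
 *
 * The write is handled by microcode_write() -> do_microcode_update() above.
 */
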
static int __init microcode_dev_init(void)
{
	int error;

	error = misc_register(&microcode_dev);
	if (error) {
		pr_err("can't misc_register on minor=%d\n", MICROCODE_MINOR);
		return error;
	}

	return 0;
}

static void __exit microcode_dev_exit(void)
{
	misc_deregister(&microcode_dev);
}
#else
#define microcode_dev_init()	0
#define microcode_dev_exit()	do { } while (0)
#endif

/* fake device for request_firmware */
static struct platform_device	*microcode_pdev;

static int reload_for_cpu(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	enum ucode_state ustate;
	int err = 0;

	if (!uci->valid)
		return err;

	ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, true);
	if (ustate == UCODE_OK)
		apply_microcode_on_target(cpu);
	else
		if (ustate == UCODE_ERROR)
			err = -EINVAL;
	return err;
}

static ssize_t reload_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t size)
{
	unsigned long val;
	int cpu;
	ssize_t ret = 0, tmp_ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret)
		return ret;

	if (val != 1)
		return size;

	get_online_cpus();
	mutex_lock(&microcode_mutex);
	for_each_online_cpu(cpu) {
		tmp_ret = reload_for_cpu(cpu);
		if (tmp_ret != 0)
			pr_warn("Error reloading microcode on CPU %d\n", cpu);

		/* save retval of the first encountered reload error */
		if (!ret)
			ret = tmp_ret;
	}
	if (!ret)
		perf_check_microcode();
	mutex_unlock(&microcode_mutex);
	put_online_cpus();

	if (!ret)
		ret = size;

	return ret;
}

static ssize_t version_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;

	return sprintf(buf, "0x%x\n", uci->cpu_sig.rev);
}

static ssize_t pf_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;

	return sprintf(buf, "0x%x\n", uci->cpu_sig.pf);
}

static DEVICE_ATTR(reload, 0200, NULL, reload_store);
static DEVICE_ATTR(version, 0400, version_show, NULL);
static DEVICE_ATTR(processor_flags, 0400, pf_show, NULL);

static struct attribute *mc_default_attrs[] = {
	&dev_attr_version.attr,
	&dev_attr_processor_flags.attr,
	NULL
};

static struct attribute_group mc_attr_group = {
	.attrs			= mc_default_attrs,
	.name			= "microcode",
};

static void microcode_fini_cpu(int cpu)
{
	if (microcode_ops->microcode_fini_cpu)
		microcode_ops->microcode_fini_cpu(cpu);
}

static enum ucode_state microcode_resume_cpu(int cpu)
{
	if (apply_microcode_on_target(cpu))
		return UCODE_ERROR;

	pr_debug("CPU%d updated upon resume\n", cpu);

	return UCODE_OK;
}

static enum ucode_state microcode_init_cpu(int cpu, bool refresh_fw)
{
	enum ucode_state ustate;
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	if (uci->valid)
		return UCODE_OK;

	if (collect_cpu_info(cpu))
		return UCODE_ERROR;

	/* --dimm. Trigger a delayed update? */
	if (system_state != SYSTEM_RUNNING)
		return UCODE_NFOUND;

	ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev,
						     refresh_fw);

	if (ustate == UCODE_OK) {
		pr_debug("CPU%d updated upon init\n", cpu);
		apply_microcode_on_target(cpu);
	}

	return ustate;
}

static enum ucode_state microcode_update_cpu(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	/* Refresh CPU microcode revision after resume. */
	collect_cpu_info(cpu);

	if (uci->valid)
		return microcode_resume_cpu(cpu);

	return microcode_init_cpu(cpu, false);
}

static int mc_device_add(struct device *dev, struct subsys_interface *sif)
{
	int err, cpu = dev->id;

	if (!cpu_online(cpu))
		return 0;

	pr_debug("CPU%d added\n", cpu);

	err = sysfs_create_group(&dev->kobj, &mc_attr_group);
	if (err)
		return err;

	if (microcode_init_cpu(cpu, true) == UCODE_ERROR)
		return -EINVAL;

	return err;
}

static void mc_device_remove(struct device *dev, struct subsys_interface *sif)
{
	int cpu = dev->id;

	if (!cpu_online(cpu))
		return;

	pr_debug("CPU%d removed\n", cpu);
	microcode_fini_cpu(cpu);
	sysfs_remove_group(&dev->kobj, &mc_attr_group);
}

static struct subsys_interface mc_cpu_interface = {
	.name			= "microcode",
	.subsys			= &cpu_subsys,
	.add_dev		= mc_device_add,
	.remove_dev		= mc_device_remove,
};

/**
 * mc_bp_resume - Update boot CPU microcode during resume.
 */
static void mc_bp_resume(void)
{
	int cpu = smp_processor_id();
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	if (uci->valid && uci->mc)
		microcode_ops->apply_microcode(cpu);
	else if (!uci->mc)
		reload_early_microcode();
}

static struct syscore_ops mc_syscore_ops = {
	.resume			= mc_bp_resume,
};

static int mc_cpu_online(unsigned int cpu)
{
	struct device *dev;

	dev = get_cpu_device(cpu);
	microcode_update_cpu(cpu);
	pr_debug("CPU%d added\n", cpu);

	if (sysfs_create_group(&dev->kobj, &mc_attr_group))
		pr_err("Failed to create group for CPU%d\n", cpu);
	return 0;
}

static int mc_cpu_down_prep(unsigned int cpu)
{
	struct device *dev;

	dev = get_cpu_device(cpu);
	/* Suspend is in progress, only remove the interface */
	sysfs_remove_group(&dev->kobj, &mc_attr_group);
	pr_debug("CPU%d removed\n", cpu);

	return 0;
}

static struct attribute *cpu_root_microcode_attrs[] = {
	&dev_attr_reload.attr,
	NULL
};

static struct attribute_group cpu_root_microcode_group = {
	.name  = "microcode",
	.attrs = cpu_root_microcode_attrs,
};

int __init microcode_init(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;
	int error;

	if (dis_ucode_ldr)
		return -EINVAL;

	if (c->x86_vendor == X86_VENDOR_INTEL)
		microcode_ops = init_intel_microcode();
	else if (c->x86_vendor == X86_VENDOR_AMD)
		microcode_ops = init_amd_microcode();
	else
		pr_err("no support for this CPU vendor\n");

	if (!microcode_ops)
		return -ENODEV;

	microcode_pdev = platform_device_register_simple("microcode", -1,
							 NULL, 0);
	if (IS_ERR(microcode_pdev))
		return PTR_ERR(microcode_pdev);

	get_online_cpus();
	mutex_lock(&microcode_mutex);

	error = subsys_interface_register(&mc_cpu_interface);
	if (!error)
		perf_check_microcode();
	mutex_unlock(&microcode_mutex);
	put_online_cpus();

	if (error)
		goto out_pdev;

	error = sysfs_create_group(&cpu_subsys.dev_root->kobj,
				   &cpu_root_microcode_group);

	if (error) {
		pr_err("Error creating microcode group!\n");
		goto out_driver;
	}

	error = microcode_dev_init();
	if (error)
		goto out_ucode_group;

	register_syscore_ops(&mc_syscore_ops);
	cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
				  mc_cpu_online, mc_cpu_down_prep);

	pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION);

	return 0;

 out_ucode_group:
	sysfs_remove_group(&cpu_subsys.dev_root->kobj,
			   &cpu_root_microcode_group);

 out_driver:
	get_online_cpus();
	mutex_lock(&microcode_mutex);

	subsys_interface_unregister(&mc_cpu_interface);

	mutex_unlock(&microcode_mutex);
	put_online_cpus();

 out_pdev:
	platform_device_unregister(microcode_pdev);
	return error;

}
fs_initcall(save_microcode_in_initrd);
late_initcall(microcode_init);
v6.8
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CPU Microcode Update Driver for Linux
 *
 * Copyright (C) 2000-2006 Tigran Aivazian <aivazian.tigran@gmail.com>
 *	      2006	Shaohua Li <shaohua.li@intel.com>
 *	      2013-2016	Borislav Petkov <bp@alien8.de>
 *
 * X86 CPU microcode early update for Linux:
 *
 *	Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
 *			   H Peter Anvin <hpa@zytor.com>
 *		  (C) 2015 Borislav Petkov <bp@alien8.de>
 *
 * This driver allows upgrading microcode on x86 processors.
 */

#define pr_fmt(fmt) "microcode: " fmt

#include <linux/platform_device.h>
#include <linux/stop_machine.h>
#include <linux/syscore_ops.h>
#include <linux/miscdevice.h>
#include <linux/capability.h>
#include <linux/firmware.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/mm.h>

#include <asm/apic.h>
#include <asm/cpu_device_id.h>
#include <asm/perf_event.h>
#include <asm/processor.h>
#include <asm/cmdline.h>
#include <asm/setup.h>

#include "internal.h"

static struct microcode_ops	*microcode_ops;
bool dis_ucode_ldr = true;

bool force_minrev = IS_ENABLED(CONFIG_MICROCODE_LATE_FORCE_MINREV);
module_param(force_minrev, bool, S_IRUSR | S_IWUSR);

/*
 * Synchronization.
 *
 * All non cpu-hotplug-callback call sites use:
 *
 * - cpus_read_lock/unlock() to synchronize with
 *   the cpu-hotplug-callback call sites.
 *
 * We guarantee that only a single cpu is being
 * updated at any particular moment of time.
 */
struct ucode_cpu_info		ucode_cpu_info[NR_CPUS];

struct cpu_info_ctx {
	struct cpu_signature	*cpu_sig;
	int			err;
};

/*
 * Those patch levels cannot be updated to newer ones and thus should be final.
 */
static u32 final_levels[] = {
	0x01000098,
	0x0100009f,
	0x010000af,
	0, /* T-101 terminator */
};

struct early_load_data early_data;

/*
 * Check the current patch level on this CPU.
 *
 * Returns:
 *  - true: if update should stop
 *  - false: otherwise
 */
static bool amd_check_current_patch_level(void)
{
	u32 lvl, dummy, i;
	u32 *levels;

	native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);

	levels = final_levels;

	for (i = 0; levels[i]; i++) {
		if (lvl == levels[i])
			return true;
	}
	return false;
}

static bool __init check_loader_disabled_bsp(void)
{
	static const char *__dis_opt_str = "dis_ucode_ldr";
	const char *cmdline = boot_command_line;
	const char *option  = __dis_opt_str;

	/*
	 * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
	 * completely accurate as xen pv guests don't see that CPUID bit set but
	 * that's good enough as they don't land on the BSP path anyway.
	 */
	if (native_cpuid_ecx(1) & BIT(31))
		return true;

	if (x86_cpuid_vendor() == X86_VENDOR_AMD) {
		if (amd_check_current_patch_level())
			return true;
	}

	if (cmdline_find_option_bool(cmdline, option) <= 0)
		dis_ucode_ldr = false;

	return dis_ucode_ldr;
}

void __init load_ucode_bsp(void)
{
	unsigned int cpuid_1_eax;
	bool intel = true;

	if (!have_cpuid_p())
		return;

	cpuid_1_eax = native_cpuid_eax(1);

	switch (x86_cpuid_vendor()) {
	case X86_VENDOR_INTEL:
		if (x86_family(cpuid_1_eax) < 6)
			return;
		break;

	case X86_VENDOR_AMD:
		if (x86_family(cpuid_1_eax) < 0x10)
			return;
		intel = false;
		break;

	default:
		return;
	}

	if (check_loader_disabled_bsp())
		return;

	if (intel)
		load_ucode_intel_bsp(&early_data);
	else
		load_ucode_amd_bsp(&early_data, cpuid_1_eax);
}

void load_ucode_ap(void)
{
	unsigned int cpuid_1_eax;

	if (dis_ucode_ldr)
		return;

	cpuid_1_eax = native_cpuid_eax(1);

	switch (x86_cpuid_vendor()) {
	case X86_VENDOR_INTEL:
		if (x86_family(cpuid_1_eax) >= 6)
			load_ucode_intel_ap();
		break;
	case X86_VENDOR_AMD:
		if (x86_family(cpuid_1_eax) >= 0x10)
			load_ucode_amd_ap(cpuid_1_eax);
		break;
	default:
		break;
	}
}

struct cpio_data __init find_microcode_in_initrd(const char *path)
{
#ifdef CONFIG_BLK_DEV_INITRD
	unsigned long start = 0;
	size_t size;

#ifdef CONFIG_X86_32
	size = boot_params.hdr.ramdisk_size;
	/* Early load on BSP has a temporary mapping. */
	if (size)
		start = initrd_start_early;

#else /* CONFIG_X86_64 */
	size  = (unsigned long)boot_params.ext_ramdisk_size << 32;
	size |= boot_params.hdr.ramdisk_size;

	if (size) {
		start  = (unsigned long)boot_params.ext_ramdisk_image << 32;
		start |= boot_params.hdr.ramdisk_image;
		start += PAGE_OFFSET;
	}
#endif

	/*
	 * Fixup the start address: after reserve_initrd() runs, initrd_start
	 * has the virtual address of the beginning of the initrd. It also
	 * possibly relocates the ramdisk. In either case, initrd_start contains
	 * the updated address so use that instead.
	 */
	if (initrd_start)
		start = initrd_start;

	return find_cpio_data(path, (void *)start, size, NULL);
#else /* !CONFIG_BLK_DEV_INITRD */
	return (struct cpio_data){ NULL, 0, "" };
#endif
}

static void reload_early_microcode(unsigned int cpu)
{
	int vendor, family;

	vendor = x86_cpuid_vendor();
	family = x86_cpuid_family();

	switch (vendor) {
	case X86_VENDOR_INTEL:
		if (family >= 6)
			reload_ucode_intel();
		break;
	case X86_VENDOR_AMD:
		if (family >= 0x10)
			reload_ucode_amd(cpu);
		break;
	default:
		break;
	}
}

/* fake device for request_firmware */
static struct platform_device	*microcode_pdev;

#ifdef CONFIG_MICROCODE_LATE_LOADING
/*
 * Late loading dance. Why the heavy-handed stomp_machine effort?
 *
 * - HT siblings must be idle and not execute other code while the other sibling
 *   is loading microcode in order to avoid any negative interactions caused by
 *   the loading.
 *
 * - In addition, microcode update on the cores must be serialized until this
 *   requirement can be relaxed in the future. Right now, this is conservative
 *   and good.
 */
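
/*
 * Editor's outline (hedged, derived from the code below): under
 * stop_machine(), every online CPU executes load_cpus_stopped(). All CPUs
 * first rendezvous in wait_for_cpus(). The first sibling of each core
 * (the "primary") then applies the update and sets its siblings' ctrl
 * state to SCTRL_APPLY or SCTRL_DONE; the secondaries spin in
 * wait_for_ctrl() until the primary has finished.
 */
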
enum sibling_ctrl {
	/* Spinwait with timeout */
	SCTRL_WAIT,
	/* Invoke the microcode_apply() callback */
	SCTRL_APPLY,
	/* Proceed without invoking the microcode_apply() callback */
	SCTRL_DONE,
};

struct microcode_ctrl {
	enum sibling_ctrl	ctrl;
	enum ucode_state	result;
	unsigned int		ctrl_cpu;
	bool			nmi_enabled;
};

DEFINE_STATIC_KEY_FALSE(microcode_nmi_handler_enable);
static DEFINE_PER_CPU(struct microcode_ctrl, ucode_ctrl);
static atomic_t late_cpus_in, offline_in_nmi;
static unsigned int loops_per_usec;
static cpumask_t cpu_offline_mask;

static noinstr bool wait_for_cpus(atomic_t *cnt)
{
	unsigned int timeout, loops;

	WARN_ON_ONCE(raw_atomic_dec_return(cnt) < 0);

	for (timeout = 0; timeout < USEC_PER_SEC; timeout++) {
		if (!raw_atomic_read(cnt))
			return true;

		for (loops = 0; loops < loops_per_usec; loops++)
			cpu_relax();

		/* If invoked directly, tickle the NMI watchdog */
		if (!microcode_ops->use_nmi && !(timeout % USEC_PER_MSEC)) {
			instrumentation_begin();
			touch_nmi_watchdog();
			instrumentation_end();
		}
	}
	/* Prevent the late comers from making progress and let them time out */
	raw_atomic_inc(cnt);
	return false;
}

static noinstr bool wait_for_ctrl(void)
{
	unsigned int timeout, loops;

	for (timeout = 0; timeout < USEC_PER_SEC; timeout++) {
		if (raw_cpu_read(ucode_ctrl.ctrl) != SCTRL_WAIT)
			return true;

		for (loops = 0; loops < loops_per_usec; loops++)
			cpu_relax();

		/* If invoked directly, tickle the NMI watchdog */
		if (!microcode_ops->use_nmi && !(timeout % USEC_PER_MSEC)) {
			instrumentation_begin();
			touch_nmi_watchdog();
			instrumentation_end();
		}
	}
	return false;
}
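
/*
 * Editor's note (hedged): both wait loops above spin in roughly 1 usec
 * steps (loops_per_usec iterations of cpu_relax() per step) for up to
 * USEC_PER_SEC steps, bounding the rendezvous wait to about one second.
 */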

/*
 * Protected against instrumentation up to the point where the primary
 * thread completed the update. See microcode_nmi_handler() for details.
 */
static noinstr bool load_secondary_wait(unsigned int ctrl_cpu)
{
	/* Initial rendezvous to ensure that all CPUs have arrived */
	if (!wait_for_cpus(&late_cpus_in)) {
		raw_cpu_write(ucode_ctrl.result, UCODE_TIMEOUT);
		return false;
	}

	/*
	 * Wait for primary threads to complete. If one of them hangs due
	 * to the update, there is no way out. This is non-recoverable
	 * because the CPU might hold locks or resources and confuse the
	 * scheduler, watchdogs etc. There is no way to safely evacuate the
	 * machine.
	 */
	if (wait_for_ctrl())
		return true;

	instrumentation_begin();
	panic("Microcode load: Primary CPU %d timed out\n", ctrl_cpu);
	instrumentation_end();
}

/*
 * Protected against instrumentation up to the point where the primary
 * thread completed the update. See microcode_nmi_handler() for details.
 */
static noinstr void load_secondary(unsigned int cpu)
{
	unsigned int ctrl_cpu = raw_cpu_read(ucode_ctrl.ctrl_cpu);
	enum ucode_state ret;

	if (!load_secondary_wait(ctrl_cpu)) {
		instrumentation_begin();
		pr_err_once("load: %d CPUs timed out\n",
			    atomic_read(&late_cpus_in) - 1);
		instrumentation_end();
		return;
	}

	/* Primary thread completed. Allow to invoke instrumentable code */
	instrumentation_begin();
	/*
	 * If the primary succeeded then invoke the apply() callback,
	 * otherwise copy the state from the primary thread.
	 */
	if (this_cpu_read(ucode_ctrl.ctrl) == SCTRL_APPLY)
		ret = microcode_ops->apply_microcode(cpu);
	else
		ret = per_cpu(ucode_ctrl.result, ctrl_cpu);

	this_cpu_write(ucode_ctrl.result, ret);
	this_cpu_write(ucode_ctrl.ctrl, SCTRL_DONE);
	instrumentation_end();
}

static void __load_primary(unsigned int cpu)
{
	struct cpumask *secondaries = topology_sibling_cpumask(cpu);
	enum sibling_ctrl ctrl;
	enum ucode_state ret;
	unsigned int sibling;

	/* Initial rendezvous to ensure that all CPUs have arrived */
	if (!wait_for_cpus(&late_cpus_in)) {
		this_cpu_write(ucode_ctrl.result, UCODE_TIMEOUT);
		pr_err_once("load: %d CPUs timed out\n", atomic_read(&late_cpus_in) - 1);
		return;
	}

	ret = microcode_ops->apply_microcode(cpu);
	this_cpu_write(ucode_ctrl.result, ret);
	this_cpu_write(ucode_ctrl.ctrl, SCTRL_DONE);

	/*
	 * If the update was successful, let the siblings run the apply()
	 * callback. If not, tell them it's done. This also covers the
	 * case where the CPU has uniform loading at package or system
	 * scope implemented but does not advertise it.
	 */
	if (ret == UCODE_UPDATED || ret == UCODE_OK)
		ctrl = SCTRL_APPLY;
	else
		ctrl = SCTRL_DONE;

	for_each_cpu(sibling, secondaries) {
		if (sibling != cpu)
			per_cpu(ucode_ctrl.ctrl, sibling) = ctrl;
	}
}

static bool kick_offline_cpus(unsigned int nr_offl)
{
	unsigned int cpu, timeout;

	for_each_cpu(cpu, &cpu_offline_mask) {
		/* Enable the rendezvous handler and send NMI */
		per_cpu(ucode_ctrl.nmi_enabled, cpu) = true;
		apic_send_nmi_to_offline_cpu(cpu);
	}

	/* Wait for them to arrive */
	for (timeout = 0; timeout < (USEC_PER_SEC / 2); timeout++) {
		if (atomic_read(&offline_in_nmi) == nr_offl)
			return true;
		udelay(1);
	}
	/* Let the others time out */
	return false;
}

static void release_offline_cpus(void)
{
	unsigned int cpu;

	for_each_cpu(cpu, &cpu_offline_mask)
		per_cpu(ucode_ctrl.ctrl, cpu) = SCTRL_DONE;
}

static void load_primary(unsigned int cpu)
{
	unsigned int nr_offl = cpumask_weight(&cpu_offline_mask);
	bool proceed = true;

	/* Kick soft-offlined SMT siblings if required */
	if (!cpu && nr_offl)
		proceed = kick_offline_cpus(nr_offl);

	/* If the soft-offlined CPUs did not respond, abort */
	if (proceed)
		__load_primary(cpu);

	/* Unconditionally release soft-offlined SMT siblings if required */
	if (!cpu && nr_offl)
		release_offline_cpus();
}

/*
 * Minimal stub rendezvous handler for soft-offlined CPUs which participate
 * in the NMI rendezvous to protect against a concurrent NMI on affected
 * CPUs.
 */
void noinstr microcode_offline_nmi_handler(void)
{
	if (!raw_cpu_read(ucode_ctrl.nmi_enabled))
		return;
	raw_cpu_write(ucode_ctrl.nmi_enabled, false);
	raw_cpu_write(ucode_ctrl.result, UCODE_OFFLINE);
	raw_atomic_inc(&offline_in_nmi);
	wait_for_ctrl();
}

static noinstr bool microcode_update_handler(void)
{
	unsigned int cpu = raw_smp_processor_id();

	if (raw_cpu_read(ucode_ctrl.ctrl_cpu) == cpu) {
		instrumentation_begin();
		load_primary(cpu);
		instrumentation_end();
	} else {
		load_secondary(cpu);
	}

	instrumentation_begin();
	touch_nmi_watchdog();
	instrumentation_end();

	return true;
}

/*
 * Protection against instrumentation is required for CPUs which are not
 * safe against an NMI which is delivered to the secondary SMT sibling
 * while the primary thread updates the microcode. Instrumentation can end
 * up in #INT3, #DB and #PF. The IRET from those exceptions reenables NMI
 * which is the opposite of what the NMI rendezvous is trying to achieve.
 *
 * The primary thread is safe versus instrumentation as the actual
 * microcode update handles this correctly. It's only the sibling code
 * path which must be NMI safe until the primary thread completed the
 * update.
 */
bool noinstr microcode_nmi_handler(void)
{
	if (!raw_cpu_read(ucode_ctrl.nmi_enabled))
		return false;

	raw_cpu_write(ucode_ctrl.nmi_enabled, false);
	return microcode_update_handler();
}

static int load_cpus_stopped(void *unused)
{
	if (microcode_ops->use_nmi) {
		/* Enable the NMI handler and raise NMI */
		this_cpu_write(ucode_ctrl.nmi_enabled, true);
		apic->send_IPI(smp_processor_id(), NMI_VECTOR);
	} else {
		/* Just invoke the handler directly */
		microcode_update_handler();
	}
	return 0;
}

static int load_late_stop_cpus(bool is_safe)
{
	unsigned int cpu, updated = 0, failed = 0, timedout = 0, siblings = 0;
	unsigned int nr_offl, offline = 0;
	int old_rev = boot_cpu_data.microcode;
	struct cpuinfo_x86 prev_info;

	if (!is_safe) {
		pr_err("Late microcode loading without minimal revision check.\n");
		pr_err("You should switch to early loading, if possible.\n");
	}

	atomic_set(&late_cpus_in, num_online_cpus());
	atomic_set(&offline_in_nmi, 0);
	loops_per_usec = loops_per_jiffy / (TICK_NSEC / 1000);

	/*
	 * Take a snapshot before the microcode update in order to compare and
	 * check whether any bits changed after an update.
	 */
	store_cpu_caps(&prev_info);

	if (microcode_ops->use_nmi)
		static_branch_enable_cpuslocked(&microcode_nmi_handler_enable);

	stop_machine_cpuslocked(load_cpus_stopped, NULL, cpu_online_mask);

	if (microcode_ops->use_nmi)
		static_branch_disable_cpuslocked(&microcode_nmi_handler_enable);

	/* Analyze the results */
	for_each_cpu_and(cpu, cpu_present_mask, &cpus_booted_once_mask) {
		switch (per_cpu(ucode_ctrl.result, cpu)) {
		case UCODE_UPDATED:	updated++; break;
		case UCODE_TIMEOUT:	timedout++; break;
		case UCODE_OK:		siblings++; break;
		case UCODE_OFFLINE:	offline++; break;
		default:		failed++; break;
		}
	}

	if (microcode_ops->finalize_late_load)
		microcode_ops->finalize_late_load(!updated);

	if (!updated) {
		/* Nothing changed. */
		if (!failed && !timedout)
			return 0;

		nr_offl = cpumask_weight(&cpu_offline_mask);
		if (offline < nr_offl) {
			pr_warn("%u offline siblings did not respond.\n",
				nr_offl - atomic_read(&offline_in_nmi));
			return -EIO;
		}
		pr_err("update failed: %u CPUs failed %u CPUs timed out\n",
		       failed, timedout);
		return -EIO;
	}

	if (!is_safe || failed || timedout)
		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);

	pr_info("load: updated on %u primary CPUs with %u siblings\n", updated, siblings);
	if (failed || timedout) {
		pr_err("load incomplete. %u CPUs timed out or failed\n",
		       num_online_cpus() - (updated + siblings));
	}
	pr_info("revision: 0x%x -> 0x%x\n", old_rev, boot_cpu_data.microcode);
	microcode_check(&prev_info);

	return updated + siblings == num_online_cpus() ? 0 : -EIO;
}

/*
 * This function does two things:
 *
 * 1) Ensure that all required CPUs which are present and have been booted
 *    once are online.
 *
 *    To pass this check, all primary threads must be online.
 *
 *    If the microcode load is not safe against NMI then all SMT threads
 *    must be online as well because they still react to NMIs when they are
 *    soft-offlined and parked in one of the play_dead() variants. So if a
 *    NMI hits while the primary thread updates the microcode the resulting
 *    behaviour is undefined. The default play_dead() implementation on
 *    modern CPUs uses MWAIT, which is also not guaranteed to be safe
 *    against a microcode update which affects MWAIT.
 *
 *    As soft-offlined CPUs still react on NMIs, the SMT sibling
 *    restriction can be lifted when the vendor driver signals to use NMI
 *    for rendezvous and the APIC provides a mechanism to send an NMI to a
 *    soft-offlined CPU. The soft-offlined CPUs are then able to
 *    participate in the rendezvous in a trivial stub handler.
 *
 * 2) Initialize the per CPU control structure and create a cpumask
 *    which contains "offline" secondary threads, so they can be handled
 *    correctly by a control CPU.
 */
static bool setup_cpus(void)
{
	struct microcode_ctrl ctrl = { .ctrl = SCTRL_WAIT, .result = -1, };
	bool allow_smt_offline;
	unsigned int cpu;

	allow_smt_offline = microcode_ops->nmi_safe ||
		(microcode_ops->use_nmi && apic->nmi_to_offline_cpu);

	cpumask_clear(&cpu_offline_mask);

	for_each_cpu_and(cpu, cpu_present_mask, &cpus_booted_once_mask) {
		/*
		 * Offline CPUs sit in one of the play_dead() functions
		 * with interrupts disabled, but they still react on NMIs
		 * and execute arbitrary code. Also MWAIT being updated
		 * while the offline CPU sits there is not necessarily safe
		 * on all CPU variants.
		 *
		 * Mark them in the offline_cpus mask which will be handled
		 * by CPU0 later in the update process.
		 *
		 * Ensure that the primary thread is online so that it is
		 * guaranteed that all cores are updated.
		 */
		if (!cpu_online(cpu)) {
			if (topology_is_primary_thread(cpu) || !allow_smt_offline) {
				pr_err("CPU %u not online, loading aborted\n", cpu);
				return false;
			}
			cpumask_set_cpu(cpu, &cpu_offline_mask);
			per_cpu(ucode_ctrl, cpu) = ctrl;
			continue;
		}

		/*
		 * Initialize the per CPU state. This is core scope for now,
		 * but prepared to take package or system scope into account.
		 */
		ctrl.ctrl_cpu = cpumask_first(topology_sibling_cpumask(cpu));
		per_cpu(ucode_ctrl, cpu) = ctrl;
	}
	return true;
}

static int load_late_locked(void)
{
	if (!setup_cpus())
		return -EBUSY;

	switch (microcode_ops->request_microcode_fw(0, &microcode_pdev->dev)) {
	case UCODE_NEW:
		return load_late_stop_cpus(false);
	case UCODE_NEW_SAFE:
		return load_late_stop_cpus(true);
	case UCODE_NFOUND:
		return -ENOENT;
	default:
		return -EBADFD;
	}
}

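/*
 * Editor's note (illustrative, not in the original file): a late load is
 * requested from userspace by writing "1" to the "reload" attribute that
 * reload_store() below backs, typically:
 *
 *	echo 1 > /sys/devices/system/cpu/microcode/reload
 *
 * after the new microcode blob has been placed under /lib/firmware.
 */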
static ssize_t reload_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t size)
{
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret || val != 1)
		return -EINVAL;

	cpus_read_lock();
	ret = load_late_locked();
	cpus_read_unlock();

	return ret ? : size;
}

static DEVICE_ATTR_WO(reload);
#endif

static ssize_t version_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;

	return sprintf(buf, "0x%x\n", uci->cpu_sig.rev);
}

static ssize_t processor_flags_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;

	return sprintf(buf, "0x%x\n", uci->cpu_sig.pf);
}

static DEVICE_ATTR_RO(version);
static DEVICE_ATTR_RO(processor_flags);

static struct attribute *mc_default_attrs[] = {
	&dev_attr_version.attr,
	&dev_attr_processor_flags.attr,
	NULL
};

static const struct attribute_group mc_attr_group = {
	.attrs			= mc_default_attrs,
	.name			= "microcode",
};

static void microcode_fini_cpu(int cpu)
{
	if (microcode_ops->microcode_fini_cpu)
		microcode_ops->microcode_fini_cpu(cpu);
}

/**
 * microcode_bsp_resume - Update boot CPU microcode during resume.
 */
void microcode_bsp_resume(void)
{
	int cpu = smp_processor_id();
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	if (uci->mc)
		microcode_ops->apply_microcode(cpu);
	else
		reload_early_microcode(cpu);
}

static struct syscore_ops mc_syscore_ops = {
	.resume	= microcode_bsp_resume,
};

static int mc_cpu_online(unsigned int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	struct device *dev = get_cpu_device(cpu);

	memset(uci, 0, sizeof(*uci));

	microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig);
	cpu_data(cpu).microcode = uci->cpu_sig.rev;
	if (!cpu)
		boot_cpu_data.microcode = uci->cpu_sig.rev;

	if (sysfs_create_group(&dev->kobj, &mc_attr_group))
		pr_err("Failed to create group for CPU%d\n", cpu);
	return 0;
}

static int mc_cpu_down_prep(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	microcode_fini_cpu(cpu);
	sysfs_remove_group(&dev->kobj, &mc_attr_group);
	return 0;
}

static struct attribute *cpu_root_microcode_attrs[] = {
#ifdef CONFIG_MICROCODE_LATE_LOADING
	&dev_attr_reload.attr,
#endif
	NULL
};

static const struct attribute_group cpu_root_microcode_group = {
	.name  = "microcode",
	.attrs = cpu_root_microcode_attrs,
};

static int __init microcode_init(void)
{
	struct device *dev_root;
	struct cpuinfo_x86 *c = &boot_cpu_data;
	int error;

	if (dis_ucode_ldr)
		return -EINVAL;

	if (c->x86_vendor == X86_VENDOR_INTEL)
		microcode_ops = init_intel_microcode();
	else if (c->x86_vendor == X86_VENDOR_AMD)
		microcode_ops = init_amd_microcode();
	else
		pr_err("no support for this CPU vendor\n");

	if (!microcode_ops)
		return -ENODEV;

	pr_info_once("Current revision: 0x%08x\n", (early_data.new_rev ?: early_data.old_rev));

	if (early_data.new_rev)
		pr_info_once("Updated early from: 0x%08x\n", early_data.old_rev);

	microcode_pdev = platform_device_register_simple("microcode", -1, NULL, 0);
	if (IS_ERR(microcode_pdev))
		return PTR_ERR(microcode_pdev);

	dev_root = bus_get_dev_root(&cpu_subsys);
	if (dev_root) {
		error = sysfs_create_group(&dev_root->kobj, &cpu_root_microcode_group);
		put_device(dev_root);
		if (error) {
			pr_err("Error creating microcode group!\n");
			goto out_pdev;
		}
	}

	register_syscore_ops(&mc_syscore_ops);
	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
			  mc_cpu_online, mc_cpu_down_prep);

	return 0;

 out_pdev:
	platform_device_unregister(microcode_pdev);
	return error;

}
late_initcall(microcode_init);