Loading...
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Intel CPU Microcode Update Driver for Linux
4 *
5 * Copyright (C) 2000-2006 Tigran Aivazian <aivazian.tigran@gmail.com>
6 * 2006 Shaohua Li <shaohua.li@intel.com>
7 *
8 * Intel CPU microcode early update for Linux
9 *
10 * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
11 * H Peter Anvin" <hpa@zytor.com>
12 */
13
14/*
15 * This needs to be before all headers so that pr_debug in printk.h doesn't turn
16 * printk calls into no_printk().
17 *
18 *#define DEBUG
19 */
20#define pr_fmt(fmt) "microcode: " fmt
21
22#include <linux/earlycpio.h>
23#include <linux/firmware.h>
24#include <linux/uaccess.h>
25#include <linux/vmalloc.h>
26#include <linux/initrd.h>
27#include <linux/kernel.h>
28#include <linux/slab.h>
29#include <linux/cpu.h>
30#include <linux/uio.h>
31#include <linux/mm.h>
32
33#include <asm/microcode_intel.h>
34#include <asm/intel-family.h>
35#include <asm/processor.h>
36#include <asm/tlbflush.h>
37#include <asm/setup.h>
38#include <asm/msr.h>
39
40static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";
41
42/* Current microcode patch used in early patching on the APs. */
43static struct microcode_intel *intel_ucode_patch;
44
45/* last level cache size per core */
46static int llc_size_per_core;
47
/*
 * Check whether a microcode (signature, processor-flags) pair matches
 * the CPU's: the signatures must be identical, and the platform-flags
 * masks either must both be zero or must share at least one set bit.
 */
static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,
					unsigned int s2, unsigned int p2)
{
	/* Equal signature, and either both pf are 0 or they intersect. */
	return (s1 == s2) && ((!p1 && !p2) || (p1 & p2));
}
61
/*
 * Check whether microcode blob @mc applies to a CPU with signature @csig
 * and platform flags @cpf: first against the main header, then against
 * each entry of the optional extended signature table.
 *
 * Returns 1 if update has been found, 0 otherwise.
 */
static int find_matching_signature(void *mc, unsigned int csig, int cpf)
{
	struct microcode_header_intel *mc_hdr = mc;
	struct extended_sigtable *ext_hdr;
	struct extended_signature *ext_sig;
	int i;

	if (cpu_signatures_match(csig, cpf, mc_hdr->sig, mc_hdr->pf))
		return 1;

	/* Look for ext. headers: present iff totalsize exceeds data+header. */
	if (get_totalsize(mc_hdr) <= get_datasize(mc_hdr) + MC_HEADER_SIZE)
		return 0;

	ext_hdr = mc + get_datasize(mc_hdr) + MC_HEADER_SIZE;
	ext_sig = (void *)ext_hdr + EXT_HEADER_SIZE;

	for (i = 0; i < ext_hdr->count; i++) {
		if (cpu_signatures_match(csig, cpf, ext_sig->sig, ext_sig->pf))
			return 1;
		ext_sig++;
	}
	return 0;
}
89
90/*
91 * Returns 1 if update has been found, 0 otherwise.
92 */
93static int has_newer_microcode(void *mc, unsigned int csig, int cpf, int new_rev)
94{
95 struct microcode_header_intel *mc_hdr = mc;
96
97 if (mc_hdr->rev <= new_rev)
98 return 0;
99
100 return find_matching_signature(mc, csig, cpf);
101}
102
/*
 * Given CPU signature and a microcode patch, this function finds if the
 * microcode patch has matching family and model with the CPU.
 *
 * NOTE(review): this matches on family/model only, which is coarser than
 * the full sig/pf comparison done by find_matching_signature().
 *
 * %true - if there's a match
 * %false - otherwise
 */
static bool microcode_matches(struct microcode_header_intel *mc_header,
			      unsigned long sig)
{
	unsigned long total_size = get_totalsize(mc_header);
	unsigned long data_size = get_datasize(mc_header);
	struct extended_sigtable *ext_header;
	unsigned int fam_ucode, model_ucode;
	struct extended_signature *ext_sig;
	unsigned int fam, model;
	int ext_sigcount, i;

	fam = x86_family(sig);
	model = x86_model(sig);

	fam_ucode = x86_family(mc_header->sig);
	model_ucode = x86_model(mc_header->sig);

	if (fam == fam_ucode && model == model_ucode)
		return true;

	/* Look for ext. headers: */
	if (total_size <= data_size + MC_HEADER_SIZE)
		return false;

	ext_header = (void *) mc_header + data_size + MC_HEADER_SIZE;
	ext_sig = (void *)ext_header + EXT_HEADER_SIZE;
	ext_sigcount = ext_header->count;

	/* Compare against every extended signature as well. */
	for (i = 0; i < ext_sigcount; i++) {
		fam_ucode = x86_family(ext_sig->sig);
		model_ucode = x86_model(ext_sig->sig);

		if (fam == fam_ucode && model == model_ucode)
			return true;

		ext_sig++;
	}
	return false;
}
149
150static struct ucode_patch *memdup_patch(void *data, unsigned int size)
151{
152 struct ucode_patch *p;
153
154 p = kzalloc(sizeof(struct ucode_patch), GFP_KERNEL);
155 if (!p)
156 return NULL;
157
158 p->data = kmemdup(data, size, GFP_KERNEL);
159 if (!p->data) {
160 kfree(p);
161 return NULL;
162 }
163
164 return p;
165}
166
167static void save_microcode_patch(void *data, unsigned int size)
168{
169 struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
170 struct ucode_patch *iter, *tmp, *p = NULL;
171 bool prev_found = false;
172 unsigned int sig, pf;
173
174 mc_hdr = (struct microcode_header_intel *)data;
175
176 list_for_each_entry_safe(iter, tmp, µcode_cache, plist) {
177 mc_saved_hdr = (struct microcode_header_intel *)iter->data;
178 sig = mc_saved_hdr->sig;
179 pf = mc_saved_hdr->pf;
180
181 if (find_matching_signature(data, sig, pf)) {
182 prev_found = true;
183
184 if (mc_hdr->rev <= mc_saved_hdr->rev)
185 continue;
186
187 p = memdup_patch(data, size);
188 if (!p)
189 pr_err("Error allocating buffer %p\n", data);
190 else {
191 list_replace(&iter->plist, &p->plist);
192 kfree(iter->data);
193 kfree(iter);
194 }
195 }
196 }
197
198 /*
199 * There weren't any previous patches found in the list cache; save the
200 * newly found.
201 */
202 if (!prev_found) {
203 p = memdup_patch(data, size);
204 if (!p)
205 pr_err("Error allocating buffer for %p\n", data);
206 else
207 list_add_tail(&p->plist, µcode_cache);
208 }
209
210 if (!p)
211 return;
212
213 /*
214 * Save for early loading. On 32-bit, that needs to be a physical
215 * address as the APs are running from physical addresses, before
216 * paging has been enabled.
217 */
218 if (IS_ENABLED(CONFIG_X86_32))
219 intel_ucode_patch = (struct microcode_intel *)__pa_nodebug(p->data);
220 else
221 intel_ucode_patch = p->data;
222}
223
/*
 * Validate microcode blob @mc: size consistency, header/loader version,
 * and the "sum of all dwords is zero" checksums over both the main image
 * and the optional extended signature table. @print_err controls whether
 * failures are logged.
 *
 * Returns 0 when the blob is well-formed, a negative errno otherwise.
 */
static int microcode_sanity_check(void *mc, int print_err)
{
	unsigned long total_size, data_size, ext_table_size;
	struct microcode_header_intel *mc_header = mc;
	struct extended_sigtable *ext_header = NULL;
	u32 sum, orig_sum, ext_sigcount = 0, i;
	struct extended_signature *ext_sig;

	total_size = get_totalsize(mc_header);
	data_size = get_datasize(mc_header);

	if (data_size + MC_HEADER_SIZE > total_size) {
		if (print_err)
			pr_err("Error: bad microcode data file size.\n");
		return -EINVAL;
	}

	if (mc_header->ldrver != 1 || mc_header->hdrver != 1) {
		if (print_err)
			pr_err("Error: invalid/unknown microcode update format.\n");
		return -EINVAL;
	}

	/* Anything beyond header+data is the extended signature table. */
	ext_table_size = total_size - (MC_HEADER_SIZE + data_size);
	if (ext_table_size) {
		u32 ext_table_sum = 0;
		u32 *ext_tablep;

		if ((ext_table_size < EXT_HEADER_SIZE)
		 || ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE)) {
			if (print_err)
				pr_err("Error: truncated extended signature table.\n");
			return -EINVAL;
		}

		ext_header = mc + MC_HEADER_SIZE + data_size;
		if (ext_table_size != exttable_size(ext_header)) {
			if (print_err)
				pr_err("Error: extended signature table size mismatch.\n");
			return -EFAULT;
		}

		ext_sigcount = ext_header->count;

		/*
		 * Check extended table checksum: the sum of all dwords that
		 * comprise a valid table must be 0.
		 */
		ext_tablep = (u32 *)ext_header;

		i = ext_table_size / sizeof(u32);
		while (i--)
			ext_table_sum += ext_tablep[i];

		if (ext_table_sum) {
			if (print_err)
				pr_warn("Bad extended signature table checksum, aborting.\n");
			return -EINVAL;
		}
	}

	/*
	 * Calculate the checksum of update data and header. The checksum of
	 * valid update data and header including the extended signature table
	 * must be 0.
	 */
	orig_sum = 0;
	i = (MC_HEADER_SIZE + data_size) / sizeof(u32);
	while (i--)
		orig_sum += ((u32 *)mc)[i];

	if (orig_sum) {
		if (print_err)
			pr_err("Bad microcode data checksum, aborting.\n");
		return -EINVAL;
	}

	if (!ext_table_size)
		return 0;

	/*
	 * Check extended signature checksum: 0 => valid.
	 */
	for (i = 0; i < ext_sigcount; i++) {
		ext_sig = (void *)ext_header + EXT_HEADER_SIZE +
			  EXT_SIGNATURE_SIZE * i;

		sum = (mc_header->sig + mc_header->pf + mc_header->cksum) -
		      (ext_sig->sig + ext_sig->pf + ext_sig->cksum);
		if (sum) {
			if (print_err)
				pr_err("Bad extended signature checksum, aborting.\n");
			return -EINVAL;
		}
	}
	return 0;
}
321
/*
 * Get microcode matching with BSP's model. Only CPUs with the same model as
 * BSP can stay in the platform.
 *
 * Walks the concatenated microcode blobs in @data (@size bytes total).
 * With @save set, every model-matching blob is stashed into the patch
 * cache via save_microcode_patch(); otherwise the newest blob applicable
 * to @uci's CPU is tracked and returned.
 *
 * Returns the selected patch, or NULL when @save was set, nothing
 * matched, or the container was malformed/truncated.
 */
static struct microcode_intel *
scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save)
{
	struct microcode_header_intel *mc_header;
	struct microcode_intel *patch = NULL;
	unsigned int mc_size;

	while (size) {
		if (size < sizeof(struct microcode_header_intel))
			break;

		mc_header = (struct microcode_header_intel *)data;

		mc_size = get_totalsize(mc_header);
		if (!mc_size ||
		    mc_size > size ||
		    microcode_sanity_check(data, 0) < 0)
			break;

		size -= mc_size;

		if (!microcode_matches(mc_header, uci->cpu_sig.sig)) {
			data += mc_size;
			continue;
		}

		/* Save mode: cache the blob; no best-patch tracking needed. */
		if (save) {
			save_microcode_patch(data, mc_size);
			goto next;
		}


		if (!patch) {
			/* First candidate: must beat the CPU's revision. */
			if (!has_newer_microcode(data,
						 uci->cpu_sig.sig,
						 uci->cpu_sig.pf,
						 uci->cpu_sig.rev))
				goto next;

		} else {
			/* Must beat the best candidate found so far. */
			struct microcode_header_intel *phdr = &patch->hdr;

			if (!has_newer_microcode(data,
						 phdr->sig,
						 phdr->pf,
						 phdr->rev))
				goto next;
		}

		/* We have a newer patch, save it. */
		patch = data;

next:
		data += mc_size;
	}

	/* Leftover bytes mean a truncated or corrupt container. */
	if (size)
		return NULL;

	return patch;
}
387
/*
 * Collect this CPU's signature, platform flags and microcode revision
 * into @uci using only "native" accessors (no paravirt indirection), so
 * this is safe to call very early in boot.
 *
 * Always returns 0.
 */
static int collect_cpu_info_early(struct ucode_cpu_info *uci)
{
	unsigned int val[2];
	unsigned int family, model;
	struct cpu_signature csig = { 0 };
	unsigned int eax, ebx, ecx, edx;

	memset(uci, 0, sizeof(*uci));

	/* CPUID leaf 1: EAX returns the CPU signature. */
	eax = 0x00000001;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	csig.sig = eax;

	family = x86_family(eax);
	model = x86_model(eax);

	if ((model >= 5) || (family > 6)) {
		/* get processor flags from MSR 0x17 */
		native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
		/* Platform ID is bits 52:50, i.e. bits 20:18 of the high word. */
		csig.pf = 1 << ((val[1] >> 18) & 7);
	}

	csig.rev = intel_get_microcode_revision();

	uci->cpu_sig = csig;
	uci->valid = 1;

	return 0;
}
418
/*
 * Debug helper (compiled in only with DEBUG): dump the current CPU's
 * signature plus sig/pf/rev/date of every cached patch, including any
 * extended signatures it carries.
 *
 * Fix: restore "&microcode_cache" where HTML-entity mangling had turned
 * "&micro" into "µ" ("µcode_cache").
 */
static void show_saved_mc(void)
{
#ifdef DEBUG
	int i = 0, j;
	unsigned int sig, pf, rev, total_size, data_size, date;
	struct ucode_cpu_info uci;
	struct ucode_patch *p;

	if (list_empty(&microcode_cache)) {
		pr_debug("no microcode data saved.\n");
		return;
	}

	collect_cpu_info_early(&uci);

	sig = uci.cpu_sig.sig;
	pf = uci.cpu_sig.pf;
	rev = uci.cpu_sig.rev;
	pr_debug("CPU: sig=0x%x, pf=0x%x, rev=0x%x\n", sig, pf, rev);

	list_for_each_entry(p, &microcode_cache, plist) {
		struct microcode_header_intel *mc_saved_header;
		struct extended_sigtable *ext_header;
		struct extended_signature *ext_sig;
		int ext_sigcount;

		mc_saved_header = (struct microcode_header_intel *)p->data;

		sig = mc_saved_header->sig;
		pf = mc_saved_header->pf;
		rev = mc_saved_header->rev;
		date = mc_saved_header->date;

		total_size = get_totalsize(mc_saved_header);
		data_size = get_datasize(mc_saved_header);

		/* date field: month in bits 31-24, day in 23-16, year in 15-0. */
		pr_debug("mc_saved[%d]: sig=0x%x, pf=0x%x, rev=0x%x, total size=0x%x, date = %04x-%02x-%02x\n",
			 i++, sig, pf, rev, total_size,
			 date & 0xffff,
			 date >> 24,
			 (date >> 16) & 0xff);

		/* Look for ext. headers: */
		if (total_size <= data_size + MC_HEADER_SIZE)
			continue;

		ext_header = (void *)mc_saved_header + data_size + MC_HEADER_SIZE;
		ext_sigcount = ext_header->count;
		ext_sig = (void *)ext_header + EXT_HEADER_SIZE;

		for (j = 0; j < ext_sigcount; j++) {
			sig = ext_sig->sig;
			pf = ext_sig->pf;

			pr_debug("\tExtended[%d]: sig=0x%x, pf=0x%x\n",
				 j, sig, pf);

			ext_sig++;
		}
	}
#endif
}
481
482/*
483 * Save this microcode patch. It will be loaded early when a CPU is
484 * hot-added or resumes.
485 */
486static void save_mc_for_early(u8 *mc, unsigned int size)
487{
488 /* Synchronization during CPU hotplug. */
489 static DEFINE_MUTEX(x86_cpu_microcode_mutex);
490
491 mutex_lock(&x86_cpu_microcode_mutex);
492
493 save_microcode_patch(mc, size);
494 show_saved_mc();
495
496 mutex_unlock(&x86_cpu_microcode_mutex);
497}
498
/*
 * Try to fetch the builtin firmware blob "intel-ucode/ff-mm-ss" matching
 * this CPU's family/model/stepping. 64-bit only; returns true and fills
 * @cp when the blob exists.
 */
static bool load_builtin_intel_microcode(struct cpio_data *cp)
{
	unsigned int eax = 1, ebx, ecx = 0, edx;
	char name[30];

	if (IS_ENABLED(CONFIG_X86_32))
		return false;

	native_cpuid(&eax, &ebx, &ecx, &edx);

	/* 12-char prefix + three small hex fields fits well within name[30]. */
	sprintf(name, "intel-ucode/%02x-%02x-%02x",
		x86_family(eax), x86_model(eax), x86_stepping(eax));

	return get_builtin_firmware(cp, name);
}
514
/*
 * Print ucode update info.
 *
 * @date is the microcode header date field, decoded below as:
 * month in bits 31-24, day in bits 23-16, year in bits 15-0.
 */
static void
print_ucode_info(struct ucode_cpu_info *uci, unsigned int date)
{
	pr_info_once("microcode updated early to revision 0x%x, date = %04x-%02x-%02x\n",
		     uci->cpu_sig.rev,
		     date & 0xffff,
		     date >> 24,
		     (date >> 16) & 0xff);
}
527
528#ifdef CONFIG_X86_32
529
530static int delay_ucode_info;
531static int current_mc_date;
532
/*
 * Print early updated ucode info after printk works. This is delayed info dump.
 */
void show_ucode_info_early(void)
{
	struct ucode_cpu_info uci;

	/* Only if print_ucode() flagged a pending early-update message. */
	if (delay_ucode_info) {
		collect_cpu_info_early(&uci);
		print_ucode_info(&uci, current_mc_date);
		delay_ucode_info = 0;
	}
}
546
547/*
548 * At this point, we can not call printk() yet. Delay printing microcode info in
549 * show_ucode_info_early() until printk() works.
550 */
551static void print_ucode(struct ucode_cpu_info *uci)
552{
553 struct microcode_intel *mc;
554 int *delay_ucode_info_p;
555 int *current_mc_date_p;
556
557 mc = uci->mc;
558 if (!mc)
559 return;
560
561 delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
562 current_mc_date_p = (int *)__pa_nodebug(¤t_mc_date);
563
564 *delay_ucode_info_p = 1;
565 *current_mc_date_p = mc->hdr.date;
566}
567#else
568
/* 64-bit variant: printk() works during early loading, so print directly. */
static inline void print_ucode(struct ucode_cpu_info *uci)
{
	struct microcode_intel *mc;

	mc = uci->mc;
	if (!mc)
		return;

	print_ucode_info(uci, mc->hdr.date);
}
579#endif
580
/*
 * Apply the microcode held in @uci->mc to this CPU by writing
 * MSR_IA32_UCODE_WRITE, then verify that the revision took effect.
 *
 * @early: printk() is not up yet; report via print_ucode() (which defers
 *	   on 32-bit) instead of print_ucode_info().
 *
 * Returns 0 on success or when there is nothing to apply, UCODE_OK when
 * the CPU already runs this or a newer revision, -1 if the update did
 * not take effect. (NOTE(review): UCODE_OK and 0 are mixed here -
 * presumably callers treat any non-negative/zero result as success.)
 */
static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
{
	struct microcode_intel *mc;
	u32 rev;

	mc = uci->mc;
	if (!mc)
		return 0;

	/*
	 * Save us the MSR write below - which is a particular expensive
	 * operation - when the other hyperthread has updated the microcode
	 * already.
	 */
	rev = intel_get_microcode_revision();
	if (rev >= mc->hdr.rev) {
		uci->cpu_sig.rev = rev;
		return UCODE_OK;
	}

	/*
	 * Writeback and invalidate caches before updating microcode to avoid
	 * internal issues depending on what the microcode is updating.
	 */
	native_wbinvd();

	/* write microcode via MSR 0x79 */
	native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);

	/* Re-read the revision to confirm the update actually applied. */
	rev = intel_get_microcode_revision();
	if (rev != mc->hdr.rev)
		return -1;

	uci->cpu_sig.rev = rev;

	if (early)
		print_ucode(uci);
	else
		print_ucode_info(uci, mc->hdr.date);

	return 0;
}
623
/*
 * Called when the initrd is about to be jettisoned: scan builtin/initrd
 * microcode one last time and save matching patches into kernel-owned
 * memory, so hotplug/resume no longer depends on initrd pages.
 *
 * Always returns 0.
 */
int __init save_microcode_in_initrd_intel(void)
{
	struct ucode_cpu_info uci;
	struct cpio_data cp;

	/*
	 * initrd is going away, clear patch ptr. We will scan the microcode one
	 * last time before jettisoning and save a patch, if found. Then we will
	 * update that pointer too, with a stable patch address to use when
	 * resuming the cores.
	 */
	intel_ucode_patch = NULL;

	if (!load_builtin_intel_microcode(&cp))
		cp = find_microcode_in_initrd(ucode_path, false);

	if (!(cp.data && cp.size))
		return 0;

	collect_cpu_info_early(&uci);

	/* save=true: cache matching patches (also updates intel_ucode_patch). */
	scan_microcode(cp.data, cp.size, &uci, true);

	show_saved_mc();

	return 0;
}
651
/*
 * Locate microcode for this CPU from builtin firmware or the initrd and
 * return the newest applicable patch (also fills @uci with the CPU's
 * signature info).
 */
static struct microcode_intel *__load_ucode_intel(struct ucode_cpu_info *uci)
{
	static const char *path;
	struct cpio_data cp;
	bool use_pa;

	if (IS_ENABLED(CONFIG_X86_32)) {
		/* Pre-paging on 32-bit: reference the path string physically. */
		path = (const char *)__pa_nodebug(ucode_path);
		use_pa = true;
	} else {
		path = ucode_path;
		use_pa = false;
	}

	/* try built-in microcode first */
	if (!load_builtin_intel_microcode(&cp))
		cp = find_microcode_in_initrd(path, use_pa);

	if (!(cp.data && cp.size))
		return NULL;

	collect_cpu_info_early(uci);

	/* save=false: just return the best patch, don't cache it. */
	return scan_microcode(cp.data, cp.size, uci, false);
}
680
/*
 * Find and apply early microcode on the boot CPU.
 */
void __init load_ucode_intel_bsp(void)
{
	struct microcode_intel *patch;
	struct ucode_cpu_info uci;

	patch = __load_ucode_intel(&uci);
	if (!patch)
		return;

	uci.mc = patch;

	apply_microcode_early(&uci, true);
}
694
/*
 * Early microcode loading on an AP: reuse the patch pointer the BSP
 * saved, rescanning builtin/initrd microcode if it is empty. On 32-bit
 * this runs before paging, so the pointer must be accessed through its
 * physical address.
 */
void load_ucode_intel_ap(void)
{
	struct microcode_intel *patch, **iup;
	struct ucode_cpu_info uci;

	if (IS_ENABLED(CONFIG_X86_32))
		iup = (struct microcode_intel **) __pa_nodebug(&intel_ucode_patch);
	else
		iup = &intel_ucode_patch;

reget:
	if (!*iup) {
		patch = __load_ucode_intel(&uci);
		if (!patch)
			return;

		*iup = patch;
	}

	uci.mc = *iup;

	if (apply_microcode_early(&uci, true)) {
		/* Mixed-silicon system? Try to refetch the proper patch: */
		*iup = NULL;

		goto reget;
	}
}
723
724static struct microcode_intel *find_patch(struct ucode_cpu_info *uci)
725{
726 struct microcode_header_intel *phdr;
727 struct ucode_patch *iter, *tmp;
728
729 list_for_each_entry_safe(iter, tmp, µcode_cache, plist) {
730
731 phdr = (struct microcode_header_intel *)iter->data;
732
733 if (phdr->rev <= uci->cpu_sig.rev)
734 continue;
735
736 if (!find_matching_signature(phdr,
737 uci->cpu_sig.sig,
738 uci->cpu_sig.pf))
739 continue;
740
741 return iter->data;
742 }
743 return NULL;
744}
745
/*
 * Re-apply the best cached microcode patch on this CPU (e.g. after
 * resume). No-op when the cache holds nothing newer than what the CPU
 * already runs.
 */
void reload_ucode_intel(void)
{
	struct microcode_intel *p;
	struct ucode_cpu_info uci;

	collect_cpu_info_early(&uci);

	p = find_patch(&uci);
	if (!p)
		return;

	uci.mc = p;

	apply_microcode_early(&uci, false);
}
761
/*
 * ->collect_cpu_info() callback: fill @csig with CPU @cpu_num's
 * signature, platform flags (from MSR_IA32_PLATFORM_ID) and current
 * microcode revision. Logs whenever the collected triple changes from
 * the previously seen one. Always returns 0.
 */
static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
{
	static struct cpu_signature prev;
	struct cpuinfo_x86 *c = &cpu_data(cpu_num);
	unsigned int val[2];

	memset(csig, 0, sizeof(*csig));

	csig->sig = cpuid_eax(0x00000001);

	/*
	 * NOTE(review): the model check applies regardless of family here -
	 * presumably fine since init_intel_microcode() rejects family < 6.
	 */
	if ((c->x86_model >= 5) || (c->x86 > 6)) {
		/* get processor flags from MSR 0x17 */
		rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
		csig->pf = 1 << ((val[1] >> 18) & 7);
	}

	csig->rev = c->microcode;

	/* No extra locking on prev, races are harmless. */
	if (csig->sig != prev.sig || csig->pf != prev.pf || csig->rev != prev.rev) {
		pr_info("sig=0x%x, pf=0x%x, revision=0x%x\n",
			csig->sig, csig->pf, csig->rev);
		prev = *csig;
	}

	return 0;
}
789
/*
 * ->apply_microcode() callback: apply the best cached patch (falling
 * back to the per-CPU loaded one) to @cpu via MSR_IA32_UCODE_WRITE, then
 * update the revision bookkeeping. Must execute on @cpu itself.
 */
static enum ucode_state apply_microcode_intel(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct microcode_intel *mc;
	enum ucode_state ret;
	static int prev_rev;
	u32 rev;

	/* We should bind the task to the CPU */
	if (WARN_ON(raw_smp_processor_id() != cpu))
		return UCODE_ERROR;

	/* Look for a newer patch in our cache: */
	mc = find_patch(uci);
	if (!mc) {
		mc = uci->mc;
		if (!mc)
			return UCODE_NFOUND;
	}

	/*
	 * Save us the MSR write below - which is a particular expensive
	 * operation - when the other hyperthread has updated the microcode
	 * already.
	 */
	rev = intel_get_microcode_revision();
	if (rev >= mc->hdr.rev) {
		ret = UCODE_OK;
		goto out;
	}

	/*
	 * Writeback and invalidate caches before updating microcode to avoid
	 * internal issues depending on what the microcode is updating.
	 */
	native_wbinvd();

	/* write microcode via MSR 0x79 */
	wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);

	/* Re-read the revision to confirm the update actually applied. */
	rev = intel_get_microcode_revision();

	if (rev != mc->hdr.rev) {
		pr_err("CPU%d update to revision 0x%x failed\n",
		       cpu, mc->hdr.rev);
		return UCODE_ERROR;
	}

	/* Log only once per distinct revision. */
	if (rev != prev_rev) {
		pr_info("updated to revision 0x%x, date = %04x-%02x-%02x\n",
			rev,
			mc->hdr.date & 0xffff,
			mc->hdr.date >> 24,
			(mc->hdr.date >> 16) & 0xff);
		prev_rev = rev;
	}

	ret = UCODE_UPDATED;

out:
	uci->cpu_sig.rev = rev;
	c->microcode = rev;

	/* Update boot_cpu_data's revision too, if we're on the BSP: */
	if (c->cpu_index == boot_cpu_data.cpu_index)
		boot_cpu_data.microcode = rev;

	return ret;
}
860
/*
 * Parse the stream of concatenated microcode blobs in @iter, keep the
 * newest blob applicable to @cpu, install it into the per-CPU area and
 * save it for early loading on hotplug/resume.
 *
 * Returns UCODE_NEW when a newer patch was found and installed,
 * UCODE_NFOUND when nothing applicable was found, UCODE_ERROR on a
 * truncated/garbled stream.
 */
static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	unsigned int curr_mc_size = 0, new_mc_size = 0;
	enum ucode_state ret = UCODE_OK;
	int new_rev = uci->cpu_sig.rev;
	u8 *new_mc = NULL, *mc = NULL;
	unsigned int csig, cpf;

	while (iov_iter_count(iter)) {
		struct microcode_header_intel mc_header;
		unsigned int mc_size, data_size;
		u8 *data;

		if (!copy_from_iter_full(&mc_header, sizeof(mc_header), iter)) {
			pr_err("error! Truncated or inaccessible header in microcode data file\n");
			break;
		}

		mc_size = get_totalsize(&mc_header);
		if (mc_size < sizeof(mc_header)) {
			pr_err("error! Bad data in microcode data file (totalsize too small)\n");
			break;
		}
		data_size = mc_size - sizeof(mc_header);
		if (data_size > iov_iter_count(iter)) {
			pr_err("error! Bad data in microcode data file (truncated file?)\n");
			break;
		}

		/* For performance reasons, reuse mc area when possible */
		if (!mc || mc_size > curr_mc_size) {
			vfree(mc);
			mc = vmalloc(mc_size);
			if (!mc)
				break;
			curr_mc_size = mc_size;
		}

		/* Reassemble header + payload, then sanity-check the whole blob. */
		memcpy(mc, &mc_header, sizeof(mc_header));
		data = mc + sizeof(mc_header);
		if (!copy_from_iter_full(data, data_size, iter) ||
		    microcode_sanity_check(mc, 1) < 0) {
			break;
		}

		csig = uci->cpu_sig.sig;
		cpf = uci->cpu_sig.pf;
		if (has_newer_microcode(mc, csig, cpf, new_rev)) {
			/* New best candidate - drop the previous one. */
			vfree(new_mc);
			new_rev = mc_header.rev;
			new_mc = mc;
			new_mc_size = mc_size;
			mc = NULL;	/* trigger new vmalloc */
			ret = UCODE_NEW;
		}
	}

	vfree(mc);

	/* Leftover input means we broke out of the loop on an error. */
	if (iov_iter_count(iter)) {
		vfree(new_mc);
		return UCODE_ERROR;
	}

	if (!new_mc)
		return UCODE_NFOUND;

	vfree(uci->mc);
	uci->mc = (struct microcode_intel *)new_mc;

	/*
	 * If early loading microcode is supported, save this mc into
	 * permanent memory. So it will be loaded early when a CPU is hot added
	 * or resumes.
	 */
	save_mc_for_early(new_mc, new_mc_size);

	pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
		 cpu, new_rev, uci->cpu_sig.rev);

	return ret;
}
944
945static bool is_blacklisted(unsigned int cpu)
946{
947 struct cpuinfo_x86 *c = &cpu_data(cpu);
948
949 /*
950 * Late loading on model 79 with microcode revision less than 0x0b000021
951 * and LLC size per core bigger than 2.5MB may result in a system hang.
952 * This behavior is documented in item BDF90, #334165 (Intel Xeon
953 * Processor E7-8800/4800 v4 Product Family).
954 */
955 if (c->x86 == 6 &&
956 c->x86_model == INTEL_FAM6_BROADWELL_X &&
957 c->x86_stepping == 0x01 &&
958 llc_size_per_core > 2621440 &&
959 c->microcode < 0x0b000021) {
960 pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
961 pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
962 return true;
963 }
964
965 return false;
966}
967
/*
 * ->request_microcode_fw() callback: fetch the per-model firmware blob
 * "intel-ucode/ff-mm-ss" via request_firmware_direct() and feed it to
 * generic_load_microcode() through a kvec iterator.
 */
static enum ucode_state request_microcode_fw(int cpu, struct device *device,
					     bool refresh_fw)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	const struct firmware *firmware;
	struct iov_iter iter;
	enum ucode_state ret;
	struct kvec kvec;
	char name[30];

	/* Refuse late loading on erratum-affected parts (BDF90). */
	if (is_blacklisted(cpu))
		return UCODE_NFOUND;

	sprintf(name, "intel-ucode/%02x-%02x-%02x",
		c->x86, c->x86_model, c->x86_stepping);

	if (request_firmware_direct(&firmware, name, device)) {
		pr_debug("data file %s load failed\n", name);
		return UCODE_NFOUND;
	}

	kvec.iov_base = (void *)firmware->data;
	kvec.iov_len = firmware->size;
	iov_iter_kvec(&iter, WRITE, &kvec, 1, firmware->size);
	ret = generic_load_microcode(cpu, &iter);

	release_firmware(firmware);

	return ret;
}
998
/*
 * ->request_microcode_user() callback: wrap the user-space buffer in an
 * iovec iterator and feed it to generic_load_microcode().
 */
static enum ucode_state
request_microcode_user(int cpu, const void __user *buf, size_t size)
{
	struct iov_iter iter;
	struct iovec iov;

	/* Refuse late loading on erratum-affected parts (BDF90). */
	if (is_blacklisted(cpu))
		return UCODE_NFOUND;

	iov.iov_base = (void __user *)buf;
	iov.iov_len = size;
	iov_iter_init(&iter, WRITE, &iov, 1, size);

	return generic_load_microcode(cpu, &iter);
}
1014
/* Driver callbacks registered with the generic microcode loader core. */
static struct microcode_ops microcode_intel_ops = {
	.request_microcode_user = request_microcode_user,
	.request_microcode_fw = request_microcode_fw,
	.collect_cpu_info = collect_cpu_info,
	.apply_microcode = apply_microcode_intel,
};
1021
1022static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
1023{
1024 u64 llc_size = c->x86_cache_size * 1024ULL;
1025
1026 do_div(llc_size, c->x86_max_cores);
1027
1028 return (int)llc_size;
1029}
1030
1031struct microcode_ops * __init init_intel_microcode(void)
1032{
1033 struct cpuinfo_x86 *c = &boot_cpu_data;
1034
1035 if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
1036 cpu_has(c, X86_FEATURE_IA64)) {
1037 pr_err("Intel CPU family 0x%x not supported\n", c->x86);
1038 return NULL;
1039 }
1040
1041 llc_size_per_core = calc_llc_size_per_core(c);
1042
1043 return µcode_intel_ops;
1044}
1/*
2 * Intel CPU Microcode Update Driver for Linux
3 *
4 * Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
5 * 2006 Shaohua Li <shaohua.li@intel.com>
6 *
7 * Intel CPU microcode early update for Linux
8 *
9 * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
10 * H Peter Anvin" <hpa@zytor.com>
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 */
17
18/*
19 * This needs to be before all headers so that pr_debug in printk.h doesn't turn
20 * printk calls into no_printk().
21 *
22 *#define DEBUG
23 */
24#define pr_fmt(fmt) "microcode: " fmt
25
26#include <linux/earlycpio.h>
27#include <linux/firmware.h>
28#include <linux/uaccess.h>
29#include <linux/vmalloc.h>
30#include <linux/initrd.h>
31#include <linux/kernel.h>
32#include <linux/slab.h>
33#include <linux/cpu.h>
34#include <linux/mm.h>
35
36#include <asm/microcode_intel.h>
37#include <asm/processor.h>
38#include <asm/tlbflush.h>
39#include <asm/setup.h>
40#include <asm/msr.h>
41
42/*
43 * Temporary microcode blobs pointers storage. We note here the pointers to
44 * microcode blobs we've got from whatever storage (detached initrd, builtin).
45 * Later on, we put those into final storage mc_saved_data.mc_saved.
46 */
47static unsigned long mc_tmp_ptrs[MAX_UCODE_COUNT];
48
49static struct mc_saved_data {
50 unsigned int num_saved;
51 struct microcode_intel **mc_saved;
52} mc_saved_data;
53
/*
 * Pick the newest patch applicable to @uci's CPU from the @num_saved
 * blobs in @saved and stash it in @uci->mc.
 *
 * Returns UCODE_OK on success, UCODE_NFOUND when nothing newer matches.
 */
static enum ucode_state
load_microcode_early(struct microcode_intel **saved,
		     unsigned int num_saved, struct ucode_cpu_info *uci)
{
	struct microcode_intel *ucode_ptr, *new_mc = NULL;
	struct microcode_header_intel *mc_hdr;
	int new_rev, ret, i;

	new_rev = uci->cpu_sig.rev;

	for (i = 0; i < num_saved; i++) {
		ucode_ptr = saved[i];
		mc_hdr = (struct microcode_header_intel *)ucode_ptr;

		/* Track the highest revision seen so far in new_rev. */
		ret = has_newer_microcode(ucode_ptr,
					  uci->cpu_sig.sig,
					  uci->cpu_sig.pf,
					  new_rev);
		if (!ret)
			continue;

		new_rev = mc_hdr->rev;
		new_mc = ucode_ptr;
	}

	if (!new_mc)
		return UCODE_NFOUND;

	uci->mc = (struct microcode_intel *)new_mc;
	return UCODE_OK;
}
85
/*
 * Rebase the @num_saved stored microcode addresses in @mc_ptrs by @off
 * and write the resulting pointers into @mc_saved.
 */
static inline void
copy_ptrs(struct microcode_intel **mc_saved, unsigned long *mc_ptrs,
	  unsigned long off, int num_saved)
{
	int idx = 0;

	while (idx < num_saved) {
		mc_saved[idx] = (struct microcode_intel *)(mc_ptrs[idx] + off);
		idx++;
	}
}
95
96#ifdef CONFIG_X86_32
/*
 * 32-bit early boot: convert each saved virtual patch pointer in @mcs
 * into a physical address in @mc_saved_tmp, dereferencing everything
 * through physical addresses since paging may not be enabled yet.
 */
static void
microcode_phys(struct microcode_intel **mc_saved_tmp, struct mc_saved_data *mcs)
{
	int i;
	struct microcode_intel ***mc_saved;

	/* NOTE(review): mc_saved is assigned but not used below - confirm. */
	mc_saved = (struct microcode_intel ***)__pa_nodebug(&mcs->mc_saved);

	for (i = 0; i < mcs->num_saved; i++) {
		struct microcode_intel *p;

		/* Read the stored virtual pointer via PA, then convert it too. */
		p = *(struct microcode_intel **)__pa_nodebug(mcs->mc_saved + i);
		mc_saved_tmp[i] = (struct microcode_intel *)__pa_nodebug(p);
	}
}
112#endif
113
/*
 * Load the best cached patch into @uci. Before save_microcode() has
 * populated @mcs->mc_saved, the temporary pointers in @mc_ptrs (rebased
 * by @offset) are used; afterwards the final pointers are used, on
 * 32-bit converted to physical addresses first.
 */
static enum ucode_state
load_microcode(struct mc_saved_data *mcs, unsigned long *mc_ptrs,
	       unsigned long offset, struct ucode_cpu_info *uci)
{
	struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
	unsigned int count = mcs->num_saved;

	if (!mcs->mc_saved) {
		copy_ptrs(mc_saved_tmp, mc_ptrs, offset, count);

		return load_microcode_early(mc_saved_tmp, count, uci);
	} else {
#ifdef CONFIG_X86_32
		microcode_phys(mc_saved_tmp, mcs);
		return load_microcode_early(mc_saved_tmp, count, uci);
#else
		return load_microcode_early(mcs->mc_saved, count, uci);
#endif
	}
}
134
/*
 * Given CPU signature and a microcode patch, this function finds if the
 * microcode patch has matching family and model with the CPU.
 *
 * Returns UCODE_OK on a family/model match (main or extended signature),
 * UCODE_NFOUND otherwise.
 */
static enum ucode_state
matching_model_microcode(struct microcode_header_intel *mc_header,
			 unsigned long sig)
{
	unsigned int fam, model;
	unsigned int fam_ucode, model_ucode;
	struct extended_sigtable *ext_header;
	unsigned long total_size = get_totalsize(mc_header);
	unsigned long data_size = get_datasize(mc_header);
	int ext_sigcount, i;
	struct extended_signature *ext_sig;

	fam = x86_family(sig);
	model = x86_model(sig);

	fam_ucode = x86_family(mc_header->sig);
	model_ucode = x86_model(mc_header->sig);

	if (fam == fam_ucode && model == model_ucode)
		return UCODE_OK;

	/* Look for ext. headers: */
	if (total_size <= data_size + MC_HEADER_SIZE)
		return UCODE_NFOUND;

	ext_header = (void *) mc_header + data_size + MC_HEADER_SIZE;
	ext_sig = (void *)ext_header + EXT_HEADER_SIZE;
	ext_sigcount = ext_header->count;

	/* Compare against every extended signature as well. */
	for (i = 0; i < ext_sigcount; i++) {
		fam_ucode = x86_family(ext_sig->sig);
		model_ucode = x86_model(ext_sig->sig);

		if (fam == fam_ucode && model == model_ucode)
			return UCODE_OK;

		ext_sig++;
	}
	return UCODE_NFOUND;
}
179
/*
 * Deep-copy @num_saved patch pointers from @mc_saved_src into freshly
 * allocated kernel memory and point @mcs at the new array.
 *
 * Returns 0 on success, -EINVAL or -ENOMEM on failure; partial
 * allocations are freed on the error path.
 */
static int
save_microcode(struct mc_saved_data *mcs,
	       struct microcode_intel **mc_saved_src,
	       unsigned int num_saved)
{
	int i, j;
	struct microcode_intel **saved_ptr;
	int ret;

	if (!num_saved)
		return -EINVAL;

	/*
	 * Copy new microcode data.
	 */
	saved_ptr = kcalloc(num_saved, sizeof(struct microcode_intel *), GFP_KERNEL);
	if (!saved_ptr)
		return -ENOMEM;

	for (i = 0; i < num_saved; i++) {
		struct microcode_header_intel *mc_hdr;
		struct microcode_intel *mc;
		unsigned long size;

		if (!mc_saved_src[i]) {
			ret = -EINVAL;
			goto err;
		}

		mc = mc_saved_src[i];
		mc_hdr = &mc->hdr;
		size = get_totalsize(mc_hdr);

		saved_ptr[i] = kmemdup(mc, size, GFP_KERNEL);
		if (!saved_ptr[i]) {
			ret = -ENOMEM;
			goto err;
		}
	}

	/*
	 * Point to newly saved microcode.
	 */
	mcs->mc_saved = saved_ptr;
	mcs->num_saved = num_saved;

	return 0;

err:
	/* kcalloc zeroed the array and kfree(NULL) is a no-op, so j <= i is safe. */
	for (j = 0; j <= i; j++)
		kfree(saved_ptr[j]);
	kfree(saved_ptr);

	return ret;
}
235
236/*
237 * A microcode patch in ucode_ptr is saved into mc_saved
238 * - if it has matching signature and newer revision compared to an existing
239 * patch mc_saved.
240 * - or if it is a newly discovered microcode patch.
241 *
242 * The microcode patch should have matching model with CPU.
243 *
244 * Returns: The updated number @num_saved of saved microcode patches.
245 */
246static unsigned int _save_mc(struct microcode_intel **mc_saved,
247 u8 *ucode_ptr, unsigned int num_saved)
248{
249 struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
250 unsigned int sig, pf;
251 int found = 0, i;
252
253 mc_hdr = (struct microcode_header_intel *)ucode_ptr;
254
255 for (i = 0; i < num_saved; i++) {
256 mc_saved_hdr = (struct microcode_header_intel *)mc_saved[i];
257 sig = mc_saved_hdr->sig;
258 pf = mc_saved_hdr->pf;
259
260 if (!find_matching_signature(ucode_ptr, sig, pf))
261 continue;
262
263 found = 1;
264
265 if (mc_hdr->rev <= mc_saved_hdr->rev)
266 continue;
267
268 /*
269 * Found an older ucode saved earlier. Replace it with
270 * this newer one.
271 */
272 mc_saved[i] = (struct microcode_intel *)ucode_ptr;
273 break;
274 }
275
276 /* Newly detected microcode, save it to memory. */
277 if (i >= num_saved && !found)
278 mc_saved[num_saved++] = (struct microcode_intel *)ucode_ptr;
279
280 return num_saved;
281}
282
283/*
284 * Get microcode matching with BSP's model. Only CPUs with the same model as
285 * BSP can stay in the platform.
286 */
287static enum ucode_state __init
288get_matching_model_microcode(unsigned long start, void *data, size_t size,
289 struct mc_saved_data *mcs, unsigned long *mc_ptrs,
290 struct ucode_cpu_info *uci)
291{
292 struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
293 struct microcode_header_intel *mc_header;
294 unsigned int num_saved = mcs->num_saved;
295 enum ucode_state state = UCODE_OK;
296 unsigned int leftover = size;
297 u8 *ucode_ptr = data;
298 unsigned int mc_size;
299 int i;
300
301 while (leftover && num_saved < ARRAY_SIZE(mc_saved_tmp)) {
302
303 if (leftover < sizeof(mc_header))
304 break;
305
306 mc_header = (struct microcode_header_intel *)ucode_ptr;
307
308 mc_size = get_totalsize(mc_header);
309 if (!mc_size || mc_size > leftover ||
310 microcode_sanity_check(ucode_ptr, 0) < 0)
311 break;
312
313 leftover -= mc_size;
314
315 /*
316 * Since APs with same family and model as the BSP may boot in
317 * the platform, we need to find and save microcode patches
318 * with the same family and model as the BSP.
319 */
320 if (matching_model_microcode(mc_header, uci->cpu_sig.sig) != UCODE_OK) {
321 ucode_ptr += mc_size;
322 continue;
323 }
324
325 num_saved = _save_mc(mc_saved_tmp, ucode_ptr, num_saved);
326
327 ucode_ptr += mc_size;
328 }
329
330 if (leftover) {
331 state = UCODE_ERROR;
332 return state;
333 }
334
335 if (!num_saved) {
336 state = UCODE_NFOUND;
337 return state;
338 }
339
340 for (i = 0; i < num_saved; i++)
341 mc_ptrs[i] = (unsigned long)mc_saved_tmp[i] - start;
342
343 mcs->num_saved = num_saved;
344
345 return state;
346}
347
348static int collect_cpu_info_early(struct ucode_cpu_info *uci)
349{
350 unsigned int val[2];
351 unsigned int family, model;
352 struct cpu_signature csig;
353 unsigned int eax, ebx, ecx, edx;
354
355 csig.sig = 0;
356 csig.pf = 0;
357 csig.rev = 0;
358
359 memset(uci, 0, sizeof(*uci));
360
361 eax = 0x00000001;
362 ecx = 0;
363 native_cpuid(&eax, &ebx, &ecx, &edx);
364 csig.sig = eax;
365
366 family = x86_family(csig.sig);
367 model = x86_model(csig.sig);
368
369 if ((model >= 5) || (family > 6)) {
370 /* get processor flags from MSR 0x17 */
371 native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
372 csig.pf = 1 << ((val[1] >> 18) & 7);
373 }
374 native_wrmsrl(MSR_IA32_UCODE_REV, 0);
375
376 /* As documented in the SDM: Do a CPUID 1 here */
377 sync_core();
378
379 /* get the current revision from MSR 0x8B */
380 native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
381
382 csig.rev = val[1];
383
384 uci->cpu_sig = csig;
385 uci->valid = 1;
386
387 return 0;
388}
389
/*
 * Dump the current CPU signature and all saved microcode patches via
 * pr_debug(). Compiles to an empty function unless DEBUG is defined.
 */
static void show_saved_mc(void)
{
#ifdef DEBUG
	int i, j;
	unsigned int sig, pf, rev, total_size, data_size, date;
	struct ucode_cpu_info uci;

	if (!mc_saved_data.num_saved) {
		pr_debug("no microcode data saved.\n");
		return;
	}
	pr_debug("Total microcode saved: %d\n", mc_saved_data.num_saved);

	collect_cpu_info_early(&uci);

	sig = uci.cpu_sig.sig;
	pf = uci.cpu_sig.pf;
	rev = uci.cpu_sig.rev;
	pr_debug("CPU: sig=0x%x, pf=0x%x, rev=0x%x\n", sig, pf, rev);

	for (i = 0; i < mc_saved_data.num_saved; i++) {
		struct microcode_header_intel *mc_saved_header;
		struct extended_sigtable *ext_header;
		int ext_sigcount;
		struct extended_signature *ext_sig;

		mc_saved_header = (struct microcode_header_intel *)
				  mc_saved_data.mc_saved[i];
		sig = mc_saved_header->sig;
		pf = mc_saved_header->pf;
		rev = mc_saved_header->rev;
		total_size = get_totalsize(mc_saved_header);
		data_size = get_datasize(mc_saved_header);
		date = mc_saved_header->date;

		/* Fixed "toal" typo; date field is printed as month-day-year. */
		pr_debug("mc_saved[%d]: sig=0x%x, pf=0x%x, rev=0x%x, total size=0x%x, date = %04x-%02x-%02x\n",
			 i, sig, pf, rev, total_size,
			 date & 0xffff,
			 date >> 24,
			 (date >> 16) & 0xff);

		/* Look for ext. headers: */
		if (total_size <= data_size + MC_HEADER_SIZE)
			continue;

		ext_header = (void *) mc_saved_header + data_size + MC_HEADER_SIZE;
		ext_sigcount = ext_header->count;
		ext_sig = (void *)ext_header + EXT_HEADER_SIZE;

		for (j = 0; j < ext_sigcount; j++) {
			sig = ext_sig->sig;
			pf = ext_sig->pf;

			pr_debug("\tExtended[%d]: sig=0x%x, pf=0x%x\n",
				 j, sig, pf);

			ext_sig++;
		}

	}
#endif
}
452
453#ifdef CONFIG_HOTPLUG_CPU
454static DEFINE_MUTEX(x86_cpu_microcode_mutex);
455/*
456 * Save this mc into mc_saved_data. So it will be loaded early when a CPU is
457 * hot added or resumes.
458 *
459 * Please make sure this mc should be a valid microcode patch before calling
460 * this function.
461 */
462int save_mc_for_early(u8 *mc)
463{
464 struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
465 unsigned int mc_saved_count_init;
466 unsigned int num_saved;
467 struct microcode_intel **mc_saved;
468 int ret = 0;
469 int i;
470
471 /*
472 * Hold hotplug lock so mc_saved_data is not accessed by a CPU in
473 * hotplug.
474 */
475 mutex_lock(&x86_cpu_microcode_mutex);
476
477 mc_saved_count_init = mc_saved_data.num_saved;
478 num_saved = mc_saved_data.num_saved;
479 mc_saved = mc_saved_data.mc_saved;
480
481 if (mc_saved && num_saved)
482 memcpy(mc_saved_tmp, mc_saved,
483 num_saved * sizeof(struct microcode_intel *));
484 /*
485 * Save the microcode patch mc in mc_save_tmp structure if it's a newer
486 * version.
487 */
488 num_saved = _save_mc(mc_saved_tmp, mc, num_saved);
489
490 /*
491 * Save the mc_save_tmp in global mc_saved_data.
492 */
493 ret = save_microcode(&mc_saved_data, mc_saved_tmp, num_saved);
494 if (ret) {
495 pr_err("Cannot save microcode patch.\n");
496 goto out;
497 }
498
499 show_saved_mc();
500
501 /*
502 * Free old saved microcode data.
503 */
504 if (mc_saved) {
505 for (i = 0; i < mc_saved_count_init; i++)
506 kfree(mc_saved[i]);
507 kfree(mc_saved);
508 }
509
510out:
511 mutex_unlock(&x86_cpu_microcode_mutex);
512
513 return ret;
514}
515EXPORT_SYMBOL_GPL(save_mc_for_early);
516#endif
517
518static bool __init load_builtin_intel_microcode(struct cpio_data *cp)
519{
520#ifdef CONFIG_X86_64
521 unsigned int eax = 0x00000001, ebx, ecx = 0, edx;
522 char name[30];
523
524 native_cpuid(&eax, &ebx, &ecx, &edx);
525
526 sprintf(name, "intel-ucode/%02x-%02x-%02x",
527 x86_family(eax), x86_model(eax), x86_stepping(eax));
528
529 return get_builtin_firmware(cp, name);
530#else
531 return false;
532#endif
533}
534
/* Path of the microcode blob inside the initrd cpio archive: */
static __initdata char ucode_name[] = "kernel/x86/microcode/GenuineIntel.bin";
/*
 * Locate a microcode blob - builtin when there is no initrd, otherwise from
 * the initrd cpio - and save the patches matching the BSP's model.
 */
static __init enum ucode_state
scan_microcode(struct mc_saved_data *mcs, unsigned long *mc_ptrs,
	       unsigned long start, unsigned long size,
	       struct ucode_cpu_info *uci)
{
	struct cpio_data cd;
	long offset = 0;
#ifdef CONFIG_X86_32
	/* NOTE: 32-bit early load accesses the string via its physical address. */
	char *p = (char *)__pa_nodebug(ucode_name);
#else
	char *p = ucode_name;
#endif

	cd.data = NULL;
	cd.size = 0;

	/* try built-in microcode if no initrd */
	if (!size) {
		if (!load_builtin_intel_microcode(&cd))
			return UCODE_ERROR;
	} else {
		cd = find_cpio_data(p, (void *)start, size, &offset);
		if (!cd.data)
			return UCODE_ERROR;
	}

	return get_matching_model_microcode(start, cd.data, cd.size,
					    mcs, mc_ptrs, uci);
}
565
566/*
567 * Print ucode update info.
568 */
569static void
570print_ucode_info(struct ucode_cpu_info *uci, unsigned int date)
571{
572 pr_info_once("microcode updated early to revision 0x%x, date = %04x-%02x-%02x\n",
573 uci->cpu_sig.rev,
574 date & 0xffff,
575 date >> 24,
576 (date >> 16) & 0xff);
577}
578
579#ifdef CONFIG_X86_32
580
/* Set when the update happened before printk() was usable: */
static int delay_ucode_info;
/* Date of the applied patch, stashed for the delayed printout: */
static int current_mc_date;

/*
 * Print early updated ucode info after printk works. This is delayed info dump.
 */
void show_ucode_info_early(void)
{
	struct ucode_cpu_info uci;

	if (delay_ucode_info) {
		collect_cpu_info_early(&uci);
		print_ucode_info(&uci, current_mc_date);
		delay_ucode_info = 0;
	}
}
597
598/*
599 * At this point, we can not call printk() yet. Keep microcode patch number in
600 * mc_saved_data.mc_saved and delay printing microcode info in
601 * show_ucode_info_early() until printk() works.
602 */
603static void print_ucode(struct ucode_cpu_info *uci)
604{
605 struct microcode_intel *mc;
606 int *delay_ucode_info_p;
607 int *current_mc_date_p;
608
609 mc = uci->mc;
610 if (!mc)
611 return;
612
613 delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
614 current_mc_date_p = (int *)__pa_nodebug(¤t_mc_date);
615
616 *delay_ucode_info_p = 1;
617 *current_mc_date_p = mc->hdr.date;
618}
619#else
620
621/*
622 * Flush global tlb. We only do this in x86_64 where paging has been enabled
623 * already and PGE should be enabled as well.
624 */
625static inline void flush_tlb_early(void)
626{
627 __native_flush_tlb_global_irq_disabled();
628}
629
630static inline void print_ucode(struct ucode_cpu_info *uci)
631{
632 struct microcode_intel *mc;
633
634 mc = uci->mc;
635 if (!mc)
636 return;
637
638 print_ucode_info(uci, mc->hdr.date);
639}
640#endif
641
642static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
643{
644 struct microcode_intel *mc;
645 unsigned int val[2];
646
647 mc = uci->mc;
648 if (!mc)
649 return 0;
650
651 /* write microcode via MSR 0x79 */
652 native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
653 native_wrmsrl(MSR_IA32_UCODE_REV, 0);
654
655 /* As documented in the SDM: Do a CPUID 1 here */
656 sync_core();
657
658 /* get the current revision from MSR 0x8B */
659 native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
660 if (val[1] != mc->hdr.rev)
661 return -1;
662
663#ifdef CONFIG_X86_64
664 /* Flush global tlb. This is precaution. */
665 flush_tlb_early();
666#endif
667 uci->cpu_sig.rev = val[1];
668
669 if (early)
670 print_ucode(uci);
671 else
672 print_ucode_info(uci, mc->hdr.date);
673
674 return 0;
675}
676
677/*
678 * This function converts microcode patch offsets previously stored in
679 * mc_tmp_ptrs to pointers and stores the pointers in mc_saved_data.
680 */
681int __init save_microcode_in_initrd_intel(void)
682{
683 unsigned int count = mc_saved_data.num_saved;
684 struct microcode_intel *mc_saved[MAX_UCODE_COUNT];
685 int ret = 0;
686
687 if (!count)
688 return ret;
689
690 copy_ptrs(mc_saved, mc_tmp_ptrs, get_initrd_start(), count);
691
692 ret = save_microcode(&mc_saved_data, mc_saved, count);
693 if (ret)
694 pr_err("Cannot save microcode patches from initrd.\n");
695
696 show_saved_mc();
697
698 return ret;
699}
700
701static void __init
702_load_ucode_intel_bsp(struct mc_saved_data *mcs, unsigned long *mc_ptrs,
703 unsigned long start, unsigned long size)
704{
705 struct ucode_cpu_info uci;
706 enum ucode_state ret;
707
708 collect_cpu_info_early(&uci);
709
710 ret = scan_microcode(mcs, mc_ptrs, start, size, &uci);
711 if (ret != UCODE_OK)
712 return;
713
714 ret = load_microcode(mcs, mc_ptrs, start, &uci);
715 if (ret != UCODE_OK)
716 return;
717
718 apply_microcode_early(&uci, true);
719}
720
/*
 * BSP early-load entry point: find the initrd (or fall back to builtin
 * microcode when there is none) and load/apply a matching patch.
 */
void __init load_ucode_intel_bsp(void)
{
	u64 start, size;
#ifdef CONFIG_X86_32
	struct boot_params *p;

	/* NOTE: 32-bit accesses all globals via physical addresses here. */
	p = (struct boot_params *)__pa_nodebug(&boot_params);
	size = p->hdr.ramdisk_size;

	/*
	 * Set start only if we have an initrd image. We cannot use initrd_start
	 * because it is not set that early yet.
	 */
	start = (size ? p->hdr.ramdisk_image : 0);

	_load_ucode_intel_bsp((struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
			      (unsigned long *)__pa_nodebug(&mc_tmp_ptrs),
			      start, size);
#else
	size = boot_params.hdr.ramdisk_size;
	/* 64-bit: the ramdisk is reachable through the direct mapping. */
	start = (size ? boot_params.hdr.ramdisk_image + PAGE_OFFSET : 0);

	_load_ucode_intel_bsp(&mc_saved_data, mc_tmp_ptrs, start, size);
#endif
}
746
/*
 * AP early-load entry point: apply the patch previously saved by the BSP.
 */
void load_ucode_intel_ap(void)
{
	unsigned long *mcs_tmp_p;
	struct mc_saved_data *mcs_p;
	struct ucode_cpu_info uci;
	enum ucode_state ret;
#ifdef CONFIG_X86_32

	/* NOTE: 32-bit accesses the saved data via physical addresses. */
	mcs_tmp_p = (unsigned long *)__pa_nodebug(mc_tmp_ptrs);
	mcs_p = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data);
#else
	mcs_tmp_p = mc_tmp_ptrs;
	mcs_p = &mc_saved_data;
#endif

	/*
	 * If there is no valid ucode previously saved in memory, no need to
	 * update ucode on this AP.
	 */
	if (!mcs_p->num_saved)
		return;

	collect_cpu_info_early(&uci);
	ret = load_microcode(mcs_p, mcs_tmp_p, get_initrd_start_addr(), &uci);
	if (ret != UCODE_OK)
		return;

	apply_microcode_early(&uci, true);
}
776
777void reload_ucode_intel(void)
778{
779 struct ucode_cpu_info uci;
780 enum ucode_state ret;
781
782 if (!mc_saved_data.num_saved)
783 return;
784
785 collect_cpu_info_early(&uci);
786
787 ret = load_microcode_early(mc_saved_data.mc_saved,
788 mc_saved_data.num_saved, &uci);
789 if (ret != UCODE_OK)
790 return;
791
792 apply_microcode_early(&uci, false);
793}
794
795static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
796{
797 struct cpuinfo_x86 *c = &cpu_data(cpu_num);
798 unsigned int val[2];
799
800 memset(csig, 0, sizeof(*csig));
801
802 csig->sig = cpuid_eax(0x00000001);
803
804 if ((c->x86_model >= 5) || (c->x86 > 6)) {
805 /* get processor flags from MSR 0x17 */
806 rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
807 csig->pf = 1 << ((val[1] >> 18) & 7);
808 }
809
810 csig->rev = c->microcode;
811 pr_info("CPU%d sig=0x%x, pf=0x%x, revision=0x%x\n",
812 cpu_num, csig->sig, csig->pf, csig->rev);
813
814 return 0;
815}
816
817/*
818 * return 0 - no update found
819 * return 1 - found update
820 */
821static int get_matching_mc(struct microcode_intel *mc, int cpu)
822{
823 struct cpu_signature cpu_sig;
824 unsigned int csig, cpf, crev;
825
826 collect_cpu_info(cpu, &cpu_sig);
827
828 csig = cpu_sig.sig;
829 cpf = cpu_sig.pf;
830 crev = cpu_sig.rev;
831
832 return has_newer_microcode(mc, csig, cpf, crev);
833}
834
835static int apply_microcode_intel(int cpu)
836{
837 struct microcode_intel *mc;
838 struct ucode_cpu_info *uci;
839 struct cpuinfo_x86 *c;
840 unsigned int val[2];
841
842 /* We should bind the task to the CPU */
843 if (WARN_ON(raw_smp_processor_id() != cpu))
844 return -1;
845
846 uci = ucode_cpu_info + cpu;
847 mc = uci->mc;
848 if (!mc)
849 return 0;
850
851 /*
852 * Microcode on this CPU could be updated earlier. Only apply the
853 * microcode patch in mc when it is newer than the one on this
854 * CPU.
855 */
856 if (!get_matching_mc(mc, cpu))
857 return 0;
858
859 /* write microcode via MSR 0x79 */
860 wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
861 wrmsrl(MSR_IA32_UCODE_REV, 0);
862
863 /* As documented in the SDM: Do a CPUID 1 here */
864 sync_core();
865
866 /* get the current revision from MSR 0x8B */
867 rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
868
869 if (val[1] != mc->hdr.rev) {
870 pr_err("CPU%d update to revision 0x%x failed\n",
871 cpu, mc->hdr.rev);
872 return -1;
873 }
874
875 pr_info("CPU%d updated to revision 0x%x, date = %04x-%02x-%02x\n",
876 cpu, val[1],
877 mc->hdr.date & 0xffff,
878 mc->hdr.date >> 24,
879 (mc->hdr.date >> 16) & 0xff);
880
881 c = &cpu_data(cpu);
882
883 uci->cpu_sig.rev = val[1];
884 c->microcode = val[1];
885
886 return 0;
887}
888
/*
 * Parse a concatenated blob of microcode patches at @data, pick the newest
 * patch applicable to @cpu and stash it in uci->mc.
 *
 * @get_ucode_data abstracts the copy from the source buffer (memcpy for
 * firmware files, copy_from_user for the ioctl path); nonzero return from
 * it aborts parsing.
 *
 * Returns UCODE_OK on success, UCODE_NFOUND when no applicable patch was
 * found, UCODE_ERROR when the blob is malformed or a copy failed.
 */
static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
				int (*get_ucode_data)(void *, const void *, size_t))
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	u8 *ucode_ptr = data, *new_mc = NULL, *mc = NULL;
	int new_rev = uci->cpu_sig.rev;
	unsigned int leftover = size;
	enum ucode_state state = UCODE_OK;
	unsigned int curr_mc_size = 0;
	unsigned int csig, cpf;

	while (leftover) {
		struct microcode_header_intel mc_header;
		unsigned int mc_size;

		if (leftover < sizeof(mc_header)) {
			pr_err("error! Truncated header in microcode data file\n");
			break;
		}

		/* Peek at the header to learn this patch's total size: */
		if (get_ucode_data(&mc_header, ucode_ptr, sizeof(mc_header)))
			break;

		mc_size = get_totalsize(&mc_header);
		if (!mc_size || mc_size > leftover) {
			pr_err("error! Bad data in microcode data file\n");
			break;
		}

		/* For performance reasons, reuse mc area when possible */
		if (!mc || mc_size > curr_mc_size) {
			vfree(mc);
			mc = vmalloc(mc_size);
			if (!mc)
				break;
			curr_mc_size = mc_size;
		}

		if (get_ucode_data(mc, ucode_ptr, mc_size) ||
		    microcode_sanity_check(mc, 1) < 0) {
			break;
		}

		csig = uci->cpu_sig.sig;
		cpf = uci->cpu_sig.pf;
		/* Best patch so far - take ownership of the mc buffer: */
		if (has_newer_microcode(mc, csig, cpf, new_rev)) {
			vfree(new_mc);
			new_rev = mc_header.rev;
			new_mc = mc;
			mc = NULL;	/* trigger new vmalloc */
		}

		ucode_ptr += mc_size;
		leftover -= mc_size;
	}

	vfree(mc);

	/* Loop ended early - the blob is bad; discard any candidate. */
	if (leftover) {
		vfree(new_mc);
		state = UCODE_ERROR;
		goto out;
	}

	if (!new_mc) {
		state = UCODE_NFOUND;
		goto out;
	}

	/* Replace the per-CPU patch; uci->mc now owns the new buffer. */
	vfree(uci->mc);
	uci->mc = (struct microcode_intel *)new_mc;

	/*
	 * If early loading microcode is supported, save this mc into
	 * permanent memory. So it will be loaded early when a CPU is hot added
	 * or resumes.
	 */
	save_mc_for_early(new_mc);

	pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
		 cpu, new_rev, uci->cpu_sig.rev);
out:
	return state;
}
973
/* Copy helper for the firmware-file path: a plain memcpy, never fails. */
static int get_ucode_fw(void *to, const void *from, size_t n)
{
	memcpy(to, from, n);

	return 0;
}
979
980static enum ucode_state request_microcode_fw(int cpu, struct device *device,
981 bool refresh_fw)
982{
983 char name[30];
984 struct cpuinfo_x86 *c = &cpu_data(cpu);
985 const struct firmware *firmware;
986 enum ucode_state ret;
987
988 sprintf(name, "intel-ucode/%02x-%02x-%02x",
989 c->x86, c->x86_model, c->x86_mask);
990
991 if (request_firmware_direct(&firmware, name, device)) {
992 pr_debug("data file %s load failed\n", name);
993 return UCODE_NFOUND;
994 }
995
996 ret = generic_load_microcode(cpu, (void *)firmware->data,
997 firmware->size, &get_ucode_fw);
998
999 release_firmware(firmware);
1000
1001 return ret;
1002}
1003
/*
 * Copy helper for the user-space path. Like copy_from_user(), a nonzero
 * return is the number of bytes that could not be copied.
 */
static int get_ucode_user(void *to, const void *from, size_t n)
{
	return copy_from_user(to, from, n);
}
1008
1009static enum ucode_state
1010request_microcode_user(int cpu, const void __user *buf, size_t size)
1011{
1012 return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
1013}
1014
/* Release the per-CPU patch buffer vmalloc'ed by generic_load_microcode(). */
static void microcode_fini_cpu(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	vfree(uci->mc);
	uci->mc = NULL;
}
1022
/* Ops table handed to the generic loader core by init_intel_microcode(). */
static struct microcode_ops microcode_intel_ops = {
	.request_microcode_user		  = request_microcode_user,
	.request_microcode_fw             = request_microcode_fw,
	.collect_cpu_info                 = collect_cpu_info,
	.apply_microcode                  = apply_microcode_intel,
	.microcode_fini_cpu               = microcode_fini_cpu,
};
1030
1031struct microcode_ops * __init init_intel_microcode(void)
1032{
1033 struct cpuinfo_x86 *c = &boot_cpu_data;
1034
1035 if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
1036 cpu_has(c, X86_FEATURE_IA64)) {
1037 pr_err("Intel CPU family 0x%x not supported\n", c->x86);
1038 return NULL;
1039 }
1040
1041 return µcode_intel_ops;
1042}
1043