1/*
2 * Intel CPU Microcode Update Driver for Linux
3 *
4 * Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
5 * 2006 Shaohua Li <shaohua.li@intel.com>
6 *
7 * Intel CPU microcode early update for Linux
8 *
9 * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
10 *			H Peter Anvin <hpa@zytor.com>
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 */
17
18/*
19 * This needs to be before all headers so that pr_debug in printk.h doesn't turn
20 * printk calls into no_printk().
21 *
22 *#define DEBUG
23 */
24#define pr_fmt(fmt) "microcode: " fmt
25
26#include <linux/earlycpio.h>
27#include <linux/firmware.h>
28#include <linux/uaccess.h>
29#include <linux/vmalloc.h>
30#include <linux/initrd.h>
31#include <linux/kernel.h>
32#include <linux/slab.h>
33#include <linux/cpu.h>
34#include <linux/mm.h>
35
36#include <asm/microcode_intel.h>
37#include <asm/processor.h>
38#include <asm/tlbflush.h>
39#include <asm/setup.h>
40#include <asm/msr.h>
41
42static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";
43
44/* Current microcode patch used in early patching on the APs. */
45struct microcode_intel *intel_ucode_patch;
46
47static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,
48 unsigned int s2, unsigned int p2)
49{
50 if (s1 != s2)
51 return false;
52
53 /* Processor flags are either both 0 ... */
54 if (!p1 && !p2)
55 return true;
56
57 /* ... or they intersect. */
58 return p1 & p2;
59}
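
/*
 * For illustration (values are made up): pf is a bitmask of platform IDs.
 * A CPU with pf = 0x04 (platform ID 2) matches a patch carrying pf = 0x14
 * because 0x04 & 0x14 != 0, i.e. the masks intersect; a patch with
 * pf = 0x10 would not match. CPUs that do not report a platform ID leave
 * pf at 0 on both sides, which is the first case above.
 */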
60
61/*
62 * Returns 1 if a matching signature has been found, 0 otherwise.
63 */
64static int find_matching_signature(void *mc, unsigned int csig, int cpf)
65{
66 struct microcode_header_intel *mc_hdr = mc;
67 struct extended_sigtable *ext_hdr;
68 struct extended_signature *ext_sig;
69 int i;
70
71 if (cpu_signatures_match(csig, cpf, mc_hdr->sig, mc_hdr->pf))
72 return 1;
73
74 /* Look for ext. headers: */
75 if (get_totalsize(mc_hdr) <= get_datasize(mc_hdr) + MC_HEADER_SIZE)
76 return 0;
77
78 ext_hdr = mc + get_datasize(mc_hdr) + MC_HEADER_SIZE;
79 ext_sig = (void *)ext_hdr + EXT_HEADER_SIZE;
80
81 for (i = 0; i < ext_hdr->count; i++) {
82 if (cpu_signatures_match(csig, cpf, ext_sig->sig, ext_sig->pf))
83 return 1;
84 ext_sig++;
85 }
86 return 0;
87}
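
/*
 * Sketch of the container layout assumed by the lookup above: an update is
 * a 48-byte header (MC_HEADER_SIZE), followed by an opaque data block of
 * get_datasize() bytes and, optionally, an extended signature table: a
 * 20-byte table header (EXT_HEADER_SIZE) followed by ext_hdr->count
 * 12-byte entries (EXT_SIGNATURE_SIZE), one per additional sig/pf pair the
 * patch supports.
 */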
88
89/*
90 * Returns 1 if a newer, matching update has been found, 0 otherwise.
91 */
92static int has_newer_microcode(void *mc, unsigned int csig, int cpf, int new_rev)
93{
94 struct microcode_header_intel *mc_hdr = mc;
95
96 if (mc_hdr->rev <= new_rev)
97 return 0;
98
99 return find_matching_signature(mc, csig, cpf);
100}
101
102/*
103 * Given a CPU signature and a microcode patch, this function checks whether
104 * the microcode patch's family and model match those of the CPU.
105 *
106 * %true - if there's a match
107 * %false - otherwise
108 */
109static bool microcode_matches(struct microcode_header_intel *mc_header,
110 unsigned long sig)
111{
112 unsigned long total_size = get_totalsize(mc_header);
113 unsigned long data_size = get_datasize(mc_header);
114 struct extended_sigtable *ext_header;
115 unsigned int fam_ucode, model_ucode;
116 struct extended_signature *ext_sig;
117 unsigned int fam, model;
118 int ext_sigcount, i;
119
120 fam = x86_family(sig);
121 model = x86_model(sig);
122
123 fam_ucode = x86_family(mc_header->sig);
124 model_ucode = x86_model(mc_header->sig);
125
126 if (fam == fam_ucode && model == model_ucode)
127 return true;
128
129 /* Look for ext. headers: */
130 if (total_size <= data_size + MC_HEADER_SIZE)
131 return false;
132
133 ext_header = (void *) mc_header + data_size + MC_HEADER_SIZE;
134 ext_sig = (void *)ext_header + EXT_HEADER_SIZE;
135 ext_sigcount = ext_header->count;
136
137 for (i = 0; i < ext_sigcount; i++) {
138 fam_ucode = x86_family(ext_sig->sig);
139 model_ucode = x86_model(ext_sig->sig);
140
141 if (fam == fam_ucode && model == model_ucode)
142 return true;
143
144 ext_sig++;
145 }
146 return false;
147}
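
/*
 * Worked example (hypothetical value): sig is the raw CPUID(1).EAX, so
 * x86_family()/x86_model() combine the base and extended fields. For
 * sig = 0x000306a9 that yields family 6, model 0x3a; a patch whose header
 * sig decodes to the same family/model pair is kept for this CPU here even
 * if stepping or platform flags differ.
 */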
148
149static struct ucode_patch *__alloc_microcode_buf(void *data, unsigned int size)
150{
151 struct ucode_patch *p;
152
153 p = kzalloc(sizeof(struct ucode_patch), GFP_KERNEL);
154 if (!p)
155 return ERR_PTR(-ENOMEM);
156
157 p->data = kmemdup(data, size, GFP_KERNEL);
158 if (!p->data) {
159 kfree(p);
160 return ERR_PTR(-ENOMEM);
161 }
162
163 return p;
164}
165
166static void save_microcode_patch(void *data, unsigned int size)
167{
168 struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
169 struct ucode_patch *iter, *tmp, *p;
170 bool prev_found = false;
171 unsigned int sig, pf;
172
173 mc_hdr = (struct microcode_header_intel *)data;
174
175 list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {
176 mc_saved_hdr = (struct microcode_header_intel *)iter->data;
177 sig = mc_saved_hdr->sig;
178 pf = mc_saved_hdr->pf;
179
180 if (find_matching_signature(data, sig, pf)) {
181 prev_found = true;
182
183 if (mc_hdr->rev <= mc_saved_hdr->rev)
184 continue;
185
186 p = __alloc_microcode_buf(data, size);
187 if (IS_ERR(p))
188 pr_err("Error allocating buffer %p\n", data);
189 else
190 list_replace(&iter->plist, &p->plist);
191 }
192 }
193
194 /*
195 * There weren't any previous patches found in the list cache; save the
196 * newly found one.
197 */
198 if (!prev_found) {
199 p = __alloc_microcode_buf(data, size);
200 if (IS_ERR(p))
201 pr_err("Error allocating buffer for %p\n", data);
202 else
203 list_add_tail(&p->plist, &microcode_cache);
204 }
205}
206
207static int microcode_sanity_check(void *mc, int print_err)
208{
209 unsigned long total_size, data_size, ext_table_size;
210 struct microcode_header_intel *mc_header = mc;
211 struct extended_sigtable *ext_header = NULL;
212 u32 sum, orig_sum, ext_sigcount = 0, i;
213 struct extended_signature *ext_sig;
214
215 total_size = get_totalsize(mc_header);
216 data_size = get_datasize(mc_header);
217
218 if (data_size + MC_HEADER_SIZE > total_size) {
219 if (print_err)
220 pr_err("Error: bad microcode data file size.\n");
221 return -EINVAL;
222 }
223
224 if (mc_header->ldrver != 1 || mc_header->hdrver != 1) {
225 if (print_err)
226 pr_err("Error: invalid/unknown microcode update format.\n");
227 return -EINVAL;
228 }
229
230 ext_table_size = total_size - (MC_HEADER_SIZE + data_size);
231 if (ext_table_size) {
232 u32 ext_table_sum = 0;
233 u32 *ext_tablep;
234
235 if ((ext_table_size < EXT_HEADER_SIZE)
236 || ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE)) {
237 if (print_err)
238 pr_err("Error: truncated extended signature table.\n");
239 return -EINVAL;
240 }
241
242 ext_header = mc + MC_HEADER_SIZE + data_size;
243 if (ext_table_size != exttable_size(ext_header)) {
244 if (print_err)
245 pr_err("Error: extended signature table size mismatch.\n");
246 return -EFAULT;
247 }
248
249 ext_sigcount = ext_header->count;
250
251 /*
252 * Check extended table checksum: the sum of all dwords that
253 * comprise a valid table must be 0.
254 */
255 ext_tablep = (u32 *)ext_header;
256
257 i = ext_table_size / sizeof(u32);
258 while (i--)
259 ext_table_sum += ext_tablep[i];
260
261 if (ext_table_sum) {
262 if (print_err)
263 pr_warn("Bad extended signature table checksum, aborting.\n");
264 return -EINVAL;
265 }
266 }
267
268 /*
269 * Calculate the checksum of update data and header. The checksum of
270 * valid update data and header including the extended signature table
271 * must be 0.
272 */
273 orig_sum = 0;
274 i = (MC_HEADER_SIZE + data_size) / sizeof(u32);
275 while (i--)
276 orig_sum += ((u32 *)mc)[i];
277
278 if (orig_sum) {
279 if (print_err)
280 pr_err("Bad microcode data checksum, aborting.\n");
281 return -EINVAL;
282 }
283
284 if (!ext_table_size)
285 return 0;
286
287 /*
288 * Check extended signature checksum: 0 => valid.
289 */
290 for (i = 0; i < ext_sigcount; i++) {
291 ext_sig = (void *)ext_header + EXT_HEADER_SIZE +
292 EXT_SIGNATURE_SIZE * i;
293
294 sum = (mc_header->sig + mc_header->pf + mc_header->cksum) -
295 (ext_sig->sig + ext_sig->pf + ext_sig->cksum);
296 if (sum) {
297 if (print_err)
298 pr_err("Bad extended signature checksum, aborting.\n");
299 return -EINVAL;
300 }
301 }
302 return 0;
303}
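
/*
 * Checksum rules checked above, with a small worked example: an update is
 * valid when the 32-bit words of header + data sum to 0 (mod 2^32), i.e.
 * the cksum field is chosen as -(sum of all other dwords). The same
 * zero-sum rule applies to the extended table as a whole. An extended
 * entry only replaces sig, pf and cksum in the header, so
 *
 *	hdr.sig + hdr.pf + hdr.cksum == ext.sig + ext.pf + ext.cksum
 *
 * must hold for the substituted header to remain zero-sum; that is the
 * shortcut verified in the last loop.
 */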
304
305/*
306 * Get microcode matching the BSP's model. Only CPUs with the same model as
307 * the BSP can be present in the platform.
308 */
309static struct microcode_intel *
310scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save)
311{
312 struct microcode_header_intel *mc_header;
313 struct microcode_intel *patch = NULL;
314 unsigned int mc_size;
315
316 while (size) {
317 if (size < sizeof(struct microcode_header_intel))
318 break;
319
320 mc_header = (struct microcode_header_intel *)data;
321
322 mc_size = get_totalsize(mc_header);
323 if (!mc_size ||
324 mc_size > size ||
325 microcode_sanity_check(data, 0) < 0)
326 break;
327
328 size -= mc_size;
329
330 if (!microcode_matches(mc_header, uci->cpu_sig.sig)) {
331 data += mc_size;
332 continue;
333 }
334
335 if (save) {
336 save_microcode_patch(data, mc_size);
337 goto next;
338 }
339
340
341 if (!patch) {
342 if (!has_newer_microcode(data,
343 uci->cpu_sig.sig,
344 uci->cpu_sig.pf,
345 uci->cpu_sig.rev))
346 goto next;
347
348 } else {
349 struct microcode_header_intel *phdr = &patch->hdr;
350
351 if (!has_newer_microcode(data,
352 phdr->sig,
353 phdr->pf,
354 phdr->rev))
355 goto next;
356 }
357
358 /* We have a newer patch, save it. */
359 patch = data;
360
361next:
362 data += mc_size;
363 }
364
365 if (size)
366 return NULL;
367
368 return patch;
369}
370
371static int collect_cpu_info_early(struct ucode_cpu_info *uci)
372{
373 unsigned int val[2];
374 unsigned int family, model;
375 struct cpu_signature csig = { 0 };
376 unsigned int eax, ebx, ecx, edx;
377
378 memset(uci, 0, sizeof(*uci));
379
380 eax = 0x00000001;
381 ecx = 0;
382 native_cpuid(&eax, &ebx, &ecx, &edx);
383 csig.sig = eax;
384
385 family = x86_family(eax);
386 model = x86_model(eax);
387
388 if ((model >= 5) || (family > 6)) {
389 /* get processor flags from MSR 0x17 */
390 native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
391 csig.pf = 1 << ((val[1] >> 18) & 7);
392 }
393
394 csig.rev = intel_get_microcode_revision();
395
396 uci->cpu_sig = csig;
397 uci->valid = 1;
398
399 return 0;
400}
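
/*
 * Note on the platform-flag computation above: bits [52:50] of
 * MSR_IA32_PLATFORM_ID (0x17) hold the CPU's 3-bit platform ID. val[1] is
 * the upper 32 bits of that MSR, so ((val[1] >> 18) & 7) extracts the ID
 * and csig.pf becomes the single-bit mask (1 << id) that is later tested
 * against a patch's pf bitmask in cpu_signatures_match().
 */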
401
402static void show_saved_mc(void)
403{
404#ifdef DEBUG
405 int i = 0, j;
406 unsigned int sig, pf, rev, total_size, data_size, date;
407 struct ucode_cpu_info uci;
408 struct ucode_patch *p;
409
410 if (list_empty(&microcode_cache)) {
411 pr_debug("no microcode data saved.\n");
412 return;
413 }
414
415 collect_cpu_info_early(&uci);
416
417 sig = uci.cpu_sig.sig;
418 pf = uci.cpu_sig.pf;
419 rev = uci.cpu_sig.rev;
420 pr_debug("CPU: sig=0x%x, pf=0x%x, rev=0x%x\n", sig, pf, rev);
421
422 list_for_each_entry(p, &microcode_cache, plist) {
423 struct microcode_header_intel *mc_saved_header;
424 struct extended_sigtable *ext_header;
425 struct extended_signature *ext_sig;
426 int ext_sigcount;
427
428 mc_saved_header = (struct microcode_header_intel *)p->data;
429
430 sig = mc_saved_header->sig;
431 pf = mc_saved_header->pf;
432 rev = mc_saved_header->rev;
433 date = mc_saved_header->date;
434
435 total_size = get_totalsize(mc_saved_header);
436 data_size = get_datasize(mc_saved_header);
437
438 pr_debug("mc_saved[%d]: sig=0x%x, pf=0x%x, rev=0x%x, total size=0x%x, date = %04x-%02x-%02x\n",
439 i++, sig, pf, rev, total_size,
440 date & 0xffff,
441 date >> 24,
442 (date >> 16) & 0xff);
443
444 /* Look for ext. headers: */
445 if (total_size <= data_size + MC_HEADER_SIZE)
446 continue;
447
448 ext_header = (void *)mc_saved_header + data_size + MC_HEADER_SIZE;
449 ext_sigcount = ext_header->count;
450 ext_sig = (void *)ext_header + EXT_HEADER_SIZE;
451
452 for (j = 0; j < ext_sigcount; j++) {
453 sig = ext_sig->sig;
454 pf = ext_sig->pf;
455
456 pr_debug("\tExtended[%d]: sig=0x%x, pf=0x%x\n",
457 j, sig, pf);
458
459 ext_sig++;
460 }
461 }
462#endif
463}
464
465/*
466 * Save this microcode patch. It will be loaded early when a CPU is
467 * hot-added or resumes.
468 */
469static void save_mc_for_early(u8 *mc, unsigned int size)
470{
471#ifdef CONFIG_HOTPLUG_CPU
472 /* Synchronization during CPU hotplug. */
473 static DEFINE_MUTEX(x86_cpu_microcode_mutex);
474
475 mutex_lock(&x86_cpu_microcode_mutex);
476
477 save_microcode_patch(mc, size);
478 show_saved_mc();
479
480 mutex_unlock(&x86_cpu_microcode_mutex);
481#endif
482}
483
484static bool load_builtin_intel_microcode(struct cpio_data *cp)
485{
486 unsigned int eax = 1, ebx, ecx = 0, edx;
487 char name[30];
488
489 if (IS_ENABLED(CONFIG_X86_32))
490 return false;
491
492 native_cpuid(&eax, &ebx, &ecx, &edx);
493
494 sprintf(name, "intel-ucode/%02x-%02x-%02x",
495 x86_family(eax), x86_model(eax), x86_stepping(eax));
496
497 return get_builtin_firmware(cp, name);
498}
499
500/*
501 * Print ucode update info.
502 */
503static void
504print_ucode_info(struct ucode_cpu_info *uci, unsigned int date)
505{
506 pr_info_once("microcode updated early to revision 0x%x, date = %04x-%02x-%02x\n",
507 uci->cpu_sig.rev,
508 date & 0xffff,
509 date >> 24,
510 (date >> 16) & 0xff);
511}
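
/*
 * Date decoding used above, with an example (hypothetical value): the
 * header's date field is BCD-encoded as 0xMMDDYYYY, so date = 0x04102015
 * means month 0x04, day 0x10, year 0x2015 and is printed as "2015-04-10".
 * Hence year = date & 0xffff, month = date >> 24, day = (date >> 16) & 0xff.
 */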
512
513#ifdef CONFIG_X86_32
514
515static int delay_ucode_info;
516static int current_mc_date;
517
518/*
519 * Print early updated ucode info after printk works. This is a delayed info dump.
520 */
521void show_ucode_info_early(void)
522{
523 struct ucode_cpu_info uci;
524
525 if (delay_ucode_info) {
526 collect_cpu_info_early(&uci);
527 print_ucode_info(&uci, current_mc_date);
528 delay_ucode_info = 0;
529 }
530}
531
532/*
533 * At this point, we cannot call printk() yet. Delay printing microcode info in
534 * show_ucode_info_early() until printk() works.
535 */
536static void print_ucode(struct ucode_cpu_info *uci)
537{
538 struct microcode_intel *mc;
539 int *delay_ucode_info_p;
540 int *current_mc_date_p;
541
542 mc = uci->mc;
543 if (!mc)
544 return;
545
546 delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
547 current_mc_date_p = (int *)__pa_nodebug(&current_mc_date);
548
549 *delay_ucode_info_p = 1;
550 *current_mc_date_p = mc->hdr.date;
551}
552#else
553
554/*
555 * Flush the global TLB. We only do this on x86_64, where paging has already
556 * been enabled and PGE should be enabled as well.
557 */
558static inline void flush_tlb_early(void)
559{
560 __native_flush_tlb_global_irq_disabled();
561}
562
563static inline void print_ucode(struct ucode_cpu_info *uci)
564{
565 struct microcode_intel *mc;
566
567 mc = uci->mc;
568 if (!mc)
569 return;
570
571 print_ucode_info(uci, mc->hdr.date);
572}
573#endif
574
575static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
576{
577 struct microcode_intel *mc;
578 u32 rev;
579
580 mc = uci->mc;
581 if (!mc)
582 return 0;
583
584 /* write microcode via MSR 0x79 */
585 native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
586
587 rev = intel_get_microcode_revision();
588 if (rev != mc->hdr.rev)
589 return -1;
590
591#ifdef CONFIG_X86_64
592 /* Flush the global TLB. This is a precaution. */
593 flush_tlb_early();
594#endif
595 uci->cpu_sig.rev = rev;
596
597 if (early)
598 print_ucode(uci);
599 else
600 print_ucode_info(uci, mc->hdr.date);
601
602 return 0;
603}
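
/*
 * Rough sketch of the update protocol used above: the linear address of
 * the patch payload (mc->bits, the data immediately after the header) is
 * written to MSR_IA32_UCODE_WRITE (0x79), which triggers the load.
 * intel_get_microcode_revision() then re-reads the active revision using
 * the usual sequence (clear MSR_IA32_UCODE_REV (0x8B), execute CPUID(1),
 * read back the MSR's upper 32 bits). Only if that revision equals
 * mc->hdr.rev is the update considered applied.
 */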
604
605int __init save_microcode_in_initrd_intel(void)
606{
607 struct ucode_cpu_info uci;
608 struct cpio_data cp;
609
610 if (!load_builtin_intel_microcode(&cp))
611 cp = find_microcode_in_initrd(ucode_path, false);
612
613 if (!(cp.data && cp.size))
614 return 0;
615
616 collect_cpu_info_early(&uci);
617
618 scan_microcode(cp.data, cp.size, &uci, true);
619
620 show_saved_mc();
621
622 return 0;
623}
624
625/*
626 * Returns a pointer to the matching microcode patch, or NULL if none is found.
627 */
628static struct microcode_intel *__load_ucode_intel(struct ucode_cpu_info *uci)
629{
630 static const char *path;
631 struct cpio_data cp;
632 bool use_pa;
633
634 if (IS_ENABLED(CONFIG_X86_32)) {
635 path = (const char *)__pa_nodebug(ucode_path);
636 use_pa = true;
637 } else {
638 path = ucode_path;
639 use_pa = false;
640 }
641
642 /* try built-in microcode first */
643 if (!load_builtin_intel_microcode(&cp))
644 cp = find_microcode_in_initrd(path, use_pa);
645
646 if (!(cp.data && cp.size))
647 return NULL;
648
649 collect_cpu_info_early(uci);
650
651 return scan_microcode(cp.data, cp.size, uci, false);
652}
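
/*
 * Note on the 32-bit path above: the early loader still runs with physical
 * addresses (the kernel's page tables are not usable yet), so ucode_path
 * is converted with __pa_nodebug() and find_microcode_in_initrd() is
 * passed use_pa so it works with physical addresses as well. On 64-bit the
 * direct mapping is already in place, so plain virtual pointers are used.
 */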
653
654void __init load_ucode_intel_bsp(void)
655{
656 struct microcode_intel *patch;
657 struct ucode_cpu_info uci;
658
659 patch = __load_ucode_intel(&uci);
660 if (!patch)
661 return;
662
663 uci.mc = patch;
664
665 apply_microcode_early(&uci, true);
666}
667
668void load_ucode_intel_ap(void)
669{
670 struct microcode_intel *patch, **iup;
671 struct ucode_cpu_info uci;
672
673 if (IS_ENABLED(CONFIG_X86_32))
674 iup = (struct microcode_intel **) __pa_nodebug(&intel_ucode_patch);
675 else
676 iup = &intel_ucode_patch;
677
678reget:
679 if (!*iup) {
680 patch = __load_ucode_intel(&uci);
681 if (!patch)
682 return;
683
684 *iup = patch;
685 }
686
687 uci.mc = *iup;
688
689 if (apply_microcode_early(&uci, true)) {
690 /* Mixed-silicon system? Try to refetch the proper patch: */
691 *iup = NULL;
692
693 goto reget;
694 }
695}
696
697static struct microcode_intel *find_patch(struct ucode_cpu_info *uci)
698{
699 struct microcode_header_intel *phdr;
700 struct ucode_patch *iter, *tmp;
701
702 list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {
703
704 phdr = (struct microcode_header_intel *)iter->data;
705
706 if (phdr->rev <= uci->cpu_sig.rev)
707 continue;
708
709 if (!find_matching_signature(phdr,
710 uci->cpu_sig.sig,
711 uci->cpu_sig.pf))
712 continue;
713
714 return iter->data;
715 }
716 return NULL;
717}
718
719void reload_ucode_intel(void)
720{
721 struct microcode_intel *p;
722 struct ucode_cpu_info uci;
723
724 collect_cpu_info_early(&uci);
725
726 p = find_patch(&uci);
727 if (!p)
728 return;
729
730 uci.mc = p;
731
732 apply_microcode_early(&uci, false);
733}
734
735static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
736{
737 static struct cpu_signature prev;
738 struct cpuinfo_x86 *c = &cpu_data(cpu_num);
739 unsigned int val[2];
740
741 memset(csig, 0, sizeof(*csig));
742
743 csig->sig = cpuid_eax(0x00000001);
744
745 if ((c->x86_model >= 5) || (c->x86 > 6)) {
746 /* get processor flags from MSR 0x17 */
747 rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
748 csig->pf = 1 << ((val[1] >> 18) & 7);
749 }
750
751 csig->rev = c->microcode;
752
753 /* No extra locking on prev, races are harmless. */
754 if (csig->sig != prev.sig || csig->pf != prev.pf || csig->rev != prev.rev) {
755 pr_info("sig=0x%x, pf=0x%x, revision=0x%x\n",
756 csig->sig, csig->pf, csig->rev);
757 prev = *csig;
758 }
759
760 return 0;
761}
762
763static int apply_microcode_intel(int cpu)
764{
765 struct microcode_intel *mc;
766 struct ucode_cpu_info *uci;
767 struct cpuinfo_x86 *c;
768 static int prev_rev;
769 u32 rev;
770
771 /* We should bind the task to the CPU */
772 if (WARN_ON(raw_smp_processor_id() != cpu))
773 return -1;
774
775 uci = ucode_cpu_info + cpu;
776 mc = uci->mc;
777 if (!mc) {
778 /* Look for a newer patch in our cache: */
779 mc = find_patch(uci);
780 if (!mc)
781 return 0;
782 }
783
784 /* write microcode via MSR 0x79 */
785 wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
786
787 rev = intel_get_microcode_revision();
788
789 if (rev != mc->hdr.rev) {
790 pr_err("CPU%d update to revision 0x%x failed\n",
791 cpu, mc->hdr.rev);
792 return -1;
793 }
794
795 if (rev != prev_rev) {
796 pr_info("updated to revision 0x%x, date = %04x-%02x-%02x\n",
797 rev,
798 mc->hdr.date & 0xffff,
799 mc->hdr.date >> 24,
800 (mc->hdr.date >> 16) & 0xff);
801 prev_rev = rev;
802 }
803
804 c = &cpu_data(cpu);
805
806 uci->cpu_sig.rev = rev;
807 c->microcode = rev;
808
809 return 0;
810}
811
812static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
813 int (*get_ucode_data)(void *, const void *, size_t))
814{
815 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
816 u8 *ucode_ptr = data, *new_mc = NULL, *mc = NULL;
817 int new_rev = uci->cpu_sig.rev;
818 unsigned int leftover = size;
819 unsigned int curr_mc_size = 0, new_mc_size = 0;
820 unsigned int csig, cpf;
821
822 while (leftover) {
823 struct microcode_header_intel mc_header;
824 unsigned int mc_size;
825
826 if (leftover < sizeof(mc_header)) {
827 pr_err("error! Truncated header in microcode data file\n");
828 break;
829 }
830
831 if (get_ucode_data(&mc_header, ucode_ptr, sizeof(mc_header)))
832 break;
833
834 mc_size = get_totalsize(&mc_header);
835 if (!mc_size || mc_size > leftover) {
836 pr_err("error! Bad data in microcode data file\n");
837 break;
838 }
839
840 /* For performance reasons, reuse mc area when possible */
841 if (!mc || mc_size > curr_mc_size) {
842 vfree(mc);
843 mc = vmalloc(mc_size);
844 if (!mc)
845 break;
846 curr_mc_size = mc_size;
847 }
848
849 if (get_ucode_data(mc, ucode_ptr, mc_size) ||
850 microcode_sanity_check(mc, 1) < 0) {
851 break;
852 }
853
854 csig = uci->cpu_sig.sig;
855 cpf = uci->cpu_sig.pf;
856 if (has_newer_microcode(mc, csig, cpf, new_rev)) {
857 vfree(new_mc);
858 new_rev = mc_header.rev;
859 new_mc = mc;
860 new_mc_size = mc_size;
861 mc = NULL; /* trigger new vmalloc */
862 }
863
864 ucode_ptr += mc_size;
865 leftover -= mc_size;
866 }
867
868 vfree(mc);
869
870 if (leftover) {
871 vfree(new_mc);
872 return UCODE_ERROR;
873 }
874
875 if (!new_mc)
876 return UCODE_NFOUND;
877
878 vfree(uci->mc);
879 uci->mc = (struct microcode_intel *)new_mc;
880
881 /*
882 * If early microcode loading is supported, save this mc into permanent
883 * memory, so it will be loaded early when a CPU is hot-added or
884 * resumes.
885 */
886 save_mc_for_early(new_mc, new_mc_size);
887
888 pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
889 cpu, new_rev, uci->cpu_sig.rev);
890
891 return UCODE_OK;
892}
893
894static int get_ucode_fw(void *to, const void *from, size_t n)
895{
896 memcpy(to, from, n);
897 return 0;
898}
899
900static enum ucode_state request_microcode_fw(int cpu, struct device *device,
901 bool refresh_fw)
902{
903 char name[30];
904 struct cpuinfo_x86 *c = &cpu_data(cpu);
905 const struct firmware *firmware;
906 enum ucode_state ret;
907
908 sprintf(name, "intel-ucode/%02x-%02x-%02x",
909 c->x86, c->x86_model, c->x86_mask);
910
911 if (request_firmware_direct(&firmware, name, device)) {
912 pr_debug("data file %s load failed\n", name);
913 return UCODE_NFOUND;
914 }
915
916 ret = generic_load_microcode(cpu, (void *)firmware->data,
917 firmware->size, &get_ucode_fw);
918
919 release_firmware(firmware);
920
921 return ret;
922}
923
924static int get_ucode_user(void *to, const void *from, size_t n)
925{
926 return copy_from_user(to, from, n);
927}
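
/*
 * Note: copy_from_user() returns the number of bytes that could not be
 * copied, so any non-zero return from get_ucode_user() is treated as a
 * failure by generic_load_microcode(), mirroring get_ucode_fw(), which
 * always succeeds via memcpy().
 */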
928
929static enum ucode_state
930request_microcode_user(int cpu, const void __user *buf, size_t size)
931{
932 return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
933}
934
935static struct microcode_ops microcode_intel_ops = {
936 .request_microcode_user = request_microcode_user,
937 .request_microcode_fw = request_microcode_fw,
938 .collect_cpu_info = collect_cpu_info,
939 .apply_microcode = apply_microcode_intel,
940};
941
942struct microcode_ops * __init init_intel_microcode(void)
943{
944 struct cpuinfo_x86 *c = &boot_cpu_data;
945
946 if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
947 cpu_has(c, X86_FEATURE_IA64)) {
948 pr_err("Intel CPU family 0x%x not supported\n", c->x86);
949 return NULL;
950 }
951
952 return &microcode_intel_ops;
953}
1/*
2 * Intel CPU Microcode Update Driver for Linux
3 *
4 * Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
5 * 2006 Shaohua Li <shaohua.li@intel.com>
6 *
7 * Intel CPU microcode early update for Linux
8 *
9 * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
10 *			H Peter Anvin <hpa@zytor.com>
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 */
17
18/*
19 * This needs to be before all headers so that pr_debug in printk.h doesn't turn
20 * printk calls into no_printk().
21 *
22 *#define DEBUG
23 */
24#define pr_fmt(fmt) "microcode: " fmt
25
26#include <linux/earlycpio.h>
27#include <linux/firmware.h>
28#include <linux/uaccess.h>
29#include <linux/vmalloc.h>
30#include <linux/initrd.h>
31#include <linux/kernel.h>
32#include <linux/slab.h>
33#include <linux/cpu.h>
34#include <linux/mm.h>
35
36#include <asm/microcode_intel.h>
37#include <asm/processor.h>
38#include <asm/tlbflush.h>
39#include <asm/setup.h>
40#include <asm/msr.h>
41
42/*
43 * Temporary storage for microcode blob pointers. We note here the pointers to
44 * microcode blobs we've got from whatever storage (detached initrd, builtin).
45 * Later on, we put those into the final storage, mc_saved_data.mc_saved.
46 */
47static unsigned long mc_tmp_ptrs[MAX_UCODE_COUNT];
48
49static struct mc_saved_data {
50 unsigned int num_saved;
51 struct microcode_intel **mc_saved;
52} mc_saved_data;
53
54static enum ucode_state
55load_microcode_early(struct microcode_intel **saved,
56 unsigned int num_saved, struct ucode_cpu_info *uci)
57{
58 struct microcode_intel *ucode_ptr, *new_mc = NULL;
59 struct microcode_header_intel *mc_hdr;
60 int new_rev, ret, i;
61
62 new_rev = uci->cpu_sig.rev;
63
64 for (i = 0; i < num_saved; i++) {
65 ucode_ptr = saved[i];
66 mc_hdr = (struct microcode_header_intel *)ucode_ptr;
67
68 ret = has_newer_microcode(ucode_ptr,
69 uci->cpu_sig.sig,
70 uci->cpu_sig.pf,
71 new_rev);
72 if (!ret)
73 continue;
74
75 new_rev = mc_hdr->rev;
76 new_mc = ucode_ptr;
77 }
78
79 if (!new_mc)
80 return UCODE_NFOUND;
81
82 uci->mc = (struct microcode_intel *)new_mc;
83 return UCODE_OK;
84}
85
86static inline void
87copy_ptrs(struct microcode_intel **mc_saved, unsigned long *mc_ptrs,
88 unsigned long off, int num_saved)
89{
90 int i;
91
92 for (i = 0; i < num_saved; i++)
93 mc_saved[i] = (struct microcode_intel *)(mc_ptrs[i] + off);
94}
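
/*
 * Example of the offset scheme above (addresses are made up): mc_ptrs[]
 * holds each saved patch's offset from the start of the initrd image (see
 * get_matching_model_microcode(), which stores "ptr - start"). When the
 * initrd is later accessed at a different address, copy_ptrs() rebuilds
 * usable pointers as offset + current base: an offset of 0x1000 with the
 * initrd now mapped at 0xffff880012340000 yields 0xffff880012341000.
 */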
95
96#ifdef CONFIG_X86_32
97static void
98microcode_phys(struct microcode_intel **mc_saved_tmp, struct mc_saved_data *mcs)
99{
100 int i;
101 struct microcode_intel ***mc_saved;
102
103 mc_saved = (struct microcode_intel ***)__pa_nodebug(&mcs->mc_saved);
104
105 for (i = 0; i < mcs->num_saved; i++) {
106 struct microcode_intel *p;
107
108 p = *(struct microcode_intel **)__pa_nodebug(mcs->mc_saved + i);
109 mc_saved_tmp[i] = (struct microcode_intel *)__pa_nodebug(p);
110 }
111}
112#endif
113
114static enum ucode_state
115load_microcode(struct mc_saved_data *mcs, unsigned long *mc_ptrs,
116 unsigned long offset, struct ucode_cpu_info *uci)
117{
118 struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
119 unsigned int count = mcs->num_saved;
120
121 if (!mcs->mc_saved) {
122 copy_ptrs(mc_saved_tmp, mc_ptrs, offset, count);
123
124 return load_microcode_early(mc_saved_tmp, count, uci);
125 } else {
126#ifdef CONFIG_X86_32
127 microcode_phys(mc_saved_tmp, mcs);
128 return load_microcode_early(mc_saved_tmp, count, uci);
129#else
130 return load_microcode_early(mcs->mc_saved, count, uci);
131#endif
132 }
133}
134
135/*
136 * Given a CPU signature and a microcode patch, this function checks whether
137 * the microcode patch's family and model match those of the CPU.
138 */
139static enum ucode_state
140matching_model_microcode(struct microcode_header_intel *mc_header,
141 unsigned long sig)
142{
143 unsigned int fam, model;
144 unsigned int fam_ucode, model_ucode;
145 struct extended_sigtable *ext_header;
146 unsigned long total_size = get_totalsize(mc_header);
147 unsigned long data_size = get_datasize(mc_header);
148 int ext_sigcount, i;
149 struct extended_signature *ext_sig;
150
151 fam = x86_family(sig);
152 model = x86_model(sig);
153
154 fam_ucode = x86_family(mc_header->sig);
155 model_ucode = x86_model(mc_header->sig);
156
157 if (fam == fam_ucode && model == model_ucode)
158 return UCODE_OK;
159
160 /* Look for ext. headers: */
161 if (total_size <= data_size + MC_HEADER_SIZE)
162 return UCODE_NFOUND;
163
164 ext_header = (void *) mc_header + data_size + MC_HEADER_SIZE;
165 ext_sig = (void *)ext_header + EXT_HEADER_SIZE;
166 ext_sigcount = ext_header->count;
167
168 for (i = 0; i < ext_sigcount; i++) {
169 fam_ucode = x86_family(ext_sig->sig);
170 model_ucode = x86_model(ext_sig->sig);
171
172 if (fam == fam_ucode && model == model_ucode)
173 return UCODE_OK;
174
175 ext_sig++;
176 }
177 return UCODE_NFOUND;
178}
179
180static int
181save_microcode(struct mc_saved_data *mcs,
182 struct microcode_intel **mc_saved_src,
183 unsigned int num_saved)
184{
185 int i, j;
186 struct microcode_intel **saved_ptr;
187 int ret;
188
189 if (!num_saved)
190 return -EINVAL;
191
192 /*
193 * Copy new microcode data.
194 */
195 saved_ptr = kcalloc(num_saved, sizeof(struct microcode_intel *), GFP_KERNEL);
196 if (!saved_ptr)
197 return -ENOMEM;
198
199 for (i = 0; i < num_saved; i++) {
200 struct microcode_header_intel *mc_hdr;
201 struct microcode_intel *mc;
202 unsigned long size;
203
204 if (!mc_saved_src[i]) {
205 ret = -EINVAL;
206 goto err;
207 }
208
209 mc = mc_saved_src[i];
210 mc_hdr = &mc->hdr;
211 size = get_totalsize(mc_hdr);
212
213 saved_ptr[i] = kmemdup(mc, size, GFP_KERNEL);
214 if (!saved_ptr[i]) {
215 ret = -ENOMEM;
216 goto err;
217 }
218 }
219
220 /*
221 * Point to newly saved microcode.
222 */
223 mcs->mc_saved = saved_ptr;
224 mcs->num_saved = num_saved;
225
226 return 0;
227
228err:
229 for (j = 0; j <= i; j++)
230 kfree(saved_ptr[j]);
231 kfree(saved_ptr);
232
233 return ret;
234}
235
236/*
237 * A microcode patch in ucode_ptr is saved into mc_saved
238 * - if it has a matching signature and a newer revision than an existing
239 *   patch in mc_saved,
240 * - or if it is a newly discovered microcode patch.
241 *
242 * The microcode patch must have a model matching the CPU's.
243 *
244 * Returns: the updated number of saved microcode patches, @num_saved.
245 */
246static unsigned int _save_mc(struct microcode_intel **mc_saved,
247 u8 *ucode_ptr, unsigned int num_saved)
248{
249 struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
250 unsigned int sig, pf;
251 int found = 0, i;
252
253 mc_hdr = (struct microcode_header_intel *)ucode_ptr;
254
255 for (i = 0; i < num_saved; i++) {
256 mc_saved_hdr = (struct microcode_header_intel *)mc_saved[i];
257 sig = mc_saved_hdr->sig;
258 pf = mc_saved_hdr->pf;
259
260 if (!find_matching_signature(ucode_ptr, sig, pf))
261 continue;
262
263 found = 1;
264
265 if (mc_hdr->rev <= mc_saved_hdr->rev)
266 continue;
267
268 /*
269 * Found an older ucode saved earlier. Replace it with
270 * this newer one.
271 */
272 mc_saved[i] = (struct microcode_intel *)ucode_ptr;
273 break;
274 }
275
276 /* Newly detected microcode, save it to memory. */
277 if (i >= num_saved && !found)
278 mc_saved[num_saved++] = (struct microcode_intel *)ucode_ptr;
279
280 return num_saved;
281}
282
283/*
284 * Get microcode matching with BSP's model. Only CPUs with the same model as
285 * BSP can stay in the platform.
286 */
287static enum ucode_state __init
288get_matching_model_microcode(unsigned long start, void *data, size_t size,
289 struct mc_saved_data *mcs, unsigned long *mc_ptrs,
290 struct ucode_cpu_info *uci)
291{
292 struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
293 struct microcode_header_intel *mc_header;
294 unsigned int num_saved = mcs->num_saved;
295 enum ucode_state state = UCODE_OK;
296 unsigned int leftover = size;
297 u8 *ucode_ptr = data;
298 unsigned int mc_size;
299 int i;
300
301 while (leftover && num_saved < ARRAY_SIZE(mc_saved_tmp)) {
302
303 if (leftover < sizeof(mc_header))
304 break;
305
306 mc_header = (struct microcode_header_intel *)ucode_ptr;
307
308 mc_size = get_totalsize(mc_header);
309 if (!mc_size || mc_size > leftover ||
310 microcode_sanity_check(ucode_ptr, 0) < 0)
311 break;
312
313 leftover -= mc_size;
314
315 /*
316 * Since APs with same family and model as the BSP may boot in
317 * the platform, we need to find and save microcode patches
318 * with the same family and model as the BSP.
319 */
320 if (matching_model_microcode(mc_header, uci->cpu_sig.sig) != UCODE_OK) {
321 ucode_ptr += mc_size;
322 continue;
323 }
324
325 num_saved = _save_mc(mc_saved_tmp, ucode_ptr, num_saved);
326
327 ucode_ptr += mc_size;
328 }
329
330 if (leftover) {
331 state = UCODE_ERROR;
332 return state;
333 }
334
335 if (!num_saved) {
336 state = UCODE_NFOUND;
337 return state;
338 }
339
340 for (i = 0; i < num_saved; i++)
341 mc_ptrs[i] = (unsigned long)mc_saved_tmp[i] - start;
342
343 mcs->num_saved = num_saved;
344
345 return state;
346}
347
348static int collect_cpu_info_early(struct ucode_cpu_info *uci)
349{
350 unsigned int val[2];
351 unsigned int family, model;
352 struct cpu_signature csig;
353 unsigned int eax, ebx, ecx, edx;
354
355 csig.sig = 0;
356 csig.pf = 0;
357 csig.rev = 0;
358
359 memset(uci, 0, sizeof(*uci));
360
361 eax = 0x00000001;
362 ecx = 0;
363 native_cpuid(&eax, &ebx, &ecx, &edx);
364 csig.sig = eax;
365
366 family = x86_family(csig.sig);
367 model = x86_model(csig.sig);
368
369 if ((model >= 5) || (family > 6)) {
370 /* get processor flags from MSR 0x17 */
371 native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
372 csig.pf = 1 << ((val[1] >> 18) & 7);
373 }
374 native_wrmsrl(MSR_IA32_UCODE_REV, 0);
375
376 /* As documented in the SDM: Do a CPUID 1 here */
377 sync_core();
378
379 /* get the current revision from MSR 0x8B */
380 native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
381
382 csig.rev = val[1];
383
384 uci->cpu_sig = csig;
385 uci->valid = 1;
386
387 return 0;
388}
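
/*
 * Note on the revision read above: MSR_IA32_UCODE_REV (0x8B) is only
 * refreshed by the CPU when a CPUID instruction executes, hence the
 * sequence: write 0 to the MSR, sync_core() (which serializes via CPUID,
 * per the SDM comment above), then read the MSR again; the current
 * microcode revision is reported in its upper 32 bits (val[1]).
 */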
389
390static void show_saved_mc(void)
391{
392#ifdef DEBUG
393 int i, j;
394 unsigned int sig, pf, rev, total_size, data_size, date;
395 struct ucode_cpu_info uci;
396
397 if (!mc_saved_data.num_saved) {
398 pr_debug("no microcode data saved.\n");
399 return;
400 }
401 pr_debug("Total microcode saved: %d\n", mc_saved_data.num_saved);
402
403 collect_cpu_info_early(&uci);
404
405 sig = uci.cpu_sig.sig;
406 pf = uci.cpu_sig.pf;
407 rev = uci.cpu_sig.rev;
408 pr_debug("CPU: sig=0x%x, pf=0x%x, rev=0x%x\n", sig, pf, rev);
409
410 for (i = 0; i < mc_saved_data.num_saved; i++) {
411 struct microcode_header_intel *mc_saved_header;
412 struct extended_sigtable *ext_header;
413 int ext_sigcount;
414 struct extended_signature *ext_sig;
415
416 mc_saved_header = (struct microcode_header_intel *)
417 mc_saved_data.mc_saved[i];
418 sig = mc_saved_header->sig;
419 pf = mc_saved_header->pf;
420 rev = mc_saved_header->rev;
421 total_size = get_totalsize(mc_saved_header);
422 data_size = get_datasize(mc_saved_header);
423 date = mc_saved_header->date;
424
425 pr_debug("mc_saved[%d]: sig=0x%x, pf=0x%x, rev=0x%x, total size=0x%x, date = %04x-%02x-%02x\n",
426 i, sig, pf, rev, total_size,
427 date & 0xffff,
428 date >> 24,
429 (date >> 16) & 0xff);
430
431 /* Look for ext. headers: */
432 if (total_size <= data_size + MC_HEADER_SIZE)
433 continue;
434
435 ext_header = (void *) mc_saved_header + data_size + MC_HEADER_SIZE;
436 ext_sigcount = ext_header->count;
437 ext_sig = (void *)ext_header + EXT_HEADER_SIZE;
438
439 for (j = 0; j < ext_sigcount; j++) {
440 sig = ext_sig->sig;
441 pf = ext_sig->pf;
442
443 pr_debug("\tExtended[%d]: sig=0x%x, pf=0x%x\n",
444 j, sig, pf);
445
446 ext_sig++;
447 }
448
449 }
450#endif
451}
452
453#ifdef CONFIG_HOTPLUG_CPU
454static DEFINE_MUTEX(x86_cpu_microcode_mutex);
455/*
456 * Save this mc into mc_saved_data so it will be loaded early when a CPU is
457 * hot-added or resumes.
458 *
459 * Please make sure this mc is a valid microcode patch before calling
460 * this function.
461 */
462int save_mc_for_early(u8 *mc)
463{
464 struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
465 unsigned int mc_saved_count_init;
466 unsigned int num_saved;
467 struct microcode_intel **mc_saved;
468 int ret = 0;
469 int i;
470
471 /*
472 * Hold hotplug lock so mc_saved_data is not accessed by a CPU in
473 * hotplug.
474 */
475 mutex_lock(&x86_cpu_microcode_mutex);
476
477 mc_saved_count_init = mc_saved_data.num_saved;
478 num_saved = mc_saved_data.num_saved;
479 mc_saved = mc_saved_data.mc_saved;
480
481 if (mc_saved && num_saved)
482 memcpy(mc_saved_tmp, mc_saved,
483 num_saved * sizeof(struct microcode_intel *));
484 /*
485 * Save the microcode patch mc in the mc_saved_tmp structure if it's a newer
486 * version.
487 */
488 num_saved = _save_mc(mc_saved_tmp, mc, num_saved);
489
490 /*
491 * Save mc_saved_tmp in the global mc_saved_data.
492 */
493 ret = save_microcode(&mc_saved_data, mc_saved_tmp, num_saved);
494 if (ret) {
495 pr_err("Cannot save microcode patch.\n");
496 goto out;
497 }
498
499 show_saved_mc();
500
501 /*
502 * Free old saved microcode data.
503 */
504 if (mc_saved) {
505 for (i = 0; i < mc_saved_count_init; i++)
506 kfree(mc_saved[i]);
507 kfree(mc_saved);
508 }
509
510out:
511 mutex_unlock(&x86_cpu_microcode_mutex);
512
513 return ret;
514}
515EXPORT_SYMBOL_GPL(save_mc_for_early);
516#endif
517
518static bool __init load_builtin_intel_microcode(struct cpio_data *cp)
519{
520#ifdef CONFIG_X86_64
521 unsigned int eax = 0x00000001, ebx, ecx = 0, edx;
522 char name[30];
523
524 native_cpuid(&eax, &ebx, &ecx, &edx);
525
526 sprintf(name, "intel-ucode/%02x-%02x-%02x",
527 x86_family(eax), x86_model(eax), x86_stepping(eax));
528
529 return get_builtin_firmware(cp, name);
530#else
531 return false;
532#endif
533}
534
535static __initdata char ucode_name[] = "kernel/x86/microcode/GenuineIntel.bin";
536static __init enum ucode_state
537scan_microcode(struct mc_saved_data *mcs, unsigned long *mc_ptrs,
538 unsigned long start, unsigned long size,
539 struct ucode_cpu_info *uci)
540{
541 struct cpio_data cd;
542 long offset = 0;
543#ifdef CONFIG_X86_32
544 char *p = (char *)__pa_nodebug(ucode_name);
545#else
546 char *p = ucode_name;
547#endif
548
549 cd.data = NULL;
550 cd.size = 0;
551
552 /* try built-in microcode if no initrd */
553 if (!size) {
554 if (!load_builtin_intel_microcode(&cd))
555 return UCODE_ERROR;
556 } else {
557 cd = find_cpio_data(p, (void *)start, size, &offset);
558 if (!cd.data)
559 return UCODE_ERROR;
560 }
561
562 return get_matching_model_microcode(start, cd.data, cd.size,
563 mcs, mc_ptrs, uci);
564}
565
566/*
567 * Print ucode update info.
568 */
569static void
570print_ucode_info(struct ucode_cpu_info *uci, unsigned int date)
571{
572 pr_info_once("microcode updated early to revision 0x%x, date = %04x-%02x-%02x\n",
573 uci->cpu_sig.rev,
574 date & 0xffff,
575 date >> 24,
576 (date >> 16) & 0xff);
577}
578
579#ifdef CONFIG_X86_32
580
581static int delay_ucode_info;
582static int current_mc_date;
583
584/*
585 * Print early updated ucode info after printk works. This is delayed info dump.
586 */
587void show_ucode_info_early(void)
588{
589 struct ucode_cpu_info uci;
590
591 if (delay_ucode_info) {
592 collect_cpu_info_early(&uci);
593 print_ucode_info(&uci, current_mc_date);
594 delay_ucode_info = 0;
595 }
596}
597
598/*
599 * At this point, we can not call printk() yet. Keep microcode patch number in
600 * mc_saved_data.mc_saved and delay printing microcode info in
601 * show_ucode_info_early() until printk() works.
602 */
603static void print_ucode(struct ucode_cpu_info *uci)
604{
605 struct microcode_intel *mc;
606 int *delay_ucode_info_p;
607 int *current_mc_date_p;
608
609 mc = uci->mc;
610 if (!mc)
611 return;
612
613 delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
614 current_mc_date_p = (int *)__pa_nodebug(&current_mc_date);
615
616 *delay_ucode_info_p = 1;
617 *current_mc_date_p = mc->hdr.date;
618}
619#else
620
621/*
622 * Flush the global TLB. We only do this on x86_64, where paging has already
623 * been enabled and PGE should be enabled as well.
624 */
625static inline void flush_tlb_early(void)
626{
627 __native_flush_tlb_global_irq_disabled();
628}
629
630static inline void print_ucode(struct ucode_cpu_info *uci)
631{
632 struct microcode_intel *mc;
633
634 mc = uci->mc;
635 if (!mc)
636 return;
637
638 print_ucode_info(uci, mc->hdr.date);
639}
640#endif
641
642static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
643{
644 struct microcode_intel *mc;
645 unsigned int val[2];
646
647 mc = uci->mc;
648 if (!mc)
649 return 0;
650
651 /* write microcode via MSR 0x79 */
652 native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
653 native_wrmsrl(MSR_IA32_UCODE_REV, 0);
654
655 /* As documented in the SDM: Do a CPUID 1 here */
656 sync_core();
657
658 /* get the current revision from MSR 0x8B */
659 native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
660 if (val[1] != mc->hdr.rev)
661 return -1;
662
663#ifdef CONFIG_X86_64
664 /* Flush the global TLB. This is a precaution. */
665 flush_tlb_early();
666#endif
667 uci->cpu_sig.rev = val[1];
668
669 if (early)
670 print_ucode(uci);
671 else
672 print_ucode_info(uci, mc->hdr.date);
673
674 return 0;
675}
676
677/*
678 * This function converts microcode patch offsets previously stored in
679 * mc_tmp_ptrs to pointers and stores the pointers in mc_saved_data.
680 */
681int __init save_microcode_in_initrd_intel(void)
682{
683 unsigned int count = mc_saved_data.num_saved;
684 struct microcode_intel *mc_saved[MAX_UCODE_COUNT];
685 int ret = 0;
686
687 if (!count)
688 return ret;
689
690 copy_ptrs(mc_saved, mc_tmp_ptrs, get_initrd_start(), count);
691
692 ret = save_microcode(&mc_saved_data, mc_saved, count);
693 if (ret)
694 pr_err("Cannot save microcode patches from initrd.\n");
695
696 show_saved_mc();
697
698 return ret;
699}
700
701static void __init
702_load_ucode_intel_bsp(struct mc_saved_data *mcs, unsigned long *mc_ptrs,
703 unsigned long start, unsigned long size)
704{
705 struct ucode_cpu_info uci;
706 enum ucode_state ret;
707
708 collect_cpu_info_early(&uci);
709
710 ret = scan_microcode(mcs, mc_ptrs, start, size, &uci);
711 if (ret != UCODE_OK)
712 return;
713
714 ret = load_microcode(mcs, mc_ptrs, start, &uci);
715 if (ret != UCODE_OK)
716 return;
717
718 apply_microcode_early(&uci, true);
719}
720
721void __init load_ucode_intel_bsp(void)
722{
723 u64 start, size;
724#ifdef CONFIG_X86_32
725 struct boot_params *p;
726
727 p = (struct boot_params *)__pa_nodebug(&boot_params);
728 size = p->hdr.ramdisk_size;
729
730 /*
731 * Set start only if we have an initrd image. We cannot use initrd_start
732 * because it is not set that early yet.
733 */
734 start = (size ? p->hdr.ramdisk_image : 0);
735
736 _load_ucode_intel_bsp((struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
737 (unsigned long *)__pa_nodebug(&mc_tmp_ptrs),
738 start, size);
739#else
740 size = boot_params.hdr.ramdisk_size;
741 start = (size ? boot_params.hdr.ramdisk_image + PAGE_OFFSET : 0);
742
743 _load_ucode_intel_bsp(&mc_saved_data, mc_tmp_ptrs, start, size);
744#endif
745}
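
/*
 * Note on the two branches above: on 64-bit, boot_params.hdr.ramdisk_image
 * is the physical address of the initrd, and adding PAGE_OFFSET yields the
 * direct-mapping virtual address that the scanning code dereferences. The
 * 32-bit branch instead works with physical addresses throughout (via
 * __pa_nodebug) because the kernel's page tables are not usable this
 * early.
 */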
746
747void load_ucode_intel_ap(void)
748{
749 unsigned long *mcs_tmp_p;
750 struct mc_saved_data *mcs_p;
751 struct ucode_cpu_info uci;
752 enum ucode_state ret;
753#ifdef CONFIG_X86_32
754
755 mcs_tmp_p = (unsigned long *)__pa_nodebug(mc_tmp_ptrs);
756 mcs_p = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data);
757#else
758 mcs_tmp_p = mc_tmp_ptrs;
759 mcs_p = &mc_saved_data;
760#endif
761
762 /*
763 * If there is no valid ucode previously saved in memory, no need to
764 * update ucode on this AP.
765 */
766 if (!mcs_p->num_saved)
767 return;
768
769 collect_cpu_info_early(&uci);
770 ret = load_microcode(mcs_p, mcs_tmp_p, get_initrd_start_addr(), &uci);
771 if (ret != UCODE_OK)
772 return;
773
774 apply_microcode_early(&uci, true);
775}
776
777void reload_ucode_intel(void)
778{
779 struct ucode_cpu_info uci;
780 enum ucode_state ret;
781
782 if (!mc_saved_data.num_saved)
783 return;
784
785 collect_cpu_info_early(&uci);
786
787 ret = load_microcode_early(mc_saved_data.mc_saved,
788 mc_saved_data.num_saved, &uci);
789 if (ret != UCODE_OK)
790 return;
791
792 apply_microcode_early(&uci, false);
793}
794
795static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
796{
797 struct cpuinfo_x86 *c = &cpu_data(cpu_num);
798 unsigned int val[2];
799
800 memset(csig, 0, sizeof(*csig));
801
802 csig->sig = cpuid_eax(0x00000001);
803
804 if ((c->x86_model >= 5) || (c->x86 > 6)) {
805 /* get processor flags from MSR 0x17 */
806 rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
807 csig->pf = 1 << ((val[1] >> 18) & 7);
808 }
809
810 csig->rev = c->microcode;
811 pr_info("CPU%d sig=0x%x, pf=0x%x, revision=0x%x\n",
812 cpu_num, csig->sig, csig->pf, csig->rev);
813
814 return 0;
815}
816
817/*
818 * return 0 - no update found
819 * return 1 - found update
820 */
821static int get_matching_mc(struct microcode_intel *mc, int cpu)
822{
823 struct cpu_signature cpu_sig;
824 unsigned int csig, cpf, crev;
825
826 collect_cpu_info(cpu, &cpu_sig);
827
828 csig = cpu_sig.sig;
829 cpf = cpu_sig.pf;
830 crev = cpu_sig.rev;
831
832 return has_newer_microcode(mc, csig, cpf, crev);
833}
834
835static int apply_microcode_intel(int cpu)
836{
837 struct microcode_intel *mc;
838 struct ucode_cpu_info *uci;
839 struct cpuinfo_x86 *c;
840 unsigned int val[2];
841
842 /* We should bind the task to the CPU */
843 if (WARN_ON(raw_smp_processor_id() != cpu))
844 return -1;
845
846 uci = ucode_cpu_info + cpu;
847 mc = uci->mc;
848 if (!mc)
849 return 0;
850
851 /*
852 * Microcode on this CPU could be updated earlier. Only apply the
853 * microcode patch in mc when it is newer than the one on this
854 * CPU.
855 */
856 if (!get_matching_mc(mc, cpu))
857 return 0;
858
859 /* write microcode via MSR 0x79 */
860 wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
861 wrmsrl(MSR_IA32_UCODE_REV, 0);
862
863 /* As documented in the SDM: Do a CPUID 1 here */
864 sync_core();
865
866 /* get the current revision from MSR 0x8B */
867 rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
868
869 if (val[1] != mc->hdr.rev) {
870 pr_err("CPU%d update to revision 0x%x failed\n",
871 cpu, mc->hdr.rev);
872 return -1;
873 }
874
875 pr_info("CPU%d updated to revision 0x%x, date = %04x-%02x-%02x\n",
876 cpu, val[1],
877 mc->hdr.date & 0xffff,
878 mc->hdr.date >> 24,
879 (mc->hdr.date >> 16) & 0xff);
880
881 c = &cpu_data(cpu);
882
883 uci->cpu_sig.rev = val[1];
884 c->microcode = val[1];
885
886 return 0;
887}
888
889static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
890 int (*get_ucode_data)(void *, const void *, size_t))
891{
892 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
893 u8 *ucode_ptr = data, *new_mc = NULL, *mc = NULL;
894 int new_rev = uci->cpu_sig.rev;
895 unsigned int leftover = size;
896 enum ucode_state state = UCODE_OK;
897 unsigned int curr_mc_size = 0;
898 unsigned int csig, cpf;
899
900 while (leftover) {
901 struct microcode_header_intel mc_header;
902 unsigned int mc_size;
903
904 if (leftover < sizeof(mc_header)) {
905 pr_err("error! Truncated header in microcode data file\n");
906 break;
907 }
908
909 if (get_ucode_data(&mc_header, ucode_ptr, sizeof(mc_header)))
910 break;
911
912 mc_size = get_totalsize(&mc_header);
913 if (!mc_size || mc_size > leftover) {
914 pr_err("error! Bad data in microcode data file\n");
915 break;
916 }
917
918 /* For performance reasons, reuse mc area when possible */
919 if (!mc || mc_size > curr_mc_size) {
920 vfree(mc);
921 mc = vmalloc(mc_size);
922 if (!mc)
923 break;
924 curr_mc_size = mc_size;
925 }
926
927 if (get_ucode_data(mc, ucode_ptr, mc_size) ||
928 microcode_sanity_check(mc, 1) < 0) {
929 break;
930 }
931
932 csig = uci->cpu_sig.sig;
933 cpf = uci->cpu_sig.pf;
934 if (has_newer_microcode(mc, csig, cpf, new_rev)) {
935 vfree(new_mc);
936 new_rev = mc_header.rev;
937 new_mc = mc;
938 mc = NULL; /* trigger new vmalloc */
939 }
940
941 ucode_ptr += mc_size;
942 leftover -= mc_size;
943 }
944
945 vfree(mc);
946
947 if (leftover) {
948 vfree(new_mc);
949 state = UCODE_ERROR;
950 goto out;
951 }
952
953 if (!new_mc) {
954 state = UCODE_NFOUND;
955 goto out;
956 }
957
958 vfree(uci->mc);
959 uci->mc = (struct microcode_intel *)new_mc;
960
961 /*
962 * If early microcode loading is supported, save this mc into permanent
963 * memory, so it will be loaded early when a CPU is hot-added or
964 * resumes.
965 */
966 save_mc_for_early(new_mc);
967
968 pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
969 cpu, new_rev, uci->cpu_sig.rev);
970out:
971 return state;
972}
973
974static int get_ucode_fw(void *to, const void *from, size_t n)
975{
976 memcpy(to, from, n);
977 return 0;
978}
979
980static enum ucode_state request_microcode_fw(int cpu, struct device *device,
981 bool refresh_fw)
982{
983 char name[30];
984 struct cpuinfo_x86 *c = &cpu_data(cpu);
985 const struct firmware *firmware;
986 enum ucode_state ret;
987
988 sprintf(name, "intel-ucode/%02x-%02x-%02x",
989 c->x86, c->x86_model, c->x86_mask);
990
991 if (request_firmware_direct(&firmware, name, device)) {
992 pr_debug("data file %s load failed\n", name);
993 return UCODE_NFOUND;
994 }
995
996 ret = generic_load_microcode(cpu, (void *)firmware->data,
997 firmware->size, &get_ucode_fw);
998
999 release_firmware(firmware);
1000
1001 return ret;
1002}
1003
1004static int get_ucode_user(void *to, const void *from, size_t n)
1005{
1006 return copy_from_user(to, from, n);
1007}
1008
1009static enum ucode_state
1010request_microcode_user(int cpu, const void __user *buf, size_t size)
1011{
1012 return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
1013}
1014
1015static void microcode_fini_cpu(int cpu)
1016{
1017 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
1018
1019 vfree(uci->mc);
1020 uci->mc = NULL;
1021}
1022
1023static struct microcode_ops microcode_intel_ops = {
1024 .request_microcode_user = request_microcode_user,
1025 .request_microcode_fw = request_microcode_fw,
1026 .collect_cpu_info = collect_cpu_info,
1027 .apply_microcode = apply_microcode_intel,
1028 .microcode_fini_cpu = microcode_fini_cpu,
1029};
1030
1031struct microcode_ops * __init init_intel_microcode(void)
1032{
1033 struct cpuinfo_x86 *c = &boot_cpu_data;
1034
1035 if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
1036 cpu_has(c, X86_FEATURE_IA64)) {
1037 pr_err("Intel CPU family 0x%x not supported\n", c->x86);
1038 return NULL;
1039 }
1040
1041 return &microcode_intel_ops;
1042}
1043