Loading...
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/sched.h>
3#include <linux/sched/clock.h>
4
5#include <asm/cpufeature.h>
6
7#include "cpu.h"
8
9#define MSR_ZHAOXIN_FCR57 0x00001257
10
11#define ACE_PRESENT (1 << 6)
12#define ACE_ENABLED (1 << 7)
13#define ACE_FCR (1 << 7) /* MSR_ZHAOXIN_FCR */
14
15#define RNG_PRESENT (1 << 2)
16#define RNG_ENABLED (1 << 3)
17#define RNG_ENABLE (1 << 8) /* MSR_ZHAOXIN_RNG */
18
19#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW 0x00200000
20#define X86_VMX_FEATURE_PROC_CTLS_VNMI 0x00400000
21#define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS 0x80000000
22#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC 0x00000001
23#define X86_VMX_FEATURE_PROC_CTLS2_EPT 0x00000002
24#define X86_VMX_FEATURE_PROC_CTLS2_VPID 0x00000020
25
/*
 * Probe the Zhaoxin/Centaur extended CPUID leaf (0xC0000001) and turn on
 * the ACE crypto and RNG units when they are present but not yet enabled.
 * Both enables live in the same feature-control MSR (FCR57).
 */
static void init_zhaoxin_cap(struct cpuinfo_x86 *c)
{
	u32 lo, hi;

	/* Test for Extended Feature Flags presence */
	if (cpuid_eax(0xC0000000) >= 0xC0000001) {
		u32 tmp = cpuid_edx(0xC0000001);

		/* Enable ACE unit, if present and disabled */
		if ((tmp & (ACE_PRESENT | ACE_ENABLED)) == ACE_PRESENT) {
			rdmsr(MSR_ZHAOXIN_FCR57, lo, hi);
			/* Enable ACE unit */
			lo |= ACE_FCR;
			wrmsr(MSR_ZHAOXIN_FCR57, lo, hi);
			pr_info("CPU: Enabled ACE h/w crypto\n");
		}

		/* Enable RNG unit, if present and disabled */
		if ((tmp & (RNG_PRESENT | RNG_ENABLED)) == RNG_PRESENT) {
			rdmsr(MSR_ZHAOXIN_FCR57, lo, hi);
			/* Enable RNG unit */
			lo |= RNG_ENABLE;
			wrmsr(MSR_ZHAOXIN_FCR57, lo, hi);
			pr_info("CPU: Enabled h/w RNG\n");
		}

		/*
		 * Store Extended Feature Flags as word 5 of the CPU
		 * capability bit array.
		 *
		 * Note: the leaf is deliberately re-read here rather than
		 * reusing 'tmp' — presumably the ENABLED bits set by the
		 * MSR writes above are meant to be reflected in the stored
		 * capability word. Do not "optimize" this to 'tmp'.
		 */
		c->x86_capability[CPUID_C000_0001_EDX] = cpuid_edx(0xC0000001);
	}

	if (c->x86 >= 0x6)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	cpu_detect_cache_sizes(c);
}
64
65static void early_init_zhaoxin(struct cpuinfo_x86 *c)
66{
67 if (c->x86 >= 0x6)
68 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
69#ifdef CONFIG_X86_64
70 set_cpu_cap(c, X86_FEATURE_SYSENTER32);
71#endif
72 if (c->x86_power & (1 << 8)) {
73 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
74 set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
75 }
76
77 if (c->cpuid_level >= 0x00000001) {
78 u32 eax, ebx, ecx, edx;
79
80 cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
81 /*
82 * If HTT (EDX[28]) is set EBX[16:23] contain the number of
83 * apicids which are reserved per package. Store the resulting
84 * shift value for the package management code.
85 */
86 if (edx & (1U << 28))
87 c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff);
88 }
89
90}
91
92static void zhaoxin_detect_vmx_virtcap(struct cpuinfo_x86 *c)
93{
94 u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;
95
96 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
97 msr_ctl = vmx_msr_high | vmx_msr_low;
98
99 if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)
100 set_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
101 if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI)
102 set_cpu_cap(c, X86_FEATURE_VNMI);
103 if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) {
104 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
105 vmx_msr_low, vmx_msr_high);
106 msr_ctl2 = vmx_msr_high | vmx_msr_low;
107 if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
108 (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
109 set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
110 if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT)
111 set_cpu_cap(c, X86_FEATURE_EPT);
112 if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
113 set_cpu_cap(c, X86_FEATURE_VPID);
114 }
115}
116
117static void init_zhaoxin(struct cpuinfo_x86 *c)
118{
119 early_init_zhaoxin(c);
120 init_intel_cacheinfo(c);
121 detect_num_cpu_cores(c);
122#ifdef CONFIG_X86_32
123 detect_ht(c);
124#endif
125
126 if (c->cpuid_level > 9) {
127 unsigned int eax = cpuid_eax(10);
128
129 /*
130 * Check for version and the number of counters
131 * Version(eax[7:0]) can't be 0;
132 * Counters(eax[15:8]) should be greater than 1;
133 */
134 if ((eax & 0xff) && (((eax >> 8) & 0xff) > 1))
135 set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
136 }
137
138 if (c->x86 >= 0x6)
139 init_zhaoxin_cap(c);
140#ifdef CONFIG_X86_64
141 set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
142#endif
143
144 if (cpu_has(c, X86_FEATURE_VMX))
145 zhaoxin_detect_vmx_virtcap(c);
146}
147
148#ifdef CONFIG_X86_32
/*
 * Legacy (32-bit) cache-size callback: no vendor-specific fixups are
 * needed, so the detected size is reported unchanged.
 */
static unsigned int
zhaoxin_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	return size;
}
154#endif
155
/* Vendor hooks registered with the common x86 CPU setup code. */
static const struct cpu_dev zhaoxin_cpu_dev = {
	.c_vendor = "zhaoxin",
	/* Matched against the CPUID vendor string; the surrounding
	 * spaces are part of the string — do not trim them. */
	.c_ident = { " Shanghai " },
	.c_early_init = early_init_zhaoxin,
	.c_init = init_zhaoxin,
#ifdef CONFIG_X86_32
	.legacy_cache_size = zhaoxin_size_cache,
#endif
	.c_x86_vendor = X86_VENDOR_ZHAOXIN,
};

cpu_dev_register(zhaoxin_cpu_dev);
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/sched.h>
3#include <linux/sched/clock.h>
4
5#include <asm/cpu.h>
6#include <asm/cpufeature.h>
7
8#include "cpu.h"
9
10#define MSR_ZHAOXIN_FCR57 0x00001257
11
12#define ACE_PRESENT (1 << 6)
13#define ACE_ENABLED (1 << 7)
14#define ACE_FCR (1 << 7) /* MSR_ZHAOXIN_FCR */
15
16#define RNG_PRESENT (1 << 2)
17#define RNG_ENABLED (1 << 3)
18#define RNG_ENABLE (1 << 8) /* MSR_ZHAOXIN_RNG */
19
/*
 * Probe the Zhaoxin/Centaur extended CPUID leaf (0xC0000001) and turn on
 * the ACE crypto and RNG units when they are present but not yet enabled.
 * Both enables live in the same feature-control MSR (FCR57).
 */
static void init_zhaoxin_cap(struct cpuinfo_x86 *c)
{
	u32 lo, hi;

	/* Test for Extended Feature Flags presence */
	if (cpuid_eax(0xC0000000) >= 0xC0000001) {
		u32 tmp = cpuid_edx(0xC0000001);

		/* Enable ACE unit, if present and disabled */
		if ((tmp & (ACE_PRESENT | ACE_ENABLED)) == ACE_PRESENT) {
			rdmsr(MSR_ZHAOXIN_FCR57, lo, hi);
			/* Enable ACE unit */
			lo |= ACE_FCR;
			wrmsr(MSR_ZHAOXIN_FCR57, lo, hi);
			pr_info("CPU: Enabled ACE h/w crypto\n");
		}

		/* Enable RNG unit, if present and disabled */
		if ((tmp & (RNG_PRESENT | RNG_ENABLED)) == RNG_PRESENT) {
			rdmsr(MSR_ZHAOXIN_FCR57, lo, hi);
			/* Enable RNG unit */
			lo |= RNG_ENABLE;
			wrmsr(MSR_ZHAOXIN_FCR57, lo, hi);
			pr_info("CPU: Enabled h/w RNG\n");
		}

		/*
		 * Store Extended Feature Flags as word 5 of the CPU
		 * capability bit array.
		 *
		 * Note: the leaf is deliberately re-read here rather than
		 * reusing 'tmp' — presumably the ENABLED bits set by the
		 * MSR writes above are meant to be reflected in the stored
		 * capability word. Do not "optimize" this to 'tmp'.
		 */
		c->x86_capability[CPUID_C000_0001_EDX] = cpuid_edx(0xC0000001);
	}

	if (c->x86 >= 0x6)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
}
56
57static void early_init_zhaoxin(struct cpuinfo_x86 *c)
58{
59 if (c->x86 >= 0x6)
60 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
61#ifdef CONFIG_X86_64
62 set_cpu_cap(c, X86_FEATURE_SYSENTER32);
63#endif
64 if (c->x86_power & (1 << 8)) {
65 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
66 set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
67 }
68}
69
70static void init_zhaoxin(struct cpuinfo_x86 *c)
71{
72 early_init_zhaoxin(c);
73 init_intel_cacheinfo(c);
74
75 if (c->cpuid_level > 9) {
76 unsigned int eax = cpuid_eax(10);
77
78 /*
79 * Check for version and the number of counters
80 * Version(eax[7:0]) can't be 0;
81 * Counters(eax[15:8]) should be greater than 1;
82 */
83 if ((eax & 0xff) && (((eax >> 8) & 0xff) > 1))
84 set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
85 }
86
87 if (c->x86 >= 0x6)
88 init_zhaoxin_cap(c);
89#ifdef CONFIG_X86_64
90 set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
91#endif
92
93 init_ia32_feat_ctl(c);
94}
95
96#ifdef CONFIG_X86_32
/*
 * Legacy (32-bit) cache-size callback: no vendor-specific fixups are
 * needed, so the detected size is reported unchanged.
 */
static unsigned int
zhaoxin_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	return size;
}
102#endif
103
/* Vendor hooks registered with the common x86 CPU setup code. */
static const struct cpu_dev zhaoxin_cpu_dev = {
	.c_vendor = "zhaoxin",
	/* Matched against the CPUID vendor string; the surrounding
	 * spaces are part of the string — do not trim them. */
	.c_ident = { " Shanghai " },
	.c_early_init = early_init_zhaoxin,
	.c_init = init_zhaoxin,
#ifdef CONFIG_X86_32
	.legacy_cache_size = zhaoxin_size_cache,
#endif
	.c_x86_vendor = X86_VENDOR_ZHAOXIN,
};

cpu_dev_register(zhaoxin_cpu_dev);