/*
 * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels
 * Copyright (C) 2005 Mips Technologies, Inc
 */

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/security.h>

#include <asm/cpu.h>
#include <asm/processor.h>
#include <linux/atomic.h>
#include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/mipsmtregs.h>
#include <asm/r4kcache.h>
#include <asm/cacheflush.h>

int vpelimit;

static int __init maxvpes(char *str)
{
	get_option(&str, &vpelimit);

	return 1;
}

__setup("maxvpes=", maxvpes);

int tclimit;

static int __init maxtcs(char *str)
{
	get_option(&str, &tclimit);

	return 1;
}

__setup("maxtcs=", maxtcs);
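
/*
 * Illustrative usage (example values only): booting with
 * "maxvpes=1 maxtcs=2" on the kernel command line sets vpelimit and
 * tclimit; how those limits are applied is up to the MT SMP and
 * VPE-loader code elsewhere that reads them.
 */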

/*
 * Dump new MIPS MT state for the core. Does not leave TCs halted.
 * Takes an argument which is taken to be a pre-call MVPControl value.
 */

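/*
 * Typical (illustrative) call pattern: mips_mt_regdump(dvpe()), since
 * dvpe() hands back the previous MVPControl contents, which is exactly
 * what this function expects as its argument.
 */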
void mips_mt_regdump(unsigned long mvpctl)
{
	unsigned long flags;
	unsigned long vpflags;
	unsigned long mvpconf0;
	int nvpe;
	int ntc;
	int i;
	int tc;
	unsigned long haltval;
	unsigned long tcstatval;
#ifdef CONFIG_MIPS_MT_SMTC
	void smtc_soft_dump(void);
#endif /* CONFIG_MIPS_MT_SMTC */

	local_irq_save(flags);
	vpflags = dvpe();
	printk("=== MIPS MT State Dump ===\n");
	printk("-- Global State --\n");
	printk(" MVPControl Passed: %08lx\n", mvpctl);
	printk(" MVPControl Read: %08lx\n", vpflags);
	printk(" MVPConf0 : %08lx\n", (mvpconf0 = read_c0_mvpconf0()));
	nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
	ntc = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
	printk("-- per-VPE State --\n");
	for (i = 0; i < nvpe; i++) {
		for (tc = 0; tc < ntc; tc++) {
			settc(tc);
			if ((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
				printk(" VPE %d\n", i);
				printk(" VPEControl : %08lx\n",
				       read_vpe_c0_vpecontrol());
				printk(" VPEConf0 : %08lx\n",
				       read_vpe_c0_vpeconf0());
				printk(" VPE%d.Status : %08lx\n",
				       i, read_vpe_c0_status());
				printk(" VPE%d.EPC : %08lx %pS\n",
				       i, read_vpe_c0_epc(),
				       (void *) read_vpe_c0_epc());
				printk(" VPE%d.Cause : %08lx\n",
				       i, read_vpe_c0_cause());
				printk(" VPE%d.Config7 : %08lx\n",
				       i, read_vpe_c0_config7());
				break; /* Next VPE */
			}
		}
	}
	printk("-- per-TC State --\n");
	for (tc = 0; tc < ntc; tc++) {
		settc(tc);
		if (read_tc_c0_tcbind() == read_c0_tcbind()) {
			/* Are we dumping ourself? */
			haltval = 0; /* Then we're not halted, and mustn't be */
			tcstatval = flags; /* And pre-dump TCStatus is flags */
			printk(" TC %d (current TC with VPE EPC above)\n", tc);
		} else {
			haltval = read_tc_c0_tchalt();
			write_tc_c0_tchalt(1);
			tcstatval = read_tc_c0_tcstatus();
			printk(" TC %d\n", tc);
		}
		printk(" TCStatus : %08lx\n", tcstatval);
		printk(" TCBind : %08lx\n", read_tc_c0_tcbind());
		printk(" TCRestart : %08lx %pS\n",
		       read_tc_c0_tcrestart(), (void *) read_tc_c0_tcrestart());
		printk(" TCHalt : %08lx\n", haltval);
		printk(" TCContext : %08lx\n", read_tc_c0_tccontext());
		if (!haltval)
			write_tc_c0_tchalt(0);
	}
#ifdef CONFIG_MIPS_MT_SMTC
	smtc_soft_dump();
#endif /* CONFIG_MIPS_MT_SMTC */
	printk("===========================\n");
	evpe(vpflags);
	local_irq_restore(flags);
}

static int mt_opt_norps;
static int mt_opt_rpsctl = -1;
static int mt_opt_nblsu = -1;
static int mt_opt_forceconfig7;
static int mt_opt_config7 = -1;

static int __init rps_disable(char *s)
{
	mt_opt_norps = 1;
	return 1;
}
__setup("norps", rps_disable);

static int __init rpsctl_set(char *str)
{
	get_option(&str, &mt_opt_rpsctl);
	return 1;
}
__setup("rpsctl=", rpsctl_set);

static int __init nblsu_set(char *str)
{
	get_option(&str, &mt_opt_nblsu);
	return 1;
}
__setup("nblsu=", nblsu_set);

static int __init config7_set(char *str)
{
	get_option(&str, &mt_opt_config7);
	mt_opt_forceconfig7 = 1;
	return 1;
}
__setup("config7=", config7_set);
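
/*
 * Illustrative examples (values chosen arbitrarily): booting with
 * "rpsctl=0" clears Config7 bit 2, "nblsu=1" sets Config7 bit 5, and
 * "config7=0x100" forces the whole register to that value; the changes
 * are applied in mips_mt_set_cpuoptions() below.  The rpsctl/nblsu
 * overrides are 34K-family tuning controls.
 */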

/* Experimental cache flush control parameters that should go away some day */
int mt_protiflush;
int mt_protdflush;
int mt_n_iflushes = 1;
int mt_n_dflushes = 1;

static int __init set_protiflush(char *s)
{
	mt_protiflush = 1;
	return 1;
}
__setup("protiflush", set_protiflush);

static int __init set_protdflush(char *s)
{
	mt_protdflush = 1;
	return 1;
}
__setup("protdflush", set_protdflush);

static int __init niflush(char *s)
{
	get_option(&s, &mt_n_iflushes);
	return 1;
}
__setup("niflush=", niflush);

static int __init ndflush(char *s)
{
	get_option(&s, &mt_n_dflushes);
	return 1;
}
__setup("ndflush=", ndflush);
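
/*
 * Illustrative example (values arbitrary): "protiflush ndflush=4" would
 * request single-threaded I-cache flushes and four repetitions of each
 * D-cache flush.  The flags themselves are consumed by the cache flush
 * code elsewhere; mips_mt_set_cpuoptions() below only reports them.
 */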

static unsigned int itc_base;

static int __init set_itc_base(char *str)
{
	get_option(&str, &itc_base);
	return 1;
}

__setup("itcbase=", set_itc_base);
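
/*
 * Illustrative example (address is arbitrary): booting with
 * "itcbase=0x1f000000" makes mips_mt_set_cpuoptions() map the ITC cells
 * at that base address; the low bits are masked off, so the base should
 * be suitably aligned.
 */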

void mips_mt_set_cpuoptions(void)
{
	unsigned int oconfig7 = read_c0_config7();
	unsigned int nconfig7 = oconfig7;

	if (mt_opt_norps) {
		printk("\"norps\" option deprecated: use \"rpsctl=\"\n");
	}
	if (mt_opt_rpsctl >= 0) {
		printk("34K return prediction stack override set to %d.\n",
		       mt_opt_rpsctl);
		if (mt_opt_rpsctl)
			nconfig7 |= (1 << 2);
		else
			nconfig7 &= ~(1 << 2);
	}
	if (mt_opt_nblsu >= 0) {
		printk("34K ALU/LSU sync override set to %d.\n", mt_opt_nblsu);
		if (mt_opt_nblsu)
			nconfig7 |= (1 << 5);
		else
			nconfig7 &= ~(1 << 5);
	}
	if (mt_opt_forceconfig7) {
		printk("CP0.Config7 forced to 0x%08x.\n", mt_opt_config7);
		nconfig7 = mt_opt_config7;
	}
	if (oconfig7 != nconfig7) {
		__asm__ __volatile("sync");
		write_c0_config7(nconfig7);
		ehb();
		printk("Config7: 0x%08x\n", read_c0_config7());
	}

	/* Report Cache management debug options */
	if (mt_protiflush)
		printk("I-cache flushes single-threaded\n");
	if (mt_protdflush)
		printk("D-cache flushes single-threaded\n");
	if (mt_n_iflushes != 1)
		printk("I-Cache Flushes Repeated %d times\n", mt_n_iflushes);
	if (mt_n_dflushes != 1)
		printk("D-Cache Flushes Repeated %d times\n", mt_n_dflushes);

	if (itc_base != 0) {
		/*
		 * Configure ITC mapping. This code is very
		 * specific to the 34K core family, which uses
		 * a special mode bit ("ITC") in the ErrCtl
		 * register to enable access to ITC control
		 * registers via cache "tag" operations.
		 */
		unsigned long ectlval;
		unsigned long itcblkgrn;

		/* ErrCtl register is known as "ecc" to Linux */
		ectlval = read_c0_ecc();
		write_c0_ecc(ectlval | (0x1 << 26));
		ehb();
#define INDEX_0 (0x80000000)
#define INDEX_8 (0x80000008)
		/* Read "cache tag" for Dcache pseudo-index 8 */
		cache_op(Index_Load_Tag_D, INDEX_8);
		ehb();
		itcblkgrn = read_c0_dtaglo();
		itcblkgrn &= 0xfffe0000;
		/* Set for 128 byte pitch of ITC cells */
		itcblkgrn |= 0x00000c00;
		/* Stage in Tag register */
		write_c0_dtaglo(itcblkgrn);
		ehb();
		/* Write out to ITU with CACHE op */
		cache_op(Index_Store_Tag_D, INDEX_8);
		/* Now set base address, and turn ITC on with 0x1 bit */
		write_c0_dtaglo((itc_base & 0xfffffc00) | 0x1);
		ehb();
		/* Write out to ITU with CACHE op */
		cache_op(Index_Store_Tag_D, INDEX_0);
		write_c0_ecc(ectlval);
		ehb();
		printk("Mapped %ld ITC cells starting at 0x%08x\n",
		       ((itcblkgrn & 0x7fe00000) >> 20), itc_base);
	}
}

/*
 * How cache flushes are protected from concurrent execution depends on
 * the MP software model chosen.
 */

void mt_cflush_lockdown(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
	void smtc_cflush_lockdown(void);

	smtc_cflush_lockdown();
#endif /* CONFIG_MIPS_MT_SMTC */
	/* FILL IN VSMP and AP/SP VERSIONS HERE */
}

void mt_cflush_release(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
	void smtc_cflush_release(void);

	smtc_cflush_release();
#endif /* CONFIG_MIPS_MT_SMTC */
	/* FILL IN VSMP and AP/SP VERSIONS HERE */
}
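
/*
 * A minimal sketch of what the missing VSMP/AP-SP variants might look
 * like, assuming plain spinlock serialization is acceptable (this is
 * illustrative only, not the implementation chosen here):
 *
 *	static DEFINE_SPINLOCK(mt_cflush_lock);
 *
 *	void mt_cflush_lockdown(void)
 *	{
 *		spin_lock(&mt_cflush_lock);
 *	}
 *
 *	void mt_cflush_release(void)
 *	{
 *		spin_unlock(&mt_cflush_lock);
 *	}
 */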

struct class *mt_class;
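
/*
 * The "mt" class created in mt_init() below is published through
 * mt_class so that other MT support drivers (the VPE loader, for
 * example) can create their device nodes under it; that usage lives
 * outside this file.
 */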

static int __init mt_init(void)
{
	struct class *mtc;

	mtc = class_create(THIS_MODULE, "mt");
	if (IS_ERR(mtc))
		return PTR_ERR(mtc);

	mt_class = mtc;

	return 0;
}

subsys_initcall(mt_init);