Linux Audio

Check our new training course

Loading...
v4.17
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (C) 2006 Chris Dearman (chris@mips.com),
  4 */
  5#include <linux/init.h>
  6#include <linux/kernel.h>
  7#include <linux/sched.h>
  8#include <linux/mm.h>
  9
 10#include <asm/cpu-type.h>
 11#include <asm/mipsregs.h>
 12#include <asm/bcache.h>
 13#include <asm/cacheops.h>
 14#include <asm/page.h>
 15#include <asm/pgtable.h>
 16#include <asm/mmu_context.h>
 17#include <asm/r4kcache.h>
 18#include <asm/mips-cps.h>
 19#include <asm/bootinfo.h>
 20
 21/*
 22 * MIPS32/MIPS64 L2 cache handling
 23 */
 24
 25/*
 26 * Writeback and invalidate the secondary cache before DMA.
 27 */
 28static void mips_sc_wback_inv(unsigned long addr, unsigned long size)
 29{
 30	blast_scache_range(addr, addr + size);
 31}
 32
 33/*
 34 * Invalidate the secondary cache before DMA.
 35 */
 36static void mips_sc_inv(unsigned long addr, unsigned long size)
 37{
 38	unsigned long lsize = cpu_scache_line_size();
 39	unsigned long almask = ~(lsize - 1);
 40
 41	cache_op(Hit_Writeback_Inv_SD, addr & almask);
 42	cache_op(Hit_Writeback_Inv_SD, (addr + size - 1) & almask);
 43	blast_inv_scache_range(addr, addr + size);
 44}
 45
 46static void mips_sc_enable(void)
 47{
 48	/* L2 cache is permanently enabled */
 49}
 50
 51static void mips_sc_disable(void)
 52{
 53	/* L2 cache is permanently enabled */
 54}
 55
 56static void mips_sc_prefetch_enable(void)
 57{
 58	unsigned long pftctl;
 59
 60	if (mips_cm_revision() < CM_REV_CM2_5)
 61		return;
 62
 63	/*
 64	 * If there is one or more L2 prefetch unit present then enable
 65	 * prefetching for both code & data, for all ports.
 66	 */
 67	pftctl = read_gcr_l2_pft_control();
 68	if (pftctl & CM_GCR_L2_PFT_CONTROL_NPFT) {
 69		pftctl &= ~CM_GCR_L2_PFT_CONTROL_PAGEMASK;
 70		pftctl |= PAGE_MASK & CM_GCR_L2_PFT_CONTROL_PAGEMASK;
 71		pftctl |= CM_GCR_L2_PFT_CONTROL_PFTEN;
 72		write_gcr_l2_pft_control(pftctl);
 73
 74		set_gcr_l2_pft_control_b(CM_GCR_L2_PFT_CONTROL_B_PORTID |
 75					 CM_GCR_L2_PFT_CONTROL_B_CEN);
 
 
 76	}
 77}
 78
/*
 * Disable L2 prefetching for all prefetch units and all ports, undoing
 * what mips_sc_prefetch_enable() set up.
 */
static void mips_sc_prefetch_disable(void)
{
	/* No L2 prefetch control GCRs before CM 2.5 — nothing to clear. */
	if (mips_cm_revision() < CM_REV_CM2_5)
		return;

	clear_gcr_l2_pft_control(CM_GCR_L2_PFT_CONTROL_PFTEN);
	clear_gcr_l2_pft_control_b(CM_GCR_L2_PFT_CONTROL_B_PORTID |
				   CM_GCR_L2_PFT_CONTROL_B_CEN);
}
 88
 89static bool mips_sc_prefetch_is_enabled(void)
 90{
 91	unsigned long pftctl;
 92
 93	if (mips_cm_revision() < CM_REV_CM2_5)
 94		return false;
 95
 96	pftctl = read_gcr_l2_pft_control();
 97	if (!(pftctl & CM_GCR_L2_PFT_CONTROL_NPFT))
 98		return false;
 99	return !!(pftctl & CM_GCR_L2_PFT_CONTROL_PFTEN);
100}
101
/*
 * Secondary-cache operations installed as the global bcache ops (bcops)
 * by mips_sc_init() once an L2 has been successfully probed.
 */
static struct bcache_ops mips_sc_ops = {
	.bc_enable = mips_sc_enable,
	.bc_disable = mips_sc_disable,
	.bc_wback_inv = mips_sc_wback_inv,
	.bc_inv = mips_sc_inv,
	.bc_prefetch_enable = mips_sc_prefetch_enable,
	.bc_prefetch_disable = mips_sc_prefetch_disable,
	.bc_prefetch_is_enabled = mips_sc_prefetch_is_enabled,
};
111
112/*
113 * Check if the L2 cache controller is activated on a particular platform.
114 * MTI's L2 controller and the L2 cache controller of Broadcom's BMIPS
115 * cores both use c0_config2's bit 12 as "L2 Bypass" bit, that is the
116 * cache being disabled.  However there is no guarantee for this to be
117 * true on all platforms.  In an act of stupidity the spec defined bits
118 * 12..15 as implementation defined so below function will eventually have
119 * to be replaced by a platform specific probe.
120 */
121static inline int mips_sc_is_activated(struct cpuinfo_mips *c)
122{
123	unsigned int config2 = read_c0_config2();
124	unsigned int tmp;
125
126	/* Check the bypass bit (L2B) */
127	switch (current_cpu_type()) {
128	case CPU_34K:
129	case CPU_74K:
130	case CPU_1004K:
131	case CPU_1074K:
132	case CPU_INTERAPTIV:
133	case CPU_PROAPTIV:
134	case CPU_P5600:
135	case CPU_BMIPS5000:
136	case CPU_QEMU_GENERIC:
137	case CPU_P6600:
138		if (config2 & (1 << 12))
139			return 0;
140	}
141
142	tmp = (config2 >> 4) & 0x0f;
143	if (0 < tmp && tmp <= 7)
144		c->scache.linesz = 2 << tmp;
145	else
146		return 0;
147	return 1;
148}
149
150static int __init mips_sc_probe_cm3(void)
151{
152	struct cpuinfo_mips *c = &current_cpu_data;
153	unsigned long cfg = read_gcr_l2_config();
154	unsigned long sets, line_sz, assoc;
155
156	if (cfg & CM_GCR_L2_CONFIG_BYPASS)
157		return 0;
158
159	sets = cfg & CM_GCR_L2_CONFIG_SET_SIZE;
160	sets >>= __ffs(CM_GCR_L2_CONFIG_SET_SIZE);
161	if (sets)
162		c->scache.sets = 64 << sets;
163
164	line_sz = cfg & CM_GCR_L2_CONFIG_LINE_SIZE;
165	line_sz >>= __ffs(CM_GCR_L2_CONFIG_LINE_SIZE);
166	if (line_sz)
167		c->scache.linesz = 2 << line_sz;
168
169	assoc = cfg & CM_GCR_L2_CONFIG_ASSOC;
170	assoc >>= __ffs(CM_GCR_L2_CONFIG_ASSOC);
171	c->scache.ways = assoc + 1;
172	c->scache.waysize = c->scache.sets * c->scache.linesz;
173	c->scache.waybit = __ffs(c->scache.waysize);
174
175	if (c->scache.linesz) {
176		c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
177		c->options |= MIPS_CPU_INCLUSIVE_CACHES;
178		return 1;
179	}
180
181	return 0;
182}
183
/*
 * Probe for an L2 cache, either via the CM3 GCRs or via the c0_config2
 * register on older systems.
 *
 * Returns 1 and fills in current_cpu_data.scache (sets, ways, linesz,
 * waysize, waybit) when an L2 is found; returns 0 otherwise, leaving
 * the scache flagged MIPS_CACHE_NOT_PRESENT.
 */
static inline int __init mips_sc_probe(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config1, config2;
	unsigned int tmp;

	/* Mark as not present until probe completed */
	c->scache.flags |= MIPS_CACHE_NOT_PRESENT;

	/* CM3 and later describe the L2 through the CM GCRs instead. */
	if (mips_cm_revision() >= CM_REV_CM3)
		return mips_sc_probe_cm3();

	/* Ignore anything but MIPSxx processors */
	if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
			      MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 |
			      MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M64R6)))
		return 0;

	/* Does this MIPS32/MIPS64 CPU have a config2 register? */
	config1 = read_c0_config1();
	if (!(config1 & MIPS_CONF_M))
		return 0;

	config2 = read_c0_config2();

	/* Bails out on bypassed/absent L2; also sets c->scache.linesz. */
	if (!mips_sc_is_activated(c))
		return 0;

	/* config2[11:8]: sets per way is 64 << field; >7 means no L2. */
	tmp = (config2 >> 8) & 0x0f;
	if (tmp <= 7)
		c->scache.sets = 64 << tmp;
	else
		return 0;

	/* config2[3:0]: associativity is field + 1 ways; >7 means no L2. */
	tmp = (config2 >> 0) & 0x0f;
	if (tmp <= 7)
		c->scache.ways = tmp + 1;
	else
		return 0;

	/*
	 * According to config2 it would be 5-ways, but that is contradicted
	 * by all documentation.
	 */
	if (current_cpu_type() == CPU_JZRISC &&
				mips_machtype == MACH_INGENIC_JZ4770)
		c->scache.ways = 4;

	c->scache.waysize = c->scache.sets * c->scache.linesz;
	c->scache.waybit = __ffs(c->scache.waysize);

	c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;

	return 1;
}
239
240int mips_sc_init(void)
241{
242	int found = mips_sc_probe();
243	if (found) {
244		mips_sc_enable();
245		mips_sc_prefetch_enable();
246		bcops = &mips_sc_ops;
247	}
248	return found;
249}
v4.6
 
  1/*
  2 * Copyright (C) 2006 Chris Dearman (chris@mips.com),
  3 */
  4#include <linux/init.h>
  5#include <linux/kernel.h>
  6#include <linux/sched.h>
  7#include <linux/mm.h>
  8
  9#include <asm/cpu-type.h>
 10#include <asm/mipsregs.h>
 11#include <asm/bcache.h>
 12#include <asm/cacheops.h>
 13#include <asm/page.h>
 14#include <asm/pgtable.h>
 15#include <asm/mmu_context.h>
 16#include <asm/r4kcache.h>
 17#include <asm/mips-cm.h>
 
 18
 19/*
 20 * MIPS32/MIPS64 L2 cache handling
 21 */
 22
 23/*
 24 * Writeback and invalidate the secondary cache before DMA.
 25 */
 26static void mips_sc_wback_inv(unsigned long addr, unsigned long size)
 27{
 28	blast_scache_range(addr, addr + size);
 29}
 30
/*
 * Invalidate the secondary cache before DMA.
 *
 * The first and last cache lines may be only partially covered by
 * [addr, addr + size), so they are written back (and invalidated)
 * first before the whole range is invalidated.
 */
static void mips_sc_inv(unsigned long addr, unsigned long size)
{
	unsigned long lsize = cpu_scache_line_size();
	unsigned long almask = ~(lsize - 1);

	/* Writeback+invalidate the aligned edge lines of the range. */
	cache_op(Hit_Writeback_Inv_SD, addr & almask);
	cache_op(Hit_Writeback_Inv_SD, (addr + size - 1) & almask);
	blast_inv_scache_range(addr, addr + size);
}
 43
 44static void mips_sc_enable(void)
 45{
 46	/* L2 cache is permanently enabled */
 47}
 48
 49static void mips_sc_disable(void)
 50{
 51	/* L2 cache is permanently enabled */
 52}
 53
 54static void mips_sc_prefetch_enable(void)
 55{
 56	unsigned long pftctl;
 57
 58	if (mips_cm_revision() < CM_REV_CM2_5)
 59		return;
 60
 61	/*
 62	 * If there is one or more L2 prefetch unit present then enable
 63	 * prefetching for both code & data, for all ports.
 64	 */
 65	pftctl = read_gcr_l2_pft_control();
 66	if (pftctl & CM_GCR_L2_PFT_CONTROL_NPFT_MSK) {
 67		pftctl &= ~CM_GCR_L2_PFT_CONTROL_PAGEMASK_MSK;
 68		pftctl |= PAGE_MASK & CM_GCR_L2_PFT_CONTROL_PAGEMASK_MSK;
 69		pftctl |= CM_GCR_L2_PFT_CONTROL_PFTEN_MSK;
 70		write_gcr_l2_pft_control(pftctl);
 71
 72		pftctl = read_gcr_l2_pft_control_b();
 73		pftctl |= CM_GCR_L2_PFT_CONTROL_B_PORTID_MSK;
 74		pftctl |= CM_GCR_L2_PFT_CONTROL_B_CEN_MSK;
 75		write_gcr_l2_pft_control_b(pftctl);
 76	}
 77}
 78
 79static void mips_sc_prefetch_disable(void)
 80{
 81	unsigned long pftctl;
 82
 83	if (mips_cm_revision() < CM_REV_CM2_5)
 84		return;
 85
 86	pftctl = read_gcr_l2_pft_control();
 87	pftctl &= ~CM_GCR_L2_PFT_CONTROL_PFTEN_MSK;
 88	write_gcr_l2_pft_control(pftctl);
 89
 90	pftctl = read_gcr_l2_pft_control_b();
 91	pftctl &= ~CM_GCR_L2_PFT_CONTROL_B_PORTID_MSK;
 92	pftctl &= ~CM_GCR_L2_PFT_CONTROL_B_CEN_MSK;
 93	write_gcr_l2_pft_control_b(pftctl);
 94}
 95
 96static bool mips_sc_prefetch_is_enabled(void)
 97{
 98	unsigned long pftctl;
 99
100	if (mips_cm_revision() < CM_REV_CM2_5)
101		return false;
102
103	pftctl = read_gcr_l2_pft_control();
104	if (!(pftctl & CM_GCR_L2_PFT_CONTROL_NPFT_MSK))
105		return false;
106	return !!(pftctl & CM_GCR_L2_PFT_CONTROL_PFTEN_MSK);
107}
108
/*
 * Secondary-cache operations installed as the global bcache ops (bcops)
 * by mips_sc_init() once an L2 has been successfully probed.
 */
static struct bcache_ops mips_sc_ops = {
	.bc_enable = mips_sc_enable,
	.bc_disable = mips_sc_disable,
	.bc_wback_inv = mips_sc_wback_inv,
	.bc_inv = mips_sc_inv,
	.bc_prefetch_enable = mips_sc_prefetch_enable,
	.bc_prefetch_disable = mips_sc_prefetch_disable,
	.bc_prefetch_is_enabled = mips_sc_prefetch_is_enabled,
};
118
119/*
120 * Check if the L2 cache controller is activated on a particular platform.
121 * MTI's L2 controller and the L2 cache controller of Broadcom's BMIPS
122 * cores both use c0_config2's bit 12 as "L2 Bypass" bit, that is the
123 * cache being disabled.  However there is no guarantee for this to be
124 * true on all platforms.  In an act of stupidity the spec defined bits
125 * 12..15 as implementation defined so below function will eventually have
126 * to be replaced by a platform specific probe.
127 */
128static inline int mips_sc_is_activated(struct cpuinfo_mips *c)
129{
130	unsigned int config2 = read_c0_config2();
131	unsigned int tmp;
132
133	/* Check the bypass bit (L2B) */
134	switch (current_cpu_type()) {
135	case CPU_34K:
136	case CPU_74K:
137	case CPU_1004K:
138	case CPU_1074K:
139	case CPU_INTERAPTIV:
140	case CPU_PROAPTIV:
141	case CPU_P5600:
142	case CPU_BMIPS5000:
143	case CPU_QEMU_GENERIC:
 
144		if (config2 & (1 << 12))
145			return 0;
146	}
147
148	tmp = (config2 >> 4) & 0x0f;
149	if (0 < tmp && tmp <= 7)
150		c->scache.linesz = 2 << tmp;
151	else
152		return 0;
153	return 1;
154}
155
156static int __init mips_sc_probe_cm3(void)
157{
158	struct cpuinfo_mips *c = &current_cpu_data;
159	unsigned long cfg = read_gcr_l2_config();
160	unsigned long sets, line_sz, assoc;
161
162	if (cfg & CM_GCR_L2_CONFIG_BYPASS_MSK)
163		return 0;
164
165	sets = cfg & CM_GCR_L2_CONFIG_SET_SIZE_MSK;
166	sets >>= CM_GCR_L2_CONFIG_SET_SIZE_SHF;
167	if (sets)
168		c->scache.sets = 64 << sets;
169
170	line_sz = cfg & CM_GCR_L2_CONFIG_LINE_SIZE_MSK;
171	line_sz >>= CM_GCR_L2_CONFIG_LINE_SIZE_SHF;
172	if (line_sz)
173		c->scache.linesz = 2 << line_sz;
174
175	assoc = cfg & CM_GCR_L2_CONFIG_ASSOC_MSK;
176	assoc >>= CM_GCR_L2_CONFIG_ASSOC_SHF;
177	c->scache.ways = assoc + 1;
178	c->scache.waysize = c->scache.sets * c->scache.linesz;
179	c->scache.waybit = __ffs(c->scache.waysize);
180
181	if (c->scache.linesz) {
182		c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
 
183		return 1;
184	}
185
186	return 0;
187}
188
/*
 * Probe for an L2 cache, either via the CM3 GCRs or via the c0_config2
 * register on older systems.
 *
 * Returns 1 and fills in current_cpu_data.scache (sets, ways, linesz,
 * waysize, waybit) when an L2 is found; returns 0 otherwise, leaving
 * the scache flagged MIPS_CACHE_NOT_PRESENT.
 */
static inline int __init mips_sc_probe(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config1, config2;
	unsigned int tmp;

	/* Mark as not present until probe completed */
	c->scache.flags |= MIPS_CACHE_NOT_PRESENT;

	/* CM3 and later describe the L2 through the CM GCRs instead. */
	if (mips_cm_revision() >= CM_REV_CM3)
		return mips_sc_probe_cm3();

	/* Ignore anything but MIPSxx processors */
	if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
			      MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 |
			      MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M64R6)))
		return 0;

	/* Does this MIPS32/MIPS64 CPU have a config2 register? */
	config1 = read_c0_config1();
	if (!(config1 & MIPS_CONF_M))
		return 0;

	config2 = read_c0_config2();

	/* Bails out on bypassed/absent L2; also sets c->scache.linesz. */
	if (!mips_sc_is_activated(c))
		return 0;

	/* config2[11:8]: sets per way is 64 << field; >7 means no L2. */
	tmp = (config2 >> 8) & 0x0f;
	if (tmp <= 7)
		c->scache.sets = 64 << tmp;
	else
		return 0;

	/* config2[3:0]: associativity is field + 1 ways; >7 means no L2. */
	tmp = (config2 >> 0) & 0x0f;
	if (tmp <= 7)
		c->scache.ways = tmp + 1;
	else
		return 0;

	c->scache.waysize = c->scache.sets * c->scache.linesz;
	c->scache.waybit = __ffs(c->scache.waysize);

	c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;

	return 1;
}
236
237int mips_sc_init(void)
238{
239	int found = mips_sc_probe();
240	if (found) {
241		mips_sc_enable();
242		mips_sc_prefetch_enable();
243		bcops = &mips_sc_ops;
244	}
245	return found;
246}