arch/mips/mm/sc-mips.c (v3.1)
 
/*
 * Copyright (C) 2006 Chris Dearman (chris@mips.com),
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/mipsregs.h>
#include <asm/bcache.h>
#include <asm/cacheops.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/r4kcache.h>

/*
 * MIPS32/MIPS64 L2 cache handling
 */

/*
 * Writeback and invalidate the secondary cache before DMA.
 */
static void mips_sc_wback_inv(unsigned long addr, unsigned long size)
{
	blast_scache_range(addr, addr + size);
}

/*
 * Invalidate the secondary cache before DMA.
 */
static void mips_sc_inv(unsigned long addr, unsigned long size)
{
	unsigned long lsize = cpu_scache_line_size();
	unsigned long almask = ~(lsize - 1);

	cache_op(Hit_Writeback_Inv_SD, addr & almask);
	cache_op(Hit_Writeback_Inv_SD, (addr + size - 1) & almask);
	blast_inv_scache_range(addr, addr + size);
}

static void mips_sc_enable(void)
{
	/* L2 cache is permanently enabled */
}

static void mips_sc_disable(void)
{
	/* L2 cache is permanently enabled */
}

static struct bcache_ops mips_sc_ops = {
	.bc_enable = mips_sc_enable,
	.bc_disable = mips_sc_disable,
	.bc_wback_inv = mips_sc_wback_inv,
	.bc_inv = mips_sc_inv
};

/*
 * Check if the L2 cache controller is activated on a particular platform.
 * MTI's L2 controller and the L2 cache controller of Broadcom's BMIPS
 * cores both use c0_config2's bit 12 as "L2 Bypass" bit, that is the
 * cache being disabled.  However there is no guarantee for this to be
 * true on all platforms.  In an act of stupidity the spec defined bits
 * 12..15 as implementation defined so below function will eventually have
 * to be replaced by a platform specific probe.
 */
static inline int mips_sc_is_activated(struct cpuinfo_mips *c)
{
	unsigned int config2 = read_c0_config2();
	unsigned int tmp;

	/* Check the bypass bit (L2B) */
	switch (c->cputype) {
	case CPU_34K:
	case CPU_74K:
	case CPU_1004K:
	case CPU_BMIPS5000:
		if (config2 & (1 << 12))
			return 0;
	}

	tmp = (config2 >> 4) & 0x0f;
	if (0 < tmp && tmp <= 7)
		c->scache.linesz = 2 << tmp;
	else
		return 0;
	return 1;
}

static inline int __init mips_sc_probe(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config1, config2;
	unsigned int tmp;

	/* Mark as not present until probe completed */
	c->scache.flags |= MIPS_CACHE_NOT_PRESENT;

	/* Ignore anything but MIPSxx processors */
	if (c->isa_level != MIPS_CPU_ISA_M32R1 &&
	    c->isa_level != MIPS_CPU_ISA_M32R2 &&
	    c->isa_level != MIPS_CPU_ISA_M64R1 &&
	    c->isa_level != MIPS_CPU_ISA_M64R2)
		return 0;

	/* Does this MIPS32/MIPS64 CPU have a config2 register? */
	config1 = read_c0_config1();
	if (!(config1 & MIPS_CONF_M))
		return 0;

	config2 = read_c0_config2();

	if (!mips_sc_is_activated(c))
		return 0;

	tmp = (config2 >> 8) & 0x0f;
	if (0 <= tmp && tmp <= 7)
		c->scache.sets = 64 << tmp;
	else
		return 0;

	tmp = (config2 >> 0) & 0x0f;
	if (0 <= tmp && tmp <= 7)
		c->scache.ways = tmp + 1;
	else
		return 0;

	c->scache.waysize = c->scache.sets * c->scache.linesz;
	c->scache.waybit = __ffs(c->scache.waysize);

	c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;

	return 1;
}

int __cpuinit mips_sc_init(void)
{
	int found = mips_sc_probe();
	if (found) {
		mips_sc_enable();
		bcops = &mips_sc_ops;
	}
	return found;
}
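The probe above derives the whole L2 geometry from three CP0 Config2 bit fields: bits 11..8 give the sets per way, bits 7..4 the line size, bits 3..0 the associativity, and bit 12 is the MTI/BMIPS "L2 bypass" flag. A minimal standalone sketch of that decoding (not kernel code; the sample Config2 value is hypothetical):

	/* Illustration only: decode Config2 the way mips_sc_probe() does. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int config2 = 0x257;	/* hypothetical sample value */
		unsigned int ss = (config2 >> 8) & 0x0f;	/* sets per way */
		unsigned int sl = (config2 >> 4) & 0x0f;	/* line size */
		unsigned int sa = (config2 >> 0) & 0x0f;	/* associativity */

		if (config2 & (1 << 12)) {
			printf("L2 bypassed\n");
			return 0;
		}
		if (sl == 0 || sl > 7 || ss > 7 || sa > 7) {
			printf("no L2 / unsupported encoding\n");
			return 0;
		}
		printf("sets/way=%u line=%uB ways=%u total=%u KiB\n",
		       64u << ss, 2u << sl, sa + 1,
		       ((64u << ss) * (2u << sl) * (sa + 1)) >> 10);
		return 0;
	}

With the sample value 0x257 this prints 256 sets per way, 64-byte lines and 8 ways, i.e. a 128 KiB L2.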
arch/mips/mm/sc-mips.c (v6.8)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2006 Chris Dearman (chris@mips.com),
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/cpu-type.h>
#include <asm/mipsregs.h>
#include <asm/bcache.h>
#include <asm/cacheops.h>
#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/r4kcache.h>
#include <asm/mips-cps.h>
#include <asm/bootinfo.h>

/*
 * MIPS32/MIPS64 L2 cache handling
 */

/*
 * Writeback and invalidate the secondary cache before DMA.
 */
static void mips_sc_wback_inv(unsigned long addr, unsigned long size)
{
	blast_scache_range(addr, addr + size);
}

/*
 * Invalidate the secondary cache before DMA.
 */
static void mips_sc_inv(unsigned long addr, unsigned long size)
{
	unsigned long lsize = cpu_scache_line_size();
	unsigned long almask = ~(lsize - 1);

	cache_op(Hit_Writeback_Inv_SD, addr & almask);
	cache_op(Hit_Writeback_Inv_SD, (addr + size - 1) & almask);
	blast_inv_scache_range(addr, addr + size);
}

static void mips_sc_enable(void)
{
	/* L2 cache is permanently enabled */
}

static void mips_sc_disable(void)
{
	/* L2 cache is permanently enabled */
}

static void mips_sc_prefetch_enable(void)
{
	unsigned long pftctl;

	if (mips_cm_revision() < CM_REV_CM2_5)
		return;

	/*
	 * If there is one or more L2 prefetch unit present then enable
	 * prefetching for both code & data, for all ports.
	 */
	pftctl = read_gcr_l2_pft_control();
	if (pftctl & CM_GCR_L2_PFT_CONTROL_NPFT) {
		pftctl &= ~CM_GCR_L2_PFT_CONTROL_PAGEMASK;
		pftctl |= PAGE_MASK & CM_GCR_L2_PFT_CONTROL_PAGEMASK;
		pftctl |= CM_GCR_L2_PFT_CONTROL_PFTEN;
		write_gcr_l2_pft_control(pftctl);

		set_gcr_l2_pft_control_b(CM_GCR_L2_PFT_CONTROL_B_PORTID |
					 CM_GCR_L2_PFT_CONTROL_B_CEN);
	}
}

static void mips_sc_prefetch_disable(void)
{
	if (mips_cm_revision() < CM_REV_CM2_5)
		return;

	clear_gcr_l2_pft_control(CM_GCR_L2_PFT_CONTROL_PFTEN);
	clear_gcr_l2_pft_control_b(CM_GCR_L2_PFT_CONTROL_B_PORTID |
				   CM_GCR_L2_PFT_CONTROL_B_CEN);
}

static bool mips_sc_prefetch_is_enabled(void)
{
	unsigned long pftctl;

	if (mips_cm_revision() < CM_REV_CM2_5)
		return false;

	pftctl = read_gcr_l2_pft_control();
	if (!(pftctl & CM_GCR_L2_PFT_CONTROL_NPFT))
		return false;
	return !!(pftctl & CM_GCR_L2_PFT_CONTROL_PFTEN);
}

static struct bcache_ops mips_sc_ops = {
	.bc_enable = mips_sc_enable,
	.bc_disable = mips_sc_disable,
	.bc_wback_inv = mips_sc_wback_inv,
	.bc_inv = mips_sc_inv,
	.bc_prefetch_enable = mips_sc_prefetch_enable,
	.bc_prefetch_disable = mips_sc_prefetch_disable,
	.bc_prefetch_is_enabled = mips_sc_prefetch_is_enabled,
};

/*
 * Check if the L2 cache controller is activated on a particular platform.
 * MTI's L2 controller and the L2 cache controller of Broadcom's BMIPS
 * cores both use c0_config2's bit 12 as "L2 Bypass" bit, that is the
 * cache being disabled.  However there is no guarantee for this to be
 * true on all platforms.  In an act of stupidity the spec defined bits
 * 12..15 as implementation defined so below function will eventually have
 * to be replaced by a platform specific probe.
 */
static inline int mips_sc_is_activated(struct cpuinfo_mips *c)
{
	unsigned int config2 = read_c0_config2();
	unsigned int tmp;

	/* Check the bypass bit (L2B) */
	switch (current_cpu_type()) {
	case CPU_34K:
	case CPU_74K:
	case CPU_1004K:
	case CPU_1074K:
	case CPU_INTERAPTIV:
	case CPU_PROAPTIV:
	case CPU_P5600:
	case CPU_BMIPS5000:
	case CPU_QEMU_GENERIC:
	case CPU_P6600:
		if (config2 & (1 << 12))
			return 0;
	}

	tmp = (config2 >> 4) & 0x0f;
	if (0 < tmp && tmp <= 7)
		c->scache.linesz = 2 << tmp;
	else
		return 0;
	return 1;
}

static int mips_sc_probe_cm3(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned long cfg = read_gcr_l2_config();
	unsigned long sets, line_sz, assoc;

	if (cfg & CM_GCR_L2_CONFIG_BYPASS)
		return 0;

	sets = cfg & CM_GCR_L2_CONFIG_SET_SIZE;
	sets >>= __ffs(CM_GCR_L2_CONFIG_SET_SIZE);
	if (sets)
		c->scache.sets = 64 << sets;

	line_sz = cfg & CM_GCR_L2_CONFIG_LINE_SIZE;
	line_sz >>= __ffs(CM_GCR_L2_CONFIG_LINE_SIZE);
	if (line_sz)
		c->scache.linesz = 2 << line_sz;

	assoc = cfg & CM_GCR_L2_CONFIG_ASSOC;
	assoc >>= __ffs(CM_GCR_L2_CONFIG_ASSOC);
	c->scache.ways = assoc + 1;
	c->scache.waysize = c->scache.sets * c->scache.linesz;
	c->scache.waybit = __ffs(c->scache.waysize);

	if (c->scache.linesz) {
		c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
		c->options |= MIPS_CPU_INCLUSIVE_CACHES;
		return 1;
	}

	return 0;
}

static inline int mips_sc_probe(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config1, config2;
	unsigned int tmp;

	/* Mark as not present until probe completed */
	c->scache.flags |= MIPS_CACHE_NOT_PRESENT;

	if (mips_cm_revision() >= CM_REV_CM3)
		return mips_sc_probe_cm3();

	/* Ignore anything but MIPSxx processors */
	if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 |
			      MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 |
			      MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 |
			      MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)))
		return 0;

	/* Does this MIPS32/MIPS64 CPU have a config2 register? */
	config1 = read_c0_config1();
	if (!(config1 & MIPS_CONF_M))
		return 0;

	config2 = read_c0_config2();

	if (!mips_sc_is_activated(c))
		return 0;

	tmp = (config2 >> 8) & 0x0f;
	if (tmp <= 7)
		c->scache.sets = 64 << tmp;
	else
		return 0;

	tmp = (config2 >> 0) & 0x0f;
	if (tmp <= 7)
		c->scache.ways = tmp + 1;
	else
		return 0;

	if (current_cpu_type() == CPU_XBURST) {
		switch (mips_machtype) {
		/*
		 * According to config2 it would be 5-ways, but that is
		 * contradicted by all documentation.
		 */
		case MACH_INGENIC_JZ4770:
		case MACH_INGENIC_JZ4775:
			c->scache.ways = 4;
			break;

		/*
		 * According to config2 it would be 5-ways and 512-sets,
		 * but that is contradicted by all documentation.
		 */
		case MACH_INGENIC_X1000:
		case MACH_INGENIC_X1000E:
			c->scache.sets = 256;
			c->scache.ways = 4;
			break;
		}
	}

	c->scache.waysize = c->scache.sets * c->scache.linesz;
	c->scache.waybit = __ffs(c->scache.waysize);

	c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;

	return 1;
}

int mips_sc_init(void)
{
	int found = mips_sc_probe();
	if (found) {
		mips_sc_enable();
		mips_sc_prefetch_enable();
		bcops = &mips_sc_ops;
	}
	return found;
}
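In both versions, mips_sc_init() only probes the cache and installs &mips_sc_ops in the global bcops pointer; the actual L2 maintenance is driven through that indirection by other code. A minimal sketch of how a caller would use it, assuming a DMA-style consumer (the helper and its name are hypothetical, not taken from the kernel):

	/* Illustration only: dispatch L2 maintenance through the bcops pointer. */
	#include <asm/bcache.h>		/* declares 'bcops' and struct bcache_ops */

	static void example_l2_sync_for_dma(unsigned long addr, unsigned long size,
					    int to_device)
	{
		if (to_device)
			bcops->bc_wback_inv(addr, size);	/* flush dirty lines before the device reads memory */
		else
			bcops->bc_inv(addr, size);		/* drop stale lines before the CPU reads DMA'd data */
	}

The indirection is what lets the v6.8 version add the prefetch hooks (bc_prefetch_enable and friends) without touching any caller: consumers keep calling through bcops and only the ops table grows.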