/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

11#include <linux/errno.h>
12#include <linux/percpu.h>
13#include <linux/of.h>
14#include <linux/of_address.h>
15#include <linux/spinlock.h>
16
17#include <asm/mips-cps.h>
18
19void __iomem *mips_cpc_base;
20
21static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock);
22
23static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
24
25phys_addr_t __weak mips_cpc_default_phys_base(void)
26{
27 struct device_node *cpc_node;
28 struct resource res;
29 int err;
30
31 cpc_node = of_find_compatible_node(of_root, NULL, "mti,mips-cpc");
32 if (cpc_node) {
33 err = of_address_to_resource(cpc_node, 0, &res);
34 if (!err)
35 return res.start;
36 }
37
38 return 0;
39}
40
41/**
42 * mips_cpc_phys_base - retrieve the physical base address of the CPC
43 *
44 * This function returns the physical base address of the Cluster Power
45 * Controller memory mapped registers, or 0 if no Cluster Power Controller
46 * is present.
47 */
48static phys_addr_t mips_cpc_phys_base(void)
49{
50 unsigned long cpc_base;
51
52 if (!mips_cm_present())
53 return 0;
54
55 if (!(read_gcr_cpc_status() & CM_GCR_CPC_STATUS_EX))
56 return 0;
57
58 /* If the CPC is already enabled, leave it so */
59 cpc_base = read_gcr_cpc_base();
60 if (cpc_base & CM_GCR_CPC_BASE_CPCEN)
61 return cpc_base & CM_GCR_CPC_BASE_CPCBASE;
62
63 /* Otherwise, use the default address */
64 cpc_base = mips_cpc_default_phys_base();
65 if (!cpc_base)
66 return cpc_base;
67
68 /* Enable the CPC, mapped at the default address */
69 write_gcr_cpc_base(cpc_base | CM_GCR_CPC_BASE_CPCEN);
70 return cpc_base;
71}
72
73int mips_cpc_probe(void)
74{
75 phys_addr_t addr;
76 unsigned int cpu;
77
78 for_each_possible_cpu(cpu)
79 spin_lock_init(&per_cpu(cpc_core_lock, cpu));
80
81 addr = mips_cpc_phys_base();
82 if (!addr)
83 return -ENODEV;
84
85 mips_cpc_base = ioremap_nocache(addr, 0x8000);
86 if (!mips_cpc_base)
87 return -ENXIO;
88
89 return 0;
90}
91
92void mips_cpc_lock_other(unsigned int core)
93{
94 unsigned int curr_core;
95
96 if (mips_cm_revision() >= CM_REV_CM3)
97 /* Systems with CM >= 3 lock the CPC via mips_cm_lock_other */
98 return;
99
100 preempt_disable();
101 curr_core = cpu_core(¤t_cpu_data);
102 spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core),
103 per_cpu(cpc_core_lock_flags, curr_core));
104 write_cpc_cl_other(core << __ffs(CPC_Cx_OTHER_CORENUM));
105
106 /*
107 * Ensure the core-other region reflects the appropriate core &
108 * VP before any accesses to it occur.
109 */
110 mb();
111}
112
113void mips_cpc_unlock_other(void)
114{
115 unsigned int curr_core;
116
117 if (mips_cm_revision() >= CM_REV_CM3)
118 /* Systems with CM >= 3 lock the CPC via mips_cm_lock_other */
119 return;
120
121 curr_core = cpu_core(¤t_cpu_data);
122 spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core),
123 per_cpu(cpc_core_lock_flags, curr_core));
124 preempt_enable();
125}
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 */

7#include <linux/errno.h>
8#include <linux/percpu.h>
9#include <linux/of.h>
10#include <linux/of_address.h>
11#include <linux/spinlock.h>
12
13#include <asm/mips-cps.h>
14
15void __iomem *mips_cpc_base;
16
17static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock);
18
19static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
20
21phys_addr_t __weak mips_cpc_default_phys_base(void)
22{
23 struct device_node *cpc_node;
24 struct resource res;
25 int err;
26
27 cpc_node = of_find_compatible_node(of_root, NULL, "mti,mips-cpc");
28 if (cpc_node) {
29 err = of_address_to_resource(cpc_node, 0, &res);
30 if (!err)
31 return res.start;
32 }
33
34 return 0;
35}
36
37/**
38 * mips_cpc_phys_base - retrieve the physical base address of the CPC
39 *
40 * This function returns the physical base address of the Cluster Power
41 * Controller memory mapped registers, or 0 if no Cluster Power Controller
42 * is present.
43 */
44static phys_addr_t mips_cpc_phys_base(void)
45{
46 unsigned long cpc_base;
47
48 if (!mips_cm_present())
49 return 0;
50
51 if (!(read_gcr_cpc_status() & CM_GCR_CPC_STATUS_EX))
52 return 0;
53
54 /* If the CPC is already enabled, leave it so */
55 cpc_base = read_gcr_cpc_base();
56 if (cpc_base & CM_GCR_CPC_BASE_CPCEN)
57 return cpc_base & CM_GCR_CPC_BASE_CPCBASE;
58
59 /* Otherwise, use the default address */
60 cpc_base = mips_cpc_default_phys_base();
61 if (!cpc_base)
62 return cpc_base;
63
64 /* Enable the CPC, mapped at the default address */
65 write_gcr_cpc_base(cpc_base | CM_GCR_CPC_BASE_CPCEN);
66 return cpc_base;
67}
68
69int mips_cpc_probe(void)
70{
71 phys_addr_t addr;
72 unsigned int cpu;
73
74 for_each_possible_cpu(cpu)
75 spin_lock_init(&per_cpu(cpc_core_lock, cpu));
76
77 addr = mips_cpc_phys_base();
78 if (!addr)
79 return -ENODEV;
80
81 mips_cpc_base = ioremap(addr, 0x8000);
82 if (!mips_cpc_base)
83 return -ENXIO;
84
85 return 0;
86}
87
88void mips_cpc_lock_other(unsigned int core)
89{
90 unsigned int curr_core;
91
92 if (mips_cm_revision() >= CM_REV_CM3)
93 /* Systems with CM >= 3 lock the CPC via mips_cm_lock_other */
94 return;
95
96 preempt_disable();
97 curr_core = cpu_core(¤t_cpu_data);
98 spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core),
99 per_cpu(cpc_core_lock_flags, curr_core));
100 write_cpc_cl_other(core << __ffs(CPC_Cx_OTHER_CORENUM));
101
102 /*
103 * Ensure the core-other region reflects the appropriate core &
104 * VP before any accesses to it occur.
105 */
106 mb();
107}
108
109void mips_cpc_unlock_other(void)
110{
111 unsigned int curr_core;
112
113 if (mips_cm_revision() >= CM_REV_CM3)
114 /* Systems with CM >= 3 lock the CPC via mips_cm_lock_other */
115 return;
116
117 curr_core = cpu_core(¤t_cpu_data);
118 spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core),
119 per_cpu(cpc_core_lock_flags, curr_core));
120 preempt_enable();
121}