// SPDX-License-Identifier: GPL-2.0-only
/*
 * DT idle states parsing code.
 *
 * Copyright (C) 2014 ARM Ltd.
 * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 */

#define pr_fmt(fmt) "DT idle-states: " fmt

#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>

#include "dt_idle_states.h"

static int init_state_node(struct cpuidle_state *idle_state,
			   const struct of_device_id *match_id,
			   struct device_node *state_node)
{
	int err;
	const char *desc;

	/*
	 * CPUidle drivers are expected to initialize the const void *data
	 * pointer of the passed in struct of_device_id array to the idle
	 * state enter function.
	 */
	idle_state->enter = match_id->data;
	/*
	 * Since this is not a "coupled" state, it's safe to assume interrupts
	 * won't be enabled when it exits, allowing the tick to be frozen
	 * safely, so enter() can also be used as the enter_s2idle() callback.
	 */
	idle_state->enter_s2idle = match_id->data;

	err = of_property_read_u32(state_node, "wakeup-latency-us",
				   &idle_state->exit_latency);
	if (err) {
		u32 entry_latency, exit_latency;

		err = of_property_read_u32(state_node, "entry-latency-us",
					   &entry_latency);
		if (err) {
			pr_debug(" * %pOF missing entry-latency-us property\n",
				 state_node);
			return -EINVAL;
		}

		err = of_property_read_u32(state_node, "exit-latency-us",
					   &exit_latency);
		if (err) {
			pr_debug(" * %pOF missing exit-latency-us property\n",
				 state_node);
			return -EINVAL;
		}
		/*
		 * If wakeup-latency-us is missing, default to entry+exit
		 * latencies as defined in idle states bindings
		 */
		idle_state->exit_latency = entry_latency + exit_latency;
	}

	err = of_property_read_u32(state_node, "min-residency-us",
				   &idle_state->target_residency);
	if (err) {
		pr_debug(" * %pOF missing min-residency-us property\n",
			 state_node);
		return -EINVAL;
	}

	err = of_property_read_string(state_node, "idle-state-name", &desc);
	if (err)
		desc = state_node->name;

	idle_state->flags = CPUIDLE_FLAG_RCU_IDLE;
	if (of_property_read_bool(state_node, "local-timer-stop"))
		idle_state->flags |= CPUIDLE_FLAG_TIMER_STOP;
	/*
	 * TODO:
	 *	replace with kstrdup and pointer assignment when name
	 *	and desc become string pointers
	 */
	strscpy(idle_state->name, state_node->name, CPUIDLE_NAME_LEN);
	strscpy(idle_state->desc, desc, CPUIDLE_DESC_LEN);
	return 0;
}
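
/*
 * Illustrative example (values made up, not taken from any real platform)
 * of a DT idle state node that init_state_node() above can parse, based on
 * the "arm,idle-state" binding; a single wakeup-latency-us property may be
 * given instead of the entry/exit latency pair:
 *
 *	CPU_SLEEP: cpu-sleep {
 *		compatible = "arm,idle-state";
 *		idle-state-name = "cpu-sleep";
 *		entry-latency-us = <40>;
 *		exit-latency-us = <100>;
 *		min-residency-us = <250>;
 *		local-timer-stop;
 *	};
 */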

/*
 * Check that the idle state is uniform across all CPUs in the CPUidle driver
 * cpumask
 */
static bool idle_state_valid(struct device_node *state_node, unsigned int idx,
			     const cpumask_t *cpumask)
{
	int cpu;
	struct device_node *cpu_node, *curr_state_node;
	bool valid = true;

	/*
	 * Compare idle state phandles for index idx on all CPUs in the
	 * CPUidle driver cpumask. Start from next logical cpu following
	 * cpumask_first(cpumask) since that's the CPU state_node was
	 * retrieved from. If a mismatch is found bail out straight
	 * away since we certainly hit a firmware misconfiguration.
	 */
	for (cpu = cpumask_next(cpumask_first(cpumask), cpumask);
	     cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpumask)) {
		cpu_node = of_cpu_device_node_get(cpu);
		curr_state_node = of_get_cpu_state_node(cpu_node, idx);
		if (state_node != curr_state_node)
			valid = false;

		of_node_put(curr_state_node);
		of_node_put(cpu_node);
		if (!valid)
			break;
	}

	return valid;
}

/**
 * dt_init_idle_driver() - Parse the DT idle states and initialize the
 *			   idle driver states array
 * @drv:	  Pointer to CPU idle driver to be initialized
 * @matches:	  Array of of_device_id match structures to search in for
 *		  compatible idle state nodes. The data pointer for each valid
 *		  struct of_device_id entry in the matches array must point to
 *		  a function with the following signature, that corresponds to
 *		  the CPUidle state enter function signature:
 *
 *		  int (*)(struct cpuidle_device *dev,
 *			  struct cpuidle_driver *drv,
 *			  int index);
 *
 * @start_idx:	  First idle state index to be initialized
 *
 * If DT idle states are detected and are valid the state count and states
 * array entries in the cpuidle driver are initialized accordingly starting
 * from index start_idx.
 *
 * Return: number of valid DT idle states parsed, <0 on failure
 */
int dt_init_idle_driver(struct cpuidle_driver *drv,
			const struct of_device_id *matches,
			unsigned int start_idx)
{
	struct cpuidle_state *idle_state;
	struct device_node *state_node, *cpu_node;
	const struct of_device_id *match_id;
	int i, err = 0;
	const cpumask_t *cpumask;
	unsigned int state_idx = start_idx;

	if (state_idx >= CPUIDLE_STATE_MAX)
		return -EINVAL;
	/*
	 * We get the idle states for the first logical cpu in the
	 * driver mask (or cpu_possible_mask if the driver cpumask is not set)
	 * and we check through idle_state_valid() if they are uniform
	 * across CPUs, otherwise we hit a firmware misconfiguration.
	 */
	cpumask = drv->cpumask ? : cpu_possible_mask;
	cpu_node = of_cpu_device_node_get(cpumask_first(cpumask));

	for (i = 0; ; i++) {
		state_node = of_get_cpu_state_node(cpu_node, i);
		if (!state_node)
			break;

		match_id = of_match_node(matches, state_node);
		if (!match_id) {
			err = -ENODEV;
			break;
		}

		if (!of_device_is_available(state_node)) {
			of_node_put(state_node);
			continue;
		}

		if (!idle_state_valid(state_node, i, cpumask)) {
			pr_warn("%pOF idle state not valid, bailing out\n",
				state_node);
			err = -EINVAL;
			break;
		}

		if (state_idx == CPUIDLE_STATE_MAX) {
			pr_warn("State index reached static CPU idle driver states array size\n");
			break;
		}

		idle_state = &drv->states[state_idx++];
		err = init_state_node(idle_state, match_id, state_node);
		if (err) {
			pr_err("Parsing idle state node %pOF failed with err %d\n",
			       state_node, err);
			err = -EINVAL;
			break;
		}
		of_node_put(state_node);
	}

	of_node_put(state_node);
	of_node_put(cpu_node);
	if (err)
		return err;

	/* Set the number of total supported idle states. */
	drv->state_count = state_idx;

	/*
	 * Return the number of present and valid DT idle states, which can
	 * also be 0 on platforms with missing DT idle states or legacy DT
	 * configuration predating the DT idle states bindings.
	 */
	return state_idx - start_idx;
}
EXPORT_SYMBOL_GPL(dt_init_idle_driver);
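
/*
 * Illustrative sketch of how a CPUidle driver is expected to use
 * dt_init_idle_driver(); the "foo_*" and "vendor,idle-state" names below
 * are hypothetical. Each of_device_id entry carries the idle state enter
 * callback in its .data pointer, matching the signature documented in the
 * kernel-doc above, and parsing typically starts at index 1 because state 0
 * is reserved for a default WFI-like state set up by the driver itself:
 *
 *	static int foo_enter_idle_state(struct cpuidle_device *dev,
 *					struct cpuidle_driver *drv, int idx)
 *	{
 *		return CPU_PM_CPU_IDLE_ENTER(foo_cpu_suspend, idx);
 *	}
 *
 *	static const struct of_device_id foo_idle_state_match[] = {
 *		{ .compatible = "vendor,idle-state",
 *		  .data = foo_enter_idle_state },
 *		{ },
 *	};
 *
 *	ret = dt_init_idle_driver(drv, foo_idle_state_match, 1);
 *	if (ret <= 0)
 *		return ret ? : -ENODEV;
 */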