// SPDX-License-Identifier: GPL-2.0
/*
 * AM33XX Arch Power Management Routines
 *
 * Copyright (C) 2016-2018 Texas Instruments Incorporated - https://www.ti.com/
 *	Dave Gerlach
 */

#include <linux/cpuidle.h>
#include <linux/platform_data/pm33xx.h>
#include <linux/suspend.h>
#include <asm/cpuidle.h>
#include <asm/smp_scu.h>
#include <asm/suspend.h>
#include <linux/errno.h>
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/platform_data/gpio-omap.h>
#include <linux/wkup_m3_ipc.h>
#include <linux/of.h>
#include <linux/rtc.h>

#include "cm33xx.h"
#include "common.h"
#include "control.h"
#include "clockdomain.h"
#include "iomap.h"
#include "pm.h"
#include "powerdomain.h"
#include "prm33xx.h"
#include "soc.h"
#include "sram.h"
#include "omap-secure.h"

static struct powerdomain *cefuse_pwrdm, *gfx_pwrdm, *per_pwrdm, *mpu_pwrdm;
static struct clockdomain *gfx_l4ls_clkdm;
static void __iomem *scu_base;

static int (*idle_fn)(u32 wfi_flags);

struct amx3_idle_state {
	int wfi_flags;
};

static struct amx3_idle_state *idle_states;

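/* Map the Cortex-A9 SCU so its power mode can be programmed around suspend */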
static int am43xx_map_scu(void)
{
	scu_base = ioremap(scu_a9_get_base(), SZ_256);

	if (!scu_base)
		return -ENOMEM;

	return 0;
}

static int am33xx_check_off_mode_enable(void)
{
	if (enable_off_mode)
		pr_warn("WARNING: This platform does not support off-mode, entering DeepSleep suspend.\n");

	/* off mode not supported on am335x so return 0 always */
	return 0;
}

static int am43xx_check_off_mode_enable(void)
{
	/*
	 * Check for am437x-gp-evm which has the right hardware design to
	 * support this mode reliably.
	 */
	if (of_machine_is_compatible("ti,am437x-gp-evm") && enable_off_mode)
		return enable_off_mode;
	else if (enable_off_mode)
		pr_warn("WARNING: This platform does not support off-mode, entering DeepSleep suspend.\n");

	return 0;
}

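/*
 * Common init for am335x and am437x: look up the power domains used by
 * the suspend path, program all clockdomains via omap_pm_clkdms_setup()
 * and, on GP devices, switch off the CEFUSE power domain which is not
 * needed after boot.
 */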
static int amx3_common_init(int (*idle)(u32 wfi_flags))
{
	gfx_pwrdm = pwrdm_lookup("gfx_pwrdm");
	per_pwrdm = pwrdm_lookup("per_pwrdm");
	mpu_pwrdm = pwrdm_lookup("mpu_pwrdm");

	if ((!gfx_pwrdm) || (!per_pwrdm) || (!mpu_pwrdm))
		return -ENODEV;

	(void)clkdm_for_each(omap_pm_clkdms_setup, NULL);

	/* CEFUSE domain can be turned off post bootup */
	cefuse_pwrdm = pwrdm_lookup("cefuse_pwrdm");
	if (!cefuse_pwrdm)
		pr_err("PM: Failed to get cefuse_pwrdm\n");
	else if (omap_type() != OMAP2_DEVICE_TYPE_GP)
		pr_info("PM: Leaving EFUSE power domain active\n");
	else
		omap_set_pwrdm_state(cefuse_pwrdm, PWRDM_POWER_OFF);

	idle_fn = idle;

	return 0;
}

static int am33xx_suspend_init(int (*idle)(u32 wfi_flags))
{
	gfx_l4ls_clkdm = clkdm_lookup("gfx_l4ls_gfx_clkdm");

	if (!gfx_l4ls_clkdm) {
		pr_err("PM: Cannot lookup gfx_l4ls_clkdm clockdomain\n");
		return -ENODEV;
	}

	return amx3_common_init(idle);
}

static int am43xx_suspend_init(int (*idle)(u32 wfi_flags))
{
	int ret;

	ret = am43xx_map_scu();
	if (ret) {
		pr_err("PM: Could not ioremap SCU\n");
		return ret;
	}

	ret = amx3_common_init(idle);

	return ret;
}

static int amx3_suspend_deinit(void)
{
	idle_fn = NULL;
	return 0;
}

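/*
 * The GFX power domain is the only one fully under MPU control, so it is
 * programmed off before suspend and its state is checked on resume to
 * confirm the transition actually happened.
 */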
static void amx3_pre_suspend_common(void)
{
	omap_set_pwrdm_state(gfx_pwrdm, PWRDM_POWER_OFF);
}

static void amx3_post_suspend_common(void)
{
	int status;
	/*
	 * Because gfx_pwrdm is the only one under MPU control,
	 * report on its transition status
	 */
	status = pwrdm_read_pwrst(gfx_pwrdm);
	if (status != PWRDM_POWER_OFF)
		pr_err("PM: GFX domain did not transition: %x\n", status);
}

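/*
 * am335x SoC suspend: power off the GFX domain, enter suspend through the
 * handler supplied by the pm33xx driver, then verify the GFX transition
 * and kick the GFX_L4LS clock domain on resume.
 */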
static int am33xx_suspend(unsigned int state, int (*fn)(unsigned long),
			  unsigned long args)
{
	int ret = 0;

	amx3_pre_suspend_common();
	ret = cpu_suspend(args, fn);
	amx3_post_suspend_common();

	/*
	 * BUG: GFX_L4LS clock domain needs to be woken up to
	 * ensure that the L4LS clock domain does not get stuck in
	 * transition. If that happens, the L3 module does not get
	 * disabled, thereby leading to the PER power domain
	 * transition failing.
	 */

	clkdm_wakeup(gfx_l4ls_clkdm);
	clkdm_sleep(gfx_l4ls_clkdm);

	return ret;
}

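/*
 * am437x SoC suspend: suspend the secure side on HS devices, power off
 * the SCU around cpu_suspend() and resume the secure side afterwards.
 */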
static int am43xx_suspend(unsigned int state, int (*fn)(unsigned long),
			  unsigned long args)
{
	int ret = 0;

	/* Suspend secure side on HS devices */
	if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
		if (optee_available)
			omap_smccc_smc(AM43xx_PPA_SVC_PM_SUSPEND, 0);
		else
			omap_secure_dispatcher(AM43xx_PPA_SVC_PM_SUSPEND,
					       FLAG_START_CRITICAL,
					       0, 0, 0, 0, 0);
	}

	amx3_pre_suspend_common();
	scu_power_mode(scu_base, SCU_PM_POWEROFF);
	ret = cpu_suspend(args, fn);
	scu_power_mode(scu_base, SCU_PM_NORMAL);

	if (!am43xx_check_off_mode_enable())
		amx3_post_suspend_common();

	/*
	 * Resume secure side on HS devices.
	 *
	 * Note that even on systems with OP-TEE available this resume call is
	 * issued to the ROM. This is because upon waking from suspend the ROM
	 * is restored as the secure monitor. On systems with OP-TEE, the ROM
	 * will restore OP-TEE during this call.
	 */
	if (omap_type() != OMAP2_DEVICE_TYPE_GP)
		omap_secure_dispatcher(AM43xx_PPA_SVC_PM_RESUME,
				       FLAG_START_CRITICAL,
				       0, 0, 0, 0, 0);

	return ret;
}

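/* cpuidle entry for am335x: plain cpu_suspend() unless work is pending */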
static int am33xx_cpu_suspend(int (*fn)(unsigned long), unsigned long args)
{
	int ret = 0;

	if (omap_irq_pending() || need_resched())
		return ret;

	ret = cpu_suspend(args, fn);

	return ret;
}

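/*
 * cpuidle entry for am437x: put the SCU into dormant mode around
 * cpu_suspend() and restore normal mode on exit.
 */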
static int am43xx_cpu_suspend(int (*fn)(unsigned long), unsigned long args)
{
	int ret = 0;

	if (!scu_base)
		return 0;

	scu_power_mode(scu_base, SCU_PM_DORMANT);
	ret = cpu_suspend(args, fn);
	scu_power_mode(scu_base, SCU_PM_NORMAL);

	return ret;
}

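/*
 * Force the idle loop to poll between begin_suspend and finish_suspend so
 * the cpuidle states above are bypassed while system suspend is in
 * progress.
 */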
static void amx3_begin_suspend(void)
{
	cpu_idle_poll_ctrl(true);
}

static void amx3_finish_suspend(void)
{
	cpu_idle_poll_ctrl(false);
}

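/* Hand the pm33xx driver the SoC-specific SRAM code and data addresses */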
static struct am33xx_pm_sram_addr *amx3_get_sram_addrs(void)
{
	if (soc_is_am33xx())
		return &am33xx_pm_sram;
	else if (soc_is_am437x())
		return &am43xx_pm_sram;
	else
		return NULL;
}

static void am43xx_save_context(void)
{
}

static void am33xx_save_context(void)
{
	omap_intc_save_context();
}

static void am33xx_restore_context(void)
{
	omap_intc_restore_context();
}

static void am43xx_restore_context(void)
{
	/*
	 * HACK: restore dpll_per_clkdcoldo register contents, to avoid
	 * breaking suspend-resume
	 */
	writel_relaxed(0x0, AM33XX_L4_WK_IO_ADDRESS(0x44df2e14));
}

static struct am33xx_pm_platform_data am33xx_ops = {
	.init = am33xx_suspend_init,
	.deinit = amx3_suspend_deinit,
	.soc_suspend = am33xx_suspend,
	.cpu_suspend = am33xx_cpu_suspend,
	.begin_suspend = amx3_begin_suspend,
	.finish_suspend = amx3_finish_suspend,
	.get_sram_addrs = amx3_get_sram_addrs,
	.save_context = am33xx_save_context,
	.restore_context = am33xx_restore_context,
	.check_off_mode_enable = am33xx_check_off_mode_enable,
};

static struct am33xx_pm_platform_data am43xx_ops = {
	.init = am43xx_suspend_init,
	.deinit = amx3_suspend_deinit,
	.soc_suspend = am43xx_suspend,
	.cpu_suspend = am43xx_cpu_suspend,
	.begin_suspend = amx3_begin_suspend,
	.finish_suspend = amx3_finish_suspend,
	.get_sram_addrs = amx3_get_sram_addrs,
	.save_context = am43xx_save_context,
	.restore_context = am43xx_restore_context,
	.check_off_mode_enable = am43xx_check_off_mode_enable,
};

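/* Select the SoC-specific set of PM operations passed to pm33xx */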
static struct am33xx_pm_platform_data *am33xx_pm_get_pdata(void)
{
	if (soc_is_am33xx())
		return &am33xx_ops;
	else if (soc_is_am437x())
		return &am43xx_ops;
	else
		return NULL;
}

#ifdef CONFIG_SUSPEND
/*
 * Block system suspend initially. Later on pm33xx sets up its own
 * platform_suspend_ops after probe. That also depends on a loaded
 * wkup_m3_ipc and a booted am335x-pm-firmware.elf.
 */
static int amx3_suspend_block(suspend_state_t state)
{
	pr_warn("PM not initialized for pm33xx, wkup_m3_ipc, or am335x-pm-firmware.elf\n");

	return -EINVAL;
}

static int amx3_pm_valid(suspend_state_t state)
{
	switch (state) {
	case PM_SUSPEND_STANDBY:
		return 1;
	default:
		return 0;
	}
}

static const struct platform_suspend_ops amx3_blocked_pm_ops = {
	.begin = amx3_suspend_block,
	.valid = amx3_pm_valid,
};

static void __init amx3_block_suspend(void)
{
	suspend_set_ops(&amx3_blocked_pm_ops);
}
#else
static inline void amx3_block_suspend(void)
{
}
#endif /* CONFIG_SUSPEND */

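/*
 * Register the "pm33xx" platform device with the SoC-specific PM
 * operations and block system suspend until that driver takes over.
 */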
int __init amx3_common_pm_init(void)
{
	struct am33xx_pm_platform_data *pdata;
	struct platform_device_info devinfo;

	pdata = am33xx_pm_get_pdata();

	memset(&devinfo, 0, sizeof(devinfo));
	devinfo.name = "pm33xx";
	devinfo.data = pdata;
	devinfo.size_data = sizeof(*pdata);
	devinfo.id = -1;
	platform_device_register_full(&devinfo);
	amx3_block_suspend();

	return 0;
}

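/*
 * Parse the cpu-idle-states phandles from the device tree and record, per
 * state, whether the wkup_m3 must be woken and caches flushed (the
 * "ti,idle-wkup-m3" property).
 */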
static int __init amx3_idle_init(struct device_node *cpu_node, int cpu)
{
	struct device_node *state_node;
	struct amx3_idle_state states[CPUIDLE_STATE_MAX];
	int i;
	int state_count = 1;

	for (i = 0; ; i++) {
		state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
		if (!state_node)
			break;

		if (!of_device_is_available(state_node))
			continue;

		if (i == CPUIDLE_STATE_MAX) {
			pr_warn("%s: cpuidle states reached max possible\n",
				__func__);
			break;
		}

		states[state_count].wfi_flags = 0;

		if (of_property_read_bool(state_node, "ti,idle-wkup-m3"))
			states[state_count].wfi_flags |= WFI_FLAG_WAKE_M3 |
							 WFI_FLAG_FLUSH_CACHE;

		state_count++;
	}

	idle_states = kcalloc(state_count, sizeof(*idle_states), GFP_KERNEL);
	if (!idle_states)
		return -ENOMEM;

	for (i = 1; i < state_count; i++)
		idle_states[i].wfi_flags = states[i].wfi_flags;

	return 0;
}

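/*
 * Enter the selected idle state by passing its WFI flags to the handler
 * installed through amx3_common_init(), if one is registered.
 */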
static int amx3_idle_enter(unsigned long index)
{
	struct amx3_idle_state *idle_state = &idle_states[index];

	if (!idle_state)
		return -EINVAL;

	if (idle_fn)
		idle_fn(idle_state->wfi_flags);

	return 0;
}

static struct cpuidle_ops amx3_cpuidle_ops __initdata = {
	.init = amx3_idle_init,
	.suspend = amx3_idle_enter,
};

CPUIDLE_METHOD_OF_DECLARE(pm33xx_idle, "ti,am3352", &amx3_cpuidle_ops);
CPUIDLE_METHOD_OF_DECLARE(pm43xx_idle, "ti,am4372", &amx3_cpuidle_ops);