// SPDX-License-Identifier: GPL-2.0
/*
 * ti-sysc.c - Texas Instruments sysc interconnect target driver
 *
 * TI SoCs have an interconnect target wrapper IP for many devices. The wrapper
 * IP manages clock gating, resets, and PM capabilities for the connected devices.
 *
 * Copyright (C) 2017-2024 Texas Instruments Incorporated - https://www.ti.com/
 *
 * Many features are based on the earlier omap_hwmod arch code with thanks to all
 * the people who developed and debugged the code over the years:
 *
 * Copyright (C) 2009-2011 Nokia Corporation
 * Copyright (C) 2011-2021 Texas Instruments Incorporated - https://www.ti.com/
 */

#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>
#include <linux/timekeeping.h>
#include <linux/iopoll.h>

#include <linux/platform_data/ti-sysc.h>

#include <dt-bindings/bus/ti-sysc.h>

39#define DIS_ISP BIT(2)
40#define DIS_IVA BIT(1)
41#define DIS_SGX BIT(0)
42
43#define SOC_FLAG(match, flag) { .machine = match, .data = (void *)(flag), }
44
45#define MAX_MODULE_SOFTRESET_WAIT 10000
46
47enum sysc_soc {
48 SOC_UNKNOWN,
49 SOC_2420,
50 SOC_2430,
51 SOC_3430,
52 SOC_AM35,
53 SOC_3630,
54 SOC_4430,
55 SOC_4460,
56 SOC_4470,
57 SOC_5430,
58 SOC_AM3,
59 SOC_AM4,
60 SOC_DRA7,
61};
62
63struct sysc_address {
64 unsigned long base;
65 struct list_head node;
66};
67
68struct sysc_module {
69 struct sysc *ddata;
70 struct list_head node;
71};
72
73struct sysc_soc_info {
74 unsigned long general_purpose:1;
75 enum sysc_soc soc;
76 struct mutex list_lock; /* disabled and restored modules list lock */
77 struct list_head disabled_modules;
78 struct list_head restored_modules;
79 struct notifier_block nb;
80};
81
82enum sysc_clocks {
83 SYSC_FCK,
84 SYSC_ICK,
85 SYSC_OPTFCK0,
86 SYSC_OPTFCK1,
87 SYSC_OPTFCK2,
88 SYSC_OPTFCK3,
89 SYSC_OPTFCK4,
90 SYSC_OPTFCK5,
91 SYSC_OPTFCK6,
92 SYSC_OPTFCK7,
93 SYSC_MAX_CLOCKS,
94};
95
96static struct sysc_soc_info *sysc_soc;
97static const char * const reg_names[] = { "rev", "sysc", "syss", };
98static const char * const clock_names[SYSC_MAX_CLOCKS] = {
99 "fck", "ick", "opt0", "opt1", "opt2", "opt3", "opt4",
100 "opt5", "opt6", "opt7",
101};
102
103#define SYSC_IDLEMODE_MASK 3
104#define SYSC_CLOCKACTIVITY_MASK 3
105
106/**
107 * struct sysc - TI sysc interconnect target module registers and capabilities
108 * @dev: struct device pointer
109 * @module_pa: physical address of the interconnect target module
110 * @module_size: size of the interconnect target module
111 * @module_va: virtual address of the interconnect target module
112 * @offsets: register offsets from module base
113 * @mdata: ti-sysc to hwmod translation data for a module
114 * @clocks: clocks used by the interconnect target module
115 * @clock_roles: clock role names for the found clocks
116 * @nr_clocks: number of clocks used by the interconnect target module
117 * @rsts: resets used by the interconnect target module
118 * @legacy_mode: configured for legacy mode if set
119 * @cap: interconnect target module capabilities
120 * @cfg: interconnect target module configuration
121 * @cookie: data used by legacy platform callbacks
122 * @name: name if available
123 * @revision: interconnect target module revision
124 * @sysconfig: saved sysconfig register value
125 * @reserved: target module is reserved and already in use
126 * @enabled: sysc runtime enabled status
127 * @needs_resume: runtime resume needed on resume from suspend
128 * @child_needs_resume: runtime resume needed for child on resume from suspend
129 * @idle_work: work structure used to perform delayed idle on a module
130 * @pre_reset_quirk: module specific pre-reset quirk
131 * @post_reset_quirk: module specific post-reset quirk
132 * @reset_done_quirk: module specific reset done quirk
133 * @module_enable_quirk: module specific enable quirk
134 * @module_disable_quirk: module specific disable quirk
135 * @module_unlock_quirk: module specific sysconfig unlock quirk
136 * @module_lock_quirk: module specific sysconfig lock quirk
137 */
138struct sysc {
139 struct device *dev;
140 u64 module_pa;
141 u32 module_size;
142 void __iomem *module_va;
143 int offsets[SYSC_MAX_REGS];
144 struct ti_sysc_module_data *mdata;
145 struct clk **clocks;
146 const char **clock_roles;
147 int nr_clocks;
148 struct reset_control *rsts;
149 const char *legacy_mode;
150 const struct sysc_capabilities *cap;
151 struct sysc_config cfg;
152 struct ti_sysc_cookie cookie;
153 const char *name;
154 u32 revision;
155 u32 sysconfig;
156 unsigned int reserved:1;
157 unsigned int enabled:1;
158 unsigned int needs_resume:1;
159 unsigned int child_needs_resume:1;
160 struct delayed_work idle_work;
161 void (*pre_reset_quirk)(struct sysc *sysc);
162 void (*post_reset_quirk)(struct sysc *sysc);
163 void (*reset_done_quirk)(struct sysc *sysc);
164 void (*module_enable_quirk)(struct sysc *sysc);
165 void (*module_disable_quirk)(struct sysc *sysc);
166 void (*module_unlock_quirk)(struct sysc *sysc);
167 void (*module_lock_quirk)(struct sysc *sysc);
168};
169
170static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
171 bool is_child);
172static int sysc_reset(struct sysc *ddata);
173
174static void sysc_write(struct sysc *ddata, int offset, u32 value)
175{
176 if (ddata->cfg.quirks & SYSC_QUIRK_16BIT) {
177 writew_relaxed(value & 0xffff, ddata->module_va + offset);
178
179 /* Only i2c revision has LO and HI register with stride of 4 */
180 if (ddata->offsets[SYSC_REVISION] >= 0 &&
181 offset == ddata->offsets[SYSC_REVISION]) {
182 u16 hi = value >> 16;
183
184 writew_relaxed(hi, ddata->module_va + offset + 4);
185 }
186
187 return;
188 }
189
190 writel_relaxed(value, ddata->module_va + offset);
191}
192
193static u32 sysc_read(struct sysc *ddata, int offset)
194{
195 if (ddata->cfg.quirks & SYSC_QUIRK_16BIT) {
196 u32 val;
197
198 val = readw_relaxed(ddata->module_va + offset);
199
200 /* Only i2c revision has LO and HI register with stride of 4 */
201 if (ddata->offsets[SYSC_REVISION] >= 0 &&
202 offset == ddata->offsets[SYSC_REVISION]) {
203 u16 tmp = readw_relaxed(ddata->module_va + offset + 4);
204
205 val |= tmp << 16;
206 }
207
208 return val;
209 }
210
211 return readl_relaxed(ddata->module_va + offset);
212}
213
214static bool sysc_opt_clks_needed(struct sysc *ddata)
215{
216 return !!(ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_NEEDED);
217}
218
219static u32 sysc_read_revision(struct sysc *ddata)
220{
221 int offset = ddata->offsets[SYSC_REVISION];
222
223 if (offset < 0)
224 return 0;
225
226 return sysc_read(ddata, offset);
227}
228
229static u32 sysc_read_sysconfig(struct sysc *ddata)
230{
231 int offset = ddata->offsets[SYSC_SYSCONFIG];
232
233 if (offset < 0)
234 return 0;
235
236 return sysc_read(ddata, offset);
237}
238
239static u32 sysc_read_sysstatus(struct sysc *ddata)
240{
241 int offset = ddata->offsets[SYSC_SYSSTATUS];
242
243 if (offset < 0)
244 return 0;
245
246 return sysc_read(ddata, offset);
247}
248
249static int sysc_poll_reset_sysstatus(struct sysc *ddata)
250{
251 int error, retries;
252 u32 syss_done, rstval;
253
254 if (ddata->cfg.quirks & SYSS_QUIRK_RESETDONE_INVERTED)
255 syss_done = 0;
256 else
257 syss_done = ddata->cfg.syss_mask;
258
259 if (likely(!timekeeping_suspended)) {
260 error = readx_poll_timeout_atomic(sysc_read_sysstatus, ddata,
261 rstval, (rstval & ddata->cfg.syss_mask) ==
262 syss_done, 100, MAX_MODULE_SOFTRESET_WAIT);
263 } else {
264 retries = MAX_MODULE_SOFTRESET_WAIT;
265 while (retries--) {
266 rstval = sysc_read_sysstatus(ddata);
267 if ((rstval & ddata->cfg.syss_mask) == syss_done)
268 return 0;
269 udelay(2); /* Account for udelay flakeyness */
270 }
271 error = -ETIMEDOUT;
272 }
273
274 return error;
275}
276
277static int sysc_poll_reset_sysconfig(struct sysc *ddata)
278{
279 int error, retries;
280 u32 sysc_mask, rstval;
281
282 sysc_mask = BIT(ddata->cap->regbits->srst_shift);
283
284 if (likely(!timekeeping_suspended)) {
285 error = readx_poll_timeout_atomic(sysc_read_sysconfig, ddata,
286 rstval, !(rstval & sysc_mask),
287 100, MAX_MODULE_SOFTRESET_WAIT);
288 } else {
289 retries = MAX_MODULE_SOFTRESET_WAIT;
290 while (retries--) {
291 rstval = sysc_read_sysconfig(ddata);
292 if (!(rstval & sysc_mask))
293 return 0;
294 udelay(2); /* Account for udelay flakeyness */
295 }
296 error = -ETIMEDOUT;
297 }
298
299 return error;
300}
301
302/* Poll on reset status */
303static int sysc_wait_softreset(struct sysc *ddata)
304{
305 int syss_offset, error = 0;
306
307 if (ddata->cap->regbits->srst_shift < 0)
308 return 0;
309
310 syss_offset = ddata->offsets[SYSC_SYSSTATUS];
311
312 if (syss_offset >= 0)
313 error = sysc_poll_reset_sysstatus(ddata);
314 else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS)
315 error = sysc_poll_reset_sysconfig(ddata);
316
317 return error;
318}
319
320static int sysc_add_named_clock_from_child(struct sysc *ddata,
321 const char *name,
322 const char *optfck_name)
323{
324 struct device_node *np = ddata->dev->of_node;
325 struct device_node *child;
326 struct clk_lookup *cl;
327 struct clk *clock;
328 const char *n;
329
330 if (name)
331 n = name;
332 else
333 n = optfck_name;
334
335 /* Does the clock alias already exist? */
336 clock = of_clk_get_by_name(np, n);
337 if (!IS_ERR(clock)) {
338 clk_put(clock);
339
340 return 0;
341 }
342
343 child = of_get_next_available_child(np, NULL);
344 if (!child)
345 return -ENODEV;
346
347 clock = devm_get_clk_from_child(ddata->dev, child, name);
348 if (IS_ERR(clock))
349 return PTR_ERR(clock);
350
351 /*
352 * Use clkdev_add() instead of clkdev_alloc() to avoid the MAX_DEV_ID
353 * limit for clk_get(). If cl ever needs to be freed, it should be done
354 * with clkdev_drop().
355 */
356 cl = kzalloc(sizeof(*cl), GFP_KERNEL);
357 if (!cl)
358 return -ENOMEM;
359
360 cl->con_id = n;
361 cl->dev_id = dev_name(ddata->dev);
362 cl->clk = clock;
363 clkdev_add(cl);
364
365 clk_put(clock);
366
367 return 0;
368}
369
370static int sysc_init_ext_opt_clock(struct sysc *ddata, const char *name)
371{
372 const char *optfck_name;
373 int error, index;
374
375 if (ddata->nr_clocks < SYSC_OPTFCK0)
376 index = SYSC_OPTFCK0;
377 else
378 index = ddata->nr_clocks;
379
380 if (name)
381 optfck_name = name;
382 else
383 optfck_name = clock_names[index];
384
385 error = sysc_add_named_clock_from_child(ddata, name, optfck_name);
386 if (error)
387 return error;
388
389 ddata->clock_roles[index] = optfck_name;
390 ddata->nr_clocks++;
391
392 return 0;
393}
394
395static int sysc_get_one_clock(struct sysc *ddata, const char *name)
396{
397 int error, i, index = -ENODEV;
398
399 if (!strncmp(clock_names[SYSC_FCK], name, 3))
400 index = SYSC_FCK;
401 else if (!strncmp(clock_names[SYSC_ICK], name, 3))
402 index = SYSC_ICK;
403
404 if (index < 0) {
405 for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
406 if (!ddata->clocks[i]) {
407 index = i;
408 break;
409 }
410 }
411 }
412
413 if (index < 0) {
414 dev_err(ddata->dev, "clock %s not added\n", name);
415 return index;
416 }
417
418 ddata->clocks[index] = devm_clk_get(ddata->dev, name);
419 if (IS_ERR(ddata->clocks[index])) {
420 dev_err(ddata->dev, "clock get error for %s: %li\n",
421 name, PTR_ERR(ddata->clocks[index]));
422
423 return PTR_ERR(ddata->clocks[index]);
424 }
425
426 error = clk_prepare(ddata->clocks[index]);
427 if (error) {
428 dev_err(ddata->dev, "clock prepare error for %s: %i\n",
429 name, error);
430
431 return error;
432 }
433
434 return 0;
435}
436
437static int sysc_get_clocks(struct sysc *ddata)
438{
439 struct device_node *np = ddata->dev->of_node;
440 struct property *prop;
441 const char *name;
442 int nr_fck = 0, nr_ick = 0, i, error = 0;
443
444 ddata->clock_roles = devm_kcalloc(ddata->dev,
445 SYSC_MAX_CLOCKS,
446 sizeof(*ddata->clock_roles),
447 GFP_KERNEL);
448 if (!ddata->clock_roles)
449 return -ENOMEM;
450
451 of_property_for_each_string(np, "clock-names", prop, name) {
452 if (!strncmp(clock_names[SYSC_FCK], name, 3))
453 nr_fck++;
454 if (!strncmp(clock_names[SYSC_ICK], name, 3))
455 nr_ick++;
456 ddata->clock_roles[ddata->nr_clocks] = name;
457 ddata->nr_clocks++;
458 }
459
460 if (ddata->nr_clocks < 1)
461 return 0;
462
463 if ((ddata->cfg.quirks & SYSC_QUIRK_EXT_OPT_CLOCK)) {
464 error = sysc_init_ext_opt_clock(ddata, NULL);
465 if (error)
466 return error;
467 }
468
469 if (ddata->nr_clocks > SYSC_MAX_CLOCKS) {
470 dev_err(ddata->dev, "too many clocks for %pOF\n", np);
471
472 return -EINVAL;
473 }
474
475 if (nr_fck > 1 || nr_ick > 1) {
476 dev_err(ddata->dev, "max one fck and ick for %pOF\n", np);
477
478 return -EINVAL;
479 }
480
481 /* Always add a slot for main clocks fck and ick even if unused */
482 if (!nr_fck)
483 ddata->nr_clocks++;
484 if (!nr_ick)
485 ddata->nr_clocks++;
486
487 ddata->clocks = devm_kcalloc(ddata->dev,
488 ddata->nr_clocks, sizeof(*ddata->clocks),
489 GFP_KERNEL);
490 if (!ddata->clocks)
491 return -ENOMEM;
492
493 for (i = 0; i < SYSC_MAX_CLOCKS; i++) {
494 const char *name = ddata->clock_roles[i];
495
496 if (!name)
497 continue;
498
499 error = sysc_get_one_clock(ddata, name);
500 if (error)
501 return error;
502 }
503
504 return 0;
505}
506
507static int sysc_enable_main_clocks(struct sysc *ddata)
508{
509 struct clk *clock;
510 int i, error;
511
512 if (!ddata->clocks)
513 return 0;
514
515 for (i = 0; i < SYSC_OPTFCK0; i++) {
516 clock = ddata->clocks[i];
517
518 /* Main clocks may not have ick */
519 if (IS_ERR_OR_NULL(clock))
520 continue;
521
522 error = clk_enable(clock);
523 if (error)
524 goto err_disable;
525 }
526
527 return 0;
528
529err_disable:
530 for (i--; i >= 0; i--) {
531 clock = ddata->clocks[i];
532
533 /* Main clocks may not have ick */
534 if (IS_ERR_OR_NULL(clock))
535 continue;
536
537 clk_disable(clock);
538 }
539
540 return error;
541}
542
543static void sysc_disable_main_clocks(struct sysc *ddata)
544{
545 struct clk *clock;
546 int i;
547
548 if (!ddata->clocks)
549 return;
550
551 for (i = 0; i < SYSC_OPTFCK0; i++) {
552 clock = ddata->clocks[i];
553 if (IS_ERR_OR_NULL(clock))
554 continue;
555
556 clk_disable(clock);
557 }
558}
559
560static int sysc_enable_opt_clocks(struct sysc *ddata)
561{
562 struct clk *clock;
563 int i, error;
564
565 if (!ddata->clocks || ddata->nr_clocks < SYSC_OPTFCK0 + 1)
566 return 0;
567
568 for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
569 clock = ddata->clocks[i];
570
571 /* Assume no holes for opt clocks */
572 if (IS_ERR_OR_NULL(clock))
573 return 0;
574
575 error = clk_enable(clock);
576 if (error)
577 goto err_disable;
578 }
579
580 return 0;
581
582err_disable:
583 for (i--; i >= 0; i--) {
584 clock = ddata->clocks[i];
585 if (IS_ERR_OR_NULL(clock))
586 continue;
587
588 clk_disable(clock);
589 }
590
591 return error;
592}
593
594static void sysc_disable_opt_clocks(struct sysc *ddata)
595{
596 struct clk *clock;
597 int i;
598
599 if (!ddata->clocks || ddata->nr_clocks < SYSC_OPTFCK0 + 1)
600 return;
601
602 for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
603 clock = ddata->clocks[i];
604
605 /* Assume no holes for opt clocks */
606 if (IS_ERR_OR_NULL(clock))
607 return;
608
609 clk_disable(clock);
610 }
611}
612
613static void sysc_clkdm_deny_idle(struct sysc *ddata)
614{
615 struct ti_sysc_platform_data *pdata;
616
617 if (ddata->legacy_mode || (ddata->cfg.quirks & SYSC_QUIRK_CLKDM_NOAUTO))
618 return;
619
620 pdata = dev_get_platdata(ddata->dev);
621 if (pdata && pdata->clkdm_deny_idle)
622 pdata->clkdm_deny_idle(ddata->dev, &ddata->cookie);
623}
624
625static void sysc_clkdm_allow_idle(struct sysc *ddata)
626{
627 struct ti_sysc_platform_data *pdata;
628
629 if (ddata->legacy_mode || (ddata->cfg.quirks & SYSC_QUIRK_CLKDM_NOAUTO))
630 return;
631
632 pdata = dev_get_platdata(ddata->dev);
633 if (pdata && pdata->clkdm_allow_idle)
634 pdata->clkdm_allow_idle(ddata->dev, &ddata->cookie);
635}
636
637/**
638 * sysc_init_resets - init rstctrl reset line if configured
639 * @ddata: device driver data
640 *
641 * See sysc_rstctrl_reset_deassert().
642 */
643static int sysc_init_resets(struct sysc *ddata)
644{
645 ddata->rsts =
646 devm_reset_control_get_optional_shared(ddata->dev, "rstctrl");
647
648 return PTR_ERR_OR_ZERO(ddata->rsts);
649}
650
651/**
652 * sysc_parse_and_check_child_range - parses module IO region from ranges
653 * @ddata: device driver data
654 *
655 * In general we only need rev, syss, and sysc registers and not the whole
656 * module range. But we do want the offsets for these registers from the
657 * module base. This allows us to check them against the legacy hwmod
658 * platform data. Let's also check the ranges are configured properly.
659 */
660static int sysc_parse_and_check_child_range(struct sysc *ddata)
661{
662 struct device_node *np = ddata->dev->of_node;
663 struct of_range_parser parser;
664 struct of_range range;
665 int error;
666
667 error = of_range_parser_init(&parser, np);
668 if (error)
669 return error;
670
671 for_each_of_range(&parser, &range) {
672 ddata->module_pa = range.cpu_addr;
673 ddata->module_size = range.size;
674 break;
675 }
676
677 return 0;
678}
679
680/* Interconnect instances to probe before l4_per instances */
681static struct resource early_bus_ranges[] = {
682 /* am3/4 l4_wkup */
683 { .start = 0x44c00000, .end = 0x44c00000 + 0x300000, },
684 /* omap4/5 and dra7 l4_cfg */
685 { .start = 0x4a000000, .end = 0x4a000000 + 0x300000, },
686 /* omap4 l4_wkup */
687 { .start = 0x4a300000, .end = 0x4a300000 + 0x30000, },
688 /* omap5 and dra7 l4_wkup without dra7 dcan segment */
689 { .start = 0x4ae00000, .end = 0x4ae00000 + 0x30000, },
690};
691
692static atomic_t sysc_defer = ATOMIC_INIT(10);
693
694/**
695 * sysc_defer_non_critical - defer non_critical interconnect probing
696 * @ddata: device driver data
697 *
698 * We want to probe l4_cfg and l4_wkup interconnect instances before any
699 * l4_per instances as l4_per instances depend on resources on l4_cfg and
700 * l4_wkup interconnects.
701 */
702static int sysc_defer_non_critical(struct sysc *ddata)
703{
704 struct resource *res;
705 int i;
706
707 if (!atomic_read(&sysc_defer))
708 return 0;
709
710 for (i = 0; i < ARRAY_SIZE(early_bus_ranges); i++) {
711 res = &early_bus_ranges[i];
712 if (ddata->module_pa >= res->start &&
713 ddata->module_pa <= res->end) {
714 atomic_set(&sysc_defer, 0);
715
716 return 0;
717 }
718 }
719
720 atomic_dec_if_positive(&sysc_defer);
721
722 return -EPROBE_DEFER;
723}
724
725static struct device_node *stdout_path;
726
727static void sysc_init_stdout_path(struct sysc *ddata)
728{
729 struct device_node *np = NULL;
730 const char *uart;
731
732 if (IS_ERR(stdout_path))
733 return;
734
735 if (stdout_path)
736 return;
737
738 np = of_find_node_by_path("/chosen");
739 if (!np)
740 goto err;
741
742 uart = of_get_property(np, "stdout-path", NULL);
743 if (!uart)
744 goto err;
745
746 np = of_find_node_by_path(uart);
747 if (!np)
748 goto err;
749
750 stdout_path = np;
751
752 return;
753
754err:
755 stdout_path = ERR_PTR(-ENODEV);
756}
757
758static void sysc_check_quirk_stdout(struct sysc *ddata,
759 struct device_node *np)
760{
761 sysc_init_stdout_path(ddata);
762 if (np != stdout_path)
763 return;
764
765 ddata->cfg.quirks |= SYSC_QUIRK_NO_IDLE_ON_INIT |
766 SYSC_QUIRK_NO_RESET_ON_INIT;
767}
768
769/**
770 * sysc_check_one_child - check child configuration
771 * @ddata: device driver data
772 * @np: child device node
773 *
774 * Let's avoid messy situations where we have new interconnect target
775 * node but children have "ti,hwmods". These belong to the interconnect
776 * target node and are managed by this driver.
777 */
778static void sysc_check_one_child(struct sysc *ddata,
779 struct device_node *np)
780{
781 const char *name;
782
783 name = of_get_property(np, "ti,hwmods", NULL);
784 if (name && !of_device_is_compatible(np, "ti,sysc"))
785 dev_warn(ddata->dev, "really a child ti,hwmods property?");
786
787 sysc_check_quirk_stdout(ddata, np);
788 sysc_parse_dts_quirks(ddata, np, true);
789}
790
791static void sysc_check_children(struct sysc *ddata)
792{
793 struct device_node *child;
794
795 for_each_child_of_node(ddata->dev->of_node, child)
796 sysc_check_one_child(ddata, child);
797}
798
799/*
800 * So far only I2C uses 16-bit read access with clockactivity with revision
801 * in two registers with stride of 4. We can detect this based on the rev
802 * register size to configure things far enough to be able to properly read
803 * the revision register.
804 */
805static void sysc_check_quirk_16bit(struct sysc *ddata, struct resource *res)
806{
807 if (resource_size(res) == 8)
808 ddata->cfg.quirks |= SYSC_QUIRK_16BIT | SYSC_QUIRK_USE_CLOCKACT;
809}
810
811/**
812 * sysc_parse_one - parses the interconnect target module registers
813 * @ddata: device driver data
814 * @reg: register to parse
815 */
816static int sysc_parse_one(struct sysc *ddata, enum sysc_registers reg)
817{
818 struct resource *res;
819 const char *name;
820
821 switch (reg) {
822 case SYSC_REVISION:
823 case SYSC_SYSCONFIG:
824 case SYSC_SYSSTATUS:
825 name = reg_names[reg];
826 break;
827 default:
828 return -EINVAL;
829 }
830
831 res = platform_get_resource_byname(to_platform_device(ddata->dev),
832 IORESOURCE_MEM, name);
833 if (!res) {
834 ddata->offsets[reg] = -ENODEV;
835
836 return 0;
837 }
838
839 ddata->offsets[reg] = res->start - ddata->module_pa;
840 if (reg == SYSC_REVISION)
841 sysc_check_quirk_16bit(ddata, res);
842
843 return 0;
844}
845
846static int sysc_parse_registers(struct sysc *ddata)
847{
848 int i, error;
849
850 for (i = 0; i < SYSC_MAX_REGS; i++) {
851 error = sysc_parse_one(ddata, i);
852 if (error)
853 return error;
854 }
855
856 return 0;
857}
858
859/**
860 * sysc_check_registers - check for misconfigured register overlaps
861 * @ddata: device driver data
862 */
863static int sysc_check_registers(struct sysc *ddata)
864{
865 int i, j, nr_regs = 0, nr_matches = 0;
866
867 for (i = 0; i < SYSC_MAX_REGS; i++) {
868 if (ddata->offsets[i] < 0)
869 continue;
870
871 if (ddata->offsets[i] > (ddata->module_size - 4)) {
872 dev_err(ddata->dev, "register outside module range");
873
874 return -EINVAL;
875 }
876
877 for (j = 0; j < SYSC_MAX_REGS; j++) {
878 if (ddata->offsets[j] < 0)
879 continue;
880
881 if (ddata->offsets[i] == ddata->offsets[j])
882 nr_matches++;
883 }
884 nr_regs++;
885 }
886
887 if (nr_matches > nr_regs) {
888 dev_err(ddata->dev, "overlapping registers: (%i/%i)",
889 nr_regs, nr_matches);
890
891 return -EINVAL;
892 }
893
894 return 0;
895}
896
897/**
898 * sysc_ioremap - ioremap register space for the interconnect target module
899 * @ddata: device driver data
900 *
901 * Note that the interconnect target module registers can be anywhere
902 * within the interconnect target module range. For example, SGX has
903 * them at offset 0x1fc00 in the 32MB module address space. And cpsw
904 * has them at offset 0x1200 in the CPSW_WR child. Usually the
905 * interconnect target module registers are at the beginning of
906 * the module range though.
907 */
908static int sysc_ioremap(struct sysc *ddata)
909{
910 int size;
911
912 if (ddata->offsets[SYSC_REVISION] < 0 &&
913 ddata->offsets[SYSC_SYSCONFIG] < 0 &&
914 ddata->offsets[SYSC_SYSSTATUS] < 0) {
915 size = ddata->module_size;
916 } else {
917 size = max3(ddata->offsets[SYSC_REVISION],
918 ddata->offsets[SYSC_SYSCONFIG],
919 ddata->offsets[SYSC_SYSSTATUS]);
920
921 if (size < SZ_1K)
922 size = SZ_1K;
923
924 if ((size + sizeof(u32)) > ddata->module_size)
925 size = ddata->module_size;
926 }
927
928 ddata->module_va = devm_ioremap(ddata->dev,
929 ddata->module_pa,
930 size + sizeof(u32));
931 if (!ddata->module_va)
932 return -EIO;
933
934 return 0;
935}
936
937/**
938 * sysc_map_and_check_registers - ioremap and check device registers
939 * @ddata: device driver data
940 */
941static int sysc_map_and_check_registers(struct sysc *ddata)
942{
943 struct device_node *np = ddata->dev->of_node;
944 int error;
945
946 error = sysc_parse_and_check_child_range(ddata);
947 if (error)
948 return error;
949
950 error = sysc_defer_non_critical(ddata);
951 if (error)
952 return error;
953
954 sysc_check_children(ddata);
955
956 if (!of_property_present(np, "reg"))
957 return 0;
958
959 error = sysc_parse_registers(ddata);
960 if (error)
961 return error;
962
963 error = sysc_ioremap(ddata);
964 if (error)
965 return error;
966
967 error = sysc_check_registers(ddata);
968 if (error)
969 return error;
970
971 return 0;
972}
973
974/**
975 * sysc_show_rev - read and show interconnect target module revision
976 * @bufp: buffer to print the information to
977 * @ddata: device driver data
978 */
979static int sysc_show_rev(char *bufp, struct sysc *ddata)
980{
981 int len;
982
983 if (ddata->offsets[SYSC_REVISION] < 0)
984 return sprintf(bufp, ":NA");
985
986 len = sprintf(bufp, ":%08x", ddata->revision);
987
988 return len;
989}
990
991static int sysc_show_reg(struct sysc *ddata,
992 char *bufp, enum sysc_registers reg)
993{
994 if (ddata->offsets[reg] < 0)
995 return sprintf(bufp, ":NA");
996
997 return sprintf(bufp, ":%x", ddata->offsets[reg]);
998}
999
1000static int sysc_show_name(char *bufp, struct sysc *ddata)
1001{
1002 if (!ddata->name)
1003 return 0;
1004
1005 return sprintf(bufp, ":%s", ddata->name);
1006}
1007
1008/**
1009 * sysc_show_registers - show information about interconnect target module
1010 * @ddata: device driver data
1011 */
1012static void sysc_show_registers(struct sysc *ddata)
1013{
1014 char buf[128];
1015 char *bufp = buf;
1016 int i;
1017
1018 for (i = 0; i < SYSC_MAX_REGS; i++)
1019 bufp += sysc_show_reg(ddata, bufp, i);
1020
1021 bufp += sysc_show_rev(bufp, ddata);
1022 bufp += sysc_show_name(bufp, ddata);
1023
1024 dev_dbg(ddata->dev, "%llx:%x%s\n",
1025 ddata->module_pa, ddata->module_size,
1026 buf);
1027}
1028
1029/**
1030 * sysc_write_sysconfig - handle sysconfig quirks for register write
1031 * @ddata: device driver data
1032 * @value: register value
1033 */
1034static void sysc_write_sysconfig(struct sysc *ddata, u32 value)
1035{
1036 if (ddata->module_unlock_quirk)
1037 ddata->module_unlock_quirk(ddata);
1038
1039 sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], value);
1040
1041 if (ddata->module_lock_quirk)
1042 ddata->module_lock_quirk(ddata);
1043}
1044
1045#define SYSC_IDLE_MASK (SYSC_NR_IDLEMODES - 1)
1046#define SYSC_CLOCACT_ICK 2
1047
1048/* Caller needs to manage sysc_clkdm_deny_idle() and sysc_clkdm_allow_idle() */
1049static int sysc_enable_module(struct device *dev)
1050{
1051 struct sysc *ddata;
1052 const struct sysc_regbits *regbits;
1053 u32 reg, idlemodes, best_mode;
1054 int error;
1055
1056 ddata = dev_get_drvdata(dev);
1057
1058 /*
1059 * Some modules like DSS reset automatically on idle. Enable optional
1060 * reset clocks and wait for OCP softreset to complete.
1061 */
1062 if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET) {
1063 error = sysc_enable_opt_clocks(ddata);
1064 if (error) {
1065 dev_err(ddata->dev,
1066 "Optional clocks failed for enable: %i\n",
1067 error);
1068 return error;
1069 }
1070 }
1071 /*
1072 * Some modules like i2c and hdq1w have unusable reset status unless
1073 * the module reset quirk is enabled. Skip status check on enable.
1074 */
1075 if (!(ddata->cfg.quirks & SYSC_MODULE_QUIRK_ENA_RESETDONE)) {
1076 error = sysc_wait_softreset(ddata);
1077 if (error)
1078 dev_warn(ddata->dev, "OCP softreset timed out\n");
1079 }
1080 if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET)
1081 sysc_disable_opt_clocks(ddata);
1082
1083 /*
1084 * Some subsystem private interconnects, like DSS top level module,
1085 * need only the automatic OCP softreset handling with no sysconfig
1086 * register bits to configure.
1087 */
1088 if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
1089 return 0;
1090
1091 regbits = ddata->cap->regbits;
1092 reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
1093
1094 /*
1095 * Set CLOCKACTIVITY, we only use it for ick. And we only configure it
1096 * based on the SYSC_QUIRK_USE_CLOCKACT flag, not based on the hardware
1097 * capabilities. See the old HWMOD_SET_DEFAULT_CLOCKACT flag.
1098 */
1099 if (regbits->clkact_shift >= 0 &&
1100 (ddata->cfg.quirks & SYSC_QUIRK_USE_CLOCKACT))
1101 reg |= SYSC_CLOCACT_ICK << regbits->clkact_shift;
1102
1103 /* Set SIDLE mode */
1104 idlemodes = ddata->cfg.sidlemodes;
1105 if (!idlemodes || regbits->sidle_shift < 0)
1106 goto set_midle;
1107
1108 if (ddata->cfg.quirks & (SYSC_QUIRK_SWSUP_SIDLE |
1109 SYSC_QUIRK_SWSUP_SIDLE_ACT)) {
1110 best_mode = SYSC_IDLE_NO;
1111
1112 /* Clear WAKEUP */
1113 if (regbits->enwkup_shift >= 0 &&
1114 ddata->cfg.sysc_val & BIT(regbits->enwkup_shift))
1115 reg &= ~BIT(regbits->enwkup_shift);
1116 } else {
1117 best_mode = fls(ddata->cfg.sidlemodes) - 1;
1118 if (best_mode > SYSC_IDLE_MASK) {
1119 dev_err(dev, "%s: invalid sidlemode\n", __func__);
1120 return -EINVAL;
1121 }
1122
1123 /* Set WAKEUP */
1124 if (regbits->enwkup_shift >= 0 &&
1125 ddata->cfg.sysc_val & BIT(regbits->enwkup_shift))
1126 reg |= BIT(regbits->enwkup_shift);
1127 }
1128
1129 reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift);
1130 reg |= best_mode << regbits->sidle_shift;
1131 sysc_write_sysconfig(ddata, reg);
1132
1133set_midle:
1134 /* Set MIDLE mode */
1135 idlemodes = ddata->cfg.midlemodes;
1136 if (!idlemodes || regbits->midle_shift < 0)
1137 goto set_autoidle;
1138
1139 best_mode = fls(ddata->cfg.midlemodes) - 1;
1140 if (best_mode > SYSC_IDLE_MASK) {
1141 dev_err(dev, "%s: invalid midlemode\n", __func__);
1142 error = -EINVAL;
1143 goto save_context;
1144 }
1145
1146 if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_MSTANDBY)
1147 best_mode = SYSC_IDLE_NO;
1148
1149 reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift);
1150 reg |= best_mode << regbits->midle_shift;
1151 sysc_write_sysconfig(ddata, reg);
1152
1153set_autoidle:
1154 /* Autoidle bit must enabled separately if available */
1155 if (regbits->autoidle_shift >= 0 &&
1156 ddata->cfg.sysc_val & BIT(regbits->autoidle_shift)) {
1157 reg |= 1 << regbits->autoidle_shift;
1158 sysc_write_sysconfig(ddata, reg);
1159 }
1160
1161 error = 0;
1162
1163save_context:
1164 /* Save context and flush posted write */
1165 ddata->sysconfig = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
1166
1167 if (ddata->module_enable_quirk)
1168 ddata->module_enable_quirk(ddata);
1169
1170 return error;
1171}
1172
1173static int sysc_best_idle_mode(u32 idlemodes, u32 *best_mode)
1174{
1175 if (idlemodes & BIT(SYSC_IDLE_SMART_WKUP))
1176 *best_mode = SYSC_IDLE_SMART_WKUP;
1177 else if (idlemodes & BIT(SYSC_IDLE_SMART))
1178 *best_mode = SYSC_IDLE_SMART;
1179 else if (idlemodes & BIT(SYSC_IDLE_FORCE))
1180 *best_mode = SYSC_IDLE_FORCE;
1181 else
1182 return -EINVAL;
1183
1184 return 0;
1185}
1186
/*
 * Configure the interconnect target module for idle: program SYSCONFIG
 * with the deepest supported MSTANDBY and SIDLE modes (or force-idle
 * where quirks require software-supervised idle), keep autoidle enabled
 * when available, and save the resulting SYSCONFIG value for later
 * context-loss detection.
 *
 * Caller needs to manage sysc_clkdm_deny_idle() and sysc_clkdm_allow_idle().
 *
 * Returns 0 on success or a negative error code if no valid idle mode
 * could be selected.
 */
static int sysc_disable_module(struct device *dev)
{
	struct sysc *ddata;
	const struct sysc_regbits *regbits;
	u32 reg, idlemodes, best_mode;
	int ret;

	ddata = dev_get_drvdata(dev);
	/* Nothing to do if the module has no SYSCONFIG register */
	if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
		return 0;

	if (ddata->module_disable_quirk)
		ddata->module_disable_quirk(ddata);

	regbits = ddata->cap->regbits;
	reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);

	/* Set MIDLE mode */
	idlemodes = ddata->cfg.midlemodes;
	if (!idlemodes || regbits->midle_shift < 0)
		goto set_sidle;

	ret = sysc_best_idle_mode(idlemodes, &best_mode);
	if (ret) {
		dev_err(dev, "%s: invalid midlemode\n", __func__);
		return ret;
	}

	/* Software-supervised or forced standby quirks need force-idle */
	if (ddata->cfg.quirks & (SYSC_QUIRK_SWSUP_MSTANDBY) ||
	    ddata->cfg.quirks & (SYSC_QUIRK_FORCE_MSTANDBY))
		best_mode = SYSC_IDLE_FORCE;

	reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift);
	reg |= best_mode << regbits->midle_shift;
	sysc_write_sysconfig(ddata, reg);

set_sidle:
	/* Set SIDLE mode */
	idlemodes = ddata->cfg.sidlemodes;
	if (!idlemodes || regbits->sidle_shift < 0) {
		ret = 0;
		goto save_context;
	}

	if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_SIDLE) {
		best_mode = SYSC_IDLE_FORCE;
	} else {
		ret = sysc_best_idle_mode(idlemodes, &best_mode);
		if (ret) {
			dev_err(dev, "%s: invalid sidlemode\n", __func__);
			ret = -EINVAL;
			goto save_context;
		}
	}

	if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_SIDLE_ACT) {
		/* Set WAKEUP */
		if (regbits->enwkup_shift >= 0 &&
		    ddata->cfg.sysc_val & BIT(regbits->enwkup_shift))
			reg |= BIT(regbits->enwkup_shift);
	}

	reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift);
	reg |= best_mode << regbits->sidle_shift;
	/* Keep autoidle enabled together with the new sidle mode */
	if (regbits->autoidle_shift >= 0 &&
	    ddata->cfg.sysc_val & BIT(regbits->autoidle_shift))
		reg |= 1 << regbits->autoidle_shift;
	sysc_write_sysconfig(ddata, reg);

	ret = 0;

save_context:
	/* Save context and flush posted write */
	ddata->sysconfig = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);

	return ret;
}
1265
1266static int __maybe_unused sysc_runtime_suspend_legacy(struct device *dev,
1267 struct sysc *ddata)
1268{
1269 struct ti_sysc_platform_data *pdata;
1270 int error;
1271
1272 pdata = dev_get_platdata(ddata->dev);
1273 if (!pdata)
1274 return 0;
1275
1276 if (!pdata->idle_module)
1277 return -ENODEV;
1278
1279 error = pdata->idle_module(dev, &ddata->cookie);
1280 if (error)
1281 dev_err(dev, "%s: could not idle: %i\n",
1282 __func__, error);
1283
1284 reset_control_assert(ddata->rsts);
1285
1286 return 0;
1287}
1288
1289static int __maybe_unused sysc_runtime_resume_legacy(struct device *dev,
1290 struct sysc *ddata)
1291{
1292 struct ti_sysc_platform_data *pdata;
1293 int error;
1294
1295 pdata = dev_get_platdata(ddata->dev);
1296 if (!pdata)
1297 return 0;
1298
1299 if (!pdata->enable_module)
1300 return -ENODEV;
1301
1302 error = pdata->enable_module(dev, &ddata->cookie);
1303 if (error)
1304 dev_err(dev, "%s: could not enable: %i\n",
1305 __func__, error);
1306
1307 reset_control_deassert(ddata->rsts);
1308
1309 return 0;
1310}
1311
/*
 * Runtime PM suspend: idle the interconnect target module and then gate
 * its clocks. The clockdomain is kept from idling while the module is
 * being reconfigured, hence the deny/allow idle bracket.
 */
static int __maybe_unused sysc_runtime_suspend(struct device *dev)
{
	struct sysc *ddata;
	int error = 0;

	ddata = dev_get_drvdata(dev);

	if (!ddata->enabled)
		return 0;

	sysc_clkdm_deny_idle(ddata);

	if (ddata->legacy_mode) {
		error = sysc_runtime_suspend_legacy(dev, ddata);
		if (error)
			goto err_allow_idle;
	} else {
		error = sysc_disable_module(dev);
		if (error)
			goto err_allow_idle;
	}

	/* Gate clocks only after the module has been idled */
	sysc_disable_main_clocks(ddata);

	if (sysc_opt_clks_needed(ddata))
		sysc_disable_opt_clocks(ddata);

	ddata->enabled = false;

err_allow_idle:
	sysc_clkdm_allow_idle(ddata);

	reset_control_assert(ddata->rsts);

	return error;
}
1348
/*
 * Runtime PM resume: enable clocks, deassert resets and re-enable the
 * interconnect target module, undoing sysc_runtime_suspend() in reverse
 * order. On error each goto label unwinds exactly what was enabled.
 */
static int __maybe_unused sysc_runtime_resume(struct device *dev)
{
	struct sysc *ddata;
	int error = 0;

	ddata = dev_get_drvdata(dev);

	if (ddata->enabled)
		return 0;


	sysc_clkdm_deny_idle(ddata);

	/* Optional clocks must be on before the main clocks */
	if (sysc_opt_clks_needed(ddata)) {
		error = sysc_enable_opt_clocks(ddata);
		if (error)
			goto err_allow_idle;
	}

	error = sysc_enable_main_clocks(ddata);
	if (error)
		goto err_opt_clocks;

	reset_control_deassert(ddata->rsts);

	if (ddata->legacy_mode) {
		error = sysc_runtime_resume_legacy(dev, ddata);
		if (error)
			goto err_main_clocks;
	} else {
		error = sysc_enable_module(dev);
		if (error)
			goto err_main_clocks;
	}

	ddata->enabled = true;

	sysc_clkdm_allow_idle(ddata);

	return 0;

err_main_clocks:
	sysc_disable_main_clocks(ddata);
err_opt_clocks:
	if (sysc_opt_clks_needed(ddata))
		sysc_disable_opt_clocks(ddata);
err_allow_idle:
	sysc_clkdm_allow_idle(ddata);

	return error;
}
1400
1401/*
1402 * Checks if device context was lost. Assumes the sysconfig register value
1403 * after lost context is different from the configured value. Only works for
1404 * enabled devices.
1405 *
1406 * Eventually we may want to also add support to using the context lost
1407 * registers that some SoCs have.
1408 */
1409static int sysc_check_context(struct sysc *ddata)
1410{
1411 u32 reg;
1412
1413 if (!ddata->enabled)
1414 return -ENODATA;
1415
1416 reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
1417 if (reg == ddata->sysconfig)
1418 return 0;
1419
1420 return -EACCES;
1421}
1422
/*
 * Re-initialize a module after possible context loss: cycle it through
 * suspend and resume, optionally reset it and restore the saved
 * SYSCONFIG, then suspend it again unless @leave_enabled is set.
 *
 * Failures along the way only produce warnings; the last error code is
 * returned.
 */
static int sysc_reinit_module(struct sysc *ddata, bool leave_enabled)
{
	struct device *dev = ddata->dev;
	int error;

	if (ddata->enabled) {
		/* Nothing to do if enabled and context not lost */
		error = sysc_check_context(ddata);
		if (!error)
			return 0;

		/* Disable target module if it is enabled */
		error = sysc_runtime_suspend(dev);
		if (error)
			dev_warn(dev, "reinit suspend failed: %i\n", error);
	}

	/* Enable target module */
	error = sysc_runtime_resume(dev);
	if (error)
		dev_warn(dev, "reinit resume failed: %i\n", error);

	/* Some modules like am335x gpmc need reset and restore of sysconfig */
	if (ddata->cfg.quirks & SYSC_QUIRK_RESET_ON_CTX_LOST) {
		error = sysc_reset(ddata);
		if (error)
			dev_warn(dev, "reinit reset failed: %i\n", error);

		sysc_write_sysconfig(ddata, ddata->sysconfig);
	}

	if (leave_enabled)
		return error;

	/* Disable target module if no leave_enabled was set */
	error = sysc_runtime_suspend(dev);
	if (error)
		dev_warn(dev, "reinit suspend failed: %i\n", error);

	return error;
}
1464
1465static int __maybe_unused sysc_noirq_suspend(struct device *dev)
1466{
1467 struct sysc *ddata;
1468
1469 ddata = dev_get_drvdata(dev);
1470
1471 if (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE)
1472 return 0;
1473
1474 if (!ddata->enabled)
1475 return 0;
1476
1477 ddata->needs_resume = 1;
1478
1479 return sysc_runtime_suspend(dev);
1480}
1481
1482static int __maybe_unused sysc_noirq_resume(struct device *dev)
1483{
1484 struct sysc *ddata;
1485 int error = 0;
1486
1487 ddata = dev_get_drvdata(dev);
1488
1489 if (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE)
1490 return 0;
1491
1492 if (ddata->cfg.quirks & SYSC_QUIRK_REINIT_ON_RESUME) {
1493 error = sysc_reinit_module(ddata, ddata->needs_resume);
1494 if (error)
1495 dev_warn(dev, "noirq_resume failed: %i\n", error);
1496 } else if (ddata->needs_resume) {
1497 error = sysc_runtime_resume(dev);
1498 if (error)
1499 dev_warn(dev, "noirq_resume failed: %i\n", error);
1500 }
1501
1502 ddata->needs_resume = 0;
1503
1504 return error;
1505}
1506
/* Device PM ops: noirq system sleep hooks plus runtime PM callbacks */
static const struct dev_pm_ops sysc_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sysc_noirq_suspend, sysc_noirq_resume)
	SET_RUNTIME_PM_OPS(sysc_runtime_suspend,
			   sysc_runtime_resume,
			   NULL)
};
1513
/* Module revision register based quirks */
struct sysc_revision_quirk {
	const char *name;	/* detected module name */
	u32 base;		/* module physical base, 0 matches any base */
	int rev_offset;		/* revision register offset, -ENODEV if none */
	int sysc_offset;	/* sysconfig register offset, -ENODEV if none */
	int syss_offset;	/* sysstatus register offset, -ENODEV if none */
	u32 revision;		/* expected revision register value */
	u32 revision_mask;	/* mask applied to revision when matching */
	u32 quirks;		/* SYSC_QUIRK_* flags to set on a match */
};
1525
/* Convenience initializer for struct sysc_revision_quirk table entries */
#define SYSC_QUIRK(optname, optbase, optrev, optsysc, optsyss,		\
		   optrev_val, optrevmask, optquirkmask)		\
	{								\
		.name = (optname),					\
		.base = (optbase),					\
		.rev_offset = (optrev),					\
		.sysc_offset = (optsysc),				\
		.syss_offset = (optsyss),				\
		.revision = (optrev_val),				\
		.revision_mask = (optrevmask),				\
		.quirks = (optquirkmask),				\
	}
1538
static const struct sysc_revision_quirk sysc_revision_quirks[] = {
	/* Quirks that need to be set based on the module address */
	SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff,
		   SYSC_QUIRK_EXT_OPT_CLOCK | SYSC_QUIRK_NO_RESET_ON_INIT |
		   SYSC_QUIRK_SWSUP_SIDLE),

	/* Quirks that need to be set based on detected module */
	SYSC_QUIRK("aess", 0, 0, 0x10, -ENODEV, 0x40000000, 0xffffffff,
		   SYSC_MODULE_QUIRK_AESS),
	/* Errata i893 handling for dra7 dcan1 and 2 */
	SYSC_QUIRK("dcan", 0x4ae3c000, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff,
		   SYSC_QUIRK_CLKDM_NOAUTO),
	SYSC_QUIRK("dcan", 0x48480000, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff,
		   SYSC_QUIRK_CLKDM_NOAUTO),
	SYSC_QUIRK("dss", 0x4832a000, 0, 0x10, 0x14, 0x00000020, 0xffffffff,
		   SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET),
	SYSC_QUIRK("dss", 0x58000000, 0, -ENODEV, 0x14, 0x00000040, 0xffffffff,
		   SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET),
	SYSC_QUIRK("dss", 0x58000000, 0, -ENODEV, 0x14, 0x00000061, 0xffffffff,
		   SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET),
	SYSC_QUIRK("dwc3", 0x48880000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff,
		   SYSC_QUIRK_CLKDM_NOAUTO),
	SYSC_QUIRK("dwc3", 0x488c0000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff,
		   SYSC_QUIRK_CLKDM_NOAUTO),
	SYSC_QUIRK("gpio", 0, 0, 0x10, 0x114, 0x50600801, 0xffff00ff,
		   SYSC_QUIRK_OPT_CLKS_IN_RESET),
	SYSC_QUIRK("gpmc", 0, 0, 0x10, 0x14, 0x00000060, 0xffffffff,
		   SYSC_QUIRK_REINIT_ON_CTX_LOST | SYSC_QUIRK_RESET_ON_CTX_LOST |
		   SYSC_QUIRK_GPMC_DEBUG),
	SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50030200, 0xffffffff,
		   SYSC_QUIRK_OPT_CLKS_NEEDED),
	SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x00000006, 0xffffffff,
		   SYSC_MODULE_QUIRK_HDQ1W | SYSC_MODULE_QUIRK_ENA_RESETDONE),
	SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x0000000a, 0xffffffff,
		   SYSC_MODULE_QUIRK_HDQ1W | SYSC_MODULE_QUIRK_ENA_RESETDONE),
	SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x00000036, 0x000000ff,
		   SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
	SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x0000003c, 0x000000ff,
		   SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
	SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x00000040, 0x000000ff,
		   SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
	SYSC_QUIRK("i2c", 0, 0, 0x10, 0x90, 0x5040000a, 0xfffff0f0,
		   SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
	SYSC_QUIRK("gpu", 0x50000000, 0x14, -ENODEV, -ENODEV, 0x00010201, 0xffffffff, 0),
	SYSC_QUIRK("gpu", 0x50000000, 0xfe00, 0xfe10, -ENODEV, 0x40000000 , 0xffffffff,
		   SYSC_MODULE_QUIRK_SGX),
	SYSC_QUIRK("lcdc", 0, 0, 0x54, -ENODEV, 0x4f201000, 0xffffffff,
		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
	SYSC_QUIRK("mcasp", 0, 0, 0x4, -ENODEV, 0x44306302, 0xffffffff,
		   SYSC_QUIRK_SWSUP_SIDLE),
	SYSC_QUIRK("rtc", 0, 0x74, 0x78, -ENODEV, 0x4eb01908, 0xffff00f0,
		   SYSC_MODULE_QUIRK_RTC_UNLOCK),
	SYSC_QUIRK("tptc", 0, 0, 0x10, -ENODEV, 0x40006c00, 0xffffefff,
		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
	SYSC_QUIRK("tptc", 0, 0, -ENODEV, -ENODEV, 0x40007c00, 0xffffffff,
		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
	SYSC_QUIRK("sata", 0, 0xfc, 0x1100, -ENODEV, 0x5e412000, 0xffffffff,
		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff,
		   SYSC_QUIRK_SWSUP_SIDLE_ACT),
	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
		   SYSC_QUIRK_SWSUP_SIDLE_ACT),
	/* Uarts on omap4 and later */
	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff,
		   SYSC_QUIRK_SWSUP_SIDLE_ACT),
	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff,
		   SYSC_QUIRK_SWSUP_SIDLE_ACT),
	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47424e03, 0xffffffff,
		   SYSC_QUIRK_SWSUP_SIDLE_ACT),
	SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff,
		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
	SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -ENODEV, 0x50700101, 0xffffffff,
		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
	SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000033,
		   0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY |
		   SYSC_MODULE_QUIRK_OTG),
	SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000040,
		   0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY |
		   SYSC_MODULE_QUIRK_OTG),
	SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050,
		   0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY |
		   SYSC_MODULE_QUIRK_OTG),
	SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -ENODEV, 0x4ea2080d, 0xffffffff,
		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY |
		   SYSC_QUIRK_REINIT_ON_CTX_LOST),
	SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
		   SYSC_MODULE_QUIRK_WDT),
	/* PRUSS on am3, am4 and am5 */
	SYSC_QUIRK("pruss", 0, 0x26000, 0x26004, -ENODEV, 0x47000000, 0xff000000,
		   SYSC_MODULE_QUIRK_PRUSS),
	/* Watchdog on am3 and am4 */
	SYSC_QUIRK("wdt", 0x44e35000, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
		   SYSC_MODULE_QUIRK_WDT | SYSC_QUIRK_SWSUP_SIDLE),

	/* Debug only: known modules with no quirks, listed for detection */
#ifdef DEBUG
	SYSC_QUIRK("adc", 0, 0, 0x10, -ENODEV, 0x47300001, 0xffffffff, 0),
	SYSC_QUIRK("atl", 0, 0, -ENODEV, -ENODEV, 0x0a070100, 0xffffffff, 0),
	SYSC_QUIRK("cm", 0, 0, -ENODEV, -ENODEV, 0x40000301, 0xffffffff, 0),
	SYSC_QUIRK("control", 0, 0, 0x10, -ENODEV, 0x40000900, 0xffffffff, 0),
	SYSC_QUIRK("cpgmac", 0, 0x1200, 0x1208, 0x1204, 0x4edb1902,
		   0xffff00f0, 0),
	SYSC_QUIRK("dcan", 0, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff, 0),
	SYSC_QUIRK("dcan", 0, 0x20, -ENODEV, -ENODEV, 0x4edb1902, 0xffffffff, 0),
	SYSC_QUIRK("dispc", 0x4832a400, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0),
	SYSC_QUIRK("dispc", 0x58001000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0),
	SYSC_QUIRK("dispc", 0x58001000, 0, 0x10, 0x14, 0x00000051, 0xffffffff, 0),
	SYSC_QUIRK("dmic", 0, 0, 0x10, -ENODEV, 0x50010000, 0xffffffff, 0),
	SYSC_QUIRK("dsi", 0x58004000, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0),
	SYSC_QUIRK("dsi", 0x58005000, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0),
	SYSC_QUIRK("dsi", 0x58005000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0),
	SYSC_QUIRK("dsi", 0x58009000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0),
	SYSC_QUIRK("dwc3", 0, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff, 0),
	SYSC_QUIRK("d2d", 0x4a0b6000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
	SYSC_QUIRK("d2d", 0x4a0cd000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
	SYSC_QUIRK("elm", 0x48080000, 0, 0x10, 0x14, 0x00000020, 0xffffffff, 0),
	SYSC_QUIRK("emif", 0, 0, -ENODEV, -ENODEV, 0x40441403, 0xffff0fff, 0),
	SYSC_QUIRK("emif", 0, 0, -ENODEV, -ENODEV, 0x50440500, 0xffffffff, 0),
	SYSC_QUIRK("epwmss", 0, 0, 0x4, -ENODEV, 0x47400001, 0xffffffff, 0),
	SYSC_QUIRK("gpu", 0, 0x1fc00, 0x1fc10, -ENODEV, 0, 0, 0),
	SYSC_QUIRK("gpu", 0, 0xfe00, 0xfe10, -ENODEV, 0x40000000 , 0xffffffff, 0),
	SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50031d00, 0xffffffff, 0),
	SYSC_QUIRK("hsi", 0, 0, 0x10, 0x14, 0x50043101, 0xffffffff, 0),
	SYSC_QUIRK("iss", 0, 0, 0x10, -ENODEV, 0x40000101, 0xffffffff, 0),
	SYSC_QUIRK("keypad", 0x4a31c000, 0, 0x10, 0x14, 0x00000020, 0xffffffff, 0),
	SYSC_QUIRK("mcasp", 0, 0, 0x4, -ENODEV, 0x44307b02, 0xffffffff, 0),
	SYSC_QUIRK("mcbsp", 0, -ENODEV, 0x8c, -ENODEV, 0, 0, 0),
	SYSC_QUIRK("mcspi", 0, 0, 0x10, -ENODEV, 0x40300a0b, 0xffff00ff, 0),
	SYSC_QUIRK("mcspi", 0, 0, 0x110, 0x114, 0x40300a0b, 0xffffffff, 0),
	SYSC_QUIRK("mailbox", 0, 0, 0x10, -ENODEV, 0x00000400, 0xffffffff, 0),
	SYSC_QUIRK("m3", 0, 0, -ENODEV, -ENODEV, 0x5f580105, 0x0fff0f00, 0),
	SYSC_QUIRK("ocp2scp", 0, 0, 0x10, 0x14, 0x50060005, 0xfffffff0, 0),
	SYSC_QUIRK("ocp2scp", 0, 0, -ENODEV, -ENODEV, 0x50060007, 0xffffffff, 0),
	SYSC_QUIRK("padconf", 0, 0, 0x10, -ENODEV, 0x4fff0800, 0xffffffff, 0),
	SYSC_QUIRK("padconf", 0, 0, -ENODEV, -ENODEV, 0x40001100, 0xffffffff, 0),
	SYSC_QUIRK("pcie", 0x51000000, -ENODEV, -ENODEV, -ENODEV, 0, 0, 0),
	SYSC_QUIRK("pcie", 0x51800000, -ENODEV, -ENODEV, -ENODEV, 0, 0, 0),
	SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x40000100, 0xffffffff, 0),
	SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x00004102, 0xffffffff, 0),
	SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x40000400, 0xffffffff, 0),
	SYSC_QUIRK("rfbi", 0x4832a800, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
	SYSC_QUIRK("rfbi", 0x58002000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
	SYSC_QUIRK("scm", 0, 0, 0x10, -ENODEV, 0x40000900, 0xffffffff, 0),
	SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4e8b0100, 0xffffffff, 0),
	SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4f000100, 0xffffffff, 0),
	SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x40000900, 0xffffffff, 0),
	SYSC_QUIRK("scrm", 0, 0, -ENODEV, -ENODEV, 0x00000010, 0xffffffff, 0),
	SYSC_QUIRK("sdio", 0, 0, 0x10, -ENODEV, 0x40202301, 0xffff0ff0, 0),
	SYSC_QUIRK("sdio", 0, 0x2fc, 0x110, 0x114, 0x31010000, 0xffffffff, 0),
	SYSC_QUIRK("sdma", 0, 0, 0x2c, 0x28, 0x00010900, 0xffffffff, 0),
	SYSC_QUIRK("sham", 0, 0x100, 0x110, 0x114, 0x40000c03, 0xffffffff, 0),
	SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40000902, 0xffffffff, 0),
	SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40002903, 0xffffffff, 0),
	SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x24, -ENODEV, 0x00000000, 0xffffffff, 0),
	SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x38, -ENODEV, 0x00000000, 0xffffffff, 0),
	SYSC_QUIRK("spinlock", 0, 0, 0x10, -ENODEV, 0x50020000, 0xffffffff, 0),
	SYSC_QUIRK("rng", 0, 0x1fe0, 0x1fe4, -ENODEV, 0x00000020, 0xffffffff, 0),
	SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000013, 0xffffffff, 0),
	SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff, 0),
	/* Some timers on omap4 and later */
	SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x50002100, 0xffffffff, 0),
	SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x4fff1301, 0xffff00ff, 0),
	SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000040, 0xffffffff, 0),
	SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000011, 0xffffffff, 0),
	SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000060, 0xffffffff, 0),
	SYSC_QUIRK("tpcc", 0, 0, -ENODEV, -ENODEV, 0x40014c00, 0xffffffff, 0),
	SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000004, 0xffffffff, 0),
	SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000008, 0xffffffff, 0),
	SYSC_QUIRK("venc", 0x58003000, 0, -ENODEV, -ENODEV, 0x00000002, 0xffffffff, 0),
	SYSC_QUIRK("vfpe", 0, 0, 0x104, -ENODEV, 0x4d001200, 0xffffffff, 0),
#endif
};
1710
1711/*
1712 * Early quirks based on module base and register offsets only that are
1713 * needed before the module revision can be read
1714 */
1715static void sysc_init_early_quirks(struct sysc *ddata)
1716{
1717 const struct sysc_revision_quirk *q;
1718 int i;
1719
1720 for (i = 0; i < ARRAY_SIZE(sysc_revision_quirks); i++) {
1721 q = &sysc_revision_quirks[i];
1722
1723 if (!q->base)
1724 continue;
1725
1726 if (q->base != ddata->module_pa)
1727 continue;
1728
1729 if (q->rev_offset != ddata->offsets[SYSC_REVISION])
1730 continue;
1731
1732 if (q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG])
1733 continue;
1734
1735 if (q->syss_offset != ddata->offsets[SYSC_SYSSTATUS])
1736 continue;
1737
1738 ddata->name = q->name;
1739 ddata->cfg.quirks |= q->quirks;
1740 }
1741}
1742
1743/* Quirks that also consider the revision register value */
1744static void sysc_init_revision_quirks(struct sysc *ddata)
1745{
1746 const struct sysc_revision_quirk *q;
1747 int i;
1748
1749 for (i = 0; i < ARRAY_SIZE(sysc_revision_quirks); i++) {
1750 q = &sysc_revision_quirks[i];
1751
1752 if (q->base && q->base != ddata->module_pa)
1753 continue;
1754
1755 if (q->rev_offset != ddata->offsets[SYSC_REVISION])
1756 continue;
1757
1758 if (q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG])
1759 continue;
1760
1761 if (q->syss_offset != ddata->offsets[SYSC_SYSSTATUS])
1762 continue;
1763
1764 if (q->revision == ddata->revision ||
1765 (q->revision & q->revision_mask) ==
1766 (ddata->revision & q->revision_mask)) {
1767 ddata->name = q->name;
1768 ddata->cfg.quirks |= q->quirks;
1769 }
1770 }
1771}
1772
1773/*
1774 * DSS needs dispc outputs disabled to reset modules. Returns mask of
1775 * enabled DSS interrupts. Eventually we may be able to do this on
1776 * dispc init rather than top-level DSS init.
1777 */
1778static u32 sysc_quirk_dispc(struct sysc *ddata, int dispc_offset,
1779 bool disable)
1780{
1781 bool lcd_en, digit_en, lcd2_en = false, lcd3_en = false;
1782 const int lcd_en_mask = BIT(0), digit_en_mask = BIT(1);
1783 int manager_count;
1784 bool framedonetv_irq = true;
1785 u32 val, irq_mask = 0;
1786
1787 switch (sysc_soc->soc) {
1788 case SOC_2420 ... SOC_3630:
1789 manager_count = 2;
1790 framedonetv_irq = false;
1791 break;
1792 case SOC_4430 ... SOC_4470:
1793 manager_count = 3;
1794 break;
1795 case SOC_5430:
1796 case SOC_DRA7:
1797 manager_count = 4;
1798 break;
1799 case SOC_AM4:
1800 manager_count = 1;
1801 framedonetv_irq = false;
1802 break;
1803 case SOC_UNKNOWN:
1804 default:
1805 return 0;
1806 }
1807
1808 /* Remap the whole module range to be able to reset dispc outputs */
1809 devm_iounmap(ddata->dev, ddata->module_va);
1810 ddata->module_va = devm_ioremap(ddata->dev,
1811 ddata->module_pa,
1812 ddata->module_size);
1813 if (!ddata->module_va)
1814 return -EIO;
1815
1816 /* DISP_CONTROL, shut down lcd and digit on disable if enabled */
1817 val = sysc_read(ddata, dispc_offset + 0x40);
1818 lcd_en = val & lcd_en_mask;
1819 digit_en = val & digit_en_mask;
1820 if (lcd_en)
1821 irq_mask |= BIT(0); /* FRAMEDONE */
1822 if (digit_en) {
1823 if (framedonetv_irq)
1824 irq_mask |= BIT(24); /* FRAMEDONETV */
1825 else
1826 irq_mask |= BIT(2) | BIT(3); /* EVSYNC bits */
1827 }
1828 if (disable && (lcd_en || digit_en))
1829 sysc_write(ddata, dispc_offset + 0x40,
1830 val & ~(lcd_en_mask | digit_en_mask));
1831
1832 if (manager_count <= 2)
1833 return irq_mask;
1834
1835 /* DISPC_CONTROL2 */
1836 val = sysc_read(ddata, dispc_offset + 0x238);
1837 lcd2_en = val & lcd_en_mask;
1838 if (lcd2_en)
1839 irq_mask |= BIT(22); /* FRAMEDONE2 */
1840 if (disable && lcd2_en)
1841 sysc_write(ddata, dispc_offset + 0x238,
1842 val & ~lcd_en_mask);
1843
1844 if (manager_count <= 3)
1845 return irq_mask;
1846
1847 /* DISPC_CONTROL3 */
1848 val = sysc_read(ddata, dispc_offset + 0x848);
1849 lcd3_en = val & lcd_en_mask;
1850 if (lcd3_en)
1851 irq_mask |= BIT(30); /* FRAMEDONE3 */
1852 if (disable && lcd3_en)
1853 sysc_write(ddata, dispc_offset + 0x848,
1854 val & ~lcd_en_mask);
1855
1856 return irq_mask;
1857}
1858
1859/* DSS needs child outputs disabled and SDI registers cleared for reset */
1860static void sysc_pre_reset_quirk_dss(struct sysc *ddata)
1861{
1862 const int dispc_offset = 0x1000;
1863 int error;
1864 u32 irq_mask, val;
1865
1866 /* Get enabled outputs */
1867 irq_mask = sysc_quirk_dispc(ddata, dispc_offset, false);
1868 if (!irq_mask)
1869 return;
1870
1871 /* Clear IRQSTATUS */
1872 sysc_write(ddata, dispc_offset + 0x18, irq_mask);
1873
1874 /* Disable outputs */
1875 val = sysc_quirk_dispc(ddata, dispc_offset, true);
1876
1877 /* Poll IRQSTATUS */
1878 error = readl_poll_timeout(ddata->module_va + dispc_offset + 0x18,
1879 val, val != irq_mask, 100, 50);
1880 if (error)
1881 dev_warn(ddata->dev, "%s: timed out %08x !+ %08x\n",
1882 __func__, val, irq_mask);
1883
1884 if (sysc_soc->soc == SOC_3430 || sysc_soc->soc == SOC_AM35) {
1885 /* Clear DSS_SDI_CONTROL */
1886 sysc_write(ddata, 0x44, 0);
1887
1888 /* Clear DSS_PLL_CONTROL */
1889 sysc_write(ddata, 0x48, 0);
1890 }
1891
1892 /* Clear DSS_CONTROL to switch DSS clock sources to PRCM if not */
1893 sysc_write(ddata, 0x40, 0);
1894}
1895
1896/* 1-wire needs module's internal clocks enabled for reset */
1897static void sysc_pre_reset_quirk_hdq1w(struct sysc *ddata)
1898{
1899 int offset = 0x0c; /* HDQ_CTRL_STATUS */
1900 u16 val;
1901
1902 val = sysc_read(ddata, offset);
1903 val |= BIT(5);
1904 sysc_write(ddata, offset, val);
1905}
1906
/* AESS (Audio Engine SubSystem) needs autogating set after enable */
static void sysc_module_enable_quirk_aess(struct sysc *ddata)
{
	const int autogating = 0x7c; /* AESS_AUTO_GATING_ENABLE */

	/* Writing 1 turns autogating on */
	sysc_write(ddata, autogating, 1);
}
1914
1915/* I2C needs to be disabled for reset */
1916static void sysc_clk_quirk_i2c(struct sysc *ddata, bool enable)
1917{
1918 int offset;
1919 u16 val;
1920
1921 /* I2C_CON, omap2/3 is different from omap4 and later */
1922 if ((ddata->revision & 0xffffff00) == 0x001f0000)
1923 offset = 0x24;
1924 else
1925 offset = 0xa4;
1926
1927 /* I2C_EN */
1928 val = sysc_read(ddata, offset);
1929 if (enable)
1930 val |= BIT(15);
1931 else
1932 val &= ~BIT(15);
1933 sysc_write(ddata, offset, val);
1934}
1935
/* Disable the I2C module before issuing a softreset */
static void sysc_pre_reset_quirk_i2c(struct sysc *ddata)
{
	sysc_clk_quirk_i2c(ddata, false);
}
1940
/* Re-enable the I2C module after the softreset has been issued */
static void sysc_post_reset_quirk_i2c(struct sysc *ddata)
{
	sysc_clk_quirk_i2c(ddata, true);
}
1945
/* RTC on am3 and 4 needs to be unlocked and locked for sysconfig */
static void sysc_quirk_rtc(struct sysc *ddata, bool lock)
{
	u32 val, kick0_val = 0, kick1_val = 0;
	unsigned long flags;
	int error;

	/* Magic kick values unlock the RTC registers; zeros lock them */
	if (!lock) {
		kick0_val = 0x83e70b13;
		kick1_val = 0x95a4f1e0;
	}

	/* Interrupts off: the unlock window after BUSY clears is short */
	local_irq_save(flags);
	/* RTC_STATUS BUSY bit may stay active for 1/32768 seconds (~30 usec) */
	error = readl_poll_timeout_atomic(ddata->module_va + 0x44, val,
					  !(val & BIT(0)), 100, 50);
	if (error)
		dev_warn(ddata->dev, "rtc busy timeout\n");
	/* Now we have ~15 microseconds to read/write various registers */
	sysc_write(ddata, 0x6c, kick0_val);	/* RTC KICK0 */
	sysc_write(ddata, 0x70, kick1_val);	/* RTC KICK1 */
	local_irq_restore(flags);
}
1969
/* Unlock the RTC registers so SYSCONFIG can be written */
static void sysc_module_unlock_quirk_rtc(struct sysc *ddata)
{
	sysc_quirk_rtc(ddata, false);
}
1974
/* Lock the RTC registers again after SYSCONFIG has been written */
static void sysc_module_lock_quirk_rtc(struct sysc *ddata)
{
	sysc_quirk_rtc(ddata, true);
}
1979
/* OTG omap2430 glue layer up to omap4 needs OTG_FORCESTDBY configured */
static void sysc_module_enable_quirk_otg(struct sysc *ddata)
{
	const int forcestdby = 0x414; /* OTG_FORCESTDBY */

	/* Clear the register so the module is not forced into standby */
	sysc_write(ddata, forcestdby, 0);
}
1987
/* Force the OTG glue layer into standby when the module is disabled */
static void sysc_module_disable_quirk_otg(struct sysc *ddata)
{
	const int forcestdby = 0x414; /* OTG_FORCESTDBY */

	/* Set ENABLEFORCE */
	sysc_write(ddata, forcestdby, BIT(0));
}
1995
/* 36xx SGX needs a quirk for to bypass OCP IPG interrupt logic */
static void sysc_module_enable_quirk_sgx(struct sysc *ddata)
{
	const int ocp_debug_config = 0xff08; /* OCP_DEBUG_CONFIG */

	/* Set THALIA_INT_BYPASS */
	sysc_write(ddata, ocp_debug_config, BIT(31));
}
2004
/* Watchdog timer needs a disable sequence after reset */
static void sysc_reset_done_quirk_wdt(struct sysc *ddata)
{
	int wps, spr, error;
	u32 val;

	wps = 0x34;	/* write posted status register, presumably WDT_WWPS -- verify against TRM */
	spr = 0x48;	/* start/stop register, presumably WDT_WSPR -- verify against TRM */

	/* First half of the 0xaaaa/0x5555 disable sequence */
	sysc_write(ddata, spr, 0xaaaa);
	/* Wait for the posted write to the spr register to complete */
	error = readl_poll_timeout(ddata->module_va + wps, val,
				   !(val & 0x10), 100,
				   MAX_MODULE_SOFTRESET_WAIT);
	if (error)
		dev_warn(ddata->dev, "wdt disable step1 failed\n");

	/* Second half completes the disable sequence */
	sysc_write(ddata, spr, 0x5555);
	error = readl_poll_timeout(ddata->module_va + wps, val,
				   !(val & 0x10), 100,
				   MAX_MODULE_SOFTRESET_WAIT);
	if (error)
		dev_warn(ddata->dev, "wdt disable step2 failed\n");
}
2028
2029/* PRUSS needs to set MSTANDBY_INIT inorder to idle properly */
2030static void sysc_module_disable_quirk_pruss(struct sysc *ddata)
2031{
2032 u32 reg;
2033
2034 reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
2035 reg |= SYSC_PRUSS_STANDBY_INIT;
2036 sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
2037}
2038
/*
 * Hook up module specific quirk handler functions based on the detected
 * quirk flags. Note that the early returns make the hdq1w, gpmc-debug,
 * i2c and rtc cases exclusive of the handlers assigned further down.
 * Quirk handlers only apply to non-legacy, detected modules.
 */
static void sysc_init_module_quirks(struct sysc *ddata)
{
	if (ddata->legacy_mode || !ddata->name)
		return;

	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_HDQ1W) {
		ddata->pre_reset_quirk = sysc_pre_reset_quirk_hdq1w;

		return;
	}

#ifdef CONFIG_OMAP_GPMC_DEBUG
	/* With gpmc debug, skip reset so the bootloader config is kept */
	if (ddata->cfg.quirks & SYSC_QUIRK_GPMC_DEBUG) {
		ddata->cfg.quirks |= SYSC_QUIRK_NO_RESET_ON_INIT;

		return;
	}
#endif

	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_I2C) {
		ddata->pre_reset_quirk = sysc_pre_reset_quirk_i2c;
		ddata->post_reset_quirk = sysc_post_reset_quirk_i2c;

		return;
	}

	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_AESS)
		ddata->module_enable_quirk = sysc_module_enable_quirk_aess;

	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_DSS_RESET)
		ddata->pre_reset_quirk = sysc_pre_reset_quirk_dss;

	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_RTC_UNLOCK) {
		ddata->module_unlock_quirk = sysc_module_unlock_quirk_rtc;
		ddata->module_lock_quirk = sysc_module_lock_quirk_rtc;

		return;
	}

	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_OTG) {
		ddata->module_enable_quirk = sysc_module_enable_quirk_otg;
		ddata->module_disable_quirk = sysc_module_disable_quirk_otg;
	}

	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_SGX)
		ddata->module_enable_quirk = sysc_module_enable_quirk_sgx;

	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_WDT) {
		ddata->reset_done_quirk = sysc_reset_done_quirk_wdt;
		/* The wdt disable sequence is also run on module disable */
		ddata->module_disable_quirk = sysc_reset_done_quirk_wdt;
	}

	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_PRUSS)
		ddata->module_disable_quirk = sysc_module_disable_quirk_pruss;
}
2094
/*
 * Register the module clocks with the clockdomain via the platform data
 * callback. With two clocks both fck and ick are passed, with one clock
 * only fck; with no clocks or no callback there is nothing to do.
 *
 * Returns 0 on success or when no clockdomain handling is needed, else
 * a negative error code.
 */
static int sysc_clockdomain_init(struct sysc *ddata)
{
	struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
	struct clk *fck = NULL, *ick = NULL;
	int error;

	if (!pdata || !pdata->init_clockdomain)
		return 0;

	/* NOTE(review): no default case, nr_clocks > 2 passes NULL clocks */
	switch (ddata->nr_clocks) {
	case 2:
		ick = ddata->clocks[SYSC_ICK];
		fallthrough;
	case 1:
		fck = ddata->clocks[SYSC_FCK];
		break;
	case 0:
		return 0;
	}

	/* -ENODEV from the callback means no clockdomain handling needed */
	error = pdata->init_clockdomain(ddata->dev, fck, ick, &ddata->cookie);
	if (!error || error == -ENODEV)
		return 0;

	return error;
}
2121
2122/*
2123 * Note that pdata->init_module() typically does a reset first. After
2124 * pdata->init_module() is done, PM runtime can be used for the interconnect
2125 * target module.
2126 */
2127static int sysc_legacy_init(struct sysc *ddata)
2128{
2129 struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
2130 int error;
2131
2132 if (!pdata || !pdata->init_module)
2133 return 0;
2134
2135 error = pdata->init_module(ddata->dev, ddata->mdata, &ddata->cookie);
2136 if (error == -EEXIST)
2137 error = 0;
2138
2139 return error;
2140}
2141
2142/*
2143 * Note that the caller must ensure the interconnect target module is enabled
2144 * before calling reset. Otherwise reset will not complete.
2145 */
2146static int sysc_reset(struct sysc *ddata)
2147{
2148 int sysc_offset, sysc_val, error;
2149 u32 sysc_mask;
2150
2151 sysc_offset = ddata->offsets[SYSC_SYSCONFIG];
2152
2153 if (ddata->legacy_mode ||
2154 ddata->cap->regbits->srst_shift < 0)
2155 return 0;
2156
2157 sysc_mask = BIT(ddata->cap->regbits->srst_shift);
2158
2159 if (ddata->pre_reset_quirk)
2160 ddata->pre_reset_quirk(ddata);
2161
2162 if (sysc_offset >= 0) {
2163 sysc_val = sysc_read_sysconfig(ddata);
2164 sysc_val |= sysc_mask;
2165 sysc_write(ddata, sysc_offset, sysc_val);
2166
2167 /*
2168 * Some devices need a delay before reading registers
2169 * after reset. Presumably a srst_udelay is not needed
2170 * for devices that use a rstctrl register reset.
2171 */
2172 if (ddata->cfg.srst_udelay)
2173 fsleep(ddata->cfg.srst_udelay);
2174
2175 /*
2176 * Flush posted write. For devices needing srst_udelay
2177 * this should trigger an interconnect error if the
2178 * srst_udelay value is needed but not configured.
2179 */
2180 sysc_val = sysc_read_sysconfig(ddata);
2181 }
2182
2183 if (ddata->post_reset_quirk)
2184 ddata->post_reset_quirk(ddata);
2185
2186 error = sysc_wait_softreset(ddata);
2187 if (error)
2188 dev_warn(ddata->dev, "OCP softreset timed out\n");
2189
2190 if (ddata->reset_done_quirk)
2191 ddata->reset_done_quirk(ddata);
2192
2193 return error;
2194}
2195
2196/*
2197 * At this point the module is configured enough to read the revision but
2198 * module may not be completely configured yet to use PM runtime. Enable
2199 * all clocks directly during init to configure the quirks needed for PM
2200 * runtime based on the revision register.
2201 */
2202static int sysc_init_module(struct sysc *ddata)
2203{
2204 bool rstctrl_deasserted = false;
2205 int error = 0;
2206
2207 error = sysc_clockdomain_init(ddata);
2208 if (error)
2209 return error;
2210
2211 sysc_clkdm_deny_idle(ddata);
2212
2213 /*
2214 * Always enable clocks. The bootloader may or may not have enabled
2215 * the related clocks.
2216 */
2217 error = sysc_enable_opt_clocks(ddata);
2218 if (error)
2219 return error;
2220
2221 error = sysc_enable_main_clocks(ddata);
2222 if (error)
2223 goto err_opt_clocks;
2224
2225 if (!(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)) {
2226 error = reset_control_deassert(ddata->rsts);
2227 if (error)
2228 goto err_main_clocks;
2229 rstctrl_deasserted = true;
2230 }
2231
2232 ddata->revision = sysc_read_revision(ddata);
2233 sysc_init_revision_quirks(ddata);
2234 sysc_init_module_quirks(ddata);
2235
2236 if (ddata->legacy_mode) {
2237 error = sysc_legacy_init(ddata);
2238 if (error)
2239 goto err_main_clocks;
2240 }
2241
2242 if (!ddata->legacy_mode) {
2243 error = sysc_enable_module(ddata->dev);
2244 if (error)
2245 goto err_main_clocks;
2246 }
2247
2248 if (!(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)) {
2249 error = sysc_reset(ddata);
2250 if (error)
2251 dev_err(ddata->dev, "Reset failed with %d\n", error);
2252
2253 if (error && !ddata->legacy_mode)
2254 sysc_disable_module(ddata->dev);
2255 }
2256
2257err_main_clocks:
2258 if (error)
2259 sysc_disable_main_clocks(ddata);
2260err_opt_clocks:
2261 /* No re-enable of clockdomain autoidle to prevent module autoidle */
2262 if (error) {
2263 sysc_disable_opt_clocks(ddata);
2264 sysc_clkdm_allow_idle(ddata);
2265 }
2266
2267 if (error && rstctrl_deasserted &&
2268 !(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT))
2269 reset_control_assert(ddata->rsts);
2270
2271 return error;
2272}
2273
2274static int sysc_init_sysc_mask(struct sysc *ddata)
2275{
2276 struct device_node *np = ddata->dev->of_node;
2277 int error;
2278 u32 val;
2279
2280 error = of_property_read_u32(np, "ti,sysc-mask", &val);
2281 if (error)
2282 return 0;
2283
2284 ddata->cfg.sysc_val = val & ddata->cap->sysc_mask;
2285
2286 return 0;
2287}
2288
2289static int sysc_init_idlemode(struct sysc *ddata, u8 *idlemodes,
2290 const char *name)
2291{
2292 struct device_node *np = ddata->dev->of_node;
2293 u32 val;
2294
2295 of_property_for_each_u32(np, name, val) {
2296 if (val >= SYSC_NR_IDLEMODES) {
2297 dev_err(ddata->dev, "invalid idlemode: %i\n", val);
2298 return -EINVAL;
2299 }
2300 *idlemodes |= (1 << val);
2301 }
2302
2303 return 0;
2304}
2305
2306static int sysc_init_idlemodes(struct sysc *ddata)
2307{
2308 int error;
2309
2310 error = sysc_init_idlemode(ddata, &ddata->cfg.midlemodes,
2311 "ti,sysc-midle");
2312 if (error)
2313 return error;
2314
2315 error = sysc_init_idlemode(ddata, &ddata->cfg.sidlemodes,
2316 "ti,sysc-sidle");
2317 if (error)
2318 return error;
2319
2320 return 0;
2321}
2322
2323/*
2324 * Only some devices on omap4 and later have SYSCONFIG reset done
2325 * bit. We can detect this if there is no SYSSTATUS at all, or the
2326 * SYSTATUS bit 0 is not used. Note that some SYSSTATUS registers
2327 * have multiple bits for the child devices like OHCI and EHCI.
2328 * Depends on SYSC being parsed first.
2329 */
2330static int sysc_init_syss_mask(struct sysc *ddata)
2331{
2332 struct device_node *np = ddata->dev->of_node;
2333 int error;
2334 u32 val;
2335
2336 error = of_property_read_u32(np, "ti,syss-mask", &val);
2337 if (error) {
2338 if ((ddata->cap->type == TI_SYSC_OMAP4 ||
2339 ddata->cap->type == TI_SYSC_OMAP4_TIMER) &&
2340 (ddata->cfg.sysc_val & SYSC_OMAP4_SOFTRESET))
2341 ddata->cfg.quirks |= SYSC_QUIRK_RESET_STATUS;
2342
2343 return 0;
2344 }
2345
2346 if (!(val & 1) && (ddata->cfg.sysc_val & SYSC_OMAP4_SOFTRESET))
2347 ddata->cfg.quirks |= SYSC_QUIRK_RESET_STATUS;
2348
2349 ddata->cfg.syss_mask = val;
2350
2351 return 0;
2352}
2353
2354/*
2355 * Many child device drivers need to have fck and opt clocks available
2356 * to get the clock rate for device internal configuration etc.
2357 */
2358static int sysc_child_add_named_clock(struct sysc *ddata,
2359 struct device *child,
2360 const char *name)
2361{
2362 struct clk *clk;
2363 struct clk_lookup *l;
2364 int error = 0;
2365
2366 if (!name)
2367 return 0;
2368
2369 clk = clk_get(child, name);
2370 if (!IS_ERR(clk)) {
2371 error = -EEXIST;
2372 goto put_clk;
2373 }
2374
2375 clk = clk_get(ddata->dev, name);
2376 if (IS_ERR(clk))
2377 return -ENODEV;
2378
2379 l = clkdev_create(clk, name, dev_name(child));
2380 if (!l)
2381 error = -ENOMEM;
2382put_clk:
2383 clk_put(clk);
2384
2385 return error;
2386}
2387
2388static int sysc_child_add_clocks(struct sysc *ddata,
2389 struct device *child)
2390{
2391 int i, error;
2392
2393 for (i = 0; i < ddata->nr_clocks; i++) {
2394 error = sysc_child_add_named_clock(ddata,
2395 child,
2396 ddata->clock_roles[i]);
2397 if (error && error != -EEXIST) {
2398 dev_err(ddata->dev, "could not add child clock %s: %i\n",
2399 ddata->clock_roles[i], error);
2400
2401 return error;
2402 }
2403 }
2404
2405 return 0;
2406}
2407
/*
 * Empty device type used only as a tag on our device so that
 * sysc_child_to_parent() can identify children of a sysc module.
 */
static const struct device_type sysc_device_type = {
};
2410
2411static struct sysc *sysc_child_to_parent(struct device *dev)
2412{
2413 struct device *parent = dev->parent;
2414
2415 if (!parent || parent->type != &sysc_device_type)
2416 return NULL;
2417
2418 return dev_get_drvdata(parent);
2419}
2420
2421static int __maybe_unused sysc_child_runtime_suspend(struct device *dev)
2422{
2423 struct sysc *ddata;
2424 int error;
2425
2426 ddata = sysc_child_to_parent(dev);
2427
2428 error = pm_generic_runtime_suspend(dev);
2429 if (error)
2430 return error;
2431
2432 if (!ddata->enabled)
2433 return 0;
2434
2435 return sysc_runtime_suspend(ddata->dev);
2436}
2437
2438static int __maybe_unused sysc_child_runtime_resume(struct device *dev)
2439{
2440 struct sysc *ddata;
2441 int error;
2442
2443 ddata = sysc_child_to_parent(dev);
2444
2445 if (!ddata->enabled) {
2446 error = sysc_runtime_resume(ddata->dev);
2447 if (error < 0)
2448 dev_err(ddata->dev,
2449 "%s error: %i\n", __func__, error);
2450 }
2451
2452 return pm_generic_runtime_resume(dev);
2453}
2454
2455/* Caller needs to take list_lock if ever used outside of cpu_pm */
2456static void sysc_reinit_modules(struct sysc_soc_info *soc)
2457{
2458 struct sysc_module *module;
2459 struct sysc *ddata;
2460
2461 list_for_each_entry(module, &sysc_soc->restored_modules, node) {
2462 ddata = module->ddata;
2463 sysc_reinit_module(ddata, ddata->enabled);
2464 }
2465}
2466
2467/**
2468 * sysc_context_notifier - optionally reset and restore module after idle
2469 * @nb: notifier block
2470 * @cmd: unused
2471 * @v: unused
2472 *
2473 * Some interconnect target modules need to be restored, or reset and restored
2474 * on CPU_PM CPU_PM_CLUSTER_EXIT notifier. This is needed at least for am335x
2475 * OTG and GPMC target modules even if the modules are unused.
2476 */
2477static int sysc_context_notifier(struct notifier_block *nb, unsigned long cmd,
2478 void *v)
2479{
2480 struct sysc_soc_info *soc;
2481
2482 soc = container_of(nb, struct sysc_soc_info, nb);
2483
2484 switch (cmd) {
2485 case CPU_CLUSTER_PM_ENTER:
2486 break;
2487 case CPU_CLUSTER_PM_ENTER_FAILED: /* No need to restore context */
2488 break;
2489 case CPU_CLUSTER_PM_EXIT:
2490 sysc_reinit_modules(soc);
2491 break;
2492 }
2493
2494 return NOTIFY_OK;
2495}
2496
2497/**
2498 * sysc_add_restored - optionally add reset and restore quirk hanlling
2499 * @ddata: device data
2500 */
2501static void sysc_add_restored(struct sysc *ddata)
2502{
2503 struct sysc_module *restored_module;
2504
2505 restored_module = kzalloc(sizeof(*restored_module), GFP_KERNEL);
2506 if (!restored_module)
2507 return;
2508
2509 restored_module->ddata = ddata;
2510
2511 mutex_lock(&sysc_soc->list_lock);
2512
2513 list_add(&restored_module->node, &sysc_soc->restored_modules);
2514
2515 if (sysc_soc->nb.notifier_call)
2516 goto out_unlock;
2517
2518 sysc_soc->nb.notifier_call = sysc_context_notifier;
2519 cpu_pm_register_notifier(&sysc_soc->nb);
2520
2521out_unlock:
2522 mutex_unlock(&sysc_soc->list_lock);
2523}
2524
2525static int sysc_notifier_call(struct notifier_block *nb,
2526 unsigned long event, void *device)
2527{
2528 struct device *dev = device;
2529 struct sysc *ddata;
2530 int error;
2531
2532 ddata = sysc_child_to_parent(dev);
2533 if (!ddata)
2534 return NOTIFY_DONE;
2535
2536 switch (event) {
2537 case BUS_NOTIFY_ADD_DEVICE:
2538 error = sysc_child_add_clocks(ddata, dev);
2539 if (error)
2540 return error;
2541 break;
2542 default:
2543 break;
2544 }
2545
2546 return NOTIFY_DONE;
2547}
2548
/* Bus notifier for sysc_notifier_call(), registered in sysc_init() */
static struct notifier_block sysc_nb = {
	.notifier_call = sysc_notifier_call,
};
2552
/* Device tree configured quirks */
struct sysc_dts_quirk {
	const char *name;	/* boolean devicetree property name */
	u32 mask;		/* SYSC_QUIRK_* flag set when present */
};

/* Mapping of boolean dts properties to quirk flags */
static const struct sysc_dts_quirk sysc_dts_quirks[] = {
	{ .name = "ti,no-idle-on-init",
	  .mask = SYSC_QUIRK_NO_IDLE_ON_INIT, },
	{ .name = "ti,no-reset-on-init",
	  .mask = SYSC_QUIRK_NO_RESET_ON_INIT, },
	{ .name = "ti,no-idle",
	  .mask = SYSC_QUIRK_NO_IDLE, },
};
2567
2568static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
2569 bool is_child)
2570{
2571 int i;
2572
2573 for (i = 0; i < ARRAY_SIZE(sysc_dts_quirks); i++) {
2574 const char *name = sysc_dts_quirks[i].name;
2575
2576 if (!of_property_present(np, name))
2577 continue;
2578
2579 ddata->cfg.quirks |= sysc_dts_quirks[i].mask;
2580 if (is_child) {
2581 dev_warn(ddata->dev,
2582 "dts flag should be at module level for %s\n",
2583 name);
2584 }
2585 }
2586}
2587
2588static int sysc_init_dts_quirks(struct sysc *ddata)
2589{
2590 struct device_node *np = ddata->dev->of_node;
2591 int error;
2592 u32 val;
2593
2594 ddata->legacy_mode = of_get_property(np, "ti,hwmods", NULL);
2595
2596 sysc_parse_dts_quirks(ddata, np, false);
2597 error = of_property_read_u32(np, "ti,sysc-delay-us", &val);
2598 if (!error) {
2599 if (val > 255) {
2600 dev_warn(ddata->dev, "bad ti,sysc-delay-us: %i\n",
2601 val);
2602 }
2603
2604 ddata->cfg.srst_udelay = (u8)val;
2605 }
2606
2607 return 0;
2608}
2609
2610static void sysc_unprepare(struct sysc *ddata)
2611{
2612 int i;
2613
2614 if (!ddata->clocks)
2615 return;
2616
2617 for (i = 0; i < SYSC_MAX_CLOCKS; i++) {
2618 if (!IS_ERR_OR_NULL(ddata->clocks[i]))
2619 clk_unprepare(ddata->clocks[i]);
2620 }
2621}
2622
2623/*
2624 * Common sysc register bits found on omap2, also known as type1
2625 */
2626static const struct sysc_regbits sysc_regbits_omap2 = {
2627 .dmadisable_shift = -ENODEV,
2628 .midle_shift = 12,
2629 .sidle_shift = 3,
2630 .clkact_shift = 8,
2631 .emufree_shift = 5,
2632 .enwkup_shift = 2,
2633 .srst_shift = 1,
2634 .autoidle_shift = 0,
2635};
2636
2637static const struct sysc_capabilities sysc_omap2 = {
2638 .type = TI_SYSC_OMAP2,
2639 .sysc_mask = SYSC_OMAP2_CLOCKACTIVITY | SYSC_OMAP2_EMUFREE |
2640 SYSC_OMAP2_ENAWAKEUP | SYSC_OMAP2_SOFTRESET |
2641 SYSC_OMAP2_AUTOIDLE,
2642 .regbits = &sysc_regbits_omap2,
2643};
2644
2645/* All omap2 and 3 timers, and timers 1, 2 & 10 on omap 4 and 5 */
2646static const struct sysc_capabilities sysc_omap2_timer = {
2647 .type = TI_SYSC_OMAP2_TIMER,
2648 .sysc_mask = SYSC_OMAP2_CLOCKACTIVITY | SYSC_OMAP2_EMUFREE |
2649 SYSC_OMAP2_ENAWAKEUP | SYSC_OMAP2_SOFTRESET |
2650 SYSC_OMAP2_AUTOIDLE,
2651 .regbits = &sysc_regbits_omap2,
2652 .mod_quirks = SYSC_QUIRK_USE_CLOCKACT,
2653};
2654
2655/*
2656 * SHAM2 (SHA1/MD5) sysc found on omap3, a variant of sysc_regbits_omap2
2657 * with different sidle position
2658 */
2659static const struct sysc_regbits sysc_regbits_omap3_sham = {
2660 .dmadisable_shift = -ENODEV,
2661 .midle_shift = -ENODEV,
2662 .sidle_shift = 4,
2663 .clkact_shift = -ENODEV,
2664 .enwkup_shift = -ENODEV,
2665 .srst_shift = 1,
2666 .autoidle_shift = 0,
2667 .emufree_shift = -ENODEV,
2668};
2669
2670static const struct sysc_capabilities sysc_omap3_sham = {
2671 .type = TI_SYSC_OMAP3_SHAM,
2672 .sysc_mask = SYSC_OMAP2_SOFTRESET | SYSC_OMAP2_AUTOIDLE,
2673 .regbits = &sysc_regbits_omap3_sham,
2674};
2675
2676/*
2677 * AES register bits found on omap3 and later, a variant of
2678 * sysc_regbits_omap2 with different sidle position
2679 */
2680static const struct sysc_regbits sysc_regbits_omap3_aes = {
2681 .dmadisable_shift = -ENODEV,
2682 .midle_shift = -ENODEV,
2683 .sidle_shift = 6,
2684 .clkact_shift = -ENODEV,
2685 .enwkup_shift = -ENODEV,
2686 .srst_shift = 1,
2687 .autoidle_shift = 0,
2688 .emufree_shift = -ENODEV,
2689};
2690
2691static const struct sysc_capabilities sysc_omap3_aes = {
2692 .type = TI_SYSC_OMAP3_AES,
2693 .sysc_mask = SYSC_OMAP2_SOFTRESET | SYSC_OMAP2_AUTOIDLE,
2694 .regbits = &sysc_regbits_omap3_aes,
2695};
2696
2697/*
2698 * Common sysc register bits found on omap4, also known as type2
2699 */
2700static const struct sysc_regbits sysc_regbits_omap4 = {
2701 .dmadisable_shift = 16,
2702 .midle_shift = 4,
2703 .sidle_shift = 2,
2704 .clkact_shift = -ENODEV,
2705 .enwkup_shift = -ENODEV,
2706 .emufree_shift = 1,
2707 .srst_shift = 0,
2708 .autoidle_shift = -ENODEV,
2709};
2710
2711static const struct sysc_capabilities sysc_omap4 = {
2712 .type = TI_SYSC_OMAP4,
2713 .sysc_mask = SYSC_OMAP4_DMADISABLE | SYSC_OMAP4_FREEEMU |
2714 SYSC_OMAP4_SOFTRESET,
2715 .regbits = &sysc_regbits_omap4,
2716};
2717
2718static const struct sysc_capabilities sysc_omap4_timer = {
2719 .type = TI_SYSC_OMAP4_TIMER,
2720 .sysc_mask = SYSC_OMAP4_DMADISABLE | SYSC_OMAP4_FREEEMU |
2721 SYSC_OMAP4_SOFTRESET,
2722 .regbits = &sysc_regbits_omap4,
2723};
2724
2725/*
2726 * Common sysc register bits found on omap4, also known as type3
2727 */
2728static const struct sysc_regbits sysc_regbits_omap4_simple = {
2729 .dmadisable_shift = -ENODEV,
2730 .midle_shift = 2,
2731 .sidle_shift = 0,
2732 .clkact_shift = -ENODEV,
2733 .enwkup_shift = -ENODEV,
2734 .srst_shift = -ENODEV,
2735 .emufree_shift = -ENODEV,
2736 .autoidle_shift = -ENODEV,
2737};
2738
2739static const struct sysc_capabilities sysc_omap4_simple = {
2740 .type = TI_SYSC_OMAP4_SIMPLE,
2741 .regbits = &sysc_regbits_omap4_simple,
2742};
2743
2744/*
2745 * SmartReflex sysc found on omap34xx
2746 */
2747static const struct sysc_regbits sysc_regbits_omap34xx_sr = {
2748 .dmadisable_shift = -ENODEV,
2749 .midle_shift = -ENODEV,
2750 .sidle_shift = -ENODEV,
2751 .clkact_shift = 20,
2752 .enwkup_shift = -ENODEV,
2753 .srst_shift = -ENODEV,
2754 .emufree_shift = -ENODEV,
2755 .autoidle_shift = -ENODEV,
2756};
2757
2758static const struct sysc_capabilities sysc_34xx_sr = {
2759 .type = TI_SYSC_OMAP34XX_SR,
2760 .sysc_mask = SYSC_OMAP2_CLOCKACTIVITY,
2761 .regbits = &sysc_regbits_omap34xx_sr,
2762 .mod_quirks = SYSC_QUIRK_USE_CLOCKACT | SYSC_QUIRK_UNCACHED,
2763};
2764
2765/*
2766 * SmartReflex sysc found on omap36xx and later
2767 */
2768static const struct sysc_regbits sysc_regbits_omap36xx_sr = {
2769 .dmadisable_shift = -ENODEV,
2770 .midle_shift = -ENODEV,
2771 .sidle_shift = 24,
2772 .clkact_shift = -ENODEV,
2773 .enwkup_shift = 26,
2774 .srst_shift = -ENODEV,
2775 .emufree_shift = -ENODEV,
2776 .autoidle_shift = -ENODEV,
2777};
2778
2779static const struct sysc_capabilities sysc_36xx_sr = {
2780 .type = TI_SYSC_OMAP36XX_SR,
2781 .sysc_mask = SYSC_OMAP3_SR_ENAWAKEUP,
2782 .regbits = &sysc_regbits_omap36xx_sr,
2783 .mod_quirks = SYSC_QUIRK_UNCACHED,
2784};
2785
2786static const struct sysc_capabilities sysc_omap4_sr = {
2787 .type = TI_SYSC_OMAP4_SR,
2788 .regbits = &sysc_regbits_omap36xx_sr,
2789};
2790
2791/*
2792 * McASP register bits found on omap4 and later
2793 */
2794static const struct sysc_regbits sysc_regbits_omap4_mcasp = {
2795 .dmadisable_shift = -ENODEV,
2796 .midle_shift = -ENODEV,
2797 .sidle_shift = 0,
2798 .clkact_shift = -ENODEV,
2799 .enwkup_shift = -ENODEV,
2800 .srst_shift = -ENODEV,
2801 .emufree_shift = -ENODEV,
2802 .autoidle_shift = -ENODEV,
2803};
2804
2805static const struct sysc_capabilities sysc_omap4_mcasp = {
2806 .type = TI_SYSC_OMAP4_MCASP,
2807 .regbits = &sysc_regbits_omap4_mcasp,
2808 .mod_quirks = SYSC_QUIRK_OPT_CLKS_NEEDED,
2809};
2810
2811/*
2812 * McASP found on dra7 and later
2813 */
2814static const struct sysc_capabilities sysc_dra7_mcasp = {
2815 .type = TI_SYSC_OMAP4_SIMPLE,
2816 .regbits = &sysc_regbits_omap4_simple,
2817 .mod_quirks = SYSC_QUIRK_OPT_CLKS_NEEDED,
2818};
2819
2820/*
2821 * FS USB host found on omap4 and later
2822 */
2823static const struct sysc_regbits sysc_regbits_omap4_usb_host_fs = {
2824 .dmadisable_shift = -ENODEV,
2825 .midle_shift = -ENODEV,
2826 .sidle_shift = 24,
2827 .clkact_shift = -ENODEV,
2828 .enwkup_shift = 26,
2829 .srst_shift = -ENODEV,
2830 .emufree_shift = -ENODEV,
2831 .autoidle_shift = -ENODEV,
2832};
2833
2834static const struct sysc_capabilities sysc_omap4_usb_host_fs = {
2835 .type = TI_SYSC_OMAP4_USB_HOST_FS,
2836 .sysc_mask = SYSC_OMAP2_ENAWAKEUP,
2837 .regbits = &sysc_regbits_omap4_usb_host_fs,
2838};
2839
2840static const struct sysc_regbits sysc_regbits_dra7_mcan = {
2841 .dmadisable_shift = -ENODEV,
2842 .midle_shift = -ENODEV,
2843 .sidle_shift = -ENODEV,
2844 .clkact_shift = -ENODEV,
2845 .enwkup_shift = 4,
2846 .srst_shift = 0,
2847 .emufree_shift = -ENODEV,
2848 .autoidle_shift = -ENODEV,
2849};
2850
2851static const struct sysc_capabilities sysc_dra7_mcan = {
2852 .type = TI_SYSC_DRA7_MCAN,
2853 .sysc_mask = SYSC_DRA7_MCAN_ENAWAKEUP | SYSC_OMAP4_SOFTRESET,
2854 .regbits = &sysc_regbits_dra7_mcan,
2855 .mod_quirks = SYSS_QUIRK_RESETDONE_INVERTED,
2856};
2857
2858/*
2859 * PRUSS found on some AM33xx, AM437x and AM57xx SoCs
2860 */
2861static const struct sysc_capabilities sysc_pruss = {
2862 .type = TI_SYSC_PRUSS,
2863 .sysc_mask = SYSC_PRUSS_STANDBY_INIT | SYSC_PRUSS_SUB_MWAIT,
2864 .regbits = &sysc_regbits_omap4_simple,
2865 .mod_quirks = SYSC_MODULE_QUIRK_PRUSS,
2866};
2867
2868static int sysc_init_pdata(struct sysc *ddata)
2869{
2870 struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
2871 struct ti_sysc_module_data *mdata;
2872
2873 if (!pdata)
2874 return 0;
2875
2876 mdata = devm_kzalloc(ddata->dev, sizeof(*mdata), GFP_KERNEL);
2877 if (!mdata)
2878 return -ENOMEM;
2879
2880 if (ddata->legacy_mode) {
2881 mdata->name = ddata->legacy_mode;
2882 mdata->module_pa = ddata->module_pa;
2883 mdata->module_size = ddata->module_size;
2884 mdata->offsets = ddata->offsets;
2885 mdata->nr_offsets = SYSC_MAX_REGS;
2886 mdata->cap = ddata->cap;
2887 mdata->cfg = &ddata->cfg;
2888 }
2889
2890 ddata->mdata = mdata;
2891
2892 return 0;
2893}
2894
2895static int sysc_init_match(struct sysc *ddata)
2896{
2897 const struct sysc_capabilities *cap;
2898
2899 cap = of_device_get_match_data(ddata->dev);
2900 if (!cap)
2901 return -EINVAL;
2902
2903 ddata->cap = cap;
2904 if (ddata->cap)
2905 ddata->cfg.quirks |= ddata->cap->mod_quirks;
2906
2907 return 0;
2908}
2909
2910static void ti_sysc_idle(struct work_struct *work)
2911{
2912 struct sysc *ddata;
2913
2914 ddata = container_of(work, struct sysc, idle_work.work);
2915
2916 /*
2917 * One time decrement of clock usage counts if left on from init.
2918 * Note that we disable opt clocks unconditionally in this case
2919 * as they are enabled unconditionally during init without
2920 * considering sysc_opt_clks_needed() at that point.
2921 */
2922 if (ddata->cfg.quirks & (SYSC_QUIRK_NO_IDLE |
2923 SYSC_QUIRK_NO_IDLE_ON_INIT)) {
2924 sysc_disable_main_clocks(ddata);
2925 sysc_disable_opt_clocks(ddata);
2926 sysc_clkdm_allow_idle(ddata);
2927 }
2928
2929 /* Keep permanent PM runtime usage count for SYSC_QUIRK_NO_IDLE */
2930 if (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE)
2931 return;
2932
2933 /*
2934 * Decrement PM runtime usage count for SYSC_QUIRK_NO_IDLE_ON_INIT
2935 * and SYSC_QUIRK_NO_RESET_ON_INIT
2936 */
2937 if (pm_runtime_active(ddata->dev))
2938 pm_runtime_put_sync(ddata->dev);
2939}
2940
2941/*
2942 * SoC model and features detection. Only needed for SoCs that need
2943 * special handling for quirks, no need to list others.
2944 */
2945static const struct soc_device_attribute sysc_soc_match[] = {
2946 SOC_FLAG("OMAP242*", SOC_2420),
2947 SOC_FLAG("OMAP243*", SOC_2430),
2948 SOC_FLAG("AM35*", SOC_AM35),
2949 SOC_FLAG("OMAP3[45]*", SOC_3430),
2950 SOC_FLAG("OMAP3[67]*", SOC_3630),
2951 SOC_FLAG("OMAP443*", SOC_4430),
2952 SOC_FLAG("OMAP446*", SOC_4460),
2953 SOC_FLAG("OMAP447*", SOC_4470),
2954 SOC_FLAG("OMAP54*", SOC_5430),
2955 SOC_FLAG("AM433", SOC_AM3),
2956 SOC_FLAG("AM43*", SOC_AM4),
2957 SOC_FLAG("DRA7*", SOC_DRA7),
2958
2959 { /* sentinel */ }
2960};
2961
2962/*
2963 * List of SoCs variants with disabled features. By default we assume all
2964 * devices in the device tree are available so no need to list those SoCs.
2965 */
2966static const struct soc_device_attribute sysc_soc_feat_match[] = {
2967 /* OMAP3430/3530 and AM3517 variants with some accelerators disabled */
2968 SOC_FLAG("AM3505", DIS_SGX),
2969 SOC_FLAG("OMAP3525", DIS_SGX),
2970 SOC_FLAG("OMAP3515", DIS_IVA | DIS_SGX),
2971 SOC_FLAG("OMAP3503", DIS_ISP | DIS_IVA | DIS_SGX),
2972
2973 /* OMAP3630/DM3730 variants with some accelerators disabled */
2974 SOC_FLAG("AM3703", DIS_IVA | DIS_SGX),
2975 SOC_FLAG("DM3725", DIS_SGX),
2976 SOC_FLAG("OMAP3611", DIS_ISP | DIS_IVA | DIS_SGX),
2977 SOC_FLAG("OMAP3615/AM3715", DIS_IVA),
2978 SOC_FLAG("OMAP3621", DIS_ISP),
2979
2980 { /* sentinel */ }
2981};
2982
2983static int sysc_add_disabled(unsigned long base)
2984{
2985 struct sysc_address *disabled_module;
2986
2987 disabled_module = kzalloc(sizeof(*disabled_module), GFP_KERNEL);
2988 if (!disabled_module)
2989 return -ENOMEM;
2990
2991 disabled_module->base = base;
2992
2993 mutex_lock(&sysc_soc->list_lock);
2994 list_add(&disabled_module->node, &sysc_soc->disabled_modules);
2995 mutex_unlock(&sysc_soc->list_lock);
2996
2997 return 0;
2998}
2999
3000/*
3001 * One time init to detect the booted SoC, disable unavailable features
3002 * and initialize list for optional cpu_pm notifier.
3003 *
3004 * Note that we initialize static data shared across all ti-sysc instances
3005 * so ddata is only used for SoC type. This can be called from module_init
3006 * once we no longer need to rely on platform data.
3007 */
3008static int sysc_init_static_data(struct sysc *ddata)
3009{
3010 const struct soc_device_attribute *match;
3011 struct ti_sysc_platform_data *pdata;
3012 unsigned long features = 0;
3013 struct device_node *np;
3014
3015 if (sysc_soc)
3016 return 0;
3017
3018 sysc_soc = kzalloc(sizeof(*sysc_soc), GFP_KERNEL);
3019 if (!sysc_soc)
3020 return -ENOMEM;
3021
3022 mutex_init(&sysc_soc->list_lock);
3023 INIT_LIST_HEAD(&sysc_soc->disabled_modules);
3024 INIT_LIST_HEAD(&sysc_soc->restored_modules);
3025 sysc_soc->general_purpose = true;
3026
3027 pdata = dev_get_platdata(ddata->dev);
3028 if (pdata && pdata->soc_type_gp)
3029 sysc_soc->general_purpose = pdata->soc_type_gp();
3030
3031 match = soc_device_match(sysc_soc_match);
3032 if (match && match->data)
3033 sysc_soc->soc = (enum sysc_soc)(uintptr_t)match->data;
3034
3035 /*
3036 * Check and warn about possible old incomplete dtb. We now want to see
3037 * simple-pm-bus instead of simple-bus in the dtb for genpd using SoCs.
3038 */
3039 switch (sysc_soc->soc) {
3040 case SOC_AM3:
3041 case SOC_AM4:
3042 case SOC_4430 ... SOC_4470:
3043 case SOC_5430:
3044 case SOC_DRA7:
3045 np = of_find_node_by_path("/ocp");
3046 WARN_ONCE(np && of_device_is_compatible(np, "simple-bus"),
3047 "ti-sysc: Incomplete old dtb, please update\n");
3048 break;
3049 default:
3050 break;
3051 }
3052
3053 /* Ignore devices that are not available on HS and EMU SoCs */
3054 if (!sysc_soc->general_purpose) {
3055 switch (sysc_soc->soc) {
3056 case SOC_3430 ... SOC_3630:
3057 sysc_add_disabled(0x48304000); /* timer12 */
3058 break;
3059 case SOC_AM3:
3060 sysc_add_disabled(0x48310000); /* rng */
3061 break;
3062 default:
3063 break;
3064 }
3065 }
3066
3067 match = soc_device_match(sysc_soc_feat_match);
3068 if (!match)
3069 return 0;
3070
3071 if (match->data)
3072 features = (unsigned long)match->data;
3073
3074 /*
3075 * Add disabled devices to the list based on the module base.
3076 * Note that this must be done before we attempt to access the
3077 * device and have module revision checks working.
3078 */
3079 if (features & DIS_ISP)
3080 sysc_add_disabled(0x480bd400);
3081 if (features & DIS_IVA)
3082 sysc_add_disabled(0x5d000000);
3083 if (features & DIS_SGX)
3084 sysc_add_disabled(0x50000000);
3085
3086 return 0;
3087}
3088
3089static void sysc_cleanup_static_data(void)
3090{
3091 struct sysc_module *restored_module;
3092 struct sysc_address *disabled_module;
3093 struct list_head *pos, *tmp;
3094
3095 if (!sysc_soc)
3096 return;
3097
3098 if (sysc_soc->nb.notifier_call)
3099 cpu_pm_unregister_notifier(&sysc_soc->nb);
3100
3101 mutex_lock(&sysc_soc->list_lock);
3102 list_for_each_safe(pos, tmp, &sysc_soc->restored_modules) {
3103 restored_module = list_entry(pos, struct sysc_module, node);
3104 list_del(pos);
3105 kfree(restored_module);
3106 }
3107 list_for_each_safe(pos, tmp, &sysc_soc->disabled_modules) {
3108 disabled_module = list_entry(pos, struct sysc_address, node);
3109 list_del(pos);
3110 kfree(disabled_module);
3111 }
3112 mutex_unlock(&sysc_soc->list_lock);
3113}
3114
3115static int sysc_check_disabled_devices(struct sysc *ddata)
3116{
3117 struct sysc_address *disabled_module;
3118 int error = 0;
3119
3120 mutex_lock(&sysc_soc->list_lock);
3121 list_for_each_entry(disabled_module, &sysc_soc->disabled_modules, node) {
3122 if (ddata->module_pa == disabled_module->base) {
3123 dev_dbg(ddata->dev, "module disabled for this SoC\n");
3124 error = -ENODEV;
3125 break;
3126 }
3127 }
3128 mutex_unlock(&sysc_soc->list_lock);
3129
3130 return error;
3131}
3132
3133/*
3134 * Ignore timers tagged with no-reset and no-idle. These are likely in use,
3135 * for example by drivers/clocksource/timer-ti-dm-systimer.c. If more checks
3136 * are needed, we could also look at the timer register configuration.
3137 */
3138static int sysc_check_active_timer(struct sysc *ddata)
3139{
3140 int error;
3141
3142 if (ddata->cap->type != TI_SYSC_OMAP2_TIMER &&
3143 ddata->cap->type != TI_SYSC_OMAP4_TIMER)
3144 return 0;
3145
3146 /*
3147 * Quirk for omap3 beagleboard revision A to B4 to use gpt12.
3148 * Revision C and later are fixed with commit 23885389dbbb ("ARM:
3149 * dts: Fix timer regression for beagleboard revision c"). This all
3150 * can be dropped if we stop supporting old beagleboard revisions
3151 * A to B4 at some point.
3152 */
3153 if (sysc_soc->soc == SOC_3430 || sysc_soc->soc == SOC_AM35)
3154 error = -ENXIO;
3155 else
3156 error = -EBUSY;
3157
3158 if ((ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) &&
3159 (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE))
3160 return error;
3161
3162 return 0;
3163}
3164
/* Limits of_platform_populate() in probe to our immediate child buses */
static const struct of_device_id sysc_match_table[] = {
	{ .compatible = "simple-bus", },
	{ /* sentinel */ },
};
3169
/*
 * Probe one interconnect target module: parse the dts and platform data,
 * map and identify the module registers, init the module with its clocks
 * and resets, then populate the child devices. The module is left enabled
 * until the deferred idle work or the final pm_runtime_put() runs.
 */
static int sysc_probe(struct platform_device *pdev)
{
	struct ti_sysc_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct sysc *ddata;
	int error;

	ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
	if (!ddata)
		return -ENOMEM;

	/* Mark register offsets as not present until parsed from dts */
	ddata->offsets[SYSC_REVISION] = -ENODEV;
	ddata->offsets[SYSC_SYSCONFIG] = -ENODEV;
	ddata->offsets[SYSC_SYSSTATUS] = -ENODEV;
	ddata->dev = &pdev->dev;
	platform_set_drvdata(pdev, ddata);

	/* One time SoC detection shared across all instances */
	error = sysc_init_static_data(ddata);
	if (error)
		return error;

	error = sysc_init_match(ddata);
	if (error)
		return error;

	error = sysc_init_dts_quirks(ddata);
	if (error)
		return error;

	error = sysc_map_and_check_registers(ddata);
	if (error)
		return error;

	error = sysc_init_sysc_mask(ddata);
	if (error)
		return error;

	error = sysc_init_idlemodes(ddata);
	if (error)
		return error;

	error = sysc_init_syss_mask(ddata);
	if (error)
		return error;

	error = sysc_init_pdata(ddata);
	if (error)
		return error;

	sysc_init_early_quirks(ddata);

	/* Bail out for modules fused out on HS/EMU SoC variants */
	error = sysc_check_disabled_devices(ddata);
	if (error)
		return error;

	/* A reserved timer stays probed but gets no children populated */
	error = sysc_check_active_timer(ddata);
	if (error == -ENXIO)
		ddata->reserved = true;
	else if (error)
		return error;

	error = sysc_get_clocks(ddata);
	if (error)
		return error;

	error = sysc_init_resets(ddata);
	if (error)
		goto unprepare;

	error = sysc_init_module(ddata);
	if (error)
		goto unprepare;

	pm_runtime_enable(ddata->dev);
	error = pm_runtime_resume_and_get(ddata->dev);
	if (error < 0) {
		pm_runtime_disable(ddata->dev);
		goto unprepare;
	}

	/* Balance use counts as PM runtime should have enabled these all */
	if (!(ddata->cfg.quirks &
	      (SYSC_QUIRK_NO_IDLE | SYSC_QUIRK_NO_IDLE_ON_INIT))) {
		sysc_disable_main_clocks(ddata);
		sysc_disable_opt_clocks(ddata);
		sysc_clkdm_allow_idle(ddata);
	}

	if (!(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT))
		reset_control_assert(ddata->rsts);

	sysc_show_registers(ddata);

	/* Tag our device so sysc_child_to_parent() can find us */
	ddata->dev->type = &sysc_device_type;

	if (!ddata->reserved) {
		error = of_platform_populate(ddata->dev->of_node,
					     sysc_match_table,
					     pdata ? pdata->auxdata : NULL,
					     ddata->dev);
		if (error)
			goto err;
	}

	INIT_DELAYED_WORK(&ddata->idle_work, ti_sysc_idle);

	/* At least earlycon won't survive without deferred idle */
	if (ddata->cfg.quirks & (SYSC_QUIRK_NO_IDLE |
				 SYSC_QUIRK_NO_IDLE_ON_INIT |
				 SYSC_QUIRK_NO_RESET_ON_INIT)) {
		schedule_delayed_work(&ddata->idle_work, 3000);
	} else {
		pm_runtime_put(&pdev->dev);
	}

	/* Optionally restore this module after context loss, see cpu_pm */
	if (ddata->cfg.quirks & SYSC_QUIRK_REINIT_ON_CTX_LOST)
		sysc_add_restored(ddata);

	return 0;

err:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
unprepare:
	sysc_unprepare(ddata);

	return error;
}
3297
/*
 * Remove one interconnect target module: flush any pending deferred
 * idle work, depopulate the children with the module resumed, then
 * idle the module and optionally assert its rstctrl reset.
 */
static void sysc_remove(struct platform_device *pdev)
{
	struct sysc *ddata = platform_get_drvdata(pdev);
	int error;

	/* Device can still be enabled, see deferred idle quirk in probe */
	if (cancel_delayed_work_sync(&ddata->idle_work))
		ti_sysc_idle(&ddata->idle_work.work);

	error = pm_runtime_resume_and_get(ddata->dev);
	if (error < 0) {
		pm_runtime_disable(ddata->dev);
		goto unprepare;
	}

	of_platform_depopulate(&pdev->dev);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	/* Only assert the reset if it is currently deasserted */
	if (!reset_control_status(ddata->rsts))
		reset_control_assert(ddata->rsts);

unprepare:
	sysc_unprepare(ddata);
}
3324
/* Supported interconnect target module types, see sysc_capabilities */
static const struct of_device_id sysc_match[] = {
	{ .compatible = "ti,sysc-omap2", .data = &sysc_omap2, },
	{ .compatible = "ti,sysc-omap2-timer", .data = &sysc_omap2_timer, },
	{ .compatible = "ti,sysc-omap4", .data = &sysc_omap4, },
	{ .compatible = "ti,sysc-omap4-timer", .data = &sysc_omap4_timer, },
	{ .compatible = "ti,sysc-omap4-simple", .data = &sysc_omap4_simple, },
	{ .compatible = "ti,sysc-omap3430-sr", .data = &sysc_34xx_sr, },
	{ .compatible = "ti,sysc-omap3630-sr", .data = &sysc_36xx_sr, },
	{ .compatible = "ti,sysc-omap4-sr", .data = &sysc_omap4_sr, },
	{ .compatible = "ti,sysc-omap3-sham", .data = &sysc_omap3_sham, },
	{ .compatible = "ti,sysc-omap-aes", .data = &sysc_omap3_aes, },
	{ .compatible = "ti,sysc-mcasp", .data = &sysc_omap4_mcasp, },
	{ .compatible = "ti,sysc-dra7-mcasp", .data = &sysc_dra7_mcasp, },
	{ .compatible = "ti,sysc-usb-host-fs",
	  .data = &sysc_omap4_usb_host_fs, },
	{ .compatible = "ti,sysc-dra7-mcan", .data = &sysc_dra7_mcan, },
	{ .compatible = "ti,sysc-pruss", .data = &sysc_pruss, },
	{ },
};
MODULE_DEVICE_TABLE(of, sysc_match);
3345
3346static struct platform_driver sysc_driver = {
3347 .probe = sysc_probe,
3348 .remove = sysc_remove,
3349 .driver = {
3350 .name = "ti-sysc",
3351 .of_match_table = sysc_match,
3352 .pm = &sysc_pm_ops,
3353 },
3354};
3355
3356static int __init sysc_init(void)
3357{
3358 bus_register_notifier(&platform_bus_type, &sysc_nb);
3359
3360 return platform_driver_register(&sysc_driver);
3361}
3362module_init(sysc_init);
3363
/* Undo sysc_init() in reverse order and free the static SoC match data */
static void __exit sysc_exit(void)
{
	bus_unregister_notifier(&platform_bus_type, &sysc_nb);
	platform_driver_unregister(&sysc_driver);
	sysc_cleanup_static_data();
}
3370module_exit(sysc_exit);
3371
3372MODULE_DESCRIPTION("TI sysc interconnect target driver");
3373MODULE_LICENSE("GPL v2");
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * ti-sysc.c - Texas Instruments sysc interconnect target driver
4 */
5
6#include <linux/io.h>
7#include <linux/clk.h>
8#include <linux/clkdev.h>
9#include <linux/cpu_pm.h>
10#include <linux/delay.h>
11#include <linux/list.h>
12#include <linux/module.h>
13#include <linux/platform_device.h>
14#include <linux/pm_domain.h>
15#include <linux/pm_runtime.h>
16#include <linux/reset.h>
17#include <linux/of_address.h>
18#include <linux/of_platform.h>
19#include <linux/slab.h>
20#include <linux/sys_soc.h>
21#include <linux/timekeeping.h>
22#include <linux/iopoll.h>
23
24#include <linux/platform_data/ti-sysc.h>
25
26#include <dt-bindings/bus/ti-sysc.h>
27
28#define DIS_ISP BIT(2)
29#define DIS_IVA BIT(1)
30#define DIS_SGX BIT(0)
31
32#define SOC_FLAG(match, flag) { .machine = match, .data = (void *)(flag), }
33
34#define MAX_MODULE_SOFTRESET_WAIT 10000
35
/* SoC families and revisions matched for SoC specific quirk handling */
enum sysc_soc {
	SOC_UNKNOWN,
	SOC_2420,
	SOC_2430,
	SOC_3430,
	SOC_AM35,
	SOC_3630,
	SOC_4430,
	SOC_4460,
	SOC_4470,
	SOC_5430,
	SOC_AM3,
	SOC_AM4,
	SOC_DRA7,
};
51
/* List entry for a module base address, see disabled_modules below */
struct sysc_address {
	unsigned long base;
	struct list_head node;
};

/* List entry linking a sysc instance, see restored_modules below */
struct sysc_module {
	struct sysc *ddata;
	struct list_head node;
};

/* Per-SoC data: detected SoC, general purpose flag and module lists */
struct sysc_soc_info {
	unsigned long general_purpose:1;
	enum sysc_soc soc;
	struct mutex list_lock;	/* disabled and restored modules list lock */
	struct list_head disabled_modules;
	struct list_head restored_modules;
	struct notifier_block nb;
};
70
/*
 * Clock slots: main fck and ick first, then up to eight optional clocks.
 * The order must match the clock_names[] role table below.
 */
enum sysc_clocks {
	SYSC_FCK,
	SYSC_ICK,
	SYSC_OPTFCK0,
	SYSC_OPTFCK1,
	SYSC_OPTFCK2,
	SYSC_OPTFCK3,
	SYSC_OPTFCK4,
	SYSC_OPTFCK5,
	SYSC_OPTFCK6,
	SYSC_OPTFCK7,
	SYSC_MAX_CLOCKS,
};
84
/* Singleton SoC data shared by all sysc instances */
static struct sysc_soc_info *sysc_soc;
/* Resource names for the rev, sysc and syss registers, see sysc_parse_one() */
static const char * const reg_names[] = { "rev", "sysc", "syss", };
/* Clock role names indexed by enum sysc_clocks */
static const char * const clock_names[SYSC_MAX_CLOCKS] = {
	"fck", "ick", "opt0", "opt1", "opt2", "opt3", "opt4",
	"opt5", "opt6", "opt7",
};

#define SYSC_IDLEMODE_MASK		3	/* idle modes are 2-bit fields */
#define SYSC_CLOCKACTIVITY_MASK		3	/* clockactivity is a 2-bit field */
94
95/**
96 * struct sysc - TI sysc interconnect target module registers and capabilities
97 * @dev: struct device pointer
98 * @module_pa: physical address of the interconnect target module
99 * @module_size: size of the interconnect target module
100 * @module_va: virtual address of the interconnect target module
101 * @offsets: register offsets from module base
102 * @mdata: ti-sysc to hwmod translation data for a module
103 * @clocks: clocks used by the interconnect target module
104 * @clock_roles: clock role names for the found clocks
105 * @nr_clocks: number of clocks used by the interconnect target module
106 * @rsts: resets used by the interconnect target module
107 * @legacy_mode: configured for legacy mode if set
108 * @cap: interconnect target module capabilities
109 * @cfg: interconnect target module configuration
110 * @cookie: data used by legacy platform callbacks
111 * @name: name if available
112 * @revision: interconnect target module revision
113 * @sysconfig: saved sysconfig register value
114 * @reserved: target module is reserved and already in use
115 * @enabled: sysc runtime enabled status
116 * @needs_resume: runtime resume needed on resume from suspend
117 * @child_needs_resume: runtime resume needed for child on resume from suspend
118 * @disable_on_idle: status flag used for disabling modules with resets
119 * @idle_work: work structure used to perform delayed idle on a module
120 * @pre_reset_quirk: module specific pre-reset quirk
121 * @post_reset_quirk: module specific post-reset quirk
122 * @reset_done_quirk: module specific reset done quirk
123 * @module_enable_quirk: module specific enable quirk
124 * @module_disable_quirk: module specific disable quirk
125 * @module_unlock_quirk: module specific sysconfig unlock quirk
126 * @module_lock_quirk: module specific sysconfig lock quirk
127 */
128struct sysc {
129 struct device *dev;
130 u64 module_pa;
131 u32 module_size;
132 void __iomem *module_va;
133 int offsets[SYSC_MAX_REGS];
134 struct ti_sysc_module_data *mdata;
135 struct clk **clocks;
136 const char **clock_roles;
137 int nr_clocks;
138 struct reset_control *rsts;
139 const char *legacy_mode;
140 const struct sysc_capabilities *cap;
141 struct sysc_config cfg;
142 struct ti_sysc_cookie cookie;
143 const char *name;
144 u32 revision;
145 u32 sysconfig;
146 unsigned int reserved:1;
147 unsigned int enabled:1;
148 unsigned int needs_resume:1;
149 unsigned int child_needs_resume:1;
150 struct delayed_work idle_work;
151 void (*pre_reset_quirk)(struct sysc *sysc);
152 void (*post_reset_quirk)(struct sysc *sysc);
153 void (*reset_done_quirk)(struct sysc *sysc);
154 void (*module_enable_quirk)(struct sysc *sysc);
155 void (*module_disable_quirk)(struct sysc *sysc);
156 void (*module_unlock_quirk)(struct sysc *sysc);
157 void (*module_lock_quirk)(struct sysc *sysc);
158};
159
160static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
161 bool is_child);
162static int sysc_reset(struct sysc *ddata);
163
164static void sysc_write(struct sysc *ddata, int offset, u32 value)
165{
166 if (ddata->cfg.quirks & SYSC_QUIRK_16BIT) {
167 writew_relaxed(value & 0xffff, ddata->module_va + offset);
168
169 /* Only i2c revision has LO and HI register with stride of 4 */
170 if (ddata->offsets[SYSC_REVISION] >= 0 &&
171 offset == ddata->offsets[SYSC_REVISION]) {
172 u16 hi = value >> 16;
173
174 writew_relaxed(hi, ddata->module_va + offset + 4);
175 }
176
177 return;
178 }
179
180 writel_relaxed(value, ddata->module_va + offset);
181}
182
183static u32 sysc_read(struct sysc *ddata, int offset)
184{
185 if (ddata->cfg.quirks & SYSC_QUIRK_16BIT) {
186 u32 val;
187
188 val = readw_relaxed(ddata->module_va + offset);
189
190 /* Only i2c revision has LO and HI register with stride of 4 */
191 if (ddata->offsets[SYSC_REVISION] >= 0 &&
192 offset == ddata->offsets[SYSC_REVISION]) {
193 u16 tmp = readw_relaxed(ddata->module_va + offset + 4);
194
195 val |= tmp << 16;
196 }
197
198 return val;
199 }
200
201 return readl_relaxed(ddata->module_va + offset);
202}
203
/* True if the module needs its optional clocks enabled along with the fck */
static bool sysc_opt_clks_needed(struct sysc *ddata)
{
	return !!(ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_NEEDED);
}
208
209static u32 sysc_read_revision(struct sysc *ddata)
210{
211 int offset = ddata->offsets[SYSC_REVISION];
212
213 if (offset < 0)
214 return 0;
215
216 return sysc_read(ddata, offset);
217}
218
219static u32 sysc_read_sysconfig(struct sysc *ddata)
220{
221 int offset = ddata->offsets[SYSC_SYSCONFIG];
222
223 if (offset < 0)
224 return 0;
225
226 return sysc_read(ddata, offset);
227}
228
229static u32 sysc_read_sysstatus(struct sysc *ddata)
230{
231 int offset = ddata->offsets[SYSC_SYSSTATUS];
232
233 if (offset < 0)
234 return 0;
235
236 return sysc_read(ddata, offset);
237}
238
/*
 * sysc_poll_reset_sysstatus - wait for reset completion via SYSSTATUS
 *
 * Uses the iopoll helper while timekeeping is running; when timekeeping
 * is suspended (early init, suspend path) falls back to a udelay() loop.
 */
static int sysc_poll_reset_sysstatus(struct sysc *ddata)
{
	int error, retries;
	u32 syss_done, rstval;

	/* Some modules signal reset done with the status bits cleared */
	if (ddata->cfg.quirks & SYSS_QUIRK_RESETDONE_INVERTED)
		syss_done = 0;
	else
		syss_done = ddata->cfg.syss_mask;

	if (likely(!timekeeping_suspended)) {
		error = readx_poll_timeout_atomic(sysc_read_sysstatus, ddata,
				rstval, (rstval & ddata->cfg.syss_mask) ==
				syss_done, 100, MAX_MODULE_SOFTRESET_WAIT);
	} else {
		retries = MAX_MODULE_SOFTRESET_WAIT;
		while (retries--) {
			rstval = sysc_read_sysstatus(ddata);
			if ((rstval & ddata->cfg.syss_mask) == syss_done)
				return 0;
			udelay(2); /* Account for udelay flakeyness */
		}
		error = -ETIMEDOUT;
	}

	return error;
}
266
/*
 * sysc_poll_reset_sysconfig - wait for the SRST bit in SYSCONFIG to clear
 *
 * Like sysc_poll_reset_sysstatus() but for modules where reset completion
 * is signaled by the softreset bit self-clearing in SYSCONFIG.
 */
static int sysc_poll_reset_sysconfig(struct sysc *ddata)
{
	int error, retries;
	u32 sysc_mask, rstval;

	sysc_mask = BIT(ddata->cap->regbits->srst_shift);

	if (likely(!timekeeping_suspended)) {
		error = readx_poll_timeout_atomic(sysc_read_sysconfig, ddata,
				rstval, !(rstval & sysc_mask),
				100, MAX_MODULE_SOFTRESET_WAIT);
	} else {
		retries = MAX_MODULE_SOFTRESET_WAIT;
		while (retries--) {
			rstval = sysc_read_sysconfig(ddata);
			if (!(rstval & sysc_mask))
				return 0;
			udelay(2); /* Account for udelay flakeyness */
		}
		error = -ETIMEDOUT;
	}

	return error;
}
291
292/* Poll on reset status */
293static int sysc_wait_softreset(struct sysc *ddata)
294{
295 int syss_offset, error = 0;
296
297 if (ddata->cap->regbits->srst_shift < 0)
298 return 0;
299
300 syss_offset = ddata->offsets[SYSC_SYSSTATUS];
301
302 if (syss_offset >= 0)
303 error = sysc_poll_reset_sysstatus(ddata);
304 else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS)
305 error = sysc_poll_reset_sysconfig(ddata);
306
307 return error;
308}
309
310static int sysc_add_named_clock_from_child(struct sysc *ddata,
311 const char *name,
312 const char *optfck_name)
313{
314 struct device_node *np = ddata->dev->of_node;
315 struct device_node *child;
316 struct clk_lookup *cl;
317 struct clk *clock;
318 const char *n;
319
320 if (name)
321 n = name;
322 else
323 n = optfck_name;
324
325 /* Does the clock alias already exist? */
326 clock = of_clk_get_by_name(np, n);
327 if (!IS_ERR(clock)) {
328 clk_put(clock);
329
330 return 0;
331 }
332
333 child = of_get_next_available_child(np, NULL);
334 if (!child)
335 return -ENODEV;
336
337 clock = devm_get_clk_from_child(ddata->dev, child, name);
338 if (IS_ERR(clock))
339 return PTR_ERR(clock);
340
341 /*
342 * Use clkdev_add() instead of clkdev_alloc() to avoid the MAX_DEV_ID
343 * limit for clk_get(). If cl ever needs to be freed, it should be done
344 * with clkdev_drop().
345 */
346 cl = kzalloc(sizeof(*cl), GFP_KERNEL);
347 if (!cl)
348 return -ENOMEM;
349
350 cl->con_id = n;
351 cl->dev_id = dev_name(ddata->dev);
352 cl->clk = clock;
353 clkdev_add(cl);
354
355 clk_put(clock);
356
357 return 0;
358}
359
/*
 * sysc_init_ext_opt_clock - set up an external optional clock from a child
 * @ddata: device driver data
 * @name: clock name, or NULL to use the next free "optN" role name
 *
 * Adds the clock alias and reserves the next free optional clock slot.
 * NOTE(review): assumes a free slot exists; the caller checks nr_clocks
 * against SYSC_MAX_CLOCKS afterwards.
 */
static int sysc_init_ext_opt_clock(struct sysc *ddata, const char *name)
{
	const char *optfck_name;
	int error, index;

	/* Use the first optional slot, or append after existing clocks */
	if (ddata->nr_clocks < SYSC_OPTFCK0)
		index = SYSC_OPTFCK0;
	else
		index = ddata->nr_clocks;

	if (name)
		optfck_name = name;
	else
		optfck_name = clock_names[index];

	error = sysc_add_named_clock_from_child(ddata, name, optfck_name);
	if (error)
		return error;

	ddata->clock_roles[index] = optfck_name;
	ddata->nr_clocks++;

	return 0;
}
384
/*
 * sysc_get_one_clock - get and prepare one named clock for the module
 * @ddata: device driver data
 * @name: clock role name
 *
 * Main clocks go to their fixed fck/ick slots, anything else takes the
 * first free optional clock slot. The clock is left prepared but not
 * enabled.
 */
static int sysc_get_one_clock(struct sysc *ddata, const char *name)
{
	int error, i, index = -ENODEV;

	/* Compare only the first three characters, "fck" and "ick" */
	if (!strncmp(clock_names[SYSC_FCK], name, 3))
		index = SYSC_FCK;
	else if (!strncmp(clock_names[SYSC_ICK], name, 3))
		index = SYSC_ICK;

	/* Not a main clock, find the first free optional clock slot */
	if (index < 0) {
		for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
			if (!ddata->clocks[i]) {
				index = i;
				break;
			}
		}
	}

	if (index < 0) {
		dev_err(ddata->dev, "clock %s not added\n", name);
		return index;
	}

	ddata->clocks[index] = devm_clk_get(ddata->dev, name);
	if (IS_ERR(ddata->clocks[index])) {
		dev_err(ddata->dev, "clock get error for %s: %li\n",
			name, PTR_ERR(ddata->clocks[index]));

		return PTR_ERR(ddata->clocks[index]);
	}

	error = clk_prepare(ddata->clocks[index]);
	if (error) {
		dev_err(ddata->dev, "clock prepare error for %s: %i\n",
			name, error);

		return error;
	}

	return 0;
}
426
427static int sysc_get_clocks(struct sysc *ddata)
428{
429 struct device_node *np = ddata->dev->of_node;
430 struct property *prop;
431 const char *name;
432 int nr_fck = 0, nr_ick = 0, i, error = 0;
433
434 ddata->clock_roles = devm_kcalloc(ddata->dev,
435 SYSC_MAX_CLOCKS,
436 sizeof(*ddata->clock_roles),
437 GFP_KERNEL);
438 if (!ddata->clock_roles)
439 return -ENOMEM;
440
441 of_property_for_each_string(np, "clock-names", prop, name) {
442 if (!strncmp(clock_names[SYSC_FCK], name, 3))
443 nr_fck++;
444 if (!strncmp(clock_names[SYSC_ICK], name, 3))
445 nr_ick++;
446 ddata->clock_roles[ddata->nr_clocks] = name;
447 ddata->nr_clocks++;
448 }
449
450 if (ddata->nr_clocks < 1)
451 return 0;
452
453 if ((ddata->cfg.quirks & SYSC_QUIRK_EXT_OPT_CLOCK)) {
454 error = sysc_init_ext_opt_clock(ddata, NULL);
455 if (error)
456 return error;
457 }
458
459 if (ddata->nr_clocks > SYSC_MAX_CLOCKS) {
460 dev_err(ddata->dev, "too many clocks for %pOF\n", np);
461
462 return -EINVAL;
463 }
464
465 if (nr_fck > 1 || nr_ick > 1) {
466 dev_err(ddata->dev, "max one fck and ick for %pOF\n", np);
467
468 return -EINVAL;
469 }
470
471 /* Always add a slot for main clocks fck and ick even if unused */
472 if (!nr_fck)
473 ddata->nr_clocks++;
474 if (!nr_ick)
475 ddata->nr_clocks++;
476
477 ddata->clocks = devm_kcalloc(ddata->dev,
478 ddata->nr_clocks, sizeof(*ddata->clocks),
479 GFP_KERNEL);
480 if (!ddata->clocks)
481 return -ENOMEM;
482
483 for (i = 0; i < SYSC_MAX_CLOCKS; i++) {
484 const char *name = ddata->clock_roles[i];
485
486 if (!name)
487 continue;
488
489 error = sysc_get_one_clock(ddata, name);
490 if (error)
491 return error;
492 }
493
494 return 0;
495}
496
/*
 * sysc_enable_main_clocks - enable the main fck and ick clocks
 *
 * Enables slots [0, SYSC_OPTFCK0); on failure the already enabled main
 * clocks are disabled again in reverse order.
 */
static int sysc_enable_main_clocks(struct sysc *ddata)
{
	struct clk *clock;
	int i, error;

	if (!ddata->clocks)
		return 0;

	for (i = 0; i < SYSC_OPTFCK0; i++) {
		clock = ddata->clocks[i];

		/* Main clocks may not have ick */
		if (IS_ERR_OR_NULL(clock))
			continue;

		error = clk_enable(clock);
		if (error)
			goto err_disable;
	}

	return 0;

err_disable:
	/* Unwind only the clocks enabled above */
	for (i--; i >= 0; i--) {
		clock = ddata->clocks[i];

		/* Main clocks may not have ick */
		if (IS_ERR_OR_NULL(clock))
			continue;

		clk_disable(clock);
	}

	return error;
}
532
533static void sysc_disable_main_clocks(struct sysc *ddata)
534{
535 struct clk *clock;
536 int i;
537
538 if (!ddata->clocks)
539 return;
540
541 for (i = 0; i < SYSC_OPTFCK0; i++) {
542 clock = ddata->clocks[i];
543 if (IS_ERR_OR_NULL(clock))
544 continue;
545
546 clk_disable(clock);
547 }
548}
549
/*
 * sysc_enable_opt_clocks - enable the optional clocks
 *
 * Optional clocks are allocated contiguously starting at SYSC_OPTFCK0,
 * so the first empty slot ends the scan. On failure the already enabled
 * clocks are disabled again in reverse order.
 */
static int sysc_enable_opt_clocks(struct sysc *ddata)
{
	struct clk *clock;
	int i, error;

	if (!ddata->clocks || ddata->nr_clocks < SYSC_OPTFCK0 + 1)
		return 0;

	for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
		clock = ddata->clocks[i];

		/* Assume no holes for opt clocks */
		if (IS_ERR_OR_NULL(clock))
			return 0;

		error = clk_enable(clock);
		if (error)
			goto err_disable;
	}

	return 0;

err_disable:
	/* Unwind only the optional clocks enabled above */
	for (i--; i >= 0; i--) {
		clock = ddata->clocks[i];
		if (IS_ERR_OR_NULL(clock))
			continue;

		clk_disable(clock);
	}

	return error;
}
583
584static void sysc_disable_opt_clocks(struct sysc *ddata)
585{
586 struct clk *clock;
587 int i;
588
589 if (!ddata->clocks || ddata->nr_clocks < SYSC_OPTFCK0 + 1)
590 return;
591
592 for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
593 clock = ddata->clocks[i];
594
595 /* Assume no holes for opt clocks */
596 if (IS_ERR_OR_NULL(clock))
597 return;
598
599 clk_disable(clock);
600 }
601}
602
603static void sysc_clkdm_deny_idle(struct sysc *ddata)
604{
605 struct ti_sysc_platform_data *pdata;
606
607 if (ddata->legacy_mode || (ddata->cfg.quirks & SYSC_QUIRK_CLKDM_NOAUTO))
608 return;
609
610 pdata = dev_get_platdata(ddata->dev);
611 if (pdata && pdata->clkdm_deny_idle)
612 pdata->clkdm_deny_idle(ddata->dev, &ddata->cookie);
613}
614
615static void sysc_clkdm_allow_idle(struct sysc *ddata)
616{
617 struct ti_sysc_platform_data *pdata;
618
619 if (ddata->legacy_mode || (ddata->cfg.quirks & SYSC_QUIRK_CLKDM_NOAUTO))
620 return;
621
622 pdata = dev_get_platdata(ddata->dev);
623 if (pdata && pdata->clkdm_allow_idle)
624 pdata->clkdm_allow_idle(ddata->dev, &ddata->cookie);
625}
626
627/**
628 * sysc_init_resets - init rstctrl reset line if configured
629 * @ddata: device driver data
630 *
631 * See sysc_rstctrl_reset_deassert().
632 */
633static int sysc_init_resets(struct sysc *ddata)
634{
635 ddata->rsts =
636 devm_reset_control_get_optional_shared(ddata->dev, "rstctrl");
637
638 return PTR_ERR_OR_ZERO(ddata->rsts);
639}
640
641/**
642 * sysc_parse_and_check_child_range - parses module IO region from ranges
643 * @ddata: device driver data
644 *
645 * In general we only need rev, syss, and sysc registers and not the whole
646 * module range. But we do want the offsets for these registers from the
647 * module base. This allows us to check them against the legacy hwmod
648 * platform data. Let's also check the ranges are configured properly.
649 */
650static int sysc_parse_and_check_child_range(struct sysc *ddata)
651{
652 struct device_node *np = ddata->dev->of_node;
653 struct of_range_parser parser;
654 struct of_range range;
655 int error;
656
657 error = of_range_parser_init(&parser, np);
658 if (error)
659 return error;
660
661 for_each_of_range(&parser, &range) {
662 ddata->module_pa = range.cpu_addr;
663 ddata->module_size = range.size;
664 break;
665 }
666
667 return 0;
668}
669
/* Interconnect instances to probe before l4_per instances */
static struct resource early_bus_ranges[] = {
	/* am3/4 l4_wkup */
	{ .start = 0x44c00000, .end = 0x44c00000 + 0x300000, },
	/* omap4/5 and dra7 l4_cfg */
	{ .start = 0x4a000000, .end = 0x4a000000 + 0x300000, },
	/* omap4 l4_wkup */
	{ .start = 0x4a300000, .end = 0x4a300000 + 0x30000, },
	/* omap5 and dra7 l4_wkup without dra7 dcan segment */
	{ .start = 0x4ae00000, .end = 0x4ae00000 + 0x30000, },
};

/*
 * Remaining non-critical probe deferrals; decremented per deferred probe
 * and zeroed once a module within the early bus ranges above probes.
 */
static atomic_t sysc_defer = ATOMIC_INIT(10);
683
684/**
685 * sysc_defer_non_critical - defer non_critical interconnect probing
686 * @ddata: device driver data
687 *
688 * We want to probe l4_cfg and l4_wkup interconnect instances before any
689 * l4_per instances as l4_per instances depend on resources on l4_cfg and
690 * l4_wkup interconnects.
691 */
692static int sysc_defer_non_critical(struct sysc *ddata)
693{
694 struct resource *res;
695 int i;
696
697 if (!atomic_read(&sysc_defer))
698 return 0;
699
700 for (i = 0; i < ARRAY_SIZE(early_bus_ranges); i++) {
701 res = &early_bus_ranges[i];
702 if (ddata->module_pa >= res->start &&
703 ddata->module_pa <= res->end) {
704 atomic_set(&sysc_defer, 0);
705
706 return 0;
707 }
708 }
709
710 atomic_dec_if_positive(&sysc_defer);
711
712 return -EPROBE_DEFER;
713}
714
715static struct device_node *stdout_path;
716
717static void sysc_init_stdout_path(struct sysc *ddata)
718{
719 struct device_node *np = NULL;
720 const char *uart;
721
722 if (IS_ERR(stdout_path))
723 return;
724
725 if (stdout_path)
726 return;
727
728 np = of_find_node_by_path("/chosen");
729 if (!np)
730 goto err;
731
732 uart = of_get_property(np, "stdout-path", NULL);
733 if (!uart)
734 goto err;
735
736 np = of_find_node_by_path(uart);
737 if (!np)
738 goto err;
739
740 stdout_path = np;
741
742 return;
743
744err:
745 stdout_path = ERR_PTR(-ENODEV);
746}
747
/* Flag the module hosting the console with no-idle and no-reset quirks */
static void sysc_check_quirk_stdout(struct sysc *ddata,
				    struct device_node *np)
{
	sysc_init_stdout_path(ddata);
	if (np != stdout_path)
		return;

	ddata->cfg.quirks |= SYSC_QUIRK_NO_IDLE_ON_INIT |
				SYSC_QUIRK_NO_RESET_ON_INIT;
}
758
759/**
760 * sysc_check_one_child - check child configuration
761 * @ddata: device driver data
762 * @np: child device node
763 *
764 * Let's avoid messy situations where we have new interconnect target
765 * node but children have "ti,hwmods". These belong to the interconnect
766 * target node and are managed by this driver.
767 */
768static void sysc_check_one_child(struct sysc *ddata,
769 struct device_node *np)
770{
771 const char *name;
772
773 name = of_get_property(np, "ti,hwmods", NULL);
774 if (name && !of_device_is_compatible(np, "ti,sysc"))
775 dev_warn(ddata->dev, "really a child ti,hwmods property?");
776
777 sysc_check_quirk_stdout(ddata, np);
778 sysc_parse_dts_quirks(ddata, np, true);
779}
780
/* Run sysc_check_one_child() for each child of the module node */
static void sysc_check_children(struct sysc *ddata)
{
	struct device_node *child;

	for_each_child_of_node(ddata->dev->of_node, child)
		sysc_check_one_child(ddata, child);
}
788
789/*
790 * So far only I2C uses 16-bit read access with clockactivity with revision
791 * in two registers with stride of 4. We can detect this based on the rev
792 * register size to configure things far enough to be able to properly read
793 * the revision register.
794 */
795static void sysc_check_quirk_16bit(struct sysc *ddata, struct resource *res)
796{
797 if (resource_size(res) == 8)
798 ddata->cfg.quirks |= SYSC_QUIRK_16BIT | SYSC_QUIRK_USE_CLOCKACT;
799}
800
801/**
802 * sysc_parse_one - parses the interconnect target module registers
803 * @ddata: device driver data
804 * @reg: register to parse
805 */
806static int sysc_parse_one(struct sysc *ddata, enum sysc_registers reg)
807{
808 struct resource *res;
809 const char *name;
810
811 switch (reg) {
812 case SYSC_REVISION:
813 case SYSC_SYSCONFIG:
814 case SYSC_SYSSTATUS:
815 name = reg_names[reg];
816 break;
817 default:
818 return -EINVAL;
819 }
820
821 res = platform_get_resource_byname(to_platform_device(ddata->dev),
822 IORESOURCE_MEM, name);
823 if (!res) {
824 ddata->offsets[reg] = -ENODEV;
825
826 return 0;
827 }
828
829 ddata->offsets[reg] = res->start - ddata->module_pa;
830 if (reg == SYSC_REVISION)
831 sysc_check_quirk_16bit(ddata, res);
832
833 return 0;
834}
835
836static int sysc_parse_registers(struct sysc *ddata)
837{
838 int i, error;
839
840 for (i = 0; i < SYSC_MAX_REGS; i++) {
841 error = sysc_parse_one(ddata, i);
842 if (error)
843 return error;
844 }
845
846 return 0;
847}
848
849/**
850 * sysc_check_registers - check for misconfigured register overlaps
851 * @ddata: device driver data
852 */
853static int sysc_check_registers(struct sysc *ddata)
854{
855 int i, j, nr_regs = 0, nr_matches = 0;
856
857 for (i = 0; i < SYSC_MAX_REGS; i++) {
858 if (ddata->offsets[i] < 0)
859 continue;
860
861 if (ddata->offsets[i] > (ddata->module_size - 4)) {
862 dev_err(ddata->dev, "register outside module range");
863
864 return -EINVAL;
865 }
866
867 for (j = 0; j < SYSC_MAX_REGS; j++) {
868 if (ddata->offsets[j] < 0)
869 continue;
870
871 if (ddata->offsets[i] == ddata->offsets[j])
872 nr_matches++;
873 }
874 nr_regs++;
875 }
876
877 if (nr_matches > nr_regs) {
878 dev_err(ddata->dev, "overlapping registers: (%i/%i)",
879 nr_regs, nr_matches);
880
881 return -EINVAL;
882 }
883
884 return 0;
885}
886
887/**
888 * sysc_ioremap - ioremap register space for the interconnect target module
889 * @ddata: device driver data
890 *
891 * Note that the interconnect target module registers can be anywhere
892 * within the interconnect target module range. For example, SGX has
893 * them at offset 0x1fc00 in the 32MB module address space. And cpsw
894 * has them at offset 0x1200 in the CPSW_WR child. Usually the
895 * interconnect target module registers are at the beginning of
896 * the module range though.
897 */
898static int sysc_ioremap(struct sysc *ddata)
899{
900 int size;
901
902 if (ddata->offsets[SYSC_REVISION] < 0 &&
903 ddata->offsets[SYSC_SYSCONFIG] < 0 &&
904 ddata->offsets[SYSC_SYSSTATUS] < 0) {
905 size = ddata->module_size;
906 } else {
907 size = max3(ddata->offsets[SYSC_REVISION],
908 ddata->offsets[SYSC_SYSCONFIG],
909 ddata->offsets[SYSC_SYSSTATUS]);
910
911 if (size < SZ_1K)
912 size = SZ_1K;
913
914 if ((size + sizeof(u32)) > ddata->module_size)
915 size = ddata->module_size;
916 }
917
918 ddata->module_va = devm_ioremap(ddata->dev,
919 ddata->module_pa,
920 size + sizeof(u32));
921 if (!ddata->module_va)
922 return -EIO;
923
924 return 0;
925}
926
927/**
928 * sysc_map_and_check_registers - ioremap and check device registers
929 * @ddata: device driver data
930 */
931static int sysc_map_and_check_registers(struct sysc *ddata)
932{
933 struct device_node *np = ddata->dev->of_node;
934 int error;
935
936 error = sysc_parse_and_check_child_range(ddata);
937 if (error)
938 return error;
939
940 error = sysc_defer_non_critical(ddata);
941 if (error)
942 return error;
943
944 sysc_check_children(ddata);
945
946 if (!of_property_present(np, "reg"))
947 return 0;
948
949 error = sysc_parse_registers(ddata);
950 if (error)
951 return error;
952
953 error = sysc_ioremap(ddata);
954 if (error)
955 return error;
956
957 error = sysc_check_registers(ddata);
958 if (error)
959 return error;
960
961 return 0;
962}
963
964/**
965 * sysc_show_rev - read and show interconnect target module revision
966 * @bufp: buffer to print the information to
967 * @ddata: device driver data
968 */
969static int sysc_show_rev(char *bufp, struct sysc *ddata)
970{
971 int len;
972
973 if (ddata->offsets[SYSC_REVISION] < 0)
974 return sprintf(bufp, ":NA");
975
976 len = sprintf(bufp, ":%08x", ddata->revision);
977
978 return len;
979}
980
/* Print one register offset, or ":NA" if the register does not exist */
static int sysc_show_reg(struct sysc *ddata,
			 char *bufp, enum sysc_registers reg)
{
	if (ddata->offsets[reg] < 0)
		return sprintf(bufp, ":NA");

	return sprintf(bufp, ":%x", ddata->offsets[reg]);
}
989
990static int sysc_show_name(char *bufp, struct sysc *ddata)
991{
992 if (!ddata->name)
993 return 0;
994
995 return sprintf(bufp, ":%s", ddata->name);
996}
997
998/**
999 * sysc_show_registers - show information about interconnect target module
1000 * @ddata: device driver data
1001 */
1002static void sysc_show_registers(struct sysc *ddata)
1003{
1004 char buf[128];
1005 char *bufp = buf;
1006 int i;
1007
1008 for (i = 0; i < SYSC_MAX_REGS; i++)
1009 bufp += sysc_show_reg(ddata, bufp, i);
1010
1011 bufp += sysc_show_rev(bufp, ddata);
1012 bufp += sysc_show_name(bufp, ddata);
1013
1014 dev_dbg(ddata->dev, "%llx:%x%s\n",
1015 ddata->module_pa, ddata->module_size,
1016 buf);
1017}
1018
1019/**
1020 * sysc_write_sysconfig - handle sysconfig quirks for register write
1021 * @ddata: device driver data
1022 * @value: register value
1023 */
1024static void sysc_write_sysconfig(struct sysc *ddata, u32 value)
1025{
1026 if (ddata->module_unlock_quirk)
1027 ddata->module_unlock_quirk(ddata);
1028
1029 sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], value);
1030
1031 if (ddata->module_lock_quirk)
1032 ddata->module_lock_quirk(ddata);
1033}
1034
#define SYSC_IDLE_MASK (SYSC_NR_IDLEMODES - 1)
#define SYSC_CLOCACT_ICK	2

/*
 * sysc_enable_module - configure sysconfig for an enabled module
 *
 * Waits for any automatic OCP softreset to complete, then programs the
 * CLOCKACTIVITY, SIDLE, MIDLE and AUTOIDLE fields and saves the final
 * sysconfig value for context restore.
 *
 * Caller needs to manage sysc_clkdm_deny_idle() and sysc_clkdm_allow_idle()
 */
static int sysc_enable_module(struct device *dev)
{
	struct sysc *ddata;
	const struct sysc_regbits *regbits;
	u32 reg, idlemodes, best_mode;
	int error;

	ddata = dev_get_drvdata(dev);

	/*
	 * Some modules like DSS reset automatically on idle. Enable optional
	 * reset clocks and wait for OCP softreset to complete.
	 */
	if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET) {
		error = sysc_enable_opt_clocks(ddata);
		if (error) {
			dev_err(ddata->dev,
				"Optional clocks failed for enable: %i\n",
				error);
			return error;
		}
	}
	/*
	 * Some modules like i2c and hdq1w have unusable reset status unless
	 * the module reset quirk is enabled. Skip status check on enable.
	 */
	if (!(ddata->cfg.quirks & SYSC_MODULE_QUIRK_ENA_RESETDONE)) {
		error = sysc_wait_softreset(ddata);
		if (error)
			dev_warn(ddata->dev, "OCP softreset timed out\n");
	}
	if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET)
		sysc_disable_opt_clocks(ddata);

	/*
	 * Some subsystem private interconnects, like DSS top level module,
	 * need only the automatic OCP softreset handling with no sysconfig
	 * register bits to configure.
	 */
	if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
		return 0;

	regbits = ddata->cap->regbits;
	reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);

	/*
	 * Set CLOCKACTIVITY, we only use it for ick. And we only configure it
	 * based on the SYSC_QUIRK_USE_CLOCKACT flag, not based on the hardware
	 * capabilities. See the old HWMOD_SET_DEFAULT_CLOCKACT flag.
	 */
	if (regbits->clkact_shift >= 0 &&
	    (ddata->cfg.quirks & SYSC_QUIRK_USE_CLOCKACT))
		reg |= SYSC_CLOCACT_ICK << regbits->clkact_shift;

	/* Set SIDLE mode */
	idlemodes = ddata->cfg.sidlemodes;
	if (!idlemodes || regbits->sidle_shift < 0)
		goto set_midle;

	if (ddata->cfg.quirks & (SYSC_QUIRK_SWSUP_SIDLE |
				 SYSC_QUIRK_SWSUP_SIDLE_ACT)) {
		/* Software supervised idle: force no-idle while enabled */
		best_mode = SYSC_IDLE_NO;

		/* Clear WAKEUP */
		if (regbits->enwkup_shift >= 0 &&
		    ddata->cfg.sysc_val & BIT(regbits->enwkup_shift))
			reg &= ~BIT(regbits->enwkup_shift);
	} else {
		/* Pick the highest supported idle mode */
		best_mode = fls(ddata->cfg.sidlemodes) - 1;
		if (best_mode > SYSC_IDLE_MASK) {
			dev_err(dev, "%s: invalid sidlemode\n", __func__);
			return -EINVAL;
		}

		/* Set WAKEUP */
		if (regbits->enwkup_shift >= 0 &&
		    ddata->cfg.sysc_val & BIT(regbits->enwkup_shift))
			reg |= BIT(regbits->enwkup_shift);
	}

	reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift);
	reg |= best_mode << regbits->sidle_shift;
	sysc_write_sysconfig(ddata, reg);

set_midle:
	/* Set MIDLE mode */
	idlemodes = ddata->cfg.midlemodes;
	if (!idlemodes || regbits->midle_shift < 0)
		goto set_autoidle;

	best_mode = fls(ddata->cfg.midlemodes) - 1;
	if (best_mode > SYSC_IDLE_MASK) {
		dev_err(dev, "%s: invalid midlemode\n", __func__);
		error = -EINVAL;
		goto save_context;
	}

	if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_MSTANDBY)
		best_mode = SYSC_IDLE_NO;

	reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift);
	reg |= best_mode << regbits->midle_shift;
	sysc_write_sysconfig(ddata, reg);

set_autoidle:
	/* Autoidle bit must enabled separately if available */
	if (regbits->autoidle_shift >= 0 &&
	    ddata->cfg.sysc_val & BIT(regbits->autoidle_shift)) {
		reg |= 1 << regbits->autoidle_shift;
		sysc_write_sysconfig(ddata, reg);
	}

	error = 0;

save_context:
	/* Save context and flush posted write */
	ddata->sysconfig = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);

	if (ddata->module_enable_quirk)
		ddata->module_enable_quirk(ddata);

	return error;
}
1162
1163static int sysc_best_idle_mode(u32 idlemodes, u32 *best_mode)
1164{
1165 if (idlemodes & BIT(SYSC_IDLE_SMART_WKUP))
1166 *best_mode = SYSC_IDLE_SMART_WKUP;
1167 else if (idlemodes & BIT(SYSC_IDLE_SMART))
1168 *best_mode = SYSC_IDLE_SMART;
1169 else if (idlemodes & BIT(SYSC_IDLE_FORCE))
1170 *best_mode = SYSC_IDLE_FORCE;
1171 else
1172 return -EINVAL;
1173
1174 return 0;
1175}
1176
/*
 * Idle the interconnect target module by programming the MIDLE and SIDLE
 * fields of the SYSCONFIG register. Used on runtime suspend for modules
 * not handled by legacy platform data callbacks.
 *
 * Caller needs to manage sysc_clkdm_deny_idle() and sysc_clkdm_allow_idle().
 */
static int sysc_disable_module(struct device *dev)
{
	struct sysc *ddata;
	const struct sysc_regbits *regbits;
	u32 reg, idlemodes, best_mode;
	int ret;

	ddata = dev_get_drvdata(dev);
	/* Nothing to configure if the module has no SYSCONFIG register */
	if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
		return 0;

	if (ddata->module_disable_quirk)
		ddata->module_disable_quirk(ddata);

	regbits = ddata->cap->regbits;
	reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);

	/* Set MIDLE mode */
	idlemodes = ddata->cfg.midlemodes;
	if (!idlemodes || regbits->midle_shift < 0)
		goto set_sidle;

	ret = sysc_best_idle_mode(idlemodes, &best_mode);
	if (ret) {
		dev_err(dev, "%s: invalid midlemode\n", __func__);
		return ret;
	}

	/* Software supervised or forced mstandby must use force idle */
	if (ddata->cfg.quirks & (SYSC_QUIRK_SWSUP_MSTANDBY) ||
	    ddata->cfg.quirks & (SYSC_QUIRK_FORCE_MSTANDBY))
		best_mode = SYSC_IDLE_FORCE;

	reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift);
	reg |= best_mode << regbits->midle_shift;
	sysc_write_sysconfig(ddata, reg);

set_sidle:
	/* Set SIDLE mode */
	idlemodes = ddata->cfg.sidlemodes;
	if (!idlemodes || regbits->sidle_shift < 0) {
		ret = 0;
		goto save_context;
	}

	if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_SIDLE) {
		best_mode = SYSC_IDLE_FORCE;
	} else {
		ret = sysc_best_idle_mode(idlemodes, &best_mode);
		if (ret) {
			dev_err(dev, "%s: invalid sidlemode\n", __func__);
			ret = -EINVAL;
			goto save_context;
		}
	}

	if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_SIDLE_ACT) {
		/* Set WAKEUP */
		if (regbits->enwkup_shift >= 0 &&
		    ddata->cfg.sysc_val & BIT(regbits->enwkup_shift))
			reg |= BIT(regbits->enwkup_shift);
	}

	reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift);
	reg |= best_mode << regbits->sidle_shift;
	/* Autoidle bit is set together with SIDLE when supported */
	if (regbits->autoidle_shift >= 0 &&
	    ddata->cfg.sysc_val & BIT(regbits->autoidle_shift))
		reg |= 1 << regbits->autoidle_shift;
	sysc_write_sysconfig(ddata, reg);

	ret = 0;

save_context:
	/* Save context and flush posted write */
	ddata->sysconfig = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);

	return ret;
}
1255
1256static int __maybe_unused sysc_runtime_suspend_legacy(struct device *dev,
1257 struct sysc *ddata)
1258{
1259 struct ti_sysc_platform_data *pdata;
1260 int error;
1261
1262 pdata = dev_get_platdata(ddata->dev);
1263 if (!pdata)
1264 return 0;
1265
1266 if (!pdata->idle_module)
1267 return -ENODEV;
1268
1269 error = pdata->idle_module(dev, &ddata->cookie);
1270 if (error)
1271 dev_err(dev, "%s: could not idle: %i\n",
1272 __func__, error);
1273
1274 reset_control_assert(ddata->rsts);
1275
1276 return 0;
1277}
1278
1279static int __maybe_unused sysc_runtime_resume_legacy(struct device *dev,
1280 struct sysc *ddata)
1281{
1282 struct ti_sysc_platform_data *pdata;
1283 int error;
1284
1285 pdata = dev_get_platdata(ddata->dev);
1286 if (!pdata)
1287 return 0;
1288
1289 if (!pdata->enable_module)
1290 return -ENODEV;
1291
1292 error = pdata->enable_module(dev, &ddata->cookie);
1293 if (error)
1294 dev_err(dev, "%s: could not enable: %i\n",
1295 __func__, error);
1296
1297 reset_control_deassert(ddata->rsts);
1298
1299 return 0;
1300}
1301
/*
 * Runtime PM suspend: idle the interconnect target module and gate its
 * clocks. Clockdomain autoidle is blocked while the module registers are
 * reprogrammed, and the optional rstctrl reset is asserted at the end.
 */
static int __maybe_unused sysc_runtime_suspend(struct device *dev)
{
	struct sysc *ddata;
	int error = 0;

	ddata = dev_get_drvdata(dev);

	/* Nothing to do if already suspended */
	if (!ddata->enabled)
		return 0;

	sysc_clkdm_deny_idle(ddata);

	if (ddata->legacy_mode) {
		error = sysc_runtime_suspend_legacy(dev, ddata);
		if (error)
			goto err_allow_idle;
	} else {
		error = sysc_disable_module(dev);
		if (error)
			goto err_allow_idle;
	}

	sysc_disable_main_clocks(ddata);

	if (sysc_opt_clks_needed(ddata))
		sysc_disable_opt_clocks(ddata);

	ddata->enabled = false;

err_allow_idle:
	sysc_clkdm_allow_idle(ddata);

	/* Note that the rstctrl reset is asserted even on error */
	reset_control_assert(ddata->rsts);

	return error;
}
1338
/*
 * Runtime PM resume: ungate the module clocks, deassert the optional
 * rstctrl reset and re-enable the interconnect target module. On error
 * the clocks are disabled again in reverse order.
 */
static int __maybe_unused sysc_runtime_resume(struct device *dev)
{
	struct sysc *ddata;
	int error = 0;

	ddata = dev_get_drvdata(dev);

	/* Nothing to do if already enabled */
	if (ddata->enabled)
		return 0;


	sysc_clkdm_deny_idle(ddata);

	if (sysc_opt_clks_needed(ddata)) {
		error = sysc_enable_opt_clocks(ddata);
		if (error)
			goto err_allow_idle;
	}

	error = sysc_enable_main_clocks(ddata);
	if (error)
		goto err_opt_clocks;

	/* Reset must be released only after the clocks are running */
	reset_control_deassert(ddata->rsts);

	if (ddata->legacy_mode) {
		error = sysc_runtime_resume_legacy(dev, ddata);
		if (error)
			goto err_main_clocks;
	} else {
		error = sysc_enable_module(dev);
		if (error)
			goto err_main_clocks;
	}

	ddata->enabled = true;

	sysc_clkdm_allow_idle(ddata);

	return 0;

err_main_clocks:
	sysc_disable_main_clocks(ddata);
err_opt_clocks:
	if (sysc_opt_clks_needed(ddata))
		sysc_disable_opt_clocks(ddata);
err_allow_idle:
	sysc_clkdm_allow_idle(ddata);

	return error;
}
1390
1391/*
1392 * Checks if device context was lost. Assumes the sysconfig register value
1393 * after lost context is different from the configured value. Only works for
1394 * enabled devices.
1395 *
1396 * Eventually we may want to also add support to using the context lost
1397 * registers that some SoCs have.
1398 */
1399static int sysc_check_context(struct sysc *ddata)
1400{
1401 u32 reg;
1402
1403 if (!ddata->enabled)
1404 return -ENODATA;
1405
1406 reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
1407 if (reg == ddata->sysconfig)
1408 return 0;
1409
1410 return -EACCES;
1411}
1412
/*
 * Reinitialize the module after possible context loss. The module is
 * suspended and resumed to reprogram the sysconfig register, optionally
 * with a reset and sysconfig restore for modules that need it. The module
 * is left enabled only if @leave_enabled is set.
 */
static int sysc_reinit_module(struct sysc *ddata, bool leave_enabled)
{
	struct device *dev = ddata->dev;
	int error;

	if (ddata->enabled) {
		/* Nothing to do if enabled and context not lost */
		error = sysc_check_context(ddata);
		if (!error)
			return 0;

		/* Disable target module if it is enabled */
		error = sysc_runtime_suspend(dev);
		if (error)
			dev_warn(dev, "reinit suspend failed: %i\n", error);
	}

	/* Enable target module */
	error = sysc_runtime_resume(dev);
	if (error)
		dev_warn(dev, "reinit resume failed: %i\n", error);

	/* Some modules like am335x gpmc need reset and restore of sysconfig */
	if (ddata->cfg.quirks & SYSC_QUIRK_RESET_ON_CTX_LOST) {
		error = sysc_reset(ddata);
		if (error)
			dev_warn(dev, "reinit reset failed: %i\n", error);

		sysc_write_sysconfig(ddata, ddata->sysconfig);
	}

	if (leave_enabled)
		return error;

	/* Disable target module if no leave_enabled was set */
	error = sysc_runtime_suspend(dev);
	if (error)
		dev_warn(dev, "reinit suspend failed: %i\n", error);

	return error;
}
1454
1455static int __maybe_unused sysc_noirq_suspend(struct device *dev)
1456{
1457 struct sysc *ddata;
1458
1459 ddata = dev_get_drvdata(dev);
1460
1461 if (ddata->cfg.quirks &
1462 (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
1463 return 0;
1464
1465 if (!ddata->enabled)
1466 return 0;
1467
1468 ddata->needs_resume = 1;
1469
1470 return sysc_runtime_suspend(dev);
1471}
1472
1473static int __maybe_unused sysc_noirq_resume(struct device *dev)
1474{
1475 struct sysc *ddata;
1476 int error = 0;
1477
1478 ddata = dev_get_drvdata(dev);
1479
1480 if (ddata->cfg.quirks &
1481 (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
1482 return 0;
1483
1484 if (ddata->cfg.quirks & SYSC_QUIRK_REINIT_ON_RESUME) {
1485 error = sysc_reinit_module(ddata, ddata->needs_resume);
1486 if (error)
1487 dev_warn(dev, "noirq_resume failed: %i\n", error);
1488 } else if (ddata->needs_resume) {
1489 error = sysc_runtime_resume(dev);
1490 if (error)
1491 dev_warn(dev, "noirq_resume failed: %i\n", error);
1492 }
1493
1494 ddata->needs_resume = 0;
1495
1496 return error;
1497}
1498
/* Device PM ops: system sleep uses the noirq phase, plus runtime PM */
static const struct dev_pm_ops sysc_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sysc_noirq_suspend, sysc_noirq_resume)
	SET_RUNTIME_PM_OPS(sysc_runtime_suspend,
			   sysc_runtime_resume,
			   NULL)
};
1505
/* Module revision register based quirks */
struct sysc_revision_quirk {
	const char *name;	/* module name to set on match */
	u32 base;		/* module physical base, 0 matches any base */
	int rev_offset;		/* revision register offset, -ENODEV if none */
	int sysc_offset;	/* sysconfig register offset, -ENODEV if none */
	int syss_offset;	/* sysstatus register offset, -ENODEV if none */
	u32 revision;		/* expected revision register value */
	u32 revision_mask;	/* mask applied when comparing revision */
	u32 quirks;		/* SYSC_QUIRK_* flags to set on match */
};

/* Initializer helper for the sysc_revision_quirks[] entries below */
#define SYSC_QUIRK(optname, optbase, optrev, optsysc, optsyss,		\
		   optrev_val, optrevmask, optquirkmask)		\
	{								\
		.name = (optname),					\
		.base = (optbase),					\
		.rev_offset = (optrev),					\
		.sysc_offset = (optsysc),				\
		.syss_offset = (optsyss),				\
		.revision = (optrev_val),				\
		.revision_mask = (optrevmask),				\
		.quirks = (optquirkmask),				\
	}
1530
/* Matched by sysc_init_early_quirks() and sysc_init_revision_quirks() */
static const struct sysc_revision_quirk sysc_revision_quirks[] = {
	/* These drivers need to be fixed to not use pm_runtime_irq_safe() */
	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff,
		   SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
		   SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
	/* Uarts on omap4 and later */
	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff,
		   SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff,
		   SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47424e03, 0xffffffff,
		   SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),

	/* Quirks that need to be set based on the module address */
	SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff,
		   SYSC_QUIRK_EXT_OPT_CLOCK | SYSC_QUIRK_NO_RESET_ON_INIT |
		   SYSC_QUIRK_SWSUP_SIDLE),

	/* Quirks that need to be set based on detected module */
	SYSC_QUIRK("aess", 0, 0, 0x10, -ENODEV, 0x40000000, 0xffffffff,
		   SYSC_MODULE_QUIRK_AESS),
	/* Errata i893 handling for dra7 dcan1 and 2 */
	SYSC_QUIRK("dcan", 0x4ae3c000, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff,
		   SYSC_QUIRK_CLKDM_NOAUTO),
	SYSC_QUIRK("dcan", 0x48480000, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff,
		   SYSC_QUIRK_CLKDM_NOAUTO),
	SYSC_QUIRK("dss", 0x4832a000, 0, 0x10, 0x14, 0x00000020, 0xffffffff,
		   SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET),
	SYSC_QUIRK("dss", 0x58000000, 0, -ENODEV, 0x14, 0x00000040, 0xffffffff,
		   SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET),
	SYSC_QUIRK("dss", 0x58000000, 0, -ENODEV, 0x14, 0x00000061, 0xffffffff,
		   SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET),
	SYSC_QUIRK("dwc3", 0x48880000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff,
		   SYSC_QUIRK_CLKDM_NOAUTO),
	SYSC_QUIRK("dwc3", 0x488c0000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff,
		   SYSC_QUIRK_CLKDM_NOAUTO),
	SYSC_QUIRK("gpio", 0, 0, 0x10, 0x114, 0x50600801, 0xffff00ff,
		   SYSC_QUIRK_OPT_CLKS_IN_RESET),
	SYSC_QUIRK("gpmc", 0, 0, 0x10, 0x14, 0x00000060, 0xffffffff,
		   SYSC_QUIRK_REINIT_ON_CTX_LOST | SYSC_QUIRK_RESET_ON_CTX_LOST |
		   SYSC_QUIRK_GPMC_DEBUG),
	SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50030200, 0xffffffff,
		   SYSC_QUIRK_OPT_CLKS_NEEDED),
	SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x00000006, 0xffffffff,
		   SYSC_MODULE_QUIRK_HDQ1W | SYSC_MODULE_QUIRK_ENA_RESETDONE),
	SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x0000000a, 0xffffffff,
		   SYSC_MODULE_QUIRK_HDQ1W | SYSC_MODULE_QUIRK_ENA_RESETDONE),
	SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x00000036, 0x000000ff,
		   SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
	SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x0000003c, 0x000000ff,
		   SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
	SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x00000040, 0x000000ff,
		   SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
	SYSC_QUIRK("i2c", 0, 0, 0x10, 0x90, 0x5040000a, 0xfffff0f0,
		   SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
	SYSC_QUIRK("gpu", 0x50000000, 0x14, -ENODEV, -ENODEV, 0x00010201, 0xffffffff, 0),
	SYSC_QUIRK("gpu", 0x50000000, 0xfe00, 0xfe10, -ENODEV, 0x40000000 , 0xffffffff,
		   SYSC_MODULE_QUIRK_SGX),
	SYSC_QUIRK("lcdc", 0, 0, 0x54, -ENODEV, 0x4f201000, 0xffffffff,
		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
	SYSC_QUIRK("mcasp", 0, 0, 0x4, -ENODEV, 0x44306302, 0xffffffff,
		   SYSC_QUIRK_SWSUP_SIDLE),
	SYSC_QUIRK("rtc", 0, 0x74, 0x78, -ENODEV, 0x4eb01908, 0xffff00f0,
		   SYSC_MODULE_QUIRK_RTC_UNLOCK),
	SYSC_QUIRK("tptc", 0, 0, 0x10, -ENODEV, 0x40006c00, 0xffffefff,
		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
	SYSC_QUIRK("tptc", 0, 0, -ENODEV, -ENODEV, 0x40007c00, 0xffffffff,
		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
	SYSC_QUIRK("sata", 0, 0xfc, 0x1100, -ENODEV, 0x5e412000, 0xffffffff,
		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
	SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff,
		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
	SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -ENODEV, 0x50700101, 0xffffffff,
		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
	SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000033,
		   0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY |
		   SYSC_MODULE_QUIRK_OTG),
	SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000040,
		   0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY |
		   SYSC_MODULE_QUIRK_OTG),
	SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050,
		   0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY |
		   SYSC_MODULE_QUIRK_OTG),
	SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -ENODEV, 0x4ea2080d, 0xffffffff,
		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY |
		   SYSC_QUIRK_REINIT_ON_CTX_LOST),
	SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
		   SYSC_MODULE_QUIRK_WDT),
	/* PRUSS on am3, am4 and am5 */
	SYSC_QUIRK("pruss", 0, 0x26000, 0x26004, -ENODEV, 0x47000000, 0xff000000,
		   SYSC_MODULE_QUIRK_PRUSS),
	/* Watchdog on am3 and am4 */
	SYSC_QUIRK("wdt", 0x44e35000, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
		   SYSC_MODULE_QUIRK_WDT | SYSC_QUIRK_SWSUP_SIDLE),

/* Entries below only name detected modules for debugging, no quirks set */
#ifdef DEBUG
	SYSC_QUIRK("adc", 0, 0, 0x10, -ENODEV, 0x47300001, 0xffffffff, 0),
	SYSC_QUIRK("atl", 0, 0, -ENODEV, -ENODEV, 0x0a070100, 0xffffffff, 0),
	SYSC_QUIRK("cm", 0, 0, -ENODEV, -ENODEV, 0x40000301, 0xffffffff, 0),
	SYSC_QUIRK("control", 0, 0, 0x10, -ENODEV, 0x40000900, 0xffffffff, 0),
	SYSC_QUIRK("cpgmac", 0, 0x1200, 0x1208, 0x1204, 0x4edb1902,
		   0xffff00f0, 0),
	SYSC_QUIRK("dcan", 0, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff, 0),
	SYSC_QUIRK("dcan", 0, 0x20, -ENODEV, -ENODEV, 0x4edb1902, 0xffffffff, 0),
	SYSC_QUIRK("dispc", 0x4832a400, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0),
	SYSC_QUIRK("dispc", 0x58001000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0),
	SYSC_QUIRK("dispc", 0x58001000, 0, 0x10, 0x14, 0x00000051, 0xffffffff, 0),
	SYSC_QUIRK("dmic", 0, 0, 0x10, -ENODEV, 0x50010000, 0xffffffff, 0),
	SYSC_QUIRK("dsi", 0x58004000, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0),
	SYSC_QUIRK("dsi", 0x58005000, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0),
	SYSC_QUIRK("dsi", 0x58005000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0),
	SYSC_QUIRK("dsi", 0x58009000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0),
	SYSC_QUIRK("dwc3", 0, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff, 0),
	SYSC_QUIRK("d2d", 0x4a0b6000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
	SYSC_QUIRK("d2d", 0x4a0cd000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
	SYSC_QUIRK("elm", 0x48080000, 0, 0x10, 0x14, 0x00000020, 0xffffffff, 0),
	SYSC_QUIRK("emif", 0, 0, -ENODEV, -ENODEV, 0x40441403, 0xffff0fff, 0),
	SYSC_QUIRK("emif", 0, 0, -ENODEV, -ENODEV, 0x50440500, 0xffffffff, 0),
	SYSC_QUIRK("epwmss", 0, 0, 0x4, -ENODEV, 0x47400001, 0xffffffff, 0),
	SYSC_QUIRK("gpu", 0, 0x1fc00, 0x1fc10, -ENODEV, 0, 0, 0),
	SYSC_QUIRK("gpu", 0, 0xfe00, 0xfe10, -ENODEV, 0x40000000 , 0xffffffff, 0),
	SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50031d00, 0xffffffff, 0),
	SYSC_QUIRK("hsi", 0, 0, 0x10, 0x14, 0x50043101, 0xffffffff, 0),
	SYSC_QUIRK("iss", 0, 0, 0x10, -ENODEV, 0x40000101, 0xffffffff, 0),
	SYSC_QUIRK("keypad", 0x4a31c000, 0, 0x10, 0x14, 0x00000020, 0xffffffff, 0),
	SYSC_QUIRK("mcasp", 0, 0, 0x4, -ENODEV, 0x44307b02, 0xffffffff, 0),
	SYSC_QUIRK("mcbsp", 0, -ENODEV, 0x8c, -ENODEV, 0, 0, 0),
	SYSC_QUIRK("mcspi", 0, 0, 0x10, -ENODEV, 0x40300a0b, 0xffff00ff, 0),
	SYSC_QUIRK("mcspi", 0, 0, 0x110, 0x114, 0x40300a0b, 0xffffffff, 0),
	SYSC_QUIRK("mailbox", 0, 0, 0x10, -ENODEV, 0x00000400, 0xffffffff, 0),
	SYSC_QUIRK("m3", 0, 0, -ENODEV, -ENODEV, 0x5f580105, 0x0fff0f00, 0),
	SYSC_QUIRK("ocp2scp", 0, 0, 0x10, 0x14, 0x50060005, 0xfffffff0, 0),
	SYSC_QUIRK("ocp2scp", 0, 0, -ENODEV, -ENODEV, 0x50060007, 0xffffffff, 0),
	SYSC_QUIRK("padconf", 0, 0, 0x10, -ENODEV, 0x4fff0800, 0xffffffff, 0),
	SYSC_QUIRK("padconf", 0, 0, -ENODEV, -ENODEV, 0x40001100, 0xffffffff, 0),
	SYSC_QUIRK("pcie", 0x51000000, -ENODEV, -ENODEV, -ENODEV, 0, 0, 0),
	SYSC_QUIRK("pcie", 0x51800000, -ENODEV, -ENODEV, -ENODEV, 0, 0, 0),
	SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x40000100, 0xffffffff, 0),
	SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x00004102, 0xffffffff, 0),
	SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x40000400, 0xffffffff, 0),
	SYSC_QUIRK("rfbi", 0x4832a800, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
	SYSC_QUIRK("rfbi", 0x58002000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
	SYSC_QUIRK("scm", 0, 0, 0x10, -ENODEV, 0x40000900, 0xffffffff, 0),
	SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4e8b0100, 0xffffffff, 0),
	SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4f000100, 0xffffffff, 0),
	SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x40000900, 0xffffffff, 0),
	SYSC_QUIRK("scrm", 0, 0, -ENODEV, -ENODEV, 0x00000010, 0xffffffff, 0),
	SYSC_QUIRK("sdio", 0, 0, 0x10, -ENODEV, 0x40202301, 0xffff0ff0, 0),
	SYSC_QUIRK("sdio", 0, 0x2fc, 0x110, 0x114, 0x31010000, 0xffffffff, 0),
	SYSC_QUIRK("sdma", 0, 0, 0x2c, 0x28, 0x00010900, 0xffffffff, 0),
	SYSC_QUIRK("sham", 0, 0x100, 0x110, 0x114, 0x40000c03, 0xffffffff, 0),
	SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40000902, 0xffffffff, 0),
	SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40002903, 0xffffffff, 0),
	SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x24, -ENODEV, 0x00000000, 0xffffffff, 0),
	SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x38, -ENODEV, 0x00000000, 0xffffffff, 0),
	SYSC_QUIRK("spinlock", 0, 0, 0x10, -ENODEV, 0x50020000, 0xffffffff, 0),
	SYSC_QUIRK("rng", 0, 0x1fe0, 0x1fe4, -ENODEV, 0x00000020, 0xffffffff, 0),
	SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000013, 0xffffffff, 0),
	SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff, 0),
	/* Some timers on omap4 and later */
	SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x50002100, 0xffffffff, 0),
	SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x4fff1301, 0xffff00ff, 0),
	SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000040, 0xffffffff, 0),
	SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000011, 0xffffffff, 0),
	SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000060, 0xffffffff, 0),
	SYSC_QUIRK("tpcc", 0, 0, -ENODEV, -ENODEV, 0x40014c00, 0xffffffff, 0),
	SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000004, 0xffffffff, 0),
	SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000008, 0xffffffff, 0),
	SYSC_QUIRK("venc", 0x58003000, 0, -ENODEV, -ENODEV, 0x00000002, 0xffffffff, 0),
	SYSC_QUIRK("vfpe", 0, 0, 0x104, -ENODEV, 0x4d001200, 0xffffffff, 0),
#endif
};
1704
1705/*
1706 * Early quirks based on module base and register offsets only that are
1707 * needed before the module revision can be read
1708 */
1709static void sysc_init_early_quirks(struct sysc *ddata)
1710{
1711 const struct sysc_revision_quirk *q;
1712 int i;
1713
1714 for (i = 0; i < ARRAY_SIZE(sysc_revision_quirks); i++) {
1715 q = &sysc_revision_quirks[i];
1716
1717 if (!q->base)
1718 continue;
1719
1720 if (q->base != ddata->module_pa)
1721 continue;
1722
1723 if (q->rev_offset != ddata->offsets[SYSC_REVISION])
1724 continue;
1725
1726 if (q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG])
1727 continue;
1728
1729 if (q->syss_offset != ddata->offsets[SYSC_SYSSTATUS])
1730 continue;
1731
1732 ddata->name = q->name;
1733 ddata->cfg.quirks |= q->quirks;
1734 }
1735}
1736
1737/* Quirks that also consider the revision register value */
1738static void sysc_init_revision_quirks(struct sysc *ddata)
1739{
1740 const struct sysc_revision_quirk *q;
1741 int i;
1742
1743 for (i = 0; i < ARRAY_SIZE(sysc_revision_quirks); i++) {
1744 q = &sysc_revision_quirks[i];
1745
1746 if (q->base && q->base != ddata->module_pa)
1747 continue;
1748
1749 if (q->rev_offset != ddata->offsets[SYSC_REVISION])
1750 continue;
1751
1752 if (q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG])
1753 continue;
1754
1755 if (q->syss_offset != ddata->offsets[SYSC_SYSSTATUS])
1756 continue;
1757
1758 if (q->revision == ddata->revision ||
1759 (q->revision & q->revision_mask) ==
1760 (ddata->revision & q->revision_mask)) {
1761 ddata->name = q->name;
1762 ddata->cfg.quirks |= q->quirks;
1763 }
1764 }
1765}
1766
1767/*
1768 * DSS needs dispc outputs disabled to reset modules. Returns mask of
1769 * enabled DSS interrupts. Eventually we may be able to do this on
1770 * dispc init rather than top-level DSS init.
1771 */
1772static u32 sysc_quirk_dispc(struct sysc *ddata, int dispc_offset,
1773 bool disable)
1774{
1775 bool lcd_en, digit_en, lcd2_en = false, lcd3_en = false;
1776 const int lcd_en_mask = BIT(0), digit_en_mask = BIT(1);
1777 int manager_count;
1778 bool framedonetv_irq = true;
1779 u32 val, irq_mask = 0;
1780
1781 switch (sysc_soc->soc) {
1782 case SOC_2420 ... SOC_3630:
1783 manager_count = 2;
1784 framedonetv_irq = false;
1785 break;
1786 case SOC_4430 ... SOC_4470:
1787 manager_count = 3;
1788 break;
1789 case SOC_5430:
1790 case SOC_DRA7:
1791 manager_count = 4;
1792 break;
1793 case SOC_AM4:
1794 manager_count = 1;
1795 framedonetv_irq = false;
1796 break;
1797 case SOC_UNKNOWN:
1798 default:
1799 return 0;
1800 }
1801
1802 /* Remap the whole module range to be able to reset dispc outputs */
1803 devm_iounmap(ddata->dev, ddata->module_va);
1804 ddata->module_va = devm_ioremap(ddata->dev,
1805 ddata->module_pa,
1806 ddata->module_size);
1807 if (!ddata->module_va)
1808 return -EIO;
1809
1810 /* DISP_CONTROL, shut down lcd and digit on disable if enabled */
1811 val = sysc_read(ddata, dispc_offset + 0x40);
1812 lcd_en = val & lcd_en_mask;
1813 digit_en = val & digit_en_mask;
1814 if (lcd_en)
1815 irq_mask |= BIT(0); /* FRAMEDONE */
1816 if (digit_en) {
1817 if (framedonetv_irq)
1818 irq_mask |= BIT(24); /* FRAMEDONETV */
1819 else
1820 irq_mask |= BIT(2) | BIT(3); /* EVSYNC bits */
1821 }
1822 if (disable && (lcd_en || digit_en))
1823 sysc_write(ddata, dispc_offset + 0x40,
1824 val & ~(lcd_en_mask | digit_en_mask));
1825
1826 if (manager_count <= 2)
1827 return irq_mask;
1828
1829 /* DISPC_CONTROL2 */
1830 val = sysc_read(ddata, dispc_offset + 0x238);
1831 lcd2_en = val & lcd_en_mask;
1832 if (lcd2_en)
1833 irq_mask |= BIT(22); /* FRAMEDONE2 */
1834 if (disable && lcd2_en)
1835 sysc_write(ddata, dispc_offset + 0x238,
1836 val & ~lcd_en_mask);
1837
1838 if (manager_count <= 3)
1839 return irq_mask;
1840
1841 /* DISPC_CONTROL3 */
1842 val = sysc_read(ddata, dispc_offset + 0x848);
1843 lcd3_en = val & lcd_en_mask;
1844 if (lcd3_en)
1845 irq_mask |= BIT(30); /* FRAMEDONE3 */
1846 if (disable && lcd3_en)
1847 sysc_write(ddata, dispc_offset + 0x848,
1848 val & ~lcd_en_mask);
1849
1850 return irq_mask;
1851}
1852
1853/* DSS needs child outputs disabled and SDI registers cleared for reset */
1854static void sysc_pre_reset_quirk_dss(struct sysc *ddata)
1855{
1856 const int dispc_offset = 0x1000;
1857 int error;
1858 u32 irq_mask, val;
1859
1860 /* Get enabled outputs */
1861 irq_mask = sysc_quirk_dispc(ddata, dispc_offset, false);
1862 if (!irq_mask)
1863 return;
1864
1865 /* Clear IRQSTATUS */
1866 sysc_write(ddata, dispc_offset + 0x18, irq_mask);
1867
1868 /* Disable outputs */
1869 val = sysc_quirk_dispc(ddata, dispc_offset, true);
1870
1871 /* Poll IRQSTATUS */
1872 error = readl_poll_timeout(ddata->module_va + dispc_offset + 0x18,
1873 val, val != irq_mask, 100, 50);
1874 if (error)
1875 dev_warn(ddata->dev, "%s: timed out %08x !+ %08x\n",
1876 __func__, val, irq_mask);
1877
1878 if (sysc_soc->soc == SOC_3430 || sysc_soc->soc == SOC_AM35) {
1879 /* Clear DSS_SDI_CONTROL */
1880 sysc_write(ddata, 0x44, 0);
1881
1882 /* Clear DSS_PLL_CONTROL */
1883 sysc_write(ddata, 0x48, 0);
1884 }
1885
1886 /* Clear DSS_CONTROL to switch DSS clock sources to PRCM if not */
1887 sysc_write(ddata, 0x40, 0);
1888}
1889
1890/* 1-wire needs module's internal clocks enabled for reset */
1891static void sysc_pre_reset_quirk_hdq1w(struct sysc *ddata)
1892{
1893 int offset = 0x0c; /* HDQ_CTRL_STATUS */
1894 u16 val;
1895
1896 val = sysc_read(ddata, offset);
1897 val |= BIT(5);
1898 sysc_write(ddata, offset, val);
1899}
1900
/* AESS (Audio Engine SubSystem) needs autogating set after enable */
static void sysc_module_enable_quirk_aess(struct sysc *ddata)
{
	const int auto_gating = 0x7c;	/* AESS_AUTO_GATING_ENABLE */

	sysc_write(ddata, auto_gating, 1);
}
1908
1909/* I2C needs to be disabled for reset */
1910static void sysc_clk_quirk_i2c(struct sysc *ddata, bool enable)
1911{
1912 int offset;
1913 u16 val;
1914
1915 /* I2C_CON, omap2/3 is different from omap4 and later */
1916 if ((ddata->revision & 0xffffff00) == 0x001f0000)
1917 offset = 0x24;
1918 else
1919 offset = 0xa4;
1920
1921 /* I2C_EN */
1922 val = sysc_read(ddata, offset);
1923 if (enable)
1924 val |= BIT(15);
1925 else
1926 val &= ~BIT(15);
1927 sysc_write(ddata, offset, val);
1928}
1929
/* Disable the I2C module before reset */
static void sysc_pre_reset_quirk_i2c(struct sysc *ddata)
{
	sysc_clk_quirk_i2c(ddata, false);
}
1934
/* Re-enable the I2C module after reset is done */
static void sysc_post_reset_quirk_i2c(struct sysc *ddata)
{
	sysc_clk_quirk_i2c(ddata, true);
}
1939
/* RTC on am3 and 4 needs to be unlocked and locked for sysconfig */
static void sysc_quirk_rtc(struct sysc *ddata, bool lock)
{
	u32 val, kick0_val = 0, kick1_val = 0;
	unsigned long flags;
	int error;

	/* Writing the magic kick values unlocks, writing zeros locks again */
	if (!lock) {
		kick0_val = 0x83e70b13;
		kick1_val = 0x95a4f1e0;
	}

	local_irq_save(flags);
	/* RTC_STATUS BUSY bit may stay active for 1/32768 seconds (~30 usec) */
	error = readl_poll_timeout_atomic(ddata->module_va + 0x44, val,
					  !(val & BIT(0)), 100, 50);
	if (error)
		dev_warn(ddata->dev, "rtc busy timeout\n");
	/* Now we have ~15 microseconds to read/write various registers */
	sysc_write(ddata, 0x6c, kick0_val);	/* RTC_KICK0 */
	sysc_write(ddata, 0x70, kick1_val);	/* RTC_KICK1 */
	local_irq_restore(flags);
}
1963
/* Unlock the RTC kick registers before writing sysconfig */
static void sysc_module_unlock_quirk_rtc(struct sysc *ddata)
{
	sysc_quirk_rtc(ddata, false);
}
1968
/* Lock the RTC kick registers again after writing sysconfig */
static void sysc_module_lock_quirk_rtc(struct sysc *ddata)
{
	sysc_quirk_rtc(ddata, true);
}
1973
/* OTG omap2430 glue layer up to omap4 needs OTG_FORCESTDBY configured */
static void sysc_module_enable_quirk_otg(struct sysc *ddata)
{
	int offset = 0x414;	/* OTG_FORCESTDBY */

	/* Clear ENABLEFORCE so the glue layer can leave standby */
	sysc_write(ddata, offset, 0);
}
1981
/* Force the OTG glue layer to standby when the module is disabled */
static void sysc_module_disable_quirk_otg(struct sysc *ddata)
{
	int offset = 0x414;	/* OTG_FORCESTDBY */
	u32 val = BIT(0);	/* ENABLEFORCE */

	sysc_write(ddata, offset, val);
}
1989
/* 36xx SGX needs a quirk to bypass the OCP IPG interrupt logic */
static void sysc_module_enable_quirk_sgx(struct sysc *ddata)
{
	int offset = 0xff08;	/* OCP_DEBUG_CONFIG */
	u32 val = BIT(31);	/* THALIA_INT_BYPASS */

	sysc_write(ddata, offset, val);
}
1998
1999/* Watchdog timer needs a disable sequence after reset */
2000static void sysc_reset_done_quirk_wdt(struct sysc *ddata)
2001{
2002 int wps, spr, error;
2003 u32 val;
2004
2005 wps = 0x34;
2006 spr = 0x48;
2007
2008 sysc_write(ddata, spr, 0xaaaa);
2009 error = readl_poll_timeout(ddata->module_va + wps, val,
2010 !(val & 0x10), 100,
2011 MAX_MODULE_SOFTRESET_WAIT);
2012 if (error)
2013 dev_warn(ddata->dev, "wdt disable step1 failed\n");
2014
2015 sysc_write(ddata, spr, 0x5555);
2016 error = readl_poll_timeout(ddata->module_va + wps, val,
2017 !(val & 0x10), 100,
2018 MAX_MODULE_SOFTRESET_WAIT);
2019 if (error)
2020 dev_warn(ddata->dev, "wdt disable step2 failed\n");
2021}
2022
2023/* PRUSS needs to set MSTANDBY_INIT inorder to idle properly */
2024static void sysc_module_disable_quirk_pruss(struct sysc *ddata)
2025{
2026 u32 reg;
2027
2028 reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
2029 reg |= SYSC_PRUSS_STANDBY_INIT;
2030 sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
2031}
2032
/*
 * Set up the module specific quirk handlers based on the quirk flags
 * detected earlier. Note that some quirks are exclusive and return
 * early after installing their handlers.
 */
static void sysc_init_module_quirks(struct sysc *ddata)
{
	/* Quirk handlers only apply to detected non-legacy modules */
	if (ddata->legacy_mode || !ddata->name)
		return;

	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_HDQ1W) {
		ddata->pre_reset_quirk = sysc_pre_reset_quirk_hdq1w;

		return;
	}

#ifdef CONFIG_OMAP_GPMC_DEBUG
	/* For GPMC debugging, keep the bootloader configured state */
	if (ddata->cfg.quirks & SYSC_QUIRK_GPMC_DEBUG) {
		ddata->cfg.quirks |= SYSC_QUIRK_NO_RESET_ON_INIT;

		return;
	}
#endif

	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_I2C) {
		ddata->pre_reset_quirk = sysc_pre_reset_quirk_i2c;
		ddata->post_reset_quirk = sysc_post_reset_quirk_i2c;

		return;
	}

	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_AESS)
		ddata->module_enable_quirk = sysc_module_enable_quirk_aess;

	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_DSS_RESET)
		ddata->pre_reset_quirk = sysc_pre_reset_quirk_dss;

	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_RTC_UNLOCK) {
		ddata->module_unlock_quirk = sysc_module_unlock_quirk_rtc;
		ddata->module_lock_quirk = sysc_module_lock_quirk_rtc;

		return;
	}

	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_OTG) {
		ddata->module_enable_quirk = sysc_module_enable_quirk_otg;
		ddata->module_disable_quirk = sysc_module_disable_quirk_otg;
	}

	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_SGX)
		ddata->module_enable_quirk = sysc_module_enable_quirk_sgx;

	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_WDT) {
		/* Watchdog disable sequence is also needed on module disable */
		ddata->reset_done_quirk = sysc_reset_done_quirk_wdt;
		ddata->module_disable_quirk = sysc_reset_done_quirk_wdt;
	}

	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_PRUSS)
		ddata->module_disable_quirk = sysc_module_disable_quirk_pruss;
}
2088
2089static int sysc_clockdomain_init(struct sysc *ddata)
2090{
2091 struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
2092 struct clk *fck = NULL, *ick = NULL;
2093 int error;
2094
2095 if (!pdata || !pdata->init_clockdomain)
2096 return 0;
2097
2098 switch (ddata->nr_clocks) {
2099 case 2:
2100 ick = ddata->clocks[SYSC_ICK];
2101 fallthrough;
2102 case 1:
2103 fck = ddata->clocks[SYSC_FCK];
2104 break;
2105 case 0:
2106 return 0;
2107 }
2108
2109 error = pdata->init_clockdomain(ddata->dev, fck, ick, &ddata->cookie);
2110 if (!error || error == -ENODEV)
2111 return 0;
2112
2113 return error;
2114}
2115
2116/*
2117 * Note that pdata->init_module() typically does a reset first. After
2118 * pdata->init_module() is done, PM runtime can be used for the interconnect
2119 * target module.
2120 */
static int sysc_legacy_init(struct sysc *ddata)
{
	struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
	int error;

	if (!pdata || !pdata->init_module)
		return 0;

	error = pdata->init_module(ddata->dev, ddata->mdata, &ddata->cookie);
	/* -EEXIST means platform code already initialized it, not an error */
	if (error == -EEXIST)
		error = 0;

	return error;
}
2135
2136/*
2137 * Note that the caller must ensure the interconnect target module is enabled
2138 * before calling reset. Otherwise reset will not complete.
2139 */
static int sysc_reset(struct sysc *ddata)
{
	int sysc_offset, sysc_val, error;
	u32 sysc_mask;

	sysc_offset = ddata->offsets[SYSC_SYSCONFIG];

	/*
	 * Nothing to do in legacy mode (platform data handles reset), when
	 * the IP has no SOFTRESET bit, or when reset on init is disabled.
	 */
	if (ddata->legacy_mode ||
	    ddata->cap->regbits->srst_shift < 0 ||
	    ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)
		return 0;

	sysc_mask = BIT(ddata->cap->regbits->srst_shift);

	if (ddata->pre_reset_quirk)
		ddata->pre_reset_quirk(ddata);

	/* Trigger softreset via SYSCONFIG when the register exists */
	if (sysc_offset >= 0) {
		sysc_val = sysc_read_sysconfig(ddata);
		sysc_val |= sysc_mask;
		sysc_write(ddata, sysc_offset, sysc_val);

		/*
		 * Some devices need a delay before reading registers
		 * after reset. Presumably a srst_udelay is not needed
		 * for devices that use a rstctrl register reset.
		 */
		if (ddata->cfg.srst_udelay)
			fsleep(ddata->cfg.srst_udelay);

		/*
		 * Flush posted write. For devices needing srst_udelay
		 * this should trigger an interconnect error if the
		 * srst_udelay value is needed but not configured.
		 */
		sysc_val = sysc_read_sysconfig(ddata);
	}

	if (ddata->post_reset_quirk)
		ddata->post_reset_quirk(ddata);

	/* Timeout is reported but error is still returned to the caller */
	error = sysc_wait_softreset(ddata);
	if (error)
		dev_warn(ddata->dev, "OCP softreset timed out\n");

	if (ddata->reset_done_quirk)
		ddata->reset_done_quirk(ddata);

	return error;
}
2190
2191/*
2192 * At this point the module is configured enough to read the revision but
2193 * module may not be completely configured yet to use PM runtime. Enable
2194 * all clocks directly during init to configure the quirks needed for PM
2195 * runtime based on the revision register.
2196 */
static int sysc_init_module(struct sysc *ddata)
{
	bool rstctrl_deasserted = false;
	int error = 0;

	error = sysc_clockdomain_init(ddata);
	if (error)
		return error;

	/* Keep the clockdomain active while poking at the module */
	sysc_clkdm_deny_idle(ddata);

	/*
	 * Always enable clocks. The bootloader may or may not have enabled
	 * the related clocks.
	 */
	error = sysc_enable_opt_clocks(ddata);
	if (error)
		return error;

	error = sysc_enable_main_clocks(ddata);
	if (error)
		goto err_opt_clocks;

	if (!(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)) {
		error = reset_control_deassert(ddata->rsts);
		if (error)
			goto err_main_clocks;
		rstctrl_deasserted = true;
	}

	/* Quirk init depends on the revision register, so read it first */
	ddata->revision = sysc_read_revision(ddata);
	sysc_init_revision_quirks(ddata);
	sysc_init_module_quirks(ddata);

	if (ddata->legacy_mode) {
		error = sysc_legacy_init(ddata);
		if (error)
			goto err_main_clocks;
	}

	if (!ddata->legacy_mode) {
		error = sysc_enable_module(ddata->dev);
		if (error)
			goto err_main_clocks;
	}

	error = sysc_reset(ddata);
	if (error)
		dev_err(ddata->dev, "Reset failed with %d\n", error);

	if (error && !ddata->legacy_mode)
		sysc_disable_module(ddata->dev);

	/* Cleanup below runs on both success and error, keyed on error */
err_main_clocks:
	if (error)
		sysc_disable_main_clocks(ddata);
err_opt_clocks:
	/* No re-enable of clockdomain autoidle to prevent module autoidle */
	if (error) {
		sysc_disable_opt_clocks(ddata);
		sysc_clkdm_allow_idle(ddata);
	}

	if (error && rstctrl_deasserted &&
	    !(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT))
		reset_control_assert(ddata->rsts);

	return error;
}
2266
2267static int sysc_init_sysc_mask(struct sysc *ddata)
2268{
2269 struct device_node *np = ddata->dev->of_node;
2270 int error;
2271 u32 val;
2272
2273 error = of_property_read_u32(np, "ti,sysc-mask", &val);
2274 if (error)
2275 return 0;
2276
2277 ddata->cfg.sysc_val = val & ddata->cap->sysc_mask;
2278
2279 return 0;
2280}
2281
/*
 * Parse a dts idlemode list property (e.g. "ti,sysc-midle") into a bitmask
 * of allowed idlemodes. Returns -EINVAL on an out of range value.
 */
static int sysc_init_idlemode(struct sysc *ddata, u8 *idlemodes,
			      const char *name)
{
	struct device_node *np = ddata->dev->of_node;
	struct property *prop;
	const __be32 *p;
	u32 val;

	of_property_for_each_u32(np, name, prop, p, val) {
		if (val >= SYSC_NR_IDLEMODES) {
			dev_err(ddata->dev, "invalid idlemode: %i\n", val);
			return -EINVAL;
		}
		*idlemodes |= (1 << val);
	}

	return 0;
}
2300
2301static int sysc_init_idlemodes(struct sysc *ddata)
2302{
2303 int error;
2304
2305 error = sysc_init_idlemode(ddata, &ddata->cfg.midlemodes,
2306 "ti,sysc-midle");
2307 if (error)
2308 return error;
2309
2310 error = sysc_init_idlemode(ddata, &ddata->cfg.sidlemodes,
2311 "ti,sysc-sidle");
2312 if (error)
2313 return error;
2314
2315 return 0;
2316}
2317
2318/*
2319 * Only some devices on omap4 and later have SYSCONFIG reset done
2320 * bit. We can detect this if there is no SYSSTATUS at all, or the
2321 * SYSTATUS bit 0 is not used. Note that some SYSSTATUS registers
2322 * have multiple bits for the child devices like OHCI and EHCI.
2323 * Depends on SYSC being parsed first.
2324 */
static int sysc_init_syss_mask(struct sysc *ddata)
{
	struct device_node *np = ddata->dev->of_node;
	int error;
	u32 val;

	error = of_property_read_u32(np, "ti,syss-mask", &val);
	if (error) {
		/*
		 * No SYSSTATUS mask in the dts: omap4 style modules with a
		 * SOFTRESET bit track reset status via SYSCONFIG instead.
		 */
		if ((ddata->cap->type == TI_SYSC_OMAP4 ||
		     ddata->cap->type == TI_SYSC_OMAP4_TIMER) &&
		    (ddata->cfg.sysc_val & SYSC_OMAP4_SOFTRESET))
			ddata->cfg.quirks |= SYSC_QUIRK_RESET_STATUS;

		return 0;
	}

	/* SYSSTATUS bit 0 unused also implies SYSCONFIG based reset status */
	if (!(val & 1) && (ddata->cfg.sysc_val & SYSC_OMAP4_SOFTRESET))
		ddata->cfg.quirks |= SYSC_QUIRK_RESET_STATUS;

	ddata->cfg.syss_mask = val;

	return 0;
}
2348
2349/*
2350 * Many child device drivers need to have fck and opt clocks available
2351 * to get the clock rate for device internal configuration etc.
2352 */
static int sysc_child_add_named_clock(struct sysc *ddata,
				      struct device *child,
				      const char *name)
{
	struct clk *clk;
	struct clk_lookup *l;
	int error = 0;

	if (!name)
		return 0;

	/* If the child can already look up the clock, report -EEXIST */
	clk = clk_get(child, name);
	if (!IS_ERR(clk)) {
		error = -EEXIST;
		goto put_clk;
	}

	/* Otherwise alias our own clock to the child via clkdev */
	clk = clk_get(ddata->dev, name);
	if (IS_ERR(clk))
		return -ENODEV;

	l = clkdev_create(clk, name, dev_name(child));
	if (!l)
		error = -ENOMEM;
put_clk:
	/* The lookup holds its own reference; drop ours in all cases */
	clk_put(clk);

	return error;
}
2382
2383static int sysc_child_add_clocks(struct sysc *ddata,
2384 struct device *child)
2385{
2386 int i, error;
2387
2388 for (i = 0; i < ddata->nr_clocks; i++) {
2389 error = sysc_child_add_named_clock(ddata,
2390 child,
2391 ddata->clock_roles[i]);
2392 if (error && error != -EEXIST) {
2393 dev_err(ddata->dev, "could not add child clock %s: %i\n",
2394 ddata->clock_roles[i], error);
2395
2396 return error;
2397 }
2398 }
2399
2400 return 0;
2401}
2402
/* Empty type used only as a tag to recognize children of sysc modules */
static struct device_type sysc_device_type = {
};
2405
2406static struct sysc *sysc_child_to_parent(struct device *dev)
2407{
2408 struct device *parent = dev->parent;
2409
2410 if (!parent || parent->type != &sysc_device_type)
2411 return NULL;
2412
2413 return dev_get_drvdata(parent);
2414}
2415
/* Suspend the child first, then idle the parent module if it was enabled */
static int __maybe_unused sysc_child_runtime_suspend(struct device *dev)
{
	struct sysc *ddata;
	int error;

	/* dev is always a sysc child here, the PM domain is only set on those */
	ddata = sysc_child_to_parent(dev);

	error = pm_generic_runtime_suspend(dev);
	if (error)
		return error;

	if (!ddata->enabled)
		return 0;

	return sysc_runtime_suspend(ddata->dev);
}
2432
/* Re-enable the parent module if needed before resuming the child */
static int __maybe_unused sysc_child_runtime_resume(struct device *dev)
{
	struct sysc *ddata;
	int error;

	ddata = sysc_child_to_parent(dev);

	if (!ddata->enabled) {
		/* Parent resume failure is logged but the child still resumes */
		error = sysc_runtime_resume(ddata->dev);
		if (error < 0)
			dev_err(ddata->dev,
				"%s error: %i\n", __func__, error);
	}

	return pm_generic_runtime_resume(dev);
}
2449
2450#ifdef CONFIG_PM_SLEEP
static int sysc_child_suspend_noirq(struct device *dev)
{
	struct sysc *ddata;
	int error;

	ddata = sysc_child_to_parent(dev);

	dev_dbg(ddata->dev, "%s %s\n", __func__,
		ddata->name ? ddata->name : "");

	error = pm_generic_suspend_noirq(dev);
	if (error) {
		dev_err(dev, "%s error at %i: %i\n",
			__func__, __LINE__, error);

		return error;
	}

	/* Force runtime suspend for children still active at this point */
	if (!pm_runtime_status_suspended(dev)) {
		error = pm_generic_runtime_suspend(dev);
		if (error) {
			/* A busy child is not fatal for system suspend */
			dev_dbg(dev, "%s busy at %i: %i\n",
				__func__, __LINE__, error);

			return 0;
		}

		error = sysc_runtime_suspend(ddata->dev);
		if (error) {
			dev_err(dev, "%s error at %i: %i\n",
				__func__, __LINE__, error);

			return error;
		}

		/* Flag for sysc_child_resume_noirq() to undo the above */
		ddata->child_needs_resume = true;
	}

	return 0;
}
2491
static int sysc_child_resume_noirq(struct device *dev)
{
	struct sysc *ddata;
	int error;

	ddata = sysc_child_to_parent(dev);

	dev_dbg(ddata->dev, "%s %s\n", __func__,
		ddata->name ? ddata->name : "");

	/* Undo the forced runtime suspend done by sysc_child_suspend_noirq() */
	if (ddata->child_needs_resume) {
		ddata->child_needs_resume = false;

		error = sysc_runtime_resume(ddata->dev);
		if (error)
			dev_err(ddata->dev,
				"%s runtime resume error: %i\n",
				__func__, error);

		error = pm_generic_runtime_resume(dev);
		if (error)
			dev_err(ddata->dev,
				"%s generic runtime resume: %i\n",
				__func__, error);
	}

	return pm_generic_resume_noirq(dev);
}
2520#endif
2521
/* PM domain installed on children needing SYSC_QUIRK_LEGACY_IDLE handling */
static struct dev_pm_domain sysc_child_pm_domain = {
	.ops = {
		SET_RUNTIME_PM_OPS(sysc_child_runtime_suspend,
				   sysc_child_runtime_resume,
				   NULL)
		USE_PLATFORM_PM_SLEEP_OPS
		SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sysc_child_suspend_noirq,
					      sysc_child_resume_noirq)
	}
};
2532
2533/* Caller needs to take list_lock if ever used outside of cpu_pm */
2534static void sysc_reinit_modules(struct sysc_soc_info *soc)
2535{
2536 struct sysc_module *module;
2537 struct sysc *ddata;
2538
2539 list_for_each_entry(module, &sysc_soc->restored_modules, node) {
2540 ddata = module->ddata;
2541 sysc_reinit_module(ddata, ddata->enabled);
2542 }
2543}
2544
2545/**
2546 * sysc_context_notifier - optionally reset and restore module after idle
2547 * @nb: notifier block
2548 * @cmd: unused
2549 * @v: unused
2550 *
2551 * Some interconnect target modules need to be restored, or reset and restored
2552 * on CPU_PM CPU_PM_CLUSTER_EXIT notifier. This is needed at least for am335x
2553 * OTG and GPMC target modules even if the modules are unused.
2554 */
2555static int sysc_context_notifier(struct notifier_block *nb, unsigned long cmd,
2556 void *v)
2557{
2558 struct sysc_soc_info *soc;
2559
2560 soc = container_of(nb, struct sysc_soc_info, nb);
2561
2562 switch (cmd) {
2563 case CPU_CLUSTER_PM_ENTER:
2564 break;
2565 case CPU_CLUSTER_PM_ENTER_FAILED: /* No need to restore context */
2566 break;
2567 case CPU_CLUSTER_PM_EXIT:
2568 sysc_reinit_modules(soc);
2569 break;
2570 }
2571
2572 return NOTIFY_OK;
2573}
2574
2575/**
2576 * sysc_add_restored - optionally add reset and restore quirk hanlling
2577 * @ddata: device data
2578 */
static void sysc_add_restored(struct sysc *ddata)
{
	struct sysc_module *restored_module;

	/* Allocation failure just skips the quirk, it is best effort */
	restored_module = kzalloc(sizeof(*restored_module), GFP_KERNEL);
	if (!restored_module)
		return;

	restored_module->ddata = ddata;

	mutex_lock(&sysc_soc->list_lock);

	list_add(&restored_module->node, &sysc_soc->restored_modules);

	/* Register the cpu_pm notifier only once, on the first module added */
	if (sysc_soc->nb.notifier_call)
		goto out_unlock;

	sysc_soc->nb.notifier_call = sysc_context_notifier;
	cpu_pm_register_notifier(&sysc_soc->nb);

out_unlock:
	mutex_unlock(&sysc_soc->list_lock);
}
2602
2603/**
2604 * sysc_legacy_idle_quirk - handle children in omap_device compatible way
2605 * @ddata: device driver data
2606 * @child: child device driver
2607 *
2608 * Allow idle for child devices as done with _od_runtime_suspend().
2609 * Otherwise many child devices will not idle because of the permanent
2610 * parent usecount set in pm_runtime_irq_safe().
2611 *
2612 * Note that the long term solution is to just modify the child device
2613 * drivers to not set pm_runtime_irq_safe() and then this can be just
2614 * dropped.
2615 */
2616static void sysc_legacy_idle_quirk(struct sysc *ddata, struct device *child)
2617{
2618 if (ddata->cfg.quirks & SYSC_QUIRK_LEGACY_IDLE)
2619 dev_pm_domain_set(child, &sysc_child_pm_domain);
2620}
2621
/* Bus notifier: wire up clocks and legacy idle quirk for new child devices */
static int sysc_notifier_call(struct notifier_block *nb,
			      unsigned long event, void *device)
{
	struct device *dev = device;
	struct sysc *ddata;
	int error;

	/* Ignore devices that are not children of a sysc module */
	ddata = sysc_child_to_parent(dev);
	if (!ddata)
		return NOTIFY_DONE;

	switch (event) {
	case BUS_NOTIFY_ADD_DEVICE:
		error = sysc_child_add_clocks(ddata, dev);
		/* NOTE(review): returns a negative errno rather than a
		 * NOTIFY_* value on failure — confirm intended. */
		if (error)
			return error;
		sysc_legacy_idle_quirk(ddata, dev);
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}
2646
/* Registered on the platform bus to catch child device additions */
static struct notifier_block sysc_nb = {
	.notifier_call = sysc_notifier_call,
};
2650
2651/* Device tree configured quirks */
struct sysc_dts_quirk {
	const char *name;	/* dts property name to look for */
	u32 mask;		/* quirk flag to set when the property exists */
};

static const struct sysc_dts_quirk sysc_dts_quirks[] = {
	{ .name = "ti,no-idle-on-init",
	  .mask = SYSC_QUIRK_NO_IDLE_ON_INIT, },
	{ .name = "ti,no-reset-on-init",
	  .mask = SYSC_QUIRK_NO_RESET_ON_INIT, },
	{ .name = "ti,no-idle",
	  .mask = SYSC_QUIRK_NO_IDLE, },
};
2665
/* Set quirk flags for any sysc_dts_quirks properties found in np */
static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
				  bool is_child)
{
	const struct property *prop;
	int i, len;

	for (i = 0; i < ARRAY_SIZE(sysc_dts_quirks); i++) {
		const char *name = sysc_dts_quirks[i].name;

		prop = of_get_property(np, name, &len);
		if (!prop)
			continue;

		ddata->cfg.quirks |= sysc_dts_quirks[i].mask;
		/* The flags are honored on children too, but belong higher up */
		if (is_child) {
			dev_warn(ddata->dev,
				 "dts flag should be at module level for %s\n",
				 name);
		}
	}
}
2687
static int sysc_init_dts_quirks(struct sysc *ddata)
{
	struct device_node *np = ddata->dev->of_node;
	int error;
	u32 val;

	/* A "ti,hwmods" property selects legacy platform data mode */
	ddata->legacy_mode = of_get_property(np, "ti,hwmods", NULL);

	sysc_parse_dts_quirks(ddata, np, false);
	error = of_property_read_u32(np, "ti,sysc-delay-us", &val);
	if (!error) {
		if (val > 255) {
			dev_warn(ddata->dev, "bad ti,sysc-delay-us: %i\n",
				 val);
		}

		/* Out of range values are truncated, only warned about above */
		ddata->cfg.srst_udelay = (u8)val;
	}

	return 0;
}
2709
2710static void sysc_unprepare(struct sysc *ddata)
2711{
2712 int i;
2713
2714 if (!ddata->clocks)
2715 return;
2716
2717 for (i = 0; i < SYSC_MAX_CLOCKS; i++) {
2718 if (!IS_ERR_OR_NULL(ddata->clocks[i]))
2719 clk_unprepare(ddata->clocks[i]);
2720 }
2721}
2722
2723/*
2724 * Common sysc register bits found on omap2, also known as type1
2725 */
/* A shift of -ENODEV flags a SYSCONFIG bitfield this IP variant lacks */
static const struct sysc_regbits sysc_regbits_omap2 = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = 12,
	.sidle_shift = 3,
	.clkact_shift = 8,
	.emufree_shift = 5,
	.enwkup_shift = 2,
	.srst_shift = 1,
	.autoidle_shift = 0,
};

static const struct sysc_capabilities sysc_omap2 = {
	.type = TI_SYSC_OMAP2,
	.sysc_mask = SYSC_OMAP2_CLOCKACTIVITY | SYSC_OMAP2_EMUFREE |
		     SYSC_OMAP2_ENAWAKEUP | SYSC_OMAP2_SOFTRESET |
		     SYSC_OMAP2_AUTOIDLE,
	.regbits = &sysc_regbits_omap2,
};

/* All omap2 and 3 timers, and timers 1, 2 & 10 on omap 4 and 5 */
static const struct sysc_capabilities sysc_omap2_timer = {
	.type = TI_SYSC_OMAP2_TIMER,
	.sysc_mask = SYSC_OMAP2_CLOCKACTIVITY | SYSC_OMAP2_EMUFREE |
		     SYSC_OMAP2_ENAWAKEUP | SYSC_OMAP2_SOFTRESET |
		     SYSC_OMAP2_AUTOIDLE,
	.regbits = &sysc_regbits_omap2,
	.mod_quirks = SYSC_QUIRK_USE_CLOCKACT,
};
2754
2755/*
2756 * SHAM2 (SHA1/MD5) sysc found on omap3, a variant of sysc_regbits_omap2
2757 * with different sidle position
2758 */
/* Same layout as type1 but sidle moved to bit 4 */
static const struct sysc_regbits sysc_regbits_omap3_sham = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = -ENODEV,
	.sidle_shift = 4,
	.clkact_shift = -ENODEV,
	.enwkup_shift = -ENODEV,
	.srst_shift = 1,
	.autoidle_shift = 0,
	.emufree_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_omap3_sham = {
	.type = TI_SYSC_OMAP3_SHAM,
	.sysc_mask = SYSC_OMAP2_SOFTRESET | SYSC_OMAP2_AUTOIDLE,
	.regbits = &sysc_regbits_omap3_sham,
};
2775
2776/*
2777 * AES register bits found on omap3 and later, a variant of
2778 * sysc_regbits_omap2 with different sidle position
2779 */
/* Same layout as type1 but sidle moved to bit 6 */
static const struct sysc_regbits sysc_regbits_omap3_aes = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = -ENODEV,
	.sidle_shift = 6,
	.clkact_shift = -ENODEV,
	.enwkup_shift = -ENODEV,
	.srst_shift = 1,
	.autoidle_shift = 0,
	.emufree_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_omap3_aes = {
	.type = TI_SYSC_OMAP3_AES,
	.sysc_mask = SYSC_OMAP2_SOFTRESET | SYSC_OMAP2_AUTOIDLE,
	.regbits = &sysc_regbits_omap3_aes,
};
2796
2797/*
2798 * Common sysc register bits found on omap4, also known as type2
2799 */
/* Type2 layout; srst moved to bit 0, no clockactivity or autoidle bits */
static const struct sysc_regbits sysc_regbits_omap4 = {
	.dmadisable_shift = 16,
	.midle_shift = 4,
	.sidle_shift = 2,
	.clkact_shift = -ENODEV,
	.enwkup_shift = -ENODEV,
	.emufree_shift = 1,
	.srst_shift = 0,
	.autoidle_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_omap4 = {
	.type = TI_SYSC_OMAP4,
	.sysc_mask = SYSC_OMAP4_DMADISABLE | SYSC_OMAP4_FREEEMU |
		     SYSC_OMAP4_SOFTRESET,
	.regbits = &sysc_regbits_omap4,
};

static const struct sysc_capabilities sysc_omap4_timer = {
	.type = TI_SYSC_OMAP4_TIMER,
	.sysc_mask = SYSC_OMAP4_DMADISABLE | SYSC_OMAP4_FREEEMU |
		     SYSC_OMAP4_SOFTRESET,
	.regbits = &sysc_regbits_omap4,
};

/*
 * Common sysc register bits found on omap4, also known as type3
 */
static const struct sysc_regbits sysc_regbits_omap4_simple = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = 2,
	.sidle_shift = 0,
	.clkact_shift = -ENODEV,
	.enwkup_shift = -ENODEV,
	.srst_shift = -ENODEV,
	.emufree_shift = -ENODEV,
	.autoidle_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_omap4_simple = {
	.type = TI_SYSC_OMAP4_SIMPLE,
	.regbits = &sysc_regbits_omap4_simple,
};
2843
2844/*
2845 * SmartReflex sysc found on omap34xx
2846 */
/* SmartReflex on omap34xx only exposes a clockactivity field */
static const struct sysc_regbits sysc_regbits_omap34xx_sr = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = -ENODEV,
	.sidle_shift = -ENODEV,
	.clkact_shift = 20,
	.enwkup_shift = -ENODEV,
	.srst_shift = -ENODEV,
	.emufree_shift = -ENODEV,
	.autoidle_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_34xx_sr = {
	.type = TI_SYSC_OMAP34XX_SR,
	.sysc_mask = SYSC_OMAP2_CLOCKACTIVITY,
	.regbits = &sysc_regbits_omap34xx_sr,
	.mod_quirks = SYSC_QUIRK_USE_CLOCKACT | SYSC_QUIRK_UNCACHED |
		      SYSC_QUIRK_LEGACY_IDLE,
};

/*
 * SmartReflex sysc found on omap36xx and later
 */
static const struct sysc_regbits sysc_regbits_omap36xx_sr = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = -ENODEV,
	.sidle_shift = 24,
	.clkact_shift = -ENODEV,
	.enwkup_shift = 26,
	.srst_shift = -ENODEV,
	.emufree_shift = -ENODEV,
	.autoidle_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_36xx_sr = {
	.type = TI_SYSC_OMAP36XX_SR,
	.sysc_mask = SYSC_OMAP3_SR_ENAWAKEUP,
	.regbits = &sysc_regbits_omap36xx_sr,
	.mod_quirks = SYSC_QUIRK_UNCACHED | SYSC_QUIRK_LEGACY_IDLE,
};

/* omap4 SmartReflex reuses the omap36xx register layout */
static const struct sysc_capabilities sysc_omap4_sr = {
	.type = TI_SYSC_OMAP4_SR,
	.regbits = &sysc_regbits_omap36xx_sr,
	.mod_quirks = SYSC_QUIRK_LEGACY_IDLE,
};
2892
2893/*
2894 * McASP register bits found on omap4 and later
2895 */
/* McASP only exposes a sidle field at bit 0 */
static const struct sysc_regbits sysc_regbits_omap4_mcasp = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = -ENODEV,
	.sidle_shift = 0,
	.clkact_shift = -ENODEV,
	.enwkup_shift = -ENODEV,
	.srst_shift = -ENODEV,
	.emufree_shift = -ENODEV,
	.autoidle_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_omap4_mcasp = {
	.type = TI_SYSC_OMAP4_MCASP,
	.regbits = &sysc_regbits_omap4_mcasp,
	.mod_quirks = SYSC_QUIRK_OPT_CLKS_NEEDED,
};

/*
 * McASP found on dra7 and later
 */
static const struct sysc_capabilities sysc_dra7_mcasp = {
	.type = TI_SYSC_OMAP4_SIMPLE,
	.regbits = &sysc_regbits_omap4_simple,
	.mod_quirks = SYSC_QUIRK_OPT_CLKS_NEEDED,
};
2921
2922/*
2923 * FS USB host found on omap4 and later
2924 */
/* FS USB host exposes only sidle and wakeup enable bits */
static const struct sysc_regbits sysc_regbits_omap4_usb_host_fs = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = -ENODEV,
	.sidle_shift = 24,
	.clkact_shift = -ENODEV,
	.enwkup_shift = 26,
	.srst_shift = -ENODEV,
	.emufree_shift = -ENODEV,
	.autoidle_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_omap4_usb_host_fs = {
	.type = TI_SYSC_OMAP4_USB_HOST_FS,
	.sysc_mask = SYSC_OMAP2_ENAWAKEUP,
	.regbits = &sysc_regbits_omap4_usb_host_fs,
};

/* MCAN on dra7 has softreset plus its own wakeup enable bit */
static const struct sysc_regbits sysc_regbits_dra7_mcan = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = -ENODEV,
	.sidle_shift = -ENODEV,
	.clkact_shift = -ENODEV,
	.enwkup_shift = 4,
	.srst_shift = 0,
	.emufree_shift = -ENODEV,
	.autoidle_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_dra7_mcan = {
	.type = TI_SYSC_DRA7_MCAN,
	.sysc_mask = SYSC_DRA7_MCAN_ENAWAKEUP | SYSC_OMAP4_SOFTRESET,
	.regbits = &sysc_regbits_dra7_mcan,
	.mod_quirks = SYSS_QUIRK_RESETDONE_INVERTED,
};
2959
2960/*
2961 * PRUSS found on some AM33xx, AM437x and AM57xx SoCs
2962 */
/* PRUSS reuses the type3 register layout with its own disable quirk */
static const struct sysc_capabilities sysc_pruss = {
	.type = TI_SYSC_PRUSS,
	.sysc_mask = SYSC_PRUSS_STANDBY_INIT | SYSC_PRUSS_SUB_MWAIT,
	.regbits = &sysc_regbits_omap4_simple,
	.mod_quirks = SYSC_MODULE_QUIRK_PRUSS,
};
2969
/* Build module data for the legacy platform data callbacks, if any */
static int sysc_init_pdata(struct sysc *ddata)
{
	struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
	struct ti_sysc_module_data *mdata;

	if (!pdata)
		return 0;

	mdata = devm_kzalloc(ddata->dev, sizeof(*mdata), GFP_KERNEL);
	if (!mdata)
		return -ENOMEM;

	/* Only legacy "ti,hwmods" mode fills in the module data fields */
	if (ddata->legacy_mode) {
		mdata->name = ddata->legacy_mode;
		mdata->module_pa = ddata->module_pa;
		mdata->module_size = ddata->module_size;
		mdata->offsets = ddata->offsets;
		mdata->nr_offsets = SYSC_MAX_REGS;
		mdata->cap = ddata->cap;
		mdata->cfg = &ddata->cfg;
	}

	ddata->mdata = mdata;

	return 0;
}
2996
2997static int sysc_init_match(struct sysc *ddata)
2998{
2999 const struct sysc_capabilities *cap;
3000
3001 cap = of_device_get_match_data(ddata->dev);
3002 if (!cap)
3003 return -EINVAL;
3004
3005 ddata->cap = cap;
3006 if (ddata->cap)
3007 ddata->cfg.quirks |= ddata->cap->mod_quirks;
3008
3009 return 0;
3010}
3011
/* Deferred work to drop the init-time usage counts once probing settles */
static void ti_sysc_idle(struct work_struct *work)
{
	struct sysc *ddata;

	ddata = container_of(work, struct sysc, idle_work.work);

	/*
	 * One time decrement of clock usage counts if left on from init.
	 * Note that we disable opt clocks unconditionally in this case
	 * as they are enabled unconditionally during init without
	 * considering sysc_opt_clks_needed() at that point.
	 */
	if (ddata->cfg.quirks & (SYSC_QUIRK_NO_IDLE |
				 SYSC_QUIRK_NO_IDLE_ON_INIT)) {
		sysc_disable_main_clocks(ddata);
		sysc_disable_opt_clocks(ddata);
		sysc_clkdm_allow_idle(ddata);
	}

	/* Keep permanent PM runtime usage count for SYSC_QUIRK_NO_IDLE */
	if (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE)
		return;

	/*
	 * Decrement PM runtime usage count for SYSC_QUIRK_NO_IDLE_ON_INIT
	 * and SYSC_QUIRK_NO_RESET_ON_INIT
	 */
	if (pm_runtime_active(ddata->dev))
		pm_runtime_put_sync(ddata->dev);
}
3042
3043/*
3044 * SoC model and features detection. Only needed for SoCs that need
3045 * special handling for quirks, no need to list others.
3046 */
static const struct soc_device_attribute sysc_soc_match[] = {
	SOC_FLAG("OMAP242*", SOC_2420),
	SOC_FLAG("OMAP243*", SOC_2430),
	SOC_FLAG("AM35*", SOC_AM35),
	SOC_FLAG("OMAP3[45]*", SOC_3430),
	SOC_FLAG("OMAP3[67]*", SOC_3630),
	SOC_FLAG("OMAP443*", SOC_4430),
	SOC_FLAG("OMAP446*", SOC_4460),
	SOC_FLAG("OMAP447*", SOC_4470),
	SOC_FLAG("OMAP54*", SOC_5430),
	/* keep the specific AM433 entry before the AM43* glob below */
	SOC_FLAG("AM433", SOC_AM3),
	SOC_FLAG("AM43*", SOC_AM4),
	SOC_FLAG("DRA7*", SOC_DRA7),

	{ /* sentinel */ }
};
3063
3064/*
3065 * List of SoCs variants with disabled features. By default we assume all
3066 * devices in the device tree are available so no need to list those SoCs.
3067 */
static const struct soc_device_attribute sysc_soc_feat_match[] = {
	/* OMAP3430/3530 and AM3517 variants with some accelerators disabled */
	SOC_FLAG("AM3505", DIS_SGX),
	SOC_FLAG("OMAP3525", DIS_SGX),
	SOC_FLAG("OMAP3515", DIS_IVA | DIS_SGX),
	SOC_FLAG("OMAP3503", DIS_ISP | DIS_IVA | DIS_SGX),

	/* OMAP3630/DM3730 variants with some accelerators disabled */
	SOC_FLAG("AM3703", DIS_IVA | DIS_SGX),
	SOC_FLAG("DM3725", DIS_SGX),
	SOC_FLAG("OMAP3611", DIS_ISP | DIS_IVA | DIS_SGX),
	SOC_FLAG("OMAP3615/AM3715", DIS_IVA),
	SOC_FLAG("OMAP3621", DIS_ISP),

	{ /* sentinel */ }
};
3084
3085static int sysc_add_disabled(unsigned long base)
3086{
3087 struct sysc_address *disabled_module;
3088
3089 disabled_module = kzalloc(sizeof(*disabled_module), GFP_KERNEL);
3090 if (!disabled_module)
3091 return -ENOMEM;
3092
3093 disabled_module->base = base;
3094
3095 mutex_lock(&sysc_soc->list_lock);
3096 list_add(&disabled_module->node, &sysc_soc->disabled_modules);
3097 mutex_unlock(&sysc_soc->list_lock);
3098
3099 return 0;
3100}
3101
3102/*
3103 * One time init to detect the booted SoC, disable unavailable features
3104 * and initialize list for optional cpu_pm notifier.
3105 *
3106 * Note that we initialize static data shared across all ti-sysc instances
3107 * so ddata is only used for SoC type. This can be called from module_init
3108 * once we no longer need to rely on platform data.
3109 */
3110static int sysc_init_static_data(struct sysc *ddata)
3111{
3112 const struct soc_device_attribute *match;
3113 struct ti_sysc_platform_data *pdata;
3114 unsigned long features = 0;
3115 struct device_node *np;
3116
3117 if (sysc_soc)
3118 return 0;
3119
3120 sysc_soc = kzalloc(sizeof(*sysc_soc), GFP_KERNEL);
3121 if (!sysc_soc)
3122 return -ENOMEM;
3123
3124 mutex_init(&sysc_soc->list_lock);
3125 INIT_LIST_HEAD(&sysc_soc->disabled_modules);
3126 INIT_LIST_HEAD(&sysc_soc->restored_modules);
3127 sysc_soc->general_purpose = true;
3128
3129 pdata = dev_get_platdata(ddata->dev);
3130 if (pdata && pdata->soc_type_gp)
3131 sysc_soc->general_purpose = pdata->soc_type_gp();
3132
3133 match = soc_device_match(sysc_soc_match);
3134 if (match && match->data)
3135 sysc_soc->soc = (enum sysc_soc)(uintptr_t)match->data;
3136
3137 /*
3138 * Check and warn about possible old incomplete dtb. We now want to see
3139 * simple-pm-bus instead of simple-bus in the dtb for genpd using SoCs.
3140 */
3141 switch (sysc_soc->soc) {
3142 case SOC_AM3:
3143 case SOC_AM4:
3144 case SOC_4430 ... SOC_4470:
3145 case SOC_5430:
3146 case SOC_DRA7:
3147 np = of_find_node_by_path("/ocp");
3148 WARN_ONCE(np && of_device_is_compatible(np, "simple-bus"),
3149 "ti-sysc: Incomplete old dtb, please update\n");
3150 break;
3151 default:
3152 break;
3153 }
3154
3155 /* Ignore devices that are not available on HS and EMU SoCs */
3156 if (!sysc_soc->general_purpose) {
3157 switch (sysc_soc->soc) {
3158 case SOC_3430 ... SOC_3630:
3159 sysc_add_disabled(0x48304000); /* timer12 */
3160 break;
3161 case SOC_AM3:
3162 sysc_add_disabled(0x48310000); /* rng */
3163 break;
3164 default:
3165 break;
3166 }
3167 }
3168
3169 match = soc_device_match(sysc_soc_feat_match);
3170 if (!match)
3171 return 0;
3172
3173 if (match->data)
3174 features = (unsigned long)match->data;
3175
3176 /*
3177 * Add disabled devices to the list based on the module base.
3178 * Note that this must be done before we attempt to access the
3179 * device and have module revision checks working.
3180 */
3181 if (features & DIS_ISP)
3182 sysc_add_disabled(0x480bd400);
3183 if (features & DIS_IVA)
3184 sysc_add_disabled(0x5d000000);
3185 if (features & DIS_SGX)
3186 sysc_add_disabled(0x50000000);
3187
3188 return 0;
3189}
3190
3191static void sysc_cleanup_static_data(void)
3192{
3193 struct sysc_module *restored_module;
3194 struct sysc_address *disabled_module;
3195 struct list_head *pos, *tmp;
3196
3197 if (!sysc_soc)
3198 return;
3199
3200 if (sysc_soc->nb.notifier_call)
3201 cpu_pm_unregister_notifier(&sysc_soc->nb);
3202
3203 mutex_lock(&sysc_soc->list_lock);
3204 list_for_each_safe(pos, tmp, &sysc_soc->restored_modules) {
3205 restored_module = list_entry(pos, struct sysc_module, node);
3206 list_del(pos);
3207 kfree(restored_module);
3208 }
3209 list_for_each_safe(pos, tmp, &sysc_soc->disabled_modules) {
3210 disabled_module = list_entry(pos, struct sysc_address, node);
3211 list_del(pos);
3212 kfree(disabled_module);
3213 }
3214 mutex_unlock(&sysc_soc->list_lock);
3215}
3216
3217static int sysc_check_disabled_devices(struct sysc *ddata)
3218{
3219 struct sysc_address *disabled_module;
3220 int error = 0;
3221
3222 mutex_lock(&sysc_soc->list_lock);
3223 list_for_each_entry(disabled_module, &sysc_soc->disabled_modules, node) {
3224 if (ddata->module_pa == disabled_module->base) {
3225 dev_dbg(ddata->dev, "module disabled for this SoC\n");
3226 error = -ENODEV;
3227 break;
3228 }
3229 }
3230 mutex_unlock(&sysc_soc->list_lock);
3231
3232 return error;
3233}
3234
3235/*
3236 * Ignore timers tagged with no-reset and no-idle. These are likely in use,
3237 * for example by drivers/clocksource/timer-ti-dm-systimer.c. If more checks
3238 * are needed, we could also look at the timer register configuration.
3239 */
static int sysc_check_active_timer(struct sysc *ddata)
{
	int error;

	if (ddata->cap->type != TI_SYSC_OMAP2_TIMER &&
	    ddata->cap->type != TI_SYSC_OMAP4_TIMER)
		return 0;

	/*
	 * Quirk for omap3 beagleboard revision A to B4 to use gpt12.
	 * Revision C and later are fixed with commit 23885389dbbb ("ARM:
	 * dts: Fix timer regression for beagleboard revision c"). This all
	 * can be dropped if we stop supporting old beagleboard revisions
	 * A to B4 at some point.
	 */
	if (sysc_soc->soc == SOC_3430 || sysc_soc->soc == SOC_AM35)
		error = -ENXIO;	/* caller marks the module reserved */
	else
		error = -EBUSY;	/* caller fails the probe */

	/* Only timers tagged both no-reset and no-idle are considered in use */
	if ((ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) &&
	    (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE))
		return error;

	return 0;
}
3266
/* Match table used when populating child nodes of a sysc module */
static const struct of_device_id sysc_match_table[] = {
	{ .compatible = "simple-bus", },
	{ /* sentinel */ },
};
3271
/**
 * sysc_probe - probe an interconnect target module
 * @pdev: platform device for the interconnect target module
 *
 * Parses the devicetree data and quirks for the module, maps and checks
 * the module registers, sets up clocks and resets, enables the module
 * with PM runtime, and populates the child devices. Returns 0 on success
 * or a negative error code on failure.
 */
static int sysc_probe(struct platform_device *pdev)
{
	struct ti_sysc_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct sysc *ddata;
	int error;

	ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
	if (!ddata)
		return -ENOMEM;

	/* Mark the register offsets invalid until discovered */
	ddata->offsets[SYSC_REVISION] = -ENODEV;
	ddata->offsets[SYSC_SYSCONFIG] = -ENODEV;
	ddata->offsets[SYSC_SYSSTATUS] = -ENODEV;
	ddata->dev = &pdev->dev;
	platform_set_drvdata(pdev, ddata);

	/* Init shared static data, including the disabled modules list */
	error = sysc_init_static_data(ddata);
	if (error)
		return error;

	error = sysc_init_match(ddata);
	if (error)
		return error;

	error = sysc_init_dts_quirks(ddata);
	if (error)
		return error;

	error = sysc_map_and_check_registers(ddata);
	if (error)
		return error;

	error = sysc_init_sysc_mask(ddata);
	if (error)
		return error;

	error = sysc_init_idlemodes(ddata);
	if (error)
		return error;

	error = sysc_init_syss_mask(ddata);
	if (error)
		return error;

	error = sysc_init_pdata(ddata);
	if (error)
		return error;

	sysc_init_early_quirks(ddata);

	/* Bail out early for modules disabled on this SoC variant */
	error = sysc_check_disabled_devices(ddata);
	if (error)
		return error;

	/*
	 * -ENXIO means a timer possibly in use as the system timer; keep
	 * probing but mark the module reserved so its children are not
	 * populated below. Any other error aborts the probe.
	 */
	error = sysc_check_active_timer(ddata);
	if (error == -ENXIO)
		ddata->reserved = true;
	else if (error)
		return error;

	error = sysc_get_clocks(ddata);
	if (error)
		return error;

	error = sysc_init_resets(ddata);
	if (error)
		goto unprepare;

	error = sysc_init_module(ddata);
	if (error)
		goto unprepare;

	pm_runtime_enable(ddata->dev);
	error = pm_runtime_resume_and_get(ddata->dev);
	if (error < 0) {
		pm_runtime_disable(ddata->dev);
		goto unprepare;
	}

	/* Balance use counts as PM runtime should have enabled these all */
	if (!(ddata->cfg.quirks &
	      (SYSC_QUIRK_NO_IDLE | SYSC_QUIRK_NO_IDLE_ON_INIT))) {
		sysc_disable_main_clocks(ddata);
		sysc_disable_opt_clocks(ddata);
		sysc_clkdm_allow_idle(ddata);
	}

	if (!(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT))
		reset_control_assert(ddata->rsts);

	sysc_show_registers(ddata);

	ddata->dev->type = &sysc_device_type;

	/* Skip populating children for modules reserved for system use */
	if (!ddata->reserved) {
		error = of_platform_populate(ddata->dev->of_node,
					     sysc_match_table,
					     pdata ? pdata->auxdata : NULL,
					     ddata->dev);
		if (error)
			goto err;
	}

	INIT_DELAYED_WORK(&ddata->idle_work, ti_sysc_idle);

	/* At least earlycon won't survive without deferred idle */
	if (ddata->cfg.quirks & (SYSC_QUIRK_NO_IDLE |
				 SYSC_QUIRK_NO_IDLE_ON_INIT |
				 SYSC_QUIRK_NO_RESET_ON_INIT)) {
		schedule_delayed_work(&ddata->idle_work, 3000);
	} else {
		pm_runtime_put(&pdev->dev);
	}

	/* Track modules that need reinit after losing context, see PM ops */
	if (ddata->cfg.quirks & SYSC_QUIRK_REINIT_ON_CTX_LOST)
		sysc_add_restored(ddata);

	return 0;

err:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
unprepare:
	sysc_unprepare(ddata);

	return error;
}
3399
/**
 * sysc_remove - remove an interconnect target module
 * @pdev: platform device for the interconnect target module
 *
 * Reverses sysc_probe(): flushes any pending deferred idle work, resumes
 * the module so the children can be removed, then idles the module and
 * asserts its reset before unpreparing the clocks.
 */
static void sysc_remove(struct platform_device *pdev)
{
	struct sysc *ddata = platform_get_drvdata(pdev);
	int error;

	/* Device can still be enabled, see deferred idle quirk in probe */
	if (cancel_delayed_work_sync(&ddata->idle_work))
		ti_sysc_idle(&ddata->idle_work.work);

	/* Resume the module before removing children; on failure skip to cleanup */
	error = pm_runtime_resume_and_get(ddata->dev);
	if (error < 0) {
		pm_runtime_disable(ddata->dev);
		goto unprepare;
	}

	of_platform_depopulate(&pdev->dev);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	/* Assert the reset only if it is currently deasserted */
	if (!reset_control_status(ddata->rsts))
		reset_control_assert(ddata->rsts);

unprepare:
	sysc_unprepare(ddata);
}
3426
/* Supported interconnect target module types with their capability data */
static const struct of_device_id sysc_match[] = {
	{ .compatible = "ti,sysc-omap2", .data = &sysc_omap2, },
	{ .compatible = "ti,sysc-omap2-timer", .data = &sysc_omap2_timer, },
	{ .compatible = "ti,sysc-omap4", .data = &sysc_omap4, },
	{ .compatible = "ti,sysc-omap4-timer", .data = &sysc_omap4_timer, },
	{ .compatible = "ti,sysc-omap4-simple", .data = &sysc_omap4_simple, },
	{ .compatible = "ti,sysc-omap3430-sr", .data = &sysc_34xx_sr, },
	{ .compatible = "ti,sysc-omap3630-sr", .data = &sysc_36xx_sr, },
	{ .compatible = "ti,sysc-omap4-sr", .data = &sysc_omap4_sr, },
	{ .compatible = "ti,sysc-omap3-sham", .data = &sysc_omap3_sham, },
	{ .compatible = "ti,sysc-omap-aes", .data = &sysc_omap3_aes, },
	{ .compatible = "ti,sysc-mcasp", .data = &sysc_omap4_mcasp, },
	{ .compatible = "ti,sysc-dra7-mcasp", .data = &sysc_dra7_mcasp, },
	{ .compatible = "ti,sysc-usb-host-fs",
	  .data = &sysc_omap4_usb_host_fs, },
	{ .compatible = "ti,sysc-dra7-mcan", .data = &sysc_dra7_mcan, },
	{ .compatible = "ti,sysc-pruss", .data = &sysc_pruss, },
	{ },
};
MODULE_DEVICE_TABLE(of, sysc_match);
3447
/* Platform driver for the interconnect target module wrapper IP */
static struct platform_driver sysc_driver = {
	.probe = sysc_probe,
	.remove_new = sysc_remove,
	.driver = {
		.name = "ti-sysc",
		.of_match_table = sysc_match,
		.pm = &sysc_pm_ops,
	},
};
3457
3458static int __init sysc_init(void)
3459{
3460 bus_register_notifier(&platform_bus_type, &sysc_nb);
3461
3462 return platform_driver_register(&sysc_driver);
3463}
3464module_init(sysc_init);
3465
/*
 * Module exit: stop receiving platform bus notifications, unregister the
 * driver, then free the shared static SoC data.
 */
static void __exit sysc_exit(void)
{
	bus_unregister_notifier(&platform_bus_type, &sysc_nb);
	platform_driver_unregister(&sysc_driver);
	sysc_cleanup_static_data();
}
module_exit(sysc_exit);
3473
3474MODULE_DESCRIPTION("TI sysc interconnect target driver");
3475MODULE_LICENSE("GPL v2");