// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP L3 Interconnect error handling driver
 *
 * Copyright (C) 2011-2015 Texas Instruments Incorporated - http://www.ti.com/
 *	Santosh Shilimkar <santosh.shilimkar@ti.com>
 *	Sricharan <r.sricharan@ti.com>
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "omap_l3_noc.h"

/**
 * l3_handle_target() - Handle target-specific parsing and reporting
 * @l3:		pointer to l3 struct
 * @base:	base address of clkdm
 * @flag_mux:	flagmux corresponding to the event
 * @err_src:	error source index of the slave (target)
 *
 * This does the second part of the error interrupt handling:
 *	3) Parse the slave information
 *	4) Print the logged information.
 *	5) Add dump stack to provide kernel trace.
 *	6) Clear the source if known.
 *
 * This handles two types of errors:
 *	1) Custom errors in L3:
 *		Targets like DMM/FW/EMIF generate an SRESP=ERR error
 *	2) Standard L3 error:
 *		- Unsupported CMD.
 *			L3 tries to access target while it is idle
 *		- OCP disconnect.
 *		- Address hole error:
 *			If DSS/ISS/FDIF/USBHOSTFS access a target where they
 *			do not have connectivity, the error is logged in
 *			their default target which is DMM2.
 *
 * On High Secure devices, firewall errors are possible and those
 * can be trapped as well. But the trapping is implemented as part
 * of the secure software and hence need not be implemented here.
 */
static int l3_handle_target(struct omap_l3 *l3, void __iomem *base,
			    struct l3_flagmux_data *flag_mux, int err_src)
{
	int k;
	u32 std_err_main, clear, masterid;
	u8 op_code, m_req_info;
	void __iomem *l3_targ_base;
	void __iomem *l3_targ_stderr, *l3_targ_slvofslsb, *l3_targ_mstaddr;
	void __iomem *l3_targ_hdr, *l3_targ_info;
	struct l3_target_data *l3_targ_inst;
	struct l3_masters_data *master;
	char *target_name, *master_name = "UN IDENTIFIED";
	char *err_description;
	char err_string[30] = { 0 };
	char info_string[60] = { 0 };

	/* We DO NOT expect err_src to go out of bounds */
	BUG_ON(err_src > MAX_CLKDM_TARGETS);

	if (err_src < flag_mux->num_targ_data) {
		l3_targ_inst = &flag_mux->l3_targ[err_src];
		target_name = l3_targ_inst->name;
		l3_targ_base = base + l3_targ_inst->offset;
	} else {
		target_name = L3_TARGET_NOT_SUPPORTED;
	}

	if (target_name == L3_TARGET_NOT_SUPPORTED)
		return -ENODEV;

	/* Read the stderrlog_main_source from clk domain */
	l3_targ_stderr = l3_targ_base + L3_TARG_STDERRLOG_MAIN;
	l3_targ_slvofslsb = l3_targ_base + L3_TARG_STDERRLOG_SLVOFSLSB;

	std_err_main = readl_relaxed(l3_targ_stderr);

	switch (std_err_main & CUSTOM_ERROR) {
	case STANDARD_ERROR:
		err_description = "Standard";
		snprintf(err_string, sizeof(err_string),
			 ": At Address: 0x%08X ",
			 readl_relaxed(l3_targ_slvofslsb));

		l3_targ_mstaddr = l3_targ_base + L3_TARG_STDERRLOG_MSTADDR;
		l3_targ_hdr = l3_targ_base + L3_TARG_STDERRLOG_HDR;
		l3_targ_info = l3_targ_base + L3_TARG_STDERRLOG_INFO;
		break;

	case CUSTOM_ERROR:
		err_description = "Custom";

		l3_targ_mstaddr = l3_targ_base +
				  L3_TARG_STDERRLOG_CINFO_MSTADDR;
		l3_targ_hdr = l3_targ_base + L3_TARG_STDERRLOG_CINFO_OPCODE;
		l3_targ_info = l3_targ_base + L3_TARG_STDERRLOG_CINFO_INFO;
		break;

	default:
		/* Nothing to be handled here as of now */
		return 0;
	}

	/* STDERRLOG_MSTADDR stores the NTTP master address. */
	masterid = (readl_relaxed(l3_targ_mstaddr) &
		    l3->mst_addr_mask) >> __ffs(l3->mst_addr_mask);

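	/* Translate the logged master ID into a readable name, if known */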
	for (k = 0, master = l3->l3_masters; k < l3->num_masters;
	     k++, master++) {
		if (masterid == master->id) {
			master_name = master->name;
			break;
		}
	}

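	/*
	 * The low 3 bits of the logged header give the transaction type,
	 * used below as the index into l3_transaction_type[].
	 */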
	op_code = readl_relaxed(l3_targ_hdr) & 0x7;

	m_req_info = readl_relaxed(l3_targ_info) & 0xF;
	snprintf(info_string, sizeof(info_string),
		 ": %s in %s mode during %s access",
		 (m_req_info & BIT(0)) ? "Opcode Fetch" : "Data Access",
		 (m_req_info & BIT(1)) ? "Supervisor" : "User",
		 (m_req_info & BIT(3)) ? "Debug" : "Functional");

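	/* WARN() both reports the decoded error and dumps a kernel backtrace */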
	WARN(true,
	     "%s:L3 %s Error: MASTER %s TARGET %s (%s)%s%s\n",
	     dev_name(l3->dev),
	     err_description,
	     master_name, target_name,
	     l3_transaction_type[op_code],
	     err_string, info_string);

	/* Clear the std error log */
	clear = std_err_main | CLEAR_STDERR_LOG;
	writel_relaxed(clear, l3_targ_stderr);

	return 0;
}

/**
 * l3_interrupt_handler() - interrupt handler for l3 events
 * @irq:	irq number
 * @_l3:	pointer to l3 structure
 *
 * Interrupt Handler for L3 error detection.
 *	1) Identify the L3 clockdomain partition to which the error belongs.
 *	2) Identify the slave where the error information is logged
 *	... handle the slave event ...
 *	7) If the slave is unknown, mask out the slave.
 */
static irqreturn_t l3_interrupt_handler(int irq, void *_l3)
{
	struct omap_l3 *l3 = _l3;
	int inttype, i, ret;
	int err_src = 0;
	u32 err_reg, mask_val;
	void __iomem *base, *mask_reg;
	struct l3_flagmux_data *flag_mux;

	/* Get the Type of interrupt */
	inttype = irq == l3->app_irq ? L3_APPLICATION_ERROR : L3_DEBUG_ERROR;

	for (i = 0; i < l3->num_modules; i++) {
		/*
		 * Read the regerr register of the clock domain
		 * to determine the source
		 */
		base = l3->l3_base[i];
		flag_mux = l3->l3_flagmux[i];
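		/*
		 * The application and debug instances of each flag-mux
		 * register sit 8 bytes apart, hence the (inttype << 3) offset.
		 */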
		err_reg = readl_relaxed(base + flag_mux->offset +
					L3_FLAGMUX_REGERR0 + (inttype << 3));

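		/* Ignore sources that were previously marked as unclearable */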
		err_reg &= ~(inttype ? flag_mux->mask_app_bits :
			     flag_mux->mask_dbg_bits);

		/* Get the corresponding error and analyse */
		if (err_reg) {
			/* Identify the source from control status register */
			err_src = __ffs(err_reg);

			ret = l3_handle_target(l3, base, flag_mux, err_src);

			/*
			 * Certain platforms may have "undocumented" status
			 * pending on boot. So don't generate a severe warning
			 * here. Just mask it off to prevent the error from
			 * recurring and locking up the system.
			 */
			if (ret) {
				dev_err(l3->dev,
					"L3 %s error: target %d mod:%d %s\n",
					inttype ? "debug" : "application",
					err_src, i, "(unclearable)");

				mask_reg = base + flag_mux->offset +
					   L3_FLAGMUX_MASK0 + (inttype << 3);
				mask_val = readl_relaxed(mask_reg);
				mask_val &= ~(1 << err_src);
				writel_relaxed(mask_val, mask_reg);

				/* Mark these bits as to be ignored */
				if (inttype)
					flag_mux->mask_app_bits |= 1 << err_src;
				else
					flag_mux->mask_dbg_bits |= 1 << err_src;
			}

			/* Error found so break the for loop */
			return IRQ_HANDLED;
		}
	}

	dev_err(l3->dev, "L3 %s IRQ not handled!!\n",
		inttype ? "debug" : "application");

	return IRQ_NONE;
}

static const struct of_device_id l3_noc_match[] = {
	{.compatible = "ti,omap4-l3-noc", .data = &omap4_l3_data},
	{.compatible = "ti,omap5-l3-noc", .data = &omap5_l3_data},
	{.compatible = "ti,dra7-l3-noc", .data = &dra_l3_data},
	{.compatible = "ti,am4372-l3-noc", .data = &am4372_l3_data},
	{},
};
MODULE_DEVICE_TABLE(of, l3_noc_match);

static int omap_l3_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id;
	static struct omap_l3 *l3;
	int ret, i, res_idx;

	of_id = of_match_device(l3_noc_match, &pdev->dev);
	if (!of_id) {
		dev_err(&pdev->dev, "OF data missing\n");
		return -EINVAL;
	}

	l3 = devm_kzalloc(&pdev->dev, sizeof(*l3), GFP_KERNEL);
	if (!l3)
		return -ENOMEM;

	memcpy(l3, of_id->data, sizeof(*l3));
	l3->dev = &pdev->dev;
	platform_set_drvdata(pdev, l3);

	/* Get mem resources */
	for (i = 0, res_idx = 0; i < l3->num_modules; i++) {
		struct resource *res;

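		/*
		 * A submodule reuses its parent module's mapping and does not
		 * consume a memory resource, so res_idx is not advanced here.
		 */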
		if (l3->l3_base[i] == L3_BASE_IS_SUBMODULE) {
			/* First entry cannot be submodule */
			BUG_ON(i == 0);
			l3->l3_base[i] = l3->l3_base[i - 1];
			continue;
		}
		res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx);
		l3->l3_base[i] = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(l3->l3_base[i])) {
			dev_err(l3->dev, "ioremap %d failed\n", i);
			return PTR_ERR(l3->l3_base[i]);
		}
		res_idx++;
	}

	/*
	 * Set up the interrupt handlers
	 */
	l3->debug_irq = platform_get_irq(pdev, 0);
	ret = devm_request_irq(l3->dev, l3->debug_irq, l3_interrupt_handler,
			       IRQF_NO_THREAD, "l3-dbg-irq", l3);
	if (ret) {
		dev_err(l3->dev, "request_irq failed for %d\n",
			l3->debug_irq);
		return ret;
	}

	l3->app_irq = platform_get_irq(pdev, 1);
	ret = devm_request_irq(l3->dev, l3->app_irq, l3_interrupt_handler,
			       IRQF_NO_THREAD, "l3-app-irq", l3);
	if (ret)
		dev_err(l3->dev, "request_irq failed for %d\n", l3->app_irq);

	return ret;
}

#ifdef CONFIG_PM_SLEEP

/**
 * l3_resume_noirq() - resume function for l3_noc
 * @dev:	pointer to l3_noc device structure
 *
 * We only have a resume handler, since the delta register
 * configuration has already been maintained as part of
 * configuring the system.
 */
static int l3_resume_noirq(struct device *dev)
{
	struct omap_l3 *l3 = dev_get_drvdata(dev);
	int i;
	struct l3_flagmux_data *flag_mux;
	void __iomem *base, *mask_regx = NULL;
	u32 mask_val;

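	/*
	 * Re-mask the sources that were disabled at runtime (as recorded in
	 * mask_app_bits/mask_dbg_bits), in case the flag-mux MASK registers
	 * lost their contents while the domain was off.
	 */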
	for (i = 0; i < l3->num_modules; i++) {
		base = l3->l3_base[i];
		flag_mux = l3->l3_flagmux[i];
		if (!flag_mux->mask_app_bits && !flag_mux->mask_dbg_bits)
			continue;

		mask_regx = base + flag_mux->offset + L3_FLAGMUX_MASK0 +
			    (L3_APPLICATION_ERROR << 3);
		mask_val = readl_relaxed(mask_regx);
		mask_val &= ~(flag_mux->mask_app_bits);

		writel_relaxed(mask_val, mask_regx);
		mask_regx = base + flag_mux->offset + L3_FLAGMUX_MASK0 +
			    (L3_DEBUG_ERROR << 3);
		mask_val = readl_relaxed(mask_regx);
		mask_val &= ~(flag_mux->mask_dbg_bits);

		writel_relaxed(mask_val, mask_regx);
	}

	/* Dummy read to force OCP barrier */
	if (mask_regx)
		(void)readl(mask_regx);

	return 0;
}

static const struct dev_pm_ops l3_dev_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, l3_resume_noirq)
};

#define L3_DEV_PM_OPS	(&l3_dev_pm_ops)
#else
#define L3_DEV_PM_OPS	NULL
#endif

static struct platform_driver omap_l3_driver = {
	.probe		= omap_l3_probe,
	.driver		= {
		.name		= "omap_l3_noc",
		.pm		= L3_DEV_PM_OPS,
		.of_match_table	= of_match_ptr(l3_noc_match),
	},
};

static int __init omap_l3_init(void)
{
	return platform_driver_register(&omap_l3_driver);
}
postcore_initcall_sync(omap_l3_init);

static void __exit omap_l3_exit(void)
{
	platform_driver_unregister(&omap_l3_driver);
}
module_exit(omap_l3_exit);

MODULE_AUTHOR("Santosh Shilimkar");
MODULE_AUTHOR("Sricharan R");
MODULE_DESCRIPTION("OMAP L3 Interconnect error handling driver");
MODULE_LICENSE("GPL v2");