// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
 * Authors: Ludovic Barre <ludovic.barre@st.com> for STMicroelectronics.
 *          Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>

#define IPCC_XCR		0x000
#define XCR_RXOIE		BIT(0)
#define XCR_TXOIE		BIT(16)

#define IPCC_XMR		0x004
#define IPCC_XSCR		0x008
#define IPCC_XTOYSR		0x00c

#define IPCC_PROC_OFFST		0x010

#define IPCC_HWCFGR		0x3f0
#define IPCFGR_CHAN_MASK	GENMASK(7, 0)

#define IPCC_VER		0x3f4
#define VER_MINREV_MASK		GENMASK(3, 0)
#define VER_MAJREV_MASK		GENMASK(7, 4)

#define RX_BIT_MASK		GENMASK(15, 0)
#define RX_BIT_CHAN(chan)	BIT(chan)
#define TX_BIT_SHIFT		16
#define TX_BIT_MASK		GENMASK(31, 16)
#define TX_BIT_CHAN(chan)	BIT(TX_BIT_SHIFT + (chan))

#define STM32_MAX_PROCS		2
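
/*
 * Layout as used by this driver: each of the two processors owns one bank
 * of XCR/XMR/XSCR/XTOYSR registers, banks being IPCC_PROC_OFFST apart.
 * In XCR, XMR and XSCR the RX-side bits sit in the low half-word and the
 * TX-side bits in the high half-word (hence the RX_BIT_*/TX_BIT_* helpers),
 * while the per-channel flags in XTOYSR occupy bits 15:0.
 */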

enum {
	IPCC_IRQ_RX,
	IPCC_IRQ_TX,
	IPCC_IRQ_NUM,
};

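/**
 * struct stm32_ipcc - STM32 IPCC mailbox controller context
 * @controller: mailbox framework controller
 * @reg_base: base address of the IPCC register block
 * @reg_proc: register bank of the local processor
 *            (@reg_base + @proc_id * IPCC_PROC_OFFST)
 * @clk: IPCC bus clock
 * @lock: protects read-modify-write accesses to the IPCC registers
 * @irqs: rx and tx interrupt lines
 * @proc_id: index of the local processor (0 or 1)
 * @n_chans: number of channels reported by IPCC_HWCFGR
 * @xcr: XCR contents saved across system suspend
 * @xmr: XMR contents saved across system suspend
 */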
struct stm32_ipcc {
	struct mbox_controller controller;
	void __iomem *reg_base;
	void __iomem *reg_proc;
	struct clk *clk;
	spinlock_t lock; /* protect access to IPCC registers */
	int irqs[IPCC_IRQ_NUM];
	u32 proc_id;
	u32 n_chans;
	u32 xcr;
	u32 xmr;
};

static inline void stm32_ipcc_set_bits(spinlock_t *lock, void __iomem *reg,
					u32 mask)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	writel_relaxed(readl_relaxed(reg) | mask, reg);
	spin_unlock_irqrestore(lock, flags);
}

static inline void stm32_ipcc_clr_bits(spinlock_t *lock, void __iomem *reg,
					u32 mask)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	writel_relaxed(readl_relaxed(reg) & ~mask, reg);
	spin_unlock_irqrestore(lock, flags);
}

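/*
 * RX path: the remote processor raises the "channel occupied" flag in its
 * own XTOYSR, so the status of channels pending towards us is read from the
 * other bank (+/-IPCC_PROC_OFFST). The notification carries no payload
 * (NULL is passed to the client); the channel is then acknowledged by
 * writing RX_BIT_CHAN() to the local XSCR.
 */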
static irqreturn_t stm32_ipcc_rx_irq(int irq, void *data)
{
	struct stm32_ipcc *ipcc = data;
	struct device *dev = ipcc->controller.dev;
	u32 status, mr, tosr, chan;
	irqreturn_t ret = IRQ_NONE;
	int proc_offset;

	/* read 'channel occupied' status from other proc */
	proc_offset = ipcc->proc_id ? -IPCC_PROC_OFFST : IPCC_PROC_OFFST;
	tosr = readl_relaxed(ipcc->reg_proc + proc_offset + IPCC_XTOYSR);
	mr = readl_relaxed(ipcc->reg_proc + IPCC_XMR);

	/* search for unmasked 'channel occupied' */
	status = tosr & FIELD_GET(RX_BIT_MASK, ~mr);

	for (chan = 0; chan < ipcc->n_chans; chan++) {
		if (!(status & (1 << chan)))
			continue;

		dev_dbg(dev, "%s: chan:%d rx\n", __func__, chan);

		mbox_chan_received_data(&ipcc->controller.chans[chan], NULL);

		stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XSCR,
				    RX_BIT_CHAN(chan));

		ret = IRQ_HANDLED;
	}

	return ret;
}

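/*
 * TX path: a channel is reported free again once its occupied flag drops in
 * the local XTOYSR (the remote side has consumed the message). The handler
 * re-masks the per-channel 'free' interrupt and reports completion to the
 * mailbox core, which runs this controller in txdone_irq mode (see probe).
 */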
static irqreturn_t stm32_ipcc_tx_irq(int irq, void *data)
{
	struct stm32_ipcc *ipcc = data;
	struct device *dev = ipcc->controller.dev;
	u32 status, mr, tosr, chan;
	irqreturn_t ret = IRQ_NONE;

	tosr = readl_relaxed(ipcc->reg_proc + IPCC_XTOYSR);
	mr = readl_relaxed(ipcc->reg_proc + IPCC_XMR);

	/* search for unmasked 'channel free' */
	status = ~tosr & FIELD_GET(TX_BIT_MASK, ~mr);

	for (chan = 0; chan < ipcc->n_chans; chan++) {
		if (!(status & (1 << chan)))
			continue;

		dev_dbg(dev, "%s: chan:%d tx\n", __func__, chan);

		/* mask 'tx channel free' interrupt */
		stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XMR,
				    TX_BIT_CHAN(chan));

		mbox_chan_txdone(&ipcc->controller.chans[chan], 0);

		ret = IRQ_HANDLED;
	}

	return ret;
}

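/*
 * Sending is signal only: send_data() marks the channel occupied and
 * unmasks the matching 'tx channel free' interrupt. The mailbox core will
 * not submit the next message on this channel until stm32_ipcc_tx_irq()
 * has called mbox_chan_txdone().
 */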
static int stm32_ipcc_send_data(struct mbox_chan *link, void *data)
{
	unsigned long chan = (unsigned long)link->con_priv;
	struct stm32_ipcc *ipcc = container_of(link->mbox, struct stm32_ipcc,
					       controller);

	dev_dbg(ipcc->controller.dev, "%s: chan:%lu\n", __func__, chan);

	/* set channel n occupied */
	stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XSCR,
			    TX_BIT_CHAN(chan));

	/* unmask 'tx channel free' interrupt */
	stm32_ipcc_clr_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XMR,
			    TX_BIT_CHAN(chan));

	return 0;
}

static int stm32_ipcc_startup(struct mbox_chan *link)
{
	unsigned long chan = (unsigned long)link->con_priv;
	struct stm32_ipcc *ipcc = container_of(link->mbox, struct stm32_ipcc,
					       controller);
	int ret;

	ret = clk_prepare_enable(ipcc->clk);
	if (ret) {
		dev_err(ipcc->controller.dev, "can not enable the clock\n");
		return ret;
	}

	/* unmask 'rx channel occupied' interrupt */
	stm32_ipcc_clr_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XMR,
			    RX_BIT_CHAN(chan));

	return 0;
}

static void stm32_ipcc_shutdown(struct mbox_chan *link)
{
	unsigned long chan = (unsigned long)link->con_priv;
	struct stm32_ipcc *ipcc = container_of(link->mbox, struct stm32_ipcc,
					       controller);

	/* mask rx/tx interrupt */
	stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XMR,
			    RX_BIT_CHAN(chan) | TX_BIT_CHAN(chan));

	clk_disable_unprepare(ipcc->clk);
}

static const struct mbox_chan_ops stm32_ipcc_ops = {
	.send_data	= stm32_ipcc_send_data,
	.startup	= stm32_ipcc_startup,
	.shutdown	= stm32_ipcc_shutdown,
};

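/*
 * probe(): the clock is only needed while registers are being accessed, so
 * it is enabled for the initial configuration and released again before
 * returning; each channel re-enables it from startup() for as long as the
 * channel is in use. The channel count is read from IPCC_HWCFGR.
 */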
static int stm32_ipcc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct stm32_ipcc *ipcc;
	unsigned long i;
	int ret;
	u32 ip_ver;
	static const char * const irq_name[] = {"rx", "tx"};
	irq_handler_t irq_thread[] = {stm32_ipcc_rx_irq, stm32_ipcc_tx_irq};

	if (!np) {
		dev_err(dev, "No DT found\n");
		return -ENODEV;
	}

	ipcc = devm_kzalloc(dev, sizeof(*ipcc), GFP_KERNEL);
	if (!ipcc)
		return -ENOMEM;

	spin_lock_init(&ipcc->lock);

	/* proc_id */
	if (of_property_read_u32(np, "st,proc-id", &ipcc->proc_id)) {
		dev_err(dev, "Missing st,proc-id\n");
		return -ENODEV;
	}

	if (ipcc->proc_id >= STM32_MAX_PROCS) {
		dev_err(dev, "Invalid proc_id (%d)\n", ipcc->proc_id);
		return -EINVAL;
	}

	/* regs */
	ipcc->reg_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ipcc->reg_base))
		return PTR_ERR(ipcc->reg_base);

	ipcc->reg_proc = ipcc->reg_base + ipcc->proc_id * IPCC_PROC_OFFST;

	/* clock */
	ipcc->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(ipcc->clk))
		return PTR_ERR(ipcc->clk);

	ret = clk_prepare_enable(ipcc->clk);
	if (ret) {
		dev_err(dev, "can not enable the clock\n");
		return ret;
	}

	/* irq */
	for (i = 0; i < IPCC_IRQ_NUM; i++) {
		ipcc->irqs[i] = platform_get_irq_byname(pdev, irq_name[i]);
		if (ipcc->irqs[i] < 0) {
			ret = ipcc->irqs[i];
			goto err_clk;
		}

		ret = devm_request_threaded_irq(dev, ipcc->irqs[i], NULL,
						irq_thread[i], IRQF_ONESHOT,
						dev_name(dev), ipcc);
		if (ret) {
			dev_err(dev, "failed to request irq %lu (%d)\n", i, ret);
			goto err_clk;
		}
	}

	/* mask and enable rx/tx irq */
	stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XMR,
			    RX_BIT_MASK | TX_BIT_MASK);
	stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XCR,
			    XCR_RXOIE | XCR_TXOIE);

	/* wakeup */
	if (of_property_read_bool(np, "wakeup-source")) {
		device_set_wakeup_capable(dev, true);

		ret = dev_pm_set_wake_irq(dev, ipcc->irqs[IPCC_IRQ_RX]);
		if (ret) {
			dev_err(dev, "Failed to set wake up irq\n");
			goto err_init_wkp;
		}
	}

	/* mailbox controller */
	ipcc->n_chans = readl_relaxed(ipcc->reg_base + IPCC_HWCFGR);
	ipcc->n_chans &= IPCFGR_CHAN_MASK;

	ipcc->controller.dev = dev;
	ipcc->controller.txdone_irq = true;
	ipcc->controller.ops = &stm32_ipcc_ops;
	ipcc->controller.num_chans = ipcc->n_chans;
	ipcc->controller.chans = devm_kcalloc(dev, ipcc->controller.num_chans,
					      sizeof(*ipcc->controller.chans),
					      GFP_KERNEL);
	if (!ipcc->controller.chans) {
		ret = -ENOMEM;
		goto err_irq_wkp;
	}

	for (i = 0; i < ipcc->controller.num_chans; i++)
		ipcc->controller.chans[i].con_priv = (void *)i;

	ret = devm_mbox_controller_register(dev, &ipcc->controller);
	if (ret)
		goto err_irq_wkp;

	platform_set_drvdata(pdev, ipcc);

	ip_ver = readl_relaxed(ipcc->reg_base + IPCC_VER);

	dev_info(dev, "ipcc rev:%ld.%ld enabled, %d chans, proc %d\n",
		 FIELD_GET(VER_MAJREV_MASK, ip_ver),
		 FIELD_GET(VER_MINREV_MASK, ip_ver),
		 ipcc->controller.num_chans, ipcc->proc_id);

	clk_disable_unprepare(ipcc->clk);
	return 0;

err_irq_wkp:
	if (of_property_read_bool(np, "wakeup-source"))
		dev_pm_clear_wake_irq(dev);
err_init_wkp:
	device_set_wakeup_capable(dev, false);
err_clk:
	clk_disable_unprepare(ipcc->clk);
	return ret;
}

static void stm32_ipcc_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	if (of_property_read_bool(dev->of_node, "wakeup-source"))
		dev_pm_clear_wake_irq(&pdev->dev);

	device_set_wakeup_capable(dev, false);
}

#ifdef CONFIG_PM_SLEEP
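/*
 * Only the local mask (XMR) and enable (XCR) registers hold driver state;
 * save them on suspend and restore them on resume.
 */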
static int stm32_ipcc_suspend(struct device *dev)
{
	struct stm32_ipcc *ipcc = dev_get_drvdata(dev);

	ipcc->xmr = readl_relaxed(ipcc->reg_proc + IPCC_XMR);
	ipcc->xcr = readl_relaxed(ipcc->reg_proc + IPCC_XCR);

	return 0;
}

static int stm32_ipcc_resume(struct device *dev)
{
	struct stm32_ipcc *ipcc = dev_get_drvdata(dev);

	writel_relaxed(ipcc->xmr, ipcc->reg_proc + IPCC_XMR);
	writel_relaxed(ipcc->xcr, ipcc->reg_proc + IPCC_XCR);

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(stm32_ipcc_pm_ops,
			 stm32_ipcc_suspend, stm32_ipcc_resume);

static const struct of_device_id stm32_ipcc_of_match[] = {
	{ .compatible = "st,stm32mp1-ipcc" },
	{},
};
MODULE_DEVICE_TABLE(of, stm32_ipcc_of_match);

static struct platform_driver stm32_ipcc_driver = {
	.driver = {
		.name		= "stm32-ipcc",
		.pm		= &stm32_ipcc_pm_ops,
		.of_match_table	= stm32_ipcc_of_match,
	},
	.probe		= stm32_ipcc_probe,
	.remove_new	= stm32_ipcc_remove,
};

module_platform_driver(stm32_ipcc_driver);

MODULE_AUTHOR("Ludovic Barre <ludovic.barre@st.com>");
MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>");
MODULE_DESCRIPTION("STM32 IPCC driver");
MODULE_LICENSE("GPL v2");