// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
 * Authors: Ludovic Barre <ludovic.barre@st.com> for STMicroelectronics.
 *          Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>

#define IPCC_XCR                0x000
#define XCR_RXOIE               BIT(0)
#define XCR_TXOIE               BIT(16)

#define IPCC_XMR                0x004
#define IPCC_XSCR               0x008
#define IPCC_XTOYSR             0x00c

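/*
 * Each processor owns an identical bank of the registers above; the banks
 * are laid out IPCC_PROC_OFFST bytes apart. reg_proc below points at the
 * bank of the local processor, selected by the "st,proc-id" DT property.
 */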
#define IPCC_PROC_OFFST         0x010

#define IPCC_HWCFGR             0x3f0
#define IPCFGR_CHAN_MASK        GENMASK(7, 0)

#define IPCC_VER                0x3f4
#define VER_MINREV_MASK         GENMASK(3, 0)
#define VER_MAJREV_MASK         GENMASK(7, 4)

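/*
 * XMR and XSCR pack both directions into one 32-bit word: bits 15:0 carry
 * the per-channel RX ('channel occupied') controls and bits 31:16 the
 * per-channel TX ('channel free') controls, hence the 16-bit shift below.
 */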
#define RX_BIT_MASK             GENMASK(15, 0)
#define RX_BIT_CHAN(chan)       BIT(chan)
#define TX_BIT_SHIFT            16
#define TX_BIT_MASK             GENMASK(31, 16)
#define TX_BIT_CHAN(chan)       BIT(TX_BIT_SHIFT + (chan))

#define STM32_MAX_PROCS         2

enum {
        IPCC_IRQ_RX,
        IPCC_IRQ_TX,
        IPCC_IRQ_NUM,
};

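/*
 * struct stm32_ipcc - per-instance driver state
 * @controller: mailbox framework controller, one channel per IPCC channel
 * @reg_base:   start of the IPCC register map
 * @reg_proc:   register bank of the local processor
 * @clk:        IPCC peripheral clock, held while at least one channel is open
 * @lock:       protects read-modify-write accesses to the IPCC registers
 * @irqs:       RX and TX interrupt lines
 * @proc_id:    local processor index (0 or 1), from "st,proc-id"
 * @n_chans:    number of channels reported by IPCC_HWCFGR
 * @xcr, @xmr:  shadow copies saved on suspend and restored on resume
 */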
struct stm32_ipcc {
        struct mbox_controller controller;
        void __iomem *reg_base;
        void __iomem *reg_proc;
        struct clk *clk;
        spinlock_t lock; /* protect access to IPCC registers */
        int irqs[IPCC_IRQ_NUM];
        u32 proc_id;
        u32 n_chans;
        u32 xcr;
        u32 xmr;
};

static inline void stm32_ipcc_set_bits(spinlock_t *lock, void __iomem *reg,
                                       u32 mask)
{
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        writel_relaxed(readl_relaxed(reg) | mask, reg);
        spin_unlock_irqrestore(lock, flags);
}

static inline void stm32_ipcc_clr_bits(spinlock_t *lock, void __iomem *reg,
                                       u32 mask)
{
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        writel_relaxed(readl_relaxed(reg) & ~mask, reg);
        spin_unlock_irqrestore(lock, flags);
}

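/*
 * RX interrupt: the remote processor has posted on one or more channels.
 * The handler reads the remote bank's XTOYSR ('channel X to Y status') to
 * find occupied channels that are not masked locally, hands each one to
 * the mailbox client, then clears the channel via XSCR.
 */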
static irqreturn_t stm32_ipcc_rx_irq(int irq, void *data)
{
        struct stm32_ipcc *ipcc = data;
        struct device *dev = ipcc->controller.dev;
        u32 status, mr, tosr, chan;
        irqreturn_t ret = IRQ_NONE;
        int proc_offset;

        /* read 'channel occupied' status from other proc */
        proc_offset = ipcc->proc_id ? -IPCC_PROC_OFFST : IPCC_PROC_OFFST;
        tosr = readl_relaxed(ipcc->reg_proc + proc_offset + IPCC_XTOYSR);
        mr = readl_relaxed(ipcc->reg_proc + IPCC_XMR);

        /* search for unmasked 'channel occupied' */
        status = tosr & FIELD_GET(RX_BIT_MASK, ~mr);

        for (chan = 0; chan < ipcc->n_chans; chan++) {
                if (!(status & (1 << chan)))
                        continue;

                dev_dbg(dev, "%s: chan:%d rx\n", __func__, chan);

                mbox_chan_received_data(&ipcc->controller.chans[chan], NULL);

                stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XSCR,
                                    RX_BIT_CHAN(chan));

                ret = IRQ_HANDLED;
        }

        return ret;
}

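/*
 * TX interrupt: the remote processor has freed one or more channels we
 * posted on. For every unmasked free channel, re-mask its 'channel free'
 * interrupt and report TX completion to the mailbox framework.
 */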
static irqreturn_t stm32_ipcc_tx_irq(int irq, void *data)
{
        struct stm32_ipcc *ipcc = data;
        struct device *dev = ipcc->controller.dev;
        u32 status, mr, tosr, chan;
        irqreturn_t ret = IRQ_NONE;

        tosr = readl_relaxed(ipcc->reg_proc + IPCC_XTOYSR);
        mr = readl_relaxed(ipcc->reg_proc + IPCC_XMR);

        /* search for unmasked 'channel free' */
        status = ~tosr & FIELD_GET(TX_BIT_MASK, ~mr);

        for (chan = 0; chan < ipcc->n_chans; chan++) {
                if (!(status & (1 << chan)))
                        continue;

                dev_dbg(dev, "%s: chan:%d tx\n", __func__, chan);

                /* mask 'tx channel free' interrupt */
                stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XMR,
                                    TX_BIT_CHAN(chan));

                mbox_chan_txdone(&ipcc->controller.chans[chan], 0);

                ret = IRQ_HANDLED;
        }

        return ret;
}

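/*
 * send_data() marks the channel occupied in XSCR and unmasks its 'channel
 * free' interrupt. The controller is registered with txdone_irq set, so
 * the transfer is only reported done from stm32_ipcc_tx_irq() once the
 * remote side has freed the channel again.
 */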
static int stm32_ipcc_send_data(struct mbox_chan *link, void *data)
{
        unsigned long chan = (unsigned long)link->con_priv;
        struct stm32_ipcc *ipcc = container_of(link->mbox, struct stm32_ipcc,
                                               controller);

        dev_dbg(ipcc->controller.dev, "%s: chan:%lu\n", __func__, chan);

        /* set channel n occupied */
        stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XSCR,
                            TX_BIT_CHAN(chan));

        /* unmask 'tx channel free' interrupt */
        stm32_ipcc_clr_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XMR,
                            TX_BIT_CHAN(chan));

        return 0;
}

static int stm32_ipcc_startup(struct mbox_chan *link)
{
        unsigned long chan = (unsigned long)link->con_priv;
        struct stm32_ipcc *ipcc = container_of(link->mbox, struct stm32_ipcc,
                                               controller);
        int ret;

        ret = clk_prepare_enable(ipcc->clk);
        if (ret) {
                dev_err(ipcc->controller.dev, "can not enable the clock\n");
                return ret;
        }

        /* unmask 'rx channel occupied' interrupt */
        stm32_ipcc_clr_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XMR,
                            RX_BIT_CHAN(chan));

        return 0;
}

static void stm32_ipcc_shutdown(struct mbox_chan *link)
{
        unsigned long chan = (unsigned long)link->con_priv;
        struct stm32_ipcc *ipcc = container_of(link->mbox, struct stm32_ipcc,
                                               controller);

        /* mask rx/tx interrupt */
        stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XMR,
                            RX_BIT_CHAN(chan) | TX_BIT_CHAN(chan));

        clk_disable_unprepare(ipcc->clk);
}

static const struct mbox_chan_ops stm32_ipcc_ops = {
        .send_data = stm32_ipcc_send_data,
        .startup = stm32_ipcc_startup,
        .shutdown = stm32_ipcc_shutdown,
};

static int stm32_ipcc_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct device_node *np = dev->of_node;
        struct stm32_ipcc *ipcc;
        unsigned long i;
        int ret;
        u32 ip_ver;
        static const char * const irq_name[] = {"rx", "tx"};
        irq_handler_t irq_thread[] = {stm32_ipcc_rx_irq, stm32_ipcc_tx_irq};

        if (!np) {
                dev_err(dev, "No DT found\n");
                return -ENODEV;
        }

        ipcc = devm_kzalloc(dev, sizeof(*ipcc), GFP_KERNEL);
        if (!ipcc)
                return -ENOMEM;

        spin_lock_init(&ipcc->lock);

        /* proc_id */
        if (of_property_read_u32(np, "st,proc-id", &ipcc->proc_id)) {
                dev_err(dev, "Missing st,proc-id\n");
                return -ENODEV;
        }

        if (ipcc->proc_id >= STM32_MAX_PROCS) {
                dev_err(dev, "Invalid proc_id (%d)\n", ipcc->proc_id);
                return -EINVAL;
        }

        /* regs */
        ipcc->reg_base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(ipcc->reg_base))
                return PTR_ERR(ipcc->reg_base);

        ipcc->reg_proc = ipcc->reg_base + ipcc->proc_id * IPCC_PROC_OFFST;

        /* clock */
        ipcc->clk = devm_clk_get(dev, NULL);
        if (IS_ERR(ipcc->clk))
                return PTR_ERR(ipcc->clk);

        ret = clk_prepare_enable(ipcc->clk);
        if (ret) {
                dev_err(dev, "can not enable the clock\n");
                return ret;
        }

        /* irq */
        for (i = 0; i < IPCC_IRQ_NUM; i++) {
                ipcc->irqs[i] = platform_get_irq_byname(pdev, irq_name[i]);
                if (ipcc->irqs[i] < 0) {
                        ret = ipcc->irqs[i];
                        goto err_clk;
                }

                ret = devm_request_threaded_irq(dev, ipcc->irqs[i], NULL,
                                                irq_thread[i], IRQF_ONESHOT,
                                                dev_name(dev), ipcc);
                if (ret) {
                        dev_err(dev, "failed to request irq %lu (%d)\n", i, ret);
                        goto err_clk;
                }
        }

        /* mask and enable rx/tx irq */
        stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XMR,
                            RX_BIT_MASK | TX_BIT_MASK);
        stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XCR,
                            XCR_RXOIE | XCR_TXOIE);

        /* wakeup */
        if (of_property_read_bool(np, "wakeup-source")) {
                device_set_wakeup_capable(dev, true);

                ret = dev_pm_set_wake_irq(dev, ipcc->irqs[IPCC_IRQ_RX]);
                if (ret) {
                        dev_err(dev, "Failed to set wake up irq\n");
                        goto err_init_wkp;
                }
        }

        /* mailbox controller */
        ipcc->n_chans = readl_relaxed(ipcc->reg_base + IPCC_HWCFGR);
        ipcc->n_chans &= IPCFGR_CHAN_MASK;

        ipcc->controller.dev = dev;
        ipcc->controller.txdone_irq = true;
        ipcc->controller.ops = &stm32_ipcc_ops;
        ipcc->controller.num_chans = ipcc->n_chans;
        ipcc->controller.chans = devm_kcalloc(dev, ipcc->controller.num_chans,
                                              sizeof(*ipcc->controller.chans),
                                              GFP_KERNEL);
        if (!ipcc->controller.chans) {
                ret = -ENOMEM;
                goto err_irq_wkp;
        }

        for (i = 0; i < ipcc->controller.num_chans; i++)
                ipcc->controller.chans[i].con_priv = (void *)i;

        ret = devm_mbox_controller_register(dev, &ipcc->controller);
        if (ret)
                goto err_irq_wkp;

        platform_set_drvdata(pdev, ipcc);

        ip_ver = readl_relaxed(ipcc->reg_base + IPCC_VER);

        dev_info(dev, "ipcc rev:%ld.%ld enabled, %d chans, proc %d\n",
                 FIELD_GET(VER_MAJREV_MASK, ip_ver),
                 FIELD_GET(VER_MINREV_MASK, ip_ver),
                 ipcc->controller.num_chans, ipcc->proc_id);

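        /*
         * The clock is only needed while a channel is open (it is taken
         * again in stm32_ipcc_startup()), so drop the probe-time reference.
         */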
        clk_disable_unprepare(ipcc->clk);
        return 0;

err_irq_wkp:
        if (of_property_read_bool(np, "wakeup-source"))
                dev_pm_clear_wake_irq(dev);
err_init_wkp:
        device_set_wakeup_capable(dev, false);
err_clk:
        clk_disable_unprepare(ipcc->clk);
        return ret;
}

static int stm32_ipcc_remove(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;

        if (of_property_read_bool(dev->of_node, "wakeup-source"))
                dev_pm_clear_wake_irq(&pdev->dev);

        device_set_wakeup_capable(dev, false);

        return 0;
}

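/*
 * System sleep: the interrupt mask (XMR) and enable (XCR) registers are
 * saved on suspend and written back on resume, so the mask/wakeup
 * configuration survives a low-power transition in which the registers
 * may lose their contents.
 */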
#ifdef CONFIG_PM_SLEEP
static int stm32_ipcc_suspend(struct device *dev)
{
        struct stm32_ipcc *ipcc = dev_get_drvdata(dev);

        ipcc->xmr = readl_relaxed(ipcc->reg_proc + IPCC_XMR);
        ipcc->xcr = readl_relaxed(ipcc->reg_proc + IPCC_XCR);

        return 0;
}

static int stm32_ipcc_resume(struct device *dev)
{
        struct stm32_ipcc *ipcc = dev_get_drvdata(dev);

        writel_relaxed(ipcc->xmr, ipcc->reg_proc + IPCC_XMR);
        writel_relaxed(ipcc->xcr, ipcc->reg_proc + IPCC_XCR);

        return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(stm32_ipcc_pm_ops,
                         stm32_ipcc_suspend, stm32_ipcc_resume);

static const struct of_device_id stm32_ipcc_of_match[] = {
        { .compatible = "st,stm32mp1-ipcc" },
        {},
};
MODULE_DEVICE_TABLE(of, stm32_ipcc_of_match);

static struct platform_driver stm32_ipcc_driver = {
        .driver = {
                .name = "stm32-ipcc",
                .pm = &stm32_ipcc_pm_ops,
                .of_match_table = stm32_ipcc_of_match,
        },
        .probe = stm32_ipcc_probe,
        .remove = stm32_ipcc_remove,
};

module_platform_driver(stm32_ipcc_driver);

MODULE_AUTHOR("Ludovic Barre <ludovic.barre@st.com>");
MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>");
MODULE_DESCRIPTION("STM32 IPCC driver");
MODULE_LICENSE("GPL v2");