// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2022 Linaro Ltd.
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/bitops.h>

#include <linux/soc/qcom/qcom_aoss.h>

#include "ipa.h"
#include "ipa_power.h"
#include "ipa_endpoint.h"
#include "ipa_modem.h"
#include "ipa_data.h"

/**
 * DOC: IPA Power Management
 *
 * The IPA hardware is enabled when the IPA core clock and all the
 * interconnects (buses) it depends on are enabled.  Runtime power
 * management determines whether the core clock and interconnects
 * are enabled, and suspends them automatically when they are not
 * in use.
 *
 * The core clock currently runs at a fixed rate when enabled, and
 * all interconnects use a fixed average and peak bandwidth.
 */

#define IPA_AUTOSUSPEND_DELAY	500	/* milliseconds */
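
/* A minimal caller-side sketch (illustrative only, not code from this
 * file) of how driver code elsewhere holds IPA power through runtime PM.
 * With the autosuspend delay above, the final put lets the hardware be
 * suspended automatically 500 milliseconds after last use; "dev" here
 * stands for the IPA device:
 *
 *	ret = pm_runtime_get_sync(dev);
 *	if (ret < 0) {
 *		pm_runtime_put_noidle(dev);
 *		return ret;
 *	}
 *
 *	(use the hardware)
 *
 *	pm_runtime_mark_last_busy(dev);
 *	(void)pm_runtime_put_autosuspend(dev);
 */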

/**
 * enum ipa_power_flag - IPA power flags
 * @IPA_POWER_FLAG_RESUMED:	Whether resume from suspend has been signaled
 * @IPA_POWER_FLAG_SYSTEM:	Hardware is system (not runtime) suspended
 * @IPA_POWER_FLAG_STOPPED:	Modem TX is disabled by ipa_start_xmit()
 * @IPA_POWER_FLAG_STARTED:	Modem TX was enabled by ipa_runtime_resume()
 * @IPA_POWER_FLAG_COUNT:	Number of defined power flags
 */
enum ipa_power_flag {
	IPA_POWER_FLAG_RESUMED,
	IPA_POWER_FLAG_SYSTEM,
	IPA_POWER_FLAG_STOPPED,
	IPA_POWER_FLAG_STARTED,
	IPA_POWER_FLAG_COUNT,		/* Last; not a flag */
};

/**
 * struct ipa_power - IPA power management information
 * @dev:		IPA device pointer
 * @core:		IPA core clock
 * @qmp:		QMP handle for AOSS communication
 * @spinlock:		Protects modem TX queue enable/disable
 * @flags:		Boolean state flags
 * @interconnect_count:	Number of elements in interconnect[]
 * @interconnect:	Interconnect array
 */
struct ipa_power {
	struct device *dev;
	struct clk *core;
	struct qmp *qmp;
	spinlock_t spinlock;	/* used with STOPPED/STARTED power flags */
	DECLARE_BITMAP(flags, IPA_POWER_FLAG_COUNT);
	u32 interconnect_count;
	struct icc_bulk_data interconnect[] __counted_by(interconnect_count);
};

/* Initialize interconnects required for IPA operation */
static int ipa_interconnect_init(struct ipa_power *power,
				 const struct ipa_interconnect_data *data)
{
	struct icc_bulk_data *interconnect;
	int ret;
	u32 i;

	/* Initialize our interconnect data array for bulk operations */
	interconnect = &power->interconnect[0];
	for (i = 0; i < power->interconnect_count; i++) {
		/* interconnect->path is filled in by of_icc_bulk_get() */
		interconnect->name = data->name;
		interconnect->avg_bw = data->average_bandwidth;
		interconnect->peak_bw = data->peak_bandwidth;
		data++;
		interconnect++;
	}

	ret = of_icc_bulk_get(power->dev, power->interconnect_count,
			      power->interconnect);
	if (ret)
		return ret;

	/* All interconnects are initially disabled */
	icc_bulk_disable(power->interconnect_count, power->interconnect);

	/* Set the bandwidth values to be used when enabled */
	ret = icc_bulk_set_bw(power->interconnect_count, power->interconnect);
	if (ret)
		icc_bulk_put(power->interconnect_count, power->interconnect);

	return ret;
}
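
/* An illustrative device tree fragment (placeholder names and phandles,
 * not taken from a real platform) showing the properties of_icc_bulk_get()
 * consults: each entry in interconnect-names must match the .name filled
 * in above from the platform's ipa_interconnect_data:
 *
 *	interconnects = <&mc MASTER_IPA &mc SLAVE_EBI1>,
 *			<&sc MASTER_APPS &sc SLAVE_IPA_CFG>;
 *	interconnect-names = "memory", "config";
 */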

/* Inverse of ipa_interconnect_init() */
static void ipa_interconnect_exit(struct ipa_power *power)
{
	icc_bulk_put(power->interconnect_count, power->interconnect);
}

/* Enable IPA power, enabling interconnects and the core clock */
static int ipa_power_enable(struct ipa *ipa)
{
	struct ipa_power *power = ipa->power;
	int ret;

	ret = icc_bulk_enable(power->interconnect_count, power->interconnect);
	if (ret)
		return ret;

	ret = clk_prepare_enable(power->core);
	if (ret) {
		dev_err(power->dev, "error %d enabling core clock\n", ret);
		icc_bulk_disable(power->interconnect_count,
				 power->interconnect);
	}

	return ret;
}

/* Inverse of ipa_power_enable() */
static void ipa_power_disable(struct ipa *ipa)
{
	struct ipa_power *power = ipa->power;

	clk_disable_unprepare(power->core);

	icc_bulk_disable(power->interconnect_count, power->interconnect);
}

static int ipa_runtime_suspend(struct device *dev)
{
	struct ipa *ipa = dev_get_drvdata(dev);

	/* Endpoints aren't usable until setup is complete */
	if (ipa->setup_complete) {
		__clear_bit(IPA_POWER_FLAG_RESUMED, ipa->power->flags);
		ipa_endpoint_suspend(ipa);
		gsi_suspend(&ipa->gsi);
	}

	ipa_power_disable(ipa);

	return 0;
}

static int ipa_runtime_resume(struct device *dev)
{
	struct ipa *ipa = dev_get_drvdata(dev);
	int ret;

	ret = ipa_power_enable(ipa);
	if (WARN_ON(ret < 0))
		return ret;

	/* Endpoints aren't usable until setup is complete */
	if (ipa->setup_complete) {
		gsi_resume(&ipa->gsi);
		ipa_endpoint_resume(ipa);
	}

	return 0;
}

static int ipa_suspend(struct device *dev)
{
	struct ipa *ipa = dev_get_drvdata(dev);

	__set_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags);

	/* Increment the disable depth to ensure that the IRQ won't
	 * be re-enabled until the matching _enable call in
	 * ipa_resume().  We do this to ensure that the interrupt
	 * handler won't run whilst PM runtime is disabled.
	 *
	 * Note that disabling the IRQ is NOT the same as disabling
	 * irq wake.  If wakeup is enabled for the IPA then the IRQ
	 * will still cause the system to wake up, see irq_set_irq_wake().
	 */
	ipa_interrupt_irq_disable(ipa);

	return pm_runtime_force_suspend(dev);
}

static int ipa_resume(struct device *dev)
{
	struct ipa *ipa = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_force_resume(dev);

	__clear_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags);

	/* Now that PM runtime is enabled again it's safe
	 * to turn the IRQ back on and process any data
	 * that was received during suspend.
	 */
	ipa_interrupt_irq_enable(ipa);

	return ret;
}

/* Return the current IPA core clock rate */
u32 ipa_core_clock_rate(struct ipa *ipa)
{
	return ipa->power ? (u32)clk_get_rate(ipa->power->core) : 0;
}

void ipa_power_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
{
	/* To handle an IPA interrupt we will have resumed the hardware
	 * just to handle the interrupt, so we're done.  If we are in a
	 * system suspend, trigger a system resume.
	 */
	if (!__test_and_set_bit(IPA_POWER_FLAG_RESUMED, ipa->power->flags))
		if (test_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags))
			pm_wakeup_dev_event(&ipa->pdev->dev, 0, true);

	/* Acknowledge/clear the suspend interrupt on all endpoints */
	ipa_interrupt_suspend_clear_all(ipa->interrupt);
}

/* The next few functions coordinate stopping and starting the modem
 * network device transmit queue.
 *
 * Transmit can be running concurrent with power resume, and there's a
 * chance the resume completes before the transmit path stops the queue,
 * leaving the queue in a stopped state.  The next two functions are used
 * to avoid this: ipa_power_modem_queue_stop() is used by ipa_start_xmit()
 * to conditionally stop the TX queue; and ipa_power_modem_queue_wake()
 * is used by ipa_runtime_resume() to conditionally restart it.
 *
 * Two flags and a spinlock are used.  If the queue is stopped, the STOPPED
 * power flag is set.  And if the queue is started, the STARTED flag is set.
 * The queue is only started on resume if the STOPPED flag is set.  And the
 * queue is only stopped in ipa_start_xmit() if the STARTED flag is *not*
 * set.  As a result, the queue remains operational if the two activities
 * happen concurrently regardless of the order they complete.  The spinlock
 * ensures the flag and TX queue operations are done atomically.  (An
 * illustrative caller-side sketch follows the three functions below.)
 *
 * The first function stops the modem netdev transmit queue, but only if
 * the STARTED flag is *not* set.  That flag is cleared if it was set.
 * If the queue is stopped, the STOPPED flag is set.  This is called
 * only from ipa_start_xmit().
 */
void ipa_power_modem_queue_stop(struct ipa *ipa)
{
	struct ipa_power *power = ipa->power;
	unsigned long flags;

	spin_lock_irqsave(&power->spinlock, flags);

	if (!__test_and_clear_bit(IPA_POWER_FLAG_STARTED, power->flags)) {
		netif_stop_queue(ipa->modem_netdev);
		__set_bit(IPA_POWER_FLAG_STOPPED, power->flags);
	}

	spin_unlock_irqrestore(&power->spinlock, flags);
}

/* This function starts the modem netdev transmit queue, but only if the
 * STOPPED flag is set.  That flag is cleared if it was set.  If the queue
 * was restarted, the STARTED flag is set; this allows ipa_start_xmit()
 * to skip stopping the queue in the event of a race.
 */
void ipa_power_modem_queue_wake(struct ipa *ipa)
{
	struct ipa_power *power = ipa->power;
	unsigned long flags;

	spin_lock_irqsave(&power->spinlock, flags);

	if (__test_and_clear_bit(IPA_POWER_FLAG_STOPPED, power->flags)) {
		__set_bit(IPA_POWER_FLAG_STARTED, power->flags);
		netif_wake_queue(ipa->modem_netdev);
	}

	spin_unlock_irqrestore(&power->spinlock, flags);
}

/* This function clears the STARTED flag once the TX queue is operating */
void ipa_power_modem_queue_active(struct ipa *ipa)
{
	clear_bit(IPA_POWER_FLAG_STARTED, ipa->power->flags);
}
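
/* An illustrative caller-side sketch (loosely based on ipa_start_xmit()
 * in ipa_modem.c; simplified, so treat the details as assumptions) of
 * how the TX path uses the helpers above when power isn't active yet:
 *
 *	ret = pm_runtime_get(dev);
 *	if (ret < 1) {
 *		(power not active; a resume may be in progress)
 *		ipa_power_modem_queue_stop(ipa);
 *		pm_runtime_put_noidle(dev);
 *		return NETDEV_TX_BUSY;
 *	}
 *	ipa_power_modem_queue_active(ipa);
 *
 * The resume path calls ipa_power_modem_queue_wake(), so if the wake
 * races with the stop above, the STARTED/STOPPED flags guarantee the
 * queue ends up running no matter which side finishes last.
 */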

static int ipa_power_retention_init(struct ipa_power *power)
{
	struct qmp *qmp = qmp_get(power->dev);

	if (IS_ERR(qmp)) {
		if (PTR_ERR(qmp) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		/* We assume any other error means it's not defined/needed */
		qmp = NULL;
	}
	power->qmp = qmp;

	return 0;
}

static void ipa_power_retention_exit(struct ipa_power *power)
{
	qmp_put(power->qmp);
	power->qmp = NULL;
}

/* Control register retention on power collapse */
void ipa_power_retention(struct ipa *ipa, bool enable)
{
	static const char fmt[] = "{ class: bcm, res: ipa_pc, val: %c }";
	struct ipa_power *power = ipa->power;
	int ret;

	if (!power->qmp)
		return;		/* Not needed on this platform */

	ret = qmp_send(power->qmp, fmt, enable ? '1' : '0');
	if (ret)
		dev_err(power->dev, "error %d sending QMP %sable request\n",
			ret, enable ? "en" : "dis");
}
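
/* For example, ipa_power_retention(ipa, true) asks the AOSS to retain
 * IPA register state across power collapse by sending the message
 * "{ class: bcm, res: ipa_pc, val: 1 }"; passing false sends the same
 * message with val: 0 to disable retention.
 */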

int ipa_power_setup(struct ipa *ipa)
{
	int ret;

	ipa_interrupt_enable(ipa, IPA_IRQ_TX_SUSPEND);

	ret = device_init_wakeup(&ipa->pdev->dev, true);
	if (ret)
		ipa_interrupt_disable(ipa, IPA_IRQ_TX_SUSPEND);

	return ret;
}

void ipa_power_teardown(struct ipa *ipa)
{
	(void)device_init_wakeup(&ipa->pdev->dev, false);
	ipa_interrupt_disable(ipa, IPA_IRQ_TX_SUSPEND);
}

/* Initialize IPA power management */
struct ipa_power *
ipa_power_init(struct device *dev, const struct ipa_power_data *data)
{
	struct ipa_power *power;
	struct clk *clk;
	size_t size;
	int ret;

	clk = clk_get(dev, "core");
	if (IS_ERR(clk)) {
		dev_err_probe(dev, PTR_ERR(clk), "error getting core clock\n");

		return ERR_CAST(clk);
	}

	ret = clk_set_rate(clk, data->core_clock_rate);
	if (ret) {
		dev_err(dev, "error %d setting core clock rate to %u\n",
			ret, data->core_clock_rate);
		goto err_clk_put;
	}

	size = struct_size(power, interconnect, data->interconnect_count);
	power = kzalloc(size, GFP_KERNEL);
	if (!power) {
		ret = -ENOMEM;
		goto err_clk_put;
	}
	power->dev = dev;
	power->core = clk;
	spin_lock_init(&power->spinlock);
	power->interconnect_count = data->interconnect_count;

	ret = ipa_interconnect_init(power, data->interconnect_data);
	if (ret)
		goto err_kfree;

	ret = ipa_power_retention_init(power);
	if (ret)
		goto err_interconnect_exit;

	pm_runtime_set_autosuspend_delay(dev, IPA_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);

	return power;

err_interconnect_exit:
	ipa_interconnect_exit(power);
err_kfree:
	kfree(power);
err_clk_put:
	clk_put(clk);

	return ERR_PTR(ret);
}

/* Inverse of ipa_power_init() */
void ipa_power_exit(struct ipa_power *power)
{
	struct device *dev = power->dev;
	struct clk *clk = power->core;

	pm_runtime_disable(dev);
	pm_runtime_dont_use_autosuspend(dev);
	ipa_power_retention_exit(power);
	ipa_interconnect_exit(power);
	kfree(power);
	clk_put(clk);
}

const struct dev_pm_ops ipa_pm_ops = {
	.suspend		= ipa_suspend,
	.resume			= ipa_resume,
	.runtime_suspend	= ipa_runtime_suspend,
	.runtime_resume		= ipa_runtime_resume,
};
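
/* A sketch of how these operations get attached to the IPA platform
 * driver; the registration lives in ipa_main.c, and the .probe/.remove/
 * .of_match_table names here are assumptions for illustration:
 *
 *	static struct platform_driver ipa_driver = {
 *		.probe	= ipa_probe,
 *		.remove	= ipa_remove,
 *		.driver	= {
 *			.name		= "ipa",
 *			.pm		= &ipa_pm_ops,
 *			.of_match_table	= ipa_match,
 *		},
 *	};
 */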