v3.15
  1/*
  2 * OMAP2+ common Power & Reset Management (PRM) IP block functions
  3 *
  4 * Copyright (C) 2011 Texas Instruments, Inc.
  5 * Tero Kristo <t-kristo@ti.com>
  6 *
  7 * This program is free software; you can redistribute it and/or modify
  8 * it under the terms of the GNU General Public License version 2 as
  9 * published by the Free Software Foundation.
 10 *
 11 *
 12 * For historical purposes, the API used to configure the PRM
 13 * interrupt handler refers to it as the "PRCM interrupt."  The
 14 * underlying registers are located in the PRM on OMAP3/4.
 15 *
 16 * XXX This code should eventually be moved to a PRM driver.
 17 */
 18
 19#include <linux/kernel.h>
 20#include <linux/module.h>
 21#include <linux/init.h>
 22#include <linux/io.h>
 23#include <linux/irq.h>
 24#include <linux/interrupt.h>
 25#include <linux/slab.h>
 26#include <linux/of.h>
 27#include <linux/of_address.h>
 28#include <linux/clk-provider.h>
 29#include <linux/clk/ti.h>
 30
 31#include "soc.h"
 32#include "prm2xxx_3xxx.h"
 33#include "prm2xxx.h"
 34#include "prm3xxx.h"
 35#include "prm44xx.h"
 36#include "common.h"
 37#include "clock.h"
 38
 39/*
 40 * OMAP_PRCM_MAX_NR_PENDING_REG: maximum number of PRM_IRQ*_MPU regs
 41 * XXX this is technically not needed, since
 42 * omap_prcm_register_chain_handler() could allocate this based on the
 43 * actual amount of memory needed for the SoC
 44 */
 45#define OMAP_PRCM_MAX_NR_PENDING_REG		2
 46
 47/*
 48 * prcm_irq_chips: an array of all of the "generic IRQ chips" in use
 49 * by the PRCM interrupt handler code.  There will be one 'chip' per
 50 * PRM_{IRQSTATUS,IRQENABLE}_MPU register pair.  (So OMAP3 will have
 51 * one "chip" and OMAP4 will have two.)
 52 */
 53static struct irq_chip_generic **prcm_irq_chips;
 54
 55/*
 56 * prcm_irq_setup: the PRCM IRQ parameters for the hardware the code
 57 * is currently running on.  Defined and passed by initialization code
 58 * that calls omap_prcm_register_chain_handler().
 59 */
 60static struct omap_prcm_irq_setup *prcm_irq_setup;
 61
 62/* prm_base: base virtual address of the PRM IP block */
 63void __iomem *prm_base;
 64
 65/*
 66 * prm_ll_data: function pointers to SoC-specific implementations of
 67 * common PRM functions
 68 */
 69static struct prm_ll_data null_prm_ll_data;
 70static struct prm_ll_data *prm_ll_data = &null_prm_ll_data;
 71
 72/* Private functions */
 73
 74/*
 75 * Move priority events from events to priority_events array
 76 */
 77static void omap_prcm_events_filter_priority(unsigned long *events,
 78	unsigned long *priority_events)
 79{
 80	int i;
 81
 82	for (i = 0; i < prcm_irq_setup->nr_regs; i++) {
 83		priority_events[i] =
 84			events[i] & prcm_irq_setup->priority_mask[i];
 85		events[i] ^= priority_events[i];
 86	}
 87}
 88
 89/*
 90 * PRCM Interrupt Handler
 91 *
 92 * This is a common handler for the OMAP PRCM interrupts. Pending
 93 * interrupts are detected by a call to the SoC's read_pending_irqs hook and
 94 * dispatched accordingly. Clearing of the wakeup events should be
 95 * done by the SoC specific individual handlers.
 96 */
 97static void omap_prcm_irq_handler(unsigned int irq, struct irq_desc *desc)
 98{
 99	unsigned long pending[OMAP_PRCM_MAX_NR_PENDING_REG];
100	unsigned long priority_pending[OMAP_PRCM_MAX_NR_PENDING_REG];
101	struct irq_chip *chip = irq_desc_get_chip(desc);
102	unsigned int virtirq;
103	int nr_irq = prcm_irq_setup->nr_regs * 32;
104
105	/*
106	 * If we are suspended, mask all interrupts from PRCM level,
107	 * this does not ack them, and they will be pending until we
108	 * re-enable the interrupts, at which point the
109	 * omap_prcm_irq_handler will be executed again.  The
110	 * _save_and_clear_irqen() function must ensure that the PRM
111	 * write to disable all IRQs has reached the PRM before
112	 * returning, or spurious PRCM interrupts may occur during
113	 * suspend.
114	 */
115	if (prcm_irq_setup->suspended) {
116		prcm_irq_setup->save_and_clear_irqen(prcm_irq_setup->saved_mask);
117		prcm_irq_setup->suspend_save_flag = true;
118	}
119
120	/*
121	 * Loop until all pending irqs are handled, since
122	 * generic_handle_irq() can cause new irqs to come
123	 */
124	while (!prcm_irq_setup->suspended) {
125		prcm_irq_setup->read_pending_irqs(pending);
126
127		/* No bit set, then all IRQs are handled */
128		if (find_first_bit(pending, nr_irq) >= nr_irq)
129			break;
130
131		omap_prcm_events_filter_priority(pending, priority_pending);
132
133		/*
134		 * Loop on all currently pending irqs so that new irqs
135		 * cannot starve previously pending irqs
136		 */
137
138		/* Serve priority events first */
139		for_each_set_bit(virtirq, priority_pending, nr_irq)
140			generic_handle_irq(prcm_irq_setup->base_irq + virtirq);
141
142		/* Serve normal events next */
143		for_each_set_bit(virtirq, pending, nr_irq)
144			generic_handle_irq(prcm_irq_setup->base_irq + virtirq);
145	}
146	if (chip->irq_ack)
147		chip->irq_ack(&desc->irq_data);
148	if (chip->irq_eoi)
149		chip->irq_eoi(&desc->irq_data);
150	chip->irq_unmask(&desc->irq_data);
151
152	prcm_irq_setup->ocp_barrier(); /* avoid spurious IRQs */
153}
154
155/* Public functions */
156
157/**
158 * omap_prcm_event_to_irq - given a PRCM event name, returns the
159 * corresponding IRQ on which the handler should be registered
160 * @name: name of the PRCM interrupt bit to look up - see struct omap_prcm_irq
161 *
162 * Returns the Linux internal IRQ ID corresponding to @name upon success,
163 * or -ENOENT upon failure.
164 */
165int omap_prcm_event_to_irq(const char *name)
166{
167	int i;
168
169	if (!prcm_irq_setup || !name)
170		return -ENOENT;
171
172	for (i = 0; i < prcm_irq_setup->nr_irqs; i++)
173		if (!strcmp(prcm_irq_setup->irqs[i].name, name))
174			return prcm_irq_setup->base_irq +
175				prcm_irq_setup->irqs[i].offset;
176
177	return -ENOENT;
178}
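
Callers typically feed the returned IRQ straight into request_irq(). A minimal sketch of that pattern for the "io" event, assuming a hypothetical handler; the handler name and flags are illustrative, not taken from this file:

/* Hypothetical client of omap_prcm_event_to_irq(); handler and flags are illustrative. */
static irqreturn_t example_io_event_handler(int irq, void *unused)
{
	/* a real handler would clear the latched I/O wakeup status here */
	return IRQ_HANDLED;
}

static int example_hook_io_event(void)
{
	int irq = omap_prcm_event_to_irq("io");

	if (irq < 0)
		return irq;	/* -ENOENT: no chain handler or no such event */

	return request_irq(irq, example_io_event_handler,
			   IRQF_NO_SUSPEND, "pm_io", NULL);
}
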
179
180/**
181 * omap_prcm_irq_cleanup - reverse the memory allocation and other steps
182 * done by omap_prcm_register_chain_handler()
183 *
184 * No return value.
185 */
186void omap_prcm_irq_cleanup(void)
187{
188	int i;
189
190	if (!prcm_irq_setup) {
191		pr_err("PRCM: IRQ handler not initialized; cannot cleanup\n");
192		return;
193	}
194
195	if (prcm_irq_chips) {
196		for (i = 0; i < prcm_irq_setup->nr_regs; i++) {
197			if (prcm_irq_chips[i])
198				irq_remove_generic_chip(prcm_irq_chips[i],
199					0xffffffff, 0, 0);
200			prcm_irq_chips[i] = NULL;
201		}
202		kfree(prcm_irq_chips);
203		prcm_irq_chips = NULL;
204	}
205
206	kfree(prcm_irq_setup->saved_mask);
207	prcm_irq_setup->saved_mask = NULL;
208
209	kfree(prcm_irq_setup->priority_mask);
210	prcm_irq_setup->priority_mask = NULL;
211
212	irq_set_chained_handler(prcm_irq_setup->irq, NULL);
213
214	if (prcm_irq_setup->base_irq > 0)
215		irq_free_descs(prcm_irq_setup->base_irq,
216			prcm_irq_setup->nr_regs * 32);
217	prcm_irq_setup->base_irq = 0;
218}
219
220void omap_prcm_irq_prepare(void)
221{
222	prcm_irq_setup->suspended = true;
223}
224
225void omap_prcm_irq_complete(void)
226{
227	prcm_irq_setup->suspended = false;
228
229	/* If we have not saved the masks, do not attempt to restore */
230	if (!prcm_irq_setup->suspend_save_flag)
231		return;
232
233	prcm_irq_setup->suspend_save_flag = false;
234
235	/*
236	 * Re-enable all masked PRCM irq sources, this causes the PRCM
237	 * interrupt to fire immediately if the events were masked
238	 * previously in the chain handler
239	 */
240	prcm_irq_setup->restore_irqen(prcm_irq_setup->saved_mask);
241}
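
The prepare()/complete() pair above is intended to bracket the suspend path so that PRCM events are masked while the system goes down and replayed on resume. A rough sketch of that ordering, with the platform callbacks named only for illustration (suspend_state_t comes from <linux/suspend.h>):

/* Illustrative ordering only; the real callers live in the OMAP PM suspend code. */
static int example_pm_begin(suspend_state_t state)
{
	omap_prcm_irq_prepare();	/* chain handler will mask and save IRQ enables */
	return 0;
}

static void example_pm_end(void)
{
	omap_prcm_irq_complete();	/* restore masks; masked events fire immediately */
}
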
242
243/**
244 * omap_prcm_register_chain_handler - initializes the prcm chained interrupt
245 * handler based on provided parameters
246 * @irq_setup: hardware data about the underlying PRM/PRCM
247 *
248 * Set up the PRCM chained interrupt handler on the PRCM IRQ.  Sets up
249 * one generic IRQ chip per PRM interrupt status/enable register pair.
250 * Returns 0 upon success, -EINVAL if called twice or if invalid
251 * arguments are passed, or -ENOMEM on any other error.
252 */
253int omap_prcm_register_chain_handler(struct omap_prcm_irq_setup *irq_setup)
254{
255	int nr_regs;
256	u32 mask[OMAP_PRCM_MAX_NR_PENDING_REG];
257	int offset, i;
258	struct irq_chip_generic *gc;
259	struct irq_chip_type *ct;
260
261	if (!irq_setup)
262		return -EINVAL;
263
264	nr_regs = irq_setup->nr_regs;
265
266	if (prcm_irq_setup) {
267		pr_err("PRCM: already initialized; won't reinitialize\n");
268		return -EINVAL;
269	}
270
271	if (nr_regs > OMAP_PRCM_MAX_NR_PENDING_REG) {
272		pr_err("PRCM: nr_regs too large\n");
273		return -EINVAL;
274	}
275
276	prcm_irq_setup = irq_setup;
277
278	prcm_irq_chips = kzalloc(sizeof(void *) * nr_regs, GFP_KERNEL);
279	prcm_irq_setup->saved_mask = kzalloc(sizeof(u32) * nr_regs, GFP_KERNEL);
280	prcm_irq_setup->priority_mask = kzalloc(sizeof(u32) * nr_regs,
281		GFP_KERNEL);
282
283	if (!prcm_irq_chips || !prcm_irq_setup->saved_mask ||
284	    !prcm_irq_setup->priority_mask) {
285		pr_err("PRCM: kzalloc failed\n");
286		goto err;
287	}
288
289	memset(mask, 0, sizeof(mask));
290
291	for (i = 0; i < irq_setup->nr_irqs; i++) {
292		offset = irq_setup->irqs[i].offset;
293		mask[offset >> 5] |= 1 << (offset & 0x1f);
294		if (irq_setup->irqs[i].priority)
295			irq_setup->priority_mask[offset >> 5] |=
296				1 << (offset & 0x1f);
297	}
298
299	irq_set_chained_handler(irq_setup->irq, omap_prcm_irq_handler);
300
301	irq_setup->base_irq = irq_alloc_descs(-1, 0, irq_setup->nr_regs * 32,
302		0);
303
304	if (irq_setup->base_irq < 0) {
305		pr_err("PRCM: failed to allocate irq descs: %d\n",
306			irq_setup->base_irq);
307		goto err;
308	}
309
310	for (i = 0; i < irq_setup->nr_regs; i++) {
311		gc = irq_alloc_generic_chip("PRCM", 1,
312			irq_setup->base_irq + i * 32, prm_base,
313			handle_level_irq);
314
315		if (!gc) {
316			pr_err("PRCM: failed to allocate generic chip\n");
317			goto err;
318		}
319		ct = gc->chip_types;
320		ct->chip.irq_ack = irq_gc_ack_set_bit;
321		ct->chip.irq_mask = irq_gc_mask_clr_bit;
322		ct->chip.irq_unmask = irq_gc_mask_set_bit;
323
324		ct->regs.ack = irq_setup->ack + i * 4;
325		ct->regs.mask = irq_setup->mask + i * 4;
326
327		irq_setup_generic_chip(gc, mask[i], 0, IRQ_NOREQUEST, 0);
328		prcm_irq_chips[i] = gc;
329	}
330
331	if (of_have_populated_dt()) {
332		int irq = omap_prcm_event_to_irq("io");
333		if (cpu_is_omap34xx())
334			omap_pcs_legacy_init(irq,
335				omap3xxx_prm_reconfigure_io_chain);
336		else
337			omap_pcs_legacy_init(irq,
338				omap44xx_prm_reconfigure_io_chain);
339	}
340
341	return 0;
342
343err:
344	omap_prcm_irq_cleanup();
345	return -ENOMEM;
346}
347
348/**
349 * omap2_set_globals_prm - set the PRM base address (for early use)
350 * @prm: PRM base virtual address
351 *
352 * XXX Will be replaced when the PRM/CM drivers are completed.
353 */
354void __init omap2_set_globals_prm(void __iomem *prm)
355{
356	prm_base = prm;
357}
358
359/**
360 * prm_read_reset_sources - return the sources of the SoC's last reset
361 *
362 * Return a u32 bitmask representing the reset sources that caused the
363 * SoC to reset.  The low-level per-SoC functions called by this
364 * function remap the SoC-specific reset source bits into an
365 * OMAP-common set of reset source bits, defined in
366 * arch/arm/mach-omap2/prm.h.  Returns the standardized reset source
367 * u32 bitmask from the hardware upon success, or returns (1 <<
368 * OMAP_UNKNOWN_RST_SRC_ID_SHIFT) if no low-level read_reset_sources()
369 * function was registered.
370 */
371u32 prm_read_reset_sources(void)
372{
373	u32 ret = 1 << OMAP_UNKNOWN_RST_SRC_ID_SHIFT;
374
375	if (prm_ll_data->read_reset_sources)
376		ret = prm_ll_data->read_reset_sources();
377	else
378		WARN_ONCE(1, "prm: %s: no mapping function defined for reset sources\n", __func__);
379
380	return ret;
381}
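
The bitmask uses the OMAP-common *_RST_SRC_ID_SHIFT definitions from prm.h. A small decoding sketch; OMAP_GLOBAL_WARM_RST_SRC_ID_SHIFT is assumed from that header:

/* Sketch only: the warm-reset shift name is assumed from arch/arm/mach-omap2/prm.h. */
static void example_log_reset_reason(void)
{
	u32 src = prm_read_reset_sources();

	if (src & (1 << OMAP_UNKNOWN_RST_SRC_ID_SHIFT))
		pr_info("PRM: reset source unknown (no SoC handler registered)\n");
	else if (src & (1 << OMAP_GLOBAL_WARM_RST_SRC_ID_SHIFT))
		pr_info("PRM: last reset was a global warm reset\n");
	else
		pr_info("PRM: reset sources 0x%08x\n", src);
}
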
382
383/**
384 * prm_was_any_context_lost_old - was device context lost? (old API)
385 * @part: PRM partition ID (e.g., OMAP4430_PRM_PARTITION)
386 * @inst: PRM instance offset (e.g., OMAP4430_PRM_MPU_INST)
387 * @idx: CONTEXT register offset
388 *
389 * Return 1 if any bits were set in the *_CONTEXT_* register
390 * identified by (@part, @inst, @idx), which means that some context
391 * was lost for that module; otherwise, return 0.  XXX Deprecated;
392 * callers need to use a less-SoC-dependent way to identify hardware
393 * IP blocks.
394 */
395bool prm_was_any_context_lost_old(u8 part, s16 inst, u16 idx)
396{
397	bool ret = true;
398
399	if (prm_ll_data->was_any_context_lost_old)
400		ret = prm_ll_data->was_any_context_lost_old(part, inst, idx);
401	else
402		WARN_ONCE(1, "prm: %s: no mapping function defined\n",
403			  __func__);
404
405	return ret;
406}
407
408/**
409 * prm_clear_context_loss_flags_old - clear context loss flags (old API)
410 * @part: PRM partition ID (e.g., OMAP4430_PRM_PARTITION)
411 * @inst: PRM instance offset (e.g., OMAP4430_PRM_MPU_INST)
412 * @idx: CONTEXT register offset
413 *
414 * Clear hardware context loss bits for the module identified by
415 * (@part, @inst, @idx).  No return value.  XXX Deprecated; callers
416 * need to use a less-SoC-dependent way to identify hardware IP
417 * blocks.
418 */
419void prm_clear_context_loss_flags_old(u8 part, s16 inst, u16 idx)
420{
421	if (prm_ll_data->clear_context_loss_flags_old)
422		prm_ll_data->clear_context_loss_flags_old(part, inst, idx);
423	else
424		WARN_ONCE(1, "prm: %s: no mapping function defined\n",
425			  __func__);
426}
427
428/**
429 * prm_register - register per-SoC low-level data with the PRM
430 * @pld: low-level per-SoC OMAP PRM data & function pointers to register
431 *
432 * Register per-SoC low-level OMAP PRM data and function pointers with
433 * the OMAP PRM common interface.  The caller must keep the data
434 * pointed to by @pld valid until it calls prm_unregister() and
435 * it returns successfully.  Returns 0 upon success, -EINVAL if @pld
436 * is NULL, or -EEXIST if prm_register() has already been called
437 * without an intervening prm_unregister().
438 */
439int prm_register(struct prm_ll_data *pld)
440{
441	if (!pld)
442		return -EINVAL;
443
444	if (prm_ll_data != &null_prm_ll_data)
445		return -EEXIST;
446
447	prm_ll_data = pld;
448
449	return 0;
450}
451
452/**
453 * prm_unregister - unregister per-SoC low-level data & function pointers
454 * @pld: low-level per-SoC OMAP PRM data & function pointers to unregister
455 *
456 * Unregister per-SoC low-level OMAP PRM data and function pointers
457 * that were previously registered with prm_register().  The
458 * caller may not destroy any of the data pointed to by @pld until
459 * this function returns successfully.  Returns 0 upon success, or
460 * -EINVAL if @pld is NULL or if @pld does not match the struct
461 * prm_ll_data * previously registered by prm_register().
462 */
463int prm_unregister(struct prm_ll_data *pld)
464{
465	if (!pld || prm_ll_data != pld)
466		return -EINVAL;
467
468	prm_ll_data = &null_prm_ll_data;
469
470	return 0;
471}
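
A sketch of the registration handshake a per-SoC PRM file performs against this interface; the names below are placeholders rather than the actual OMAP3/OMAP4 implementations:

/* Hypothetical per-SoC registration; all names here are placeholders. */
static u32 example_soc_read_reset_sources(void)
{
	/* read the SoC's RSTST register and remap it to the common bits */
	return 1 << OMAP_UNKNOWN_RST_SRC_ID_SHIFT;
}

static struct prm_ll_data example_soc_prm_ll_data = {
	.read_reset_sources = &example_soc_read_reset_sources,
};

static int __init example_soc_prm_init(void)
{
	return prm_register(&example_soc_prm_ll_data);	/* -EEXIST if already registered */
}
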
472
473static struct of_device_id omap_prcm_dt_match_table[] = {
474	{ .compatible = "ti,am3-prcm" },
475	{ .compatible = "ti,am3-scrm" },
476	{ .compatible = "ti,am4-prcm" },
477	{ .compatible = "ti,am4-scrm" },
478	{ .compatible = "ti,omap3-prm" },
479	{ .compatible = "ti,omap3-cm" },
480	{ .compatible = "ti,omap3-scrm" },
481	{ .compatible = "ti,omap4-cm1" },
482	{ .compatible = "ti,omap4-prm" },
483	{ .compatible = "ti,omap4-cm2" },
484	{ .compatible = "ti,omap4-scrm" },
485	{ .compatible = "ti,omap5-prm" },
486	{ .compatible = "ti,omap5-cm-core-aon" },
487	{ .compatible = "ti,omap5-scrm" },
488	{ .compatible = "ti,omap5-cm-core" },
489	{ .compatible = "ti,dra7-prm" },
490	{ .compatible = "ti,dra7-cm-core-aon" },
491	{ .compatible = "ti,dra7-cm-core" },
492	{ }
493};
494
495static struct clk_hw_omap memmap_dummy_ck = {
496	.flags = MEMMAP_ADDRESSING,
497};
498
499static u32 prm_clk_readl(void __iomem *reg)
500{
501	return omap2_clk_readl(&memmap_dummy_ck, reg);
502}
503
504static void prm_clk_writel(u32 val, void __iomem *reg)
505{
506	omap2_clk_writel(val, &memmap_dummy_ck, reg);
507}
508
509static struct ti_clk_ll_ops omap_clk_ll_ops = {
510	.clk_readl = prm_clk_readl,
511	.clk_writel = prm_clk_writel,
512};
513
514int __init of_prcm_init(void)
515{
516	struct device_node *np;
517	void __iomem *mem;
518	int memmap_index = 0;
519
520	ti_clk_ll_ops = &omap_clk_ll_ops;
521
522	for_each_matching_node(np, omap_prcm_dt_match_table) {
523		mem = of_iomap(np, 0);
524		clk_memmaps[memmap_index] = mem;
525		ti_dt_clk_init_provider(np, memmap_index);
526		memmap_index++;
527	}
528
529	ti_dt_clockdomains_setup();
530
531	return 0;
532}
v4.6
  1/*
  2 * OMAP2+ common Power & Reset Management (PRM) IP block functions
  3 *
  4 * Copyright (C) 2011 Texas Instruments, Inc.
  5 * Tero Kristo <t-kristo@ti.com>
  6 *
  7 * This program is free software; you can redistribute it and/or modify
  8 * it under the terms of the GNU General Public License version 2 as
  9 * published by the Free Software Foundation.
 10 *
 11 *
 12 * For historical purposes, the API used to configure the PRM
 13 * interrupt handler refers to it as the "PRCM interrupt."  The
 14 * underlying registers are located in the PRM on OMAP3/4.
 15 *
 16 * XXX This code should eventually be moved to a PRM driver.
 17 */
 18
 19#include <linux/kernel.h>
 20#include <linux/module.h>
 21#include <linux/init.h>
 22#include <linux/io.h>
 23#include <linux/irq.h>
 24#include <linux/interrupt.h>
 25#include <linux/slab.h>
 26#include <linux/of.h>
 27#include <linux/of_address.h>
 28#include <linux/clk-provider.h>
 29#include <linux/clk/ti.h>
 30
 31#include "soc.h"
 32#include "prm2xxx_3xxx.h"
 33#include "prm2xxx.h"
 34#include "prm3xxx.h"
 35#include "prm33xx.h"
 36#include "prm44xx.h"
 37#include "prm54xx.h"
 38#include "prm7xx.h"
 39#include "prcm43xx.h"
 40#include "common.h"
 41#include "clock.h"
 42#include "cm.h"
 43#include "control.h"
 44
 45/*
 46 * OMAP_PRCM_MAX_NR_PENDING_REG: maximum number of PRM_IRQ*_MPU regs
 47 * XXX this is technically not needed, since
 48 * omap_prcm_register_chain_handler() could allocate this based on the
 49 * actual amount of memory needed for the SoC
 50 */
 51#define OMAP_PRCM_MAX_NR_PENDING_REG		2
 52
 53/*
 54 * prcm_irq_chips: an array of all of the "generic IRQ chips" in use
 55 * by the PRCM interrupt handler code.  There will be one 'chip' per
 56 * PRM_{IRQSTATUS,IRQENABLE}_MPU register pair.  (So OMAP3 will have
 57 * one "chip" and OMAP4 will have two.)
 58 */
 59static struct irq_chip_generic **prcm_irq_chips;
 60
 61/*
 62 * prcm_irq_setup: the PRCM IRQ parameters for the hardware the code
 63 * is currently running on.  Defined and passed by initialization code
 64 * that calls omap_prcm_register_chain_handler().
 65 */
 66static struct omap_prcm_irq_setup *prcm_irq_setup;
 67
 68/* prm_base: base virtual address of the PRM IP block */
 69void __iomem *prm_base;
 70
 71u16 prm_features;
 72
 73/*
 74 * prm_ll_data: function pointers to SoC-specific implementations of
 75 * common PRM functions
 76 */
 77static struct prm_ll_data null_prm_ll_data;
 78static struct prm_ll_data *prm_ll_data = &null_prm_ll_data;
 79
 80/* Private functions */
 81
 82/*
 83 * Move priority events from events to priority_events array
 84 */
 85static void omap_prcm_events_filter_priority(unsigned long *events,
 86	unsigned long *priority_events)
 87{
 88	int i;
 89
 90	for (i = 0; i < prcm_irq_setup->nr_regs; i++) {
 91		priority_events[i] =
 92			events[i] & prcm_irq_setup->priority_mask[i];
 93		events[i] ^= priority_events[i];
 94	}
 95}
 96
 97/*
 98 * PRCM Interrupt Handler
 99 *
100 * This is a common handler for the OMAP PRCM interrupts. Pending
101 * interrupts are detected by a call to the SoC's read_pending_irqs hook and
102 * dispatched accordingly. Clearing of the wakeup events should be
103 * done by the SoC specific individual handlers.
104 */
105static void omap_prcm_irq_handler(struct irq_desc *desc)
106{
107	unsigned long pending[OMAP_PRCM_MAX_NR_PENDING_REG];
108	unsigned long priority_pending[OMAP_PRCM_MAX_NR_PENDING_REG];
109	struct irq_chip *chip = irq_desc_get_chip(desc);
110	unsigned int virtirq;
111	int nr_irq = prcm_irq_setup->nr_regs * 32;
112
113	/*
114	 * If we are suspended, mask all interrupts from PRCM level,
115	 * this does not ack them, and they will be pending until we
116	 * re-enable the interrupts, at which point the
117	 * omap_prcm_irq_handler will be executed again.  The
118	 * _save_and_clear_irqen() function must ensure that the PRM
119	 * write to disable all IRQs has reached the PRM before
120	 * returning, or spurious PRCM interrupts may occur during
121	 * suspend.
122	 */
123	if (prcm_irq_setup->suspended) {
124		prcm_irq_setup->save_and_clear_irqen(prcm_irq_setup->saved_mask);
125		prcm_irq_setup->suspend_save_flag = true;
126	}
127
128	/*
129	 * Loop until all pending irqs are handled, since
130	 * generic_handle_irq() can cause new irqs to come
131	 */
132	while (!prcm_irq_setup->suspended) {
133		prcm_irq_setup->read_pending_irqs(pending);
134
135		/* No bit set, then all IRQs are handled */
136		if (find_first_bit(pending, nr_irq) >= nr_irq)
137			break;
138
139		omap_prcm_events_filter_priority(pending, priority_pending);
140
141		/*
142		 * Loop on all currently pending irqs so that new irqs
143		 * cannot starve previously pending irqs
144		 */
145
146		/* Serve priority events first */
147		for_each_set_bit(virtirq, priority_pending, nr_irq)
148			generic_handle_irq(prcm_irq_setup->base_irq + virtirq);
149
150		/* Serve normal events next */
151		for_each_set_bit(virtirq, pending, nr_irq)
152			generic_handle_irq(prcm_irq_setup->base_irq + virtirq);
153	}
154	if (chip->irq_ack)
155		chip->irq_ack(&desc->irq_data);
156	if (chip->irq_eoi)
157		chip->irq_eoi(&desc->irq_data);
158	chip->irq_unmask(&desc->irq_data);
159
160	prcm_irq_setup->ocp_barrier(); /* avoid spurious IRQs */
161}
162
163/* Public functions */
164
165/**
166 * omap_prcm_event_to_irq - given a PRCM event name, returns the
167 * corresponding IRQ on which the handler should be registered
168 * @name: name of the PRCM interrupt bit to look up - see struct omap_prcm_irq
169 *
170 * Returns the Linux internal IRQ ID corresponding to @name upon success,
171 * or -ENOENT upon failure.
172 */
173int omap_prcm_event_to_irq(const char *name)
174{
175	int i;
176
177	if (!prcm_irq_setup || !name)
178		return -ENOENT;
179
180	for (i = 0; i < prcm_irq_setup->nr_irqs; i++)
181		if (!strcmp(prcm_irq_setup->irqs[i].name, name))
182			return prcm_irq_setup->base_irq +
183				prcm_irq_setup->irqs[i].offset;
184
185	return -ENOENT;
186}
187
188/**
189 * omap_prcm_irq_cleanup - reverse the memory allocation and other steps
190 * done by omap_prcm_register_chain_handler()
191 *
192 * No return value.
193 */
194void omap_prcm_irq_cleanup(void)
195{
196	unsigned int irq;
197	int i;
198
199	if (!prcm_irq_setup) {
200		pr_err("PRCM: IRQ handler not initialized; cannot cleanup\n");
201		return;
202	}
203
204	if (prcm_irq_chips) {
205		for (i = 0; i < prcm_irq_setup->nr_regs; i++) {
206			if (prcm_irq_chips[i])
207				irq_remove_generic_chip(prcm_irq_chips[i],
208					0xffffffff, 0, 0);
209			prcm_irq_chips[i] = NULL;
210		}
211		kfree(prcm_irq_chips);
212		prcm_irq_chips = NULL;
213	}
214
215	kfree(prcm_irq_setup->saved_mask);
216	prcm_irq_setup->saved_mask = NULL;
217
218	kfree(prcm_irq_setup->priority_mask);
219	prcm_irq_setup->priority_mask = NULL;
220
221	if (prcm_irq_setup->xlate_irq)
222		irq = prcm_irq_setup->xlate_irq(prcm_irq_setup->irq);
223	else
224		irq = prcm_irq_setup->irq;
225	irq_set_chained_handler(irq, NULL);
226
227	if (prcm_irq_setup->base_irq > 0)
228		irq_free_descs(prcm_irq_setup->base_irq,
229			prcm_irq_setup->nr_regs * 32);
230	prcm_irq_setup->base_irq = 0;
231}
232
233void omap_prcm_irq_prepare(void)
234{
235	prcm_irq_setup->suspended = true;
236}
237
238void omap_prcm_irq_complete(void)
239{
240	prcm_irq_setup->suspended = false;
241
242	/* If we have not saved the masks, do not attempt to restore */
243	if (!prcm_irq_setup->suspend_save_flag)
244		return;
245
246	prcm_irq_setup->suspend_save_flag = false;
247
248	/*
249	 * Re-enable all masked PRCM irq sources, this causes the PRCM
250	 * interrupt to fire immediately if the events were masked
251	 * previously in the chain handler
252	 */
253	prcm_irq_setup->restore_irqen(prcm_irq_setup->saved_mask);
254}
255
256/**
257 * omap_prcm_register_chain_handler - initializes the prcm chained interrupt
258 * handler based on provided parameters
259 * @irq_setup: hardware data about the underlying PRM/PRCM
260 *
261 * Set up the PRCM chained interrupt handler on the PRCM IRQ.  Sets up
262 * one generic IRQ chip per PRM interrupt status/enable register pair.
263 * Returns 0 upon success, -EINVAL if called twice or if invalid
264 * arguments are passed, or -ENOMEM on any other error.
265 */
266int omap_prcm_register_chain_handler(struct omap_prcm_irq_setup *irq_setup)
267{
268	int nr_regs;
269	u32 mask[OMAP_PRCM_MAX_NR_PENDING_REG];
270	int offset, i;
271	struct irq_chip_generic *gc;
272	struct irq_chip_type *ct;
273	unsigned int irq;
274
275	if (!irq_setup)
276		return -EINVAL;
277
278	nr_regs = irq_setup->nr_regs;
279
280	if (prcm_irq_setup) {
281		pr_err("PRCM: already initialized; won't reinitialize\n");
282		return -EINVAL;
283	}
284
285	if (nr_regs > OMAP_PRCM_MAX_NR_PENDING_REG) {
286		pr_err("PRCM: nr_regs too large\n");
287		return -EINVAL;
288	}
289
290	prcm_irq_setup = irq_setup;
291
292	prcm_irq_chips = kzalloc(sizeof(void *) * nr_regs, GFP_KERNEL);
293	prcm_irq_setup->saved_mask = kzalloc(sizeof(u32) * nr_regs, GFP_KERNEL);
294	prcm_irq_setup->priority_mask = kzalloc(sizeof(u32) * nr_regs,
295		GFP_KERNEL);
296
297	if (!prcm_irq_chips || !prcm_irq_setup->saved_mask ||
298	    !prcm_irq_setup->priority_mask) {
299		pr_err("PRCM: kzalloc failed\n");
300		goto err;
301	}
302
303	memset(mask, 0, sizeof(mask));
304
305	for (i = 0; i < irq_setup->nr_irqs; i++) {
306		offset = irq_setup->irqs[i].offset;
307		mask[offset >> 5] |= 1 << (offset & 0x1f);
308		if (irq_setup->irqs[i].priority)
309			irq_setup->priority_mask[offset >> 5] |=
310				1 << (offset & 0x1f);
311	}
312
313	if (irq_setup->xlate_irq)
314		irq = irq_setup->xlate_irq(irq_setup->irq);
315	else
316		irq = irq_setup->irq;
317	irq_set_chained_handler(irq, omap_prcm_irq_handler);
318
319	irq_setup->base_irq = irq_alloc_descs(-1, 0, irq_setup->nr_regs * 32,
320		0);
321
322	if (irq_setup->base_irq < 0) {
323		pr_err("PRCM: failed to allocate irq descs: %d\n",
324			irq_setup->base_irq);
325		goto err;
326	}
327
328	for (i = 0; i < irq_setup->nr_regs; i++) {
329		gc = irq_alloc_generic_chip("PRCM", 1,
330			irq_setup->base_irq + i * 32, prm_base,
331			handle_level_irq);
332
333		if (!gc) {
334			pr_err("PRCM: failed to allocate generic chip\n");
335			goto err;
336		}
337		ct = gc->chip_types;
338		ct->chip.irq_ack = irq_gc_ack_set_bit;
339		ct->chip.irq_mask = irq_gc_mask_clr_bit;
340		ct->chip.irq_unmask = irq_gc_mask_set_bit;
341
342		ct->regs.ack = irq_setup->ack + i * 4;
343		ct->regs.mask = irq_setup->mask + i * 4;
344
345		irq_setup_generic_chip(gc, mask[i], 0, IRQ_NOREQUEST, 0);
346		prcm_irq_chips[i] = gc;
347	}
348
349	if (of_have_populated_dt()) {
350		int irq = omap_prcm_event_to_irq("io");
351		omap_pcs_legacy_init(irq, irq_setup->reconfigure_io_chain);
352	}
353
354	return 0;
355
356err:
357	omap_prcm_irq_cleanup();
358	return -ENOMEM;
359}
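
For reference, a sketch of the kind of omap_prcm_irq_setup a SoC might pass in; the register offsets, IRQ number, and hook implementations are assumptions modeled loosely on the OMAP3 data, not copied from it:

/* Illustrative setup data; real definitions live in the per-SoC prm*xxx.c files. */
static void example_read_pending_irqs(unsigned long *events) { events[0] = 0; }
static void example_ocp_barrier(void) { }
static void example_save_and_clear_irqen(u32 *mask) { }
static void example_restore_irqen(u32 *mask) { }
static void example_reconfigure_io_chain(void) { }

static struct omap_prcm_irq example_prcm_irqs[] = {
	{ .name = "wkup", .offset = 0, .priority = false },
	{ .name = "io",   .offset = 9, .priority = true },	/* served first */
};

static struct omap_prcm_irq_setup example_prcm_irq_setup = {
	.ack			= 0x18,	/* PRM_IRQSTATUS_MPU offset (example value) */
	.mask			= 0x1c,	/* PRM_IRQENABLE_MPU offset (example value) */
	.nr_regs		= 1,
	.irqs			= example_prcm_irqs,
	.nr_irqs		= ARRAY_SIZE(example_prcm_irqs),
	.irq			= 11,	/* MPU interrupt line for the PRCM IRQ (example) */
	.read_pending_irqs	= &example_read_pending_irqs,
	.ocp_barrier		= &example_ocp_barrier,
	.save_and_clear_irqen	= &example_save_and_clear_irqen,
	.restore_irqen		= &example_restore_irqen,
	.reconfigure_io_chain	= &example_reconfigure_io_chain,
};

/* SoC init code would then call omap_prcm_register_chain_handler(&example_prcm_irq_setup). */
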
360
361/**
362 * omap2_set_globals_prm - set the PRM base address (for early use)
363 * @prm: PRM base virtual address
364 *
365 * XXX Will be replaced when the PRM/CM drivers are completed.
366 */
367void __init omap2_set_globals_prm(void __iomem *prm)
368{
369	prm_base = prm;
370}
371
372/**
373 * prm_read_reset_sources - return the sources of the SoC's last reset
374 *
375 * Return a u32 bitmask representing the reset sources that caused the
376 * SoC to reset.  The low-level per-SoC functions called by this
377 * function remap the SoC-specific reset source bits into an
378 * OMAP-common set of reset source bits, defined in
379 * arch/arm/mach-omap2/prm.h.  Returns the standardized reset source
380 * u32 bitmask from the hardware upon success, or returns (1 <<
381 * OMAP_UNKNOWN_RST_SRC_ID_SHIFT) if no low-level read_reset_sources()
382 * function was registered.
383 */
384u32 prm_read_reset_sources(void)
385{
386	u32 ret = 1 << OMAP_UNKNOWN_RST_SRC_ID_SHIFT;
387
388	if (prm_ll_data->read_reset_sources)
389		ret = prm_ll_data->read_reset_sources();
390	else
391		WARN_ONCE(1, "prm: %s: no mapping function defined for reset sources\n", __func__);
392
393	return ret;
394}
395
396/**
397 * prm_was_any_context_lost_old - was device context lost? (old API)
398 * @part: PRM partition ID (e.g., OMAP4430_PRM_PARTITION)
399 * @inst: PRM instance offset (e.g., OMAP4430_PRM_MPU_INST)
400 * @idx: CONTEXT register offset
401 *
402 * Return 1 if any bits were set in the *_CONTEXT_* register
403 * identified by (@part, @inst, @idx), which means that some context
404 * was lost for that module; otherwise, return 0.  XXX Deprecated;
405 * callers need to use a less-SoC-dependent way to identify hardware
406 * IP blocks.
407 */
408bool prm_was_any_context_lost_old(u8 part, s16 inst, u16 idx)
409{
410	bool ret = true;
411
412	if (prm_ll_data->was_any_context_lost_old)
413		ret = prm_ll_data->was_any_context_lost_old(part, inst, idx);
414	else
415		WARN_ONCE(1, "prm: %s: no mapping function defined\n",
416			  __func__);
417
418	return ret;
419}
420
421/**
422 * prm_clear_context_loss_flags_old - clear context loss flags (old API)
423 * @part: PRM partition ID (e.g., OMAP4430_PRM_PARTITION)
424 * @inst: PRM instance offset (e.g., OMAP4430_PRM_MPU_INST)
425 * @idx: CONTEXT register offset
426 *
427 * Clear hardware context loss bits for the module identified by
428 * (@part, @inst, @idx).  No return value.  XXX Deprecated; callers
429 * need to use a less-SoC-dependent way to identify hardware IP
430 * blocks.
431 */
432void prm_clear_context_loss_flags_old(u8 part, s16 inst, u16 idx)
433{
434	if (prm_ll_data->clear_context_loss_flags_old)
435		prm_ll_data->clear_context_loss_flags_old(part, inst, idx);
436	else
437		WARN_ONCE(1, "prm: %s: no mapping function defined\n",
438			  __func__);
439}
440
441/**
442 * omap_prm_assert_hardreset - assert hardreset for an IP block
443 * @shift: register bit shift corresponding to the reset line
444 * @part: PRM partition
445 * @prm_mod: PRM submodule base or instance offset
446 * @offset: register offset
447 *
448 * Asserts a hardware reset line for an IP block.
449 */
450int omap_prm_assert_hardreset(u8 shift, u8 part, s16 prm_mod, u16 offset)
451{
452	if (!prm_ll_data->assert_hardreset) {
453		WARN_ONCE(1, "prm: %s: no mapping function defined\n",
454			  __func__);
455		return -EINVAL;
456	}
457
458	return prm_ll_data->assert_hardreset(shift, part, prm_mod, offset);
459}
460
461/**
462 * omap_prm_deassert_hardreset - deassert hardreset for an IP block
463 * @shift: register bit shift corresponding to the reset line
464 * @st_shift: reset status bit shift corresponding to the reset line
465 * @part: PRM partition
466 * @prm_mod: PRM submodule base or instance offset
467 * @offset: register offset
468 * @st_offset: status register offset
469 *
470 * Deasserts a hardware reset line for an IP block.
471 */
472int omap_prm_deassert_hardreset(u8 shift, u8 st_shift, u8 part, s16 prm_mod,
473				u16 offset, u16 st_offset)
474{
475	if (!prm_ll_data->deassert_hardreset) {
476		WARN_ONCE(1, "prm: %s: no mapping function defined\n",
477			  __func__);
478		return -EINVAL;
479	}
480
481	return prm_ll_data->deassert_hardreset(shift, st_shift, part, prm_mod,
482					       offset, st_offset);
483}
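
A sketch of driving a reset line through these wrappers; the partition, instance, offset, and shift values below are placeholders for whatever the SoC reset data actually defines:

/* Placeholder register coordinates; real values come from the hwmod/PRM data. */
static int example_reset_ip_block(void)
{
	int ret;

	/* hold the IP block in reset: line 0 in partition 1, RSTCTRL at offset 0x10 */
	ret = omap_prm_assert_hardreset(0, 1, 0x1100, 0x10);
	if (ret)
		return ret;

	/* ... reprogram the IP block while its reset line is asserted ... */

	/* release the line and wait on status bit 0 in RSTST at offset 0x14 */
	return omap_prm_deassert_hardreset(0, 0, 1, 0x1100, 0x10, 0x14);
}
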
484
485/**
486 * omap_prm_is_hardreset_asserted - check the hardreset status for an IP block
487 * @shift: register bit shift corresponding to the reset line
488 * @part: PRM partition
489 * @prm_mod: PRM submodule base or instance offset
490 * @offset: register offset
491 *
492 * Checks if a hardware reset line for an IP block is enabled or not.
493 */
494int omap_prm_is_hardreset_asserted(u8 shift, u8 part, s16 prm_mod, u16 offset)
495{
496	if (!prm_ll_data->is_hardreset_asserted) {
497		WARN_ONCE(1, "prm: %s: no mapping function defined\n",
498			  __func__);
499		return -EINVAL;
500	}
501
502	return prm_ll_data->is_hardreset_asserted(shift, part, prm_mod, offset);
503}
504
505/**
506 * omap_prm_reconfigure_io_chain - clear latches and reconfigure I/O chain
507 *
508 * Clear any previously-latched I/O wakeup events and ensure that the
509 * I/O wakeup gates are aligned with the current mux settings.
510 * Calls SoC specific I/O chain reconfigure function if available,
511 * otherwise does nothing.
512 */
513void omap_prm_reconfigure_io_chain(void)
514{
515	if (!prcm_irq_setup || !prcm_irq_setup->reconfigure_io_chain)
516		return;
517
518	prcm_irq_setup->reconfigure_io_chain();
519}
520
521/**
522 * omap_prm_reset_system - trigger global SW reset
523 *
524 * Triggers SoC specific global warm reset to reboot the device.
525 */
526void omap_prm_reset_system(void)
527{
528	if (!prm_ll_data->reset_system) {
529		WARN_ONCE(1, "prm: %s: no mapping function defined\n",
530			  __func__);
531		return;
532	}
533
534	prm_ll_data->reset_system();
535
536	while (1)
537		cpu_relax();
538}
539
540/**
541 * omap_prm_clear_mod_irqs - clear wake-up events from PRCM interrupt
542 * @module: PRM module to clear wakeups from
543 * @regs: register to clear
544 * @wkst_mask: wkst bits to clear
545 *
546 * Clears any wakeup events for the module and register set defined.
547 * Uses SoC specific implementation to do the actual wakeup status
548 * clearing.
549 */
550int omap_prm_clear_mod_irqs(s16 module, u8 regs, u32 wkst_mask)
551{
552	if (!prm_ll_data->clear_mod_irqs) {
553		WARN_ONCE(1, "prm: %s: no mapping function defined\n",
554			  __func__);
555		return -EINVAL;
556	}
557
558	return prm_ll_data->clear_mod_irqs(module, regs, wkst_mask);
559}
560
561/**
562 * omap_prm_vp_check_txdone - check voltage processor TX done status
563 *
564 * Checks if voltage processor transmission has been completed.
565 * Returns non-zero if a transmission has completed, 0 otherwise.
566 */
567u32 omap_prm_vp_check_txdone(u8 vp_id)
568{
569	if (!prm_ll_data->vp_check_txdone) {
570		WARN_ONCE(1, "prm: %s: no mapping function defined\n",
571			  __func__);
572		return 0;
573	}
574
575	return prm_ll_data->vp_check_txdone(vp_id);
576}
577
578/**
579 * omap_prm_vp_clear_txdone - clears voltage processor TX done status
580 *
581 * Clears the status bit for completed voltage processor transmission
582 * returned by prm_vp_check_txdone.
583 */
584void omap_prm_vp_clear_txdone(u8 vp_id)
585{
586	if (!prm_ll_data->vp_clear_txdone) {
587		WARN_ONCE(1, "prm: %s: no mapping function defined\n",
588			  __func__);
589		return;
590	}
591
592	prm_ll_data->vp_clear_txdone(vp_id);
593}
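
The two voltage-processor helpers above are typically used as a poll-then-clear pair. A bounded-polling sketch; a real caller would rely on the voltage layer's own timeout handling:

/* Sketch of the poll-then-clear pattern; the retry bound is arbitrary. */
static int example_vp_wait_txdone(u8 vp_id)
{
	int timeout = 1000;

	while (!omap_prm_vp_check_txdone(vp_id)) {
		if (!--timeout)
			return -ETIMEDOUT;
		cpu_relax();
	}

	omap_prm_vp_clear_txdone(vp_id);

	return 0;
}
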
594
595/**
596 * prm_register - register per-SoC low-level data with the PRM
597 * @pld: low-level per-SoC OMAP PRM data & function pointers to register
598 *
599 * Register per-SoC low-level OMAP PRM data and function pointers with
600 * the OMAP PRM common interface.  The caller must keep the data
601 * pointed to by @pld valid until it calls prm_unregister() and
602 * it returns successfully.  Returns 0 upon success, -EINVAL if @pld
603 * is NULL, or -EEXIST if prm_register() has already been called
604 * without an intervening prm_unregister().
605 */
606int prm_register(struct prm_ll_data *pld)
607{
608	if (!pld)
609		return -EINVAL;
610
611	if (prm_ll_data != &null_prm_ll_data)
612		return -EEXIST;
613
614	prm_ll_data = pld;
615
616	return 0;
617}
618
619/**
620 * prm_unregister - unregister per-SoC low-level data & function pointers
621 * @pld: low-level per-SoC OMAP PRM data & function pointers to unregister
622 *
623 * Unregister per-SoC low-level OMAP PRM data and function pointers
624 * that were previously registered with prm_register().  The
625 * caller may not destroy any of the data pointed to by @pld until
626 * this function returns successfully.  Returns 0 upon success, or
627 * -EINVAL if @pld is NULL or if @pld does not match the struct
628 * prm_ll_data * previously registered by prm_register().
629 */
630int prm_unregister(struct prm_ll_data *pld)
631{
632	if (!pld || prm_ll_data != pld)
633		return -EINVAL;
634
635	prm_ll_data = &null_prm_ll_data;
636
637	return 0;
638}
639
640#ifdef CONFIG_ARCH_OMAP2
641static struct omap_prcm_init_data omap2_prm_data __initdata = {
642	.index = TI_CLKM_PRM,
643	.init = omap2xxx_prm_init,
644};
645#endif
646
647#ifdef CONFIG_ARCH_OMAP3
648static struct omap_prcm_init_data omap3_prm_data __initdata = {
649	.index = TI_CLKM_PRM,
650	.init = omap3xxx_prm_init,
651
652	/*
653	 * IVA2 offset is a negative value, must offset the prm_base
654	 * address by this to get it to positive
655	 */
656	.offset = -OMAP3430_IVA2_MOD,
657};
658#endif
659
660#if defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_TI81XX)
661static struct omap_prcm_init_data am3_prm_data __initdata = {
662	.index = TI_CLKM_PRM,
663	.init = am33xx_prm_init,
664};
665#endif
666
667#ifdef CONFIG_SOC_TI81XX
668static struct omap_prcm_init_data dm814_pllss_data __initdata = {
669	.index = TI_CLKM_PLLSS,
670	.init = am33xx_prm_init,
671};
672#endif
673
674#ifdef CONFIG_ARCH_OMAP4
675static struct omap_prcm_init_data omap4_prm_data __initdata = {
676	.index = TI_CLKM_PRM,
677	.init = omap44xx_prm_init,
678	.device_inst_offset = OMAP4430_PRM_DEVICE_INST,
679	.flags = PRM_HAS_IO_WAKEUP | PRM_HAS_VOLTAGE | PRM_IRQ_DEFAULT,
680};
681#endif
682
683#ifdef CONFIG_SOC_OMAP5
684static struct omap_prcm_init_data omap5_prm_data __initdata = {
685	.index = TI_CLKM_PRM,
686	.init = omap44xx_prm_init,
687	.device_inst_offset = OMAP54XX_PRM_DEVICE_INST,
688	.flags = PRM_HAS_IO_WAKEUP | PRM_HAS_VOLTAGE,
689};
690#endif
691
692#ifdef CONFIG_SOC_DRA7XX
693static struct omap_prcm_init_data dra7_prm_data __initdata = {
694	.index = TI_CLKM_PRM,
695	.init = omap44xx_prm_init,
696	.device_inst_offset = DRA7XX_PRM_DEVICE_INST,
697	.flags = PRM_HAS_IO_WAKEUP,
698};
699#endif
700
701#ifdef CONFIG_SOC_AM43XX
702static struct omap_prcm_init_data am4_prm_data __initdata = {
703	.index = TI_CLKM_PRM,
704	.init = omap44xx_prm_init,
705	.device_inst_offset = AM43XX_PRM_DEVICE_INST,
706	.flags = PRM_HAS_IO_WAKEUP,
707};
708#endif
709
710#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5)
711static struct omap_prcm_init_data scrm_data __initdata = {
712	.index = TI_CLKM_SCRM,
713};
714#endif
715
716static const struct of_device_id omap_prcm_dt_match_table[] __initconst = {
717#ifdef CONFIG_SOC_AM33XX
718	{ .compatible = "ti,am3-prcm", .data = &am3_prm_data },
719#endif
720#ifdef CONFIG_SOC_AM43XX
721	{ .compatible = "ti,am4-prcm", .data = &am4_prm_data },
722#endif
723#ifdef CONFIG_SOC_TI81XX
724	{ .compatible = "ti,dm814-prcm", .data = &am3_prm_data },
725	{ .compatible = "ti,dm814-pllss", .data = &dm814_pllss_data },
726	{ .compatible = "ti,dm816-prcm", .data = &am3_prm_data },
727#endif
728#ifdef CONFIG_ARCH_OMAP2
729	{ .compatible = "ti,omap2-prcm", .data = &omap2_prm_data },
730#endif
731#ifdef CONFIG_ARCH_OMAP3
732	{ .compatible = "ti,omap3-prm", .data = &omap3_prm_data },
733#endif
734#ifdef CONFIG_ARCH_OMAP4
735	{ .compatible = "ti,omap4-prm", .data = &omap4_prm_data },
736	{ .compatible = "ti,omap4-scrm", .data = &scrm_data },
737#endif
738#ifdef CONFIG_SOC_OMAP5
739	{ .compatible = "ti,omap5-prm", .data = &omap5_prm_data },
740	{ .compatible = "ti,omap5-scrm", .data = &scrm_data },
741#endif
742#ifdef CONFIG_SOC_DRA7XX
743	{ .compatible = "ti,dra7-prm", .data = &dra7_prm_data },
744#endif
745	{ }
746};
747
748/**
749 * omap2_prm_base_init - initialize iomappings for the PRM driver
750 *
751 * Detects and initializes the iomappings for the PRM driver, based
752 * on the DT data. Returns 0 on success, negative error value
753 * otherwise.
754 */
755int __init omap2_prm_base_init(void)
756{
757	struct device_node *np;
758	const struct of_device_id *match;
759	struct omap_prcm_init_data *data;
760	void __iomem *mem;
761
762	for_each_matching_node_and_match(np, omap_prcm_dt_match_table, &match) {
763		data = (struct omap_prcm_init_data *)match->data;
764
765		mem = of_iomap(np, 0);
766		if (!mem)
767			return -ENOMEM;
768
769		if (data->index == TI_CLKM_PRM)
770			prm_base = mem + data->offset;
771
772		data->mem = mem;
773
774		data->np = np;
775
776		if (data->init)
777			data->init(data);
778	}
779
780	return 0;
781}
782
783int __init omap2_prcm_base_init(void)
784{
785	int ret;
786
787	ret = omap2_prm_base_init();
788	if (ret)
789		return ret;
790
791	return omap2_cm_base_init();
792}
793
794/**
795 * omap_prcm_init - low level init for the PRCM drivers
796 *
797 * Initializes the low level clock infrastructure for PRCM drivers.
798 * Returns 0 on success, negative error value on failure.
799 */
800int __init omap_prcm_init(void)
801{
802	struct device_node *np;
803	const struct of_device_id *match;
804	const struct omap_prcm_init_data *data;
805	int ret;
806
807	for_each_matching_node_and_match(np, omap_prcm_dt_match_table, &match) {
808		data = match->data;
809
810		ret = omap2_clk_provider_init(np, data->index, NULL, data->mem);
811		if (ret)
812			return ret;
813	}
814
815	omap_cm_init();
816
817	return 0;
818}
819
820static int __init prm_late_init(void)
821{
822	if (prm_ll_data->late_init)
823		return prm_ll_data->late_init();
824	return 0;
825}
826subsys_initcall(prm_late_init);