// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/drivers/mfd/ucb1x00-core.c
 *
 * Copyright (C) 2001 Russell King, All Rights Reserved.
 *
 * The UCB1x00 core driver provides basic services for handling IO,
 * the ADC, interrupts, and accessing registers. It is designed
 * such that everything goes through this layer, thereby providing
 * a consistent locking methodology, as well as allowing the drivers
 * to be used on other non-MCP-enabled hardware platforms.
 *
 * Note that all locks are private to this file. Nothing else may
 * touch them.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/mfd/ucb1x00.h>
#include <linux/pm.h>
#include <linux/gpio/driver.h>

static DEFINE_MUTEX(ucb1x00_mutex);
static LIST_HEAD(ucb1x00_drivers);
static LIST_HEAD(ucb1x00_devices);

/**
 * ucb1x00_io_set_dir - set IO direction
 * @ucb: UCB1x00 structure describing chip
 * @in: bitfield of IO pins to be set as inputs
 * @out: bitfield of IO pins to be set as outputs
 *
 * Set the IO direction of the ten general purpose IO pins on
 * the UCB1x00 chip. The @in bitfield has priority over the
 * @out bitfield, in that if you specify a pin as both input
 * and output, it will end up as an input.
 *
 * ucb1x00_enable must have been called to enable the comms
 * before using this function.
 *
 * This function takes a spinlock, disabling interrupts.
 */
void ucb1x00_io_set_dir(struct ucb1x00 *ucb, unsigned int in, unsigned int out)
{
        unsigned long flags;

        spin_lock_irqsave(&ucb->io_lock, flags);
        ucb->io_dir |= out;
        ucb->io_dir &= ~in;

        ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);
        spin_unlock_irqrestore(&ucb->io_lock, flags);
}

/**
 * ucb1x00_io_write - set or clear IO outputs
 * @ucb: UCB1x00 structure describing chip
 * @set: bitfield of IO pins to set to logic '1'
 * @clear: bitfield of IO pins to set to logic '0'
 *
 * Set the IO output state of the specified IO pins. The value
 * is retained if the pins are subsequently configured as inputs.
 * The @clear bitfield has priority over the @set bitfield -
 * outputs will be cleared.
 *
 * ucb1x00_enable must have been called to enable the comms
 * before using this function.
 *
 * This function takes a spinlock, disabling interrupts.
 */
void ucb1x00_io_write(struct ucb1x00 *ucb, unsigned int set, unsigned int clear)
{
        unsigned long flags;

        spin_lock_irqsave(&ucb->io_lock, flags);
        ucb->io_out |= set;
        ucb->io_out &= ~clear;

        ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
        spin_unlock_irqrestore(&ucb->io_lock, flags);
}

/**
 * ucb1x00_io_read - read the current state of the IO pins
 * @ucb: UCB1x00 structure describing chip
 *
 * Return a bitfield describing the logic state of the ten
 * general purpose IO pins.
 *
 * ucb1x00_enable must have been called to enable the comms
 * before using this function.
 *
 * This function does not take any mutexes or spinlocks.
 */
unsigned int ucb1x00_io_read(struct ucb1x00 *ucb)
{
        return ucb1x00_reg_read(ucb, UCB_IO_DATA);
}

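/*
 * Illustrative sketch (not part of this driver): a client of the IO
 * accessors above brackets them with ucb1x00_enable()/ucb1x00_disable()
 * and addresses the ten IO pins by bit mask.  The pin assignments and
 * helper name below are hypothetical.
 *
 *	#define MY_LED		(1 << 3)
 *	#define MY_SENSE	(1 << 7)
 *
 *	static void my_setup_io(struct ucb1x00 *ucb)
 *	{
 *		ucb1x00_enable(ucb);
 *		ucb1x00_io_set_dir(ucb, MY_SENSE, MY_LED);  // MY_SENSE in, MY_LED out
 *		ucb1x00_io_write(ucb, MY_LED, 0);           // drive MY_LED to logic '1'
 *		if (ucb1x00_io_read(ucb) & MY_SENSE)
 *			;                                   // MY_SENSE reads as logic '1'
 *		ucb1x00_disable(ucb);
 *	}
 */
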
static void ucb1x00_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
        struct ucb1x00 *ucb = gpiochip_get_data(chip);
        unsigned long flags;

        spin_lock_irqsave(&ucb->io_lock, flags);
        if (value)
                ucb->io_out |= 1 << offset;
        else
                ucb->io_out &= ~(1 << offset);

        ucb1x00_enable(ucb);
        ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
        ucb1x00_disable(ucb);
        spin_unlock_irqrestore(&ucb->io_lock, flags);
}

static int ucb1x00_gpio_get(struct gpio_chip *chip, unsigned offset)
{
        struct ucb1x00 *ucb = gpiochip_get_data(chip);
        unsigned val;

        ucb1x00_enable(ucb);
        val = ucb1x00_reg_read(ucb, UCB_IO_DATA);
        ucb1x00_disable(ucb);

        return !!(val & (1 << offset));
}

static int ucb1x00_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
        struct ucb1x00 *ucb = gpiochip_get_data(chip);
        unsigned long flags;

        spin_lock_irqsave(&ucb->io_lock, flags);
        ucb->io_dir &= ~(1 << offset);
        ucb1x00_enable(ucb);
        ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);
        ucb1x00_disable(ucb);
        spin_unlock_irqrestore(&ucb->io_lock, flags);

        return 0;
}

static int ucb1x00_gpio_direction_output(struct gpio_chip *chip,
                                         unsigned offset, int value)
{
        struct ucb1x00 *ucb = gpiochip_get_data(chip);
        unsigned long flags;
        unsigned old, mask = 1 << offset;

        spin_lock_irqsave(&ucb->io_lock, flags);
        old = ucb->io_out;
        if (value)
                ucb->io_out |= mask;
        else
                ucb->io_out &= ~mask;

        ucb1x00_enable(ucb);
        if (old != ucb->io_out)
                ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);

        if (!(ucb->io_dir & mask)) {
                ucb->io_dir |= mask;
                ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);
        }
        ucb1x00_disable(ucb);
        spin_unlock_irqrestore(&ucb->io_lock, flags);

        return 0;
}

static int ucb1x00_to_irq(struct gpio_chip *chip, unsigned offset)
{
        struct ucb1x00 *ucb = gpiochip_get_data(chip);

        return ucb->irq_base > 0 ? ucb->irq_base + offset : -ENXIO;
}

/*
 * UCB1300 data sheet says we must:
 * 1. enable ADC => 5us (including reference startup time)
 * 2. select input => 51*tsibclk => 4.3us
 * 3. start conversion => 102*tsibclk => 8.5us
 * (tsibclk = 1/11981000)
 * Period between SIB 128-bit frames = 10.7us
 */

/**
 * ucb1x00_adc_enable - enable the ADC converter
 * @ucb: UCB1x00 structure describing chip
 *
 * Enable the ucb1x00 and ADC converter on the UCB1x00 for use.
 * Any code wishing to use the ADC converter must call this
 * function prior to using it.
 *
 * This function takes the ADC mutex to prevent two or more
 * concurrent uses, and therefore may sleep. As a result, it
 * can only be called from process context, not interrupt
 * context.
 *
 * You should release the ADC as soon as possible using
 * ucb1x00_adc_disable.
 */
void ucb1x00_adc_enable(struct ucb1x00 *ucb)
{
        mutex_lock(&ucb->adc_mutex);

        ucb->adc_cr |= UCB_ADC_ENA;

        ucb1x00_enable(ucb);
        ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr);
}

/**
 * ucb1x00_adc_read - read the specified ADC channel
 * @ucb: UCB1x00 structure describing chip
 * @adc_channel: ADC channel mask
 * @sync: wait for synchronisation pulse.
 *
 * Start an ADC conversion and wait for the result. Note that
 * synchronised ADC conversions (via the ADCSYNC pin) must wait
 * until the trigger is asserted and the conversion is finished.
 *
 * This function currently spins waiting for the conversion to
 * complete (2 frames max without sync).
 *
 * If called for a synchronised ADC conversion, it may sleep
 * with the ADC mutex held.
 */
unsigned int ucb1x00_adc_read(struct ucb1x00 *ucb, int adc_channel, int sync)
{
        unsigned int val;

        if (sync)
                adc_channel |= UCB_ADC_SYNC_ENA;

        ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr | adc_channel);
        ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr | adc_channel | UCB_ADC_START);

        for (;;) {
                val = ucb1x00_reg_read(ucb, UCB_ADC_DATA);
                if (val & UCB_ADC_DAT_VAL)
                        break;
                /* yield to other processes */
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(1);
        }

        return UCB_ADC_DAT(val);
}

/**
 * ucb1x00_adc_disable - disable the ADC converter
 * @ucb: UCB1x00 structure describing chip
 *
 * Disable the ADC converter and release the ADC mutex.
 */
void ucb1x00_adc_disable(struct ucb1x00 *ucb)
{
        ucb->adc_cr &= ~UCB_ADC_ENA;
        ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr);
        ucb1x00_disable(ucb);

        mutex_unlock(&ucb->adc_mutex);
}

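/*
 * Illustrative sketch (not part of this driver): a typical ADC user holds
 * the converter for as short a time as possible.  The helper name is
 * hypothetical; the channel mask comes from <linux/mfd/ucb1x00.h>, and
 * sync == 0 means no ADCSYNC trigger is used.
 *
 *	static unsigned int my_read_ad0(struct ucb1x00 *ucb)
 *	{
 *		unsigned int val;
 *
 *		ucb1x00_adc_enable(ucb);	// may sleep; takes the ADC mutex
 *		val = ucb1x00_adc_read(ucb, UCB_ADC_INP_AD0, 0);
 *		ucb1x00_adc_disable(ucb);	// releases the ADC mutex
 *		return val;
 *	}
 */
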
/*
 * UCB1x00 Interrupt handling.
 *
 * The UCB1x00 can generate interrupts when the SIBCLK is stopped.
 * Since we need to read an internal register, we must re-enable
 * SIBCLK to talk to the chip. We leave the clock running until
 * we have finished processing all interrupts from the chip.
 */
static void ucb1x00_irq(struct irq_desc *desc)
{
        struct ucb1x00 *ucb = irq_desc_get_handler_data(desc);
        unsigned int isr, i;

        ucb1x00_enable(ucb);
        isr = ucb1x00_reg_read(ucb, UCB_IE_STATUS);
        ucb1x00_reg_write(ucb, UCB_IE_CLEAR, isr);
        ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0);

        for (i = 0; i < 16 && isr; i++, isr >>= 1)
                if (isr & 1)
                        generic_handle_irq(ucb->irq_base + i);
        ucb1x00_disable(ucb);
}

static void ucb1x00_irq_update(struct ucb1x00 *ucb, unsigned mask)
{
        ucb1x00_enable(ucb);
        if (ucb->irq_ris_enbl & mask)
                ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
                                  ucb->irq_mask);
        if (ucb->irq_fal_enbl & mask)
                ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
                                  ucb->irq_mask);
        ucb1x00_disable(ucb);
}

static void ucb1x00_irq_noop(struct irq_data *data)
{
}

static void ucb1x00_irq_mask(struct irq_data *data)
{
        struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
        unsigned mask = 1 << (data->irq - ucb->irq_base);

        raw_spin_lock(&ucb->irq_lock);
        ucb->irq_mask &= ~mask;
        ucb1x00_irq_update(ucb, mask);
        raw_spin_unlock(&ucb->irq_lock);
}

static void ucb1x00_irq_unmask(struct irq_data *data)
{
        struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
        unsigned mask = 1 << (data->irq - ucb->irq_base);

        raw_spin_lock(&ucb->irq_lock);
        ucb->irq_mask |= mask;
        ucb1x00_irq_update(ucb, mask);
        raw_spin_unlock(&ucb->irq_lock);
}

static int ucb1x00_irq_set_type(struct irq_data *data, unsigned int type)
{
        struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
        unsigned mask = 1 << (data->irq - ucb->irq_base);

        raw_spin_lock(&ucb->irq_lock);
        if (type & IRQ_TYPE_EDGE_RISING)
                ucb->irq_ris_enbl |= mask;
        else
                ucb->irq_ris_enbl &= ~mask;

        if (type & IRQ_TYPE_EDGE_FALLING)
                ucb->irq_fal_enbl |= mask;
        else
                ucb->irq_fal_enbl &= ~mask;
        if (ucb->irq_mask & mask) {
                ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
                                  ucb->irq_mask);
                ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
                                  ucb->irq_mask);
        }
        raw_spin_unlock(&ucb->irq_lock);

        return 0;
}

static int ucb1x00_irq_set_wake(struct irq_data *data, unsigned int on)
{
        struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
        struct ucb1x00_plat_data *pdata = ucb->mcp->attached_device.platform_data;
        unsigned mask = 1 << (data->irq - ucb->irq_base);

        if (!pdata || !pdata->can_wakeup)
                return -EINVAL;

        raw_spin_lock(&ucb->irq_lock);
        if (on)
                ucb->irq_wake |= mask;
        else
                ucb->irq_wake &= ~mask;
        raw_spin_unlock(&ucb->irq_lock);

        return 0;
}

static struct irq_chip ucb1x00_irqchip = {
        .name = "ucb1x00",
        .irq_ack = ucb1x00_irq_noop,
        .irq_mask = ucb1x00_irq_mask,
        .irq_unmask = ucb1x00_irq_unmask,
        .irq_set_type = ucb1x00_irq_set_type,
        .irq_set_wake = ucb1x00_irq_set_wake,
};

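/*
 * Illustrative sketch (not part of this driver): the sixteen UCB1x00
 * interrupt sources are exposed as ordinary Linux interrupts numbered
 * ucb->irq_base + <status bit>, so sub-drivers use the standard genirq
 * API.  The handler, name and "bit" below are hypothetical placeholders;
 * see <linux/mfd/ucb1x00.h> for the real per-source offsets.
 *
 *	static irqreturn_t my_handler(int irq, void *dev_id)
 *	{
 *		// the chip-level status has already been read and cleared
 *		// by the chained handler ucb1x00_irq() above
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_irq(ucb->irq_base + bit, my_handler,
 *			  IRQF_TRIGGER_RISING, "my-ucb1x00-client", ucb);
 */
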
static int ucb1x00_add_dev(struct ucb1x00 *ucb, struct ucb1x00_driver *drv)
{
        struct ucb1x00_dev *dev;
        int ret;

        dev = kmalloc(sizeof(struct ucb1x00_dev), GFP_KERNEL);
        if (!dev)
                return -ENOMEM;

        dev->ucb = ucb;
        dev->drv = drv;

        ret = drv->add(dev);
        if (ret) {
                kfree(dev);
                return ret;
        }

        list_add_tail(&dev->dev_node, &ucb->devs);
        list_add_tail(&dev->drv_node, &drv->devs);

        return ret;
}

static void ucb1x00_remove_dev(struct ucb1x00_dev *dev)
{
        dev->drv->remove(dev);
        list_del(&dev->dev_node);
        list_del(&dev->drv_node);
        kfree(dev);
}

/*
 * Try to probe our interrupt, rather than relying on lots of
 * hard-coded machine dependencies. For reference, the expected
 * IRQ mappings are:
 *
 *   Machine          Default IRQ
 *   adsbitsy         IRQ_GPCIN4
 *   cerf             IRQ_GPIO_UCB1200_IRQ
 *   flexanet         IRQ_GPIO_GUI
 *   freebird         IRQ_GPIO_FREEBIRD_UCB1300_IRQ
 *   graphicsclient   ADS_EXT_IRQ(8)
 *   graphicsmaster   ADS_EXT_IRQ(8)
 *   lart             LART_IRQ_UCB1200
 *   omnimeter        IRQ_GPIO23
 *   pfs168           IRQ_GPIO_UCB1300_IRQ
 *   simpad           IRQ_GPIO_UCB1300_IRQ
 *   shannon          SHANNON_IRQ_GPIO_IRQ_CODEC
 *   yopy             IRQ_GPIO_UCB1200_IRQ
 */
static int ucb1x00_detect_irq(struct ucb1x00 *ucb)
{
        unsigned long mask;

        mask = probe_irq_on();

        /*
         * Enable the ADC interrupt.
         */
        ucb1x00_reg_write(ucb, UCB_IE_RIS, UCB_IE_ADC);
        ucb1x00_reg_write(ucb, UCB_IE_FAL, UCB_IE_ADC);
        ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0xffff);
        ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0);

        /*
         * Cause an ADC interrupt.
         */
        ucb1x00_reg_write(ucb, UCB_ADC_CR, UCB_ADC_ENA);
        ucb1x00_reg_write(ucb, UCB_ADC_CR, UCB_ADC_ENA | UCB_ADC_START);

        /*
         * Wait for the conversion to complete.
         */
        while ((ucb1x00_reg_read(ucb, UCB_ADC_DATA) & UCB_ADC_DAT_VAL) == 0);
        ucb1x00_reg_write(ucb, UCB_ADC_CR, 0);

        /*
         * Disable and clear interrupt.
         */
        ucb1x00_reg_write(ucb, UCB_IE_RIS, 0);
        ucb1x00_reg_write(ucb, UCB_IE_FAL, 0);
        ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0xffff);
        ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0);

        /*
         * Read triggered interrupt.
         */
        return probe_irq_off(mask);
}

static void ucb1x00_release(struct device *dev)
{
        struct ucb1x00 *ucb = classdev_to_ucb1x00(dev);
        kfree(ucb);
}

static struct class ucb1x00_class = {
        .name = "ucb1x00",
        .dev_release = ucb1x00_release,
};

static int ucb1x00_probe(struct mcp *mcp)
{
        struct ucb1x00_plat_data *pdata = mcp->attached_device.platform_data;
        struct ucb1x00_driver *drv;
        struct ucb1x00 *ucb;
        unsigned id, i, irq_base;
        int ret = -ENODEV;

        /* Tell the platform to deassert the UCB1x00 reset */
        if (pdata && pdata->reset)
                pdata->reset(UCB_RST_PROBE);

        mcp_enable(mcp);
        id = mcp_reg_read(mcp, UCB_ID);
        mcp_disable(mcp);

        if (id != UCB_ID_1200 && id != UCB_ID_1300 && id != UCB_ID_TC35143) {
                printk(KERN_WARNING "UCB1x00 ID not found: %04x\n", id);
                goto out;
        }

        ucb = kzalloc(sizeof(struct ucb1x00), GFP_KERNEL);
        ret = -ENOMEM;
        if (!ucb)
                goto out;

        device_initialize(&ucb->dev);
        ucb->dev.class = &ucb1x00_class;
        ucb->dev.parent = &mcp->attached_device;
        dev_set_name(&ucb->dev, "ucb1x00");

        raw_spin_lock_init(&ucb->irq_lock);
        spin_lock_init(&ucb->io_lock);
        mutex_init(&ucb->adc_mutex);

        ucb->id = id;
        ucb->mcp = mcp;

        ret = device_add(&ucb->dev);
        if (ret)
                goto err_dev_add;

        ucb1x00_enable(ucb);
        ucb->irq = ucb1x00_detect_irq(ucb);
        ucb1x00_disable(ucb);
        if (!ucb->irq) {
                dev_err(&ucb->dev, "IRQ probe failed\n");
                ret = -ENODEV;
                goto err_no_irq;
        }

        ucb->gpio.base = -1;
        irq_base = pdata ? pdata->irq_base : 0;
        ucb->irq_base = irq_alloc_descs(-1, irq_base, 16, -1);
        if (ucb->irq_base < 0) {
                dev_err(&ucb->dev, "unable to allocate 16 irqs: %d\n",
                        ucb->irq_base);
                ret = ucb->irq_base;
                goto err_irq_alloc;
        }

        for (i = 0; i < 16; i++) {
                unsigned irq = ucb->irq_base + i;

                irq_set_chip_and_handler(irq, &ucb1x00_irqchip, handle_edge_irq);
                irq_set_chip_data(irq, ucb);
                irq_clear_status_flags(irq, IRQ_NOREQUEST);
        }

        irq_set_irq_type(ucb->irq, IRQ_TYPE_EDGE_RISING);
        irq_set_chained_handler_and_data(ucb->irq, ucb1x00_irq, ucb);

        if (pdata && pdata->gpio_base) {
                ucb->gpio.label = dev_name(&ucb->dev);
                ucb->gpio.parent = &ucb->dev;
                ucb->gpio.owner = THIS_MODULE;
                ucb->gpio.base = pdata->gpio_base;
                ucb->gpio.ngpio = 10;
                ucb->gpio.set = ucb1x00_gpio_set;
                ucb->gpio.get = ucb1x00_gpio_get;
                ucb->gpio.direction_input = ucb1x00_gpio_direction_input;
                ucb->gpio.direction_output = ucb1x00_gpio_direction_output;
                ucb->gpio.to_irq = ucb1x00_to_irq;
                ret = gpiochip_add_data(&ucb->gpio, ucb);
                if (ret)
                        goto err_gpio_add;
        } else
                dev_info(&ucb->dev, "gpio_base not set so no gpiolib support");

        mcp_set_drvdata(mcp, ucb);

        if (pdata)
                device_set_wakeup_capable(&ucb->dev, pdata->can_wakeup);

        INIT_LIST_HEAD(&ucb->devs);
        mutex_lock(&ucb1x00_mutex);
        list_add_tail(&ucb->node, &ucb1x00_devices);
        list_for_each_entry(drv, &ucb1x00_drivers, node) {
                ucb1x00_add_dev(ucb, drv);
        }
        mutex_unlock(&ucb1x00_mutex);

        return ret;

 err_gpio_add:
        irq_set_chained_handler(ucb->irq, NULL);
 err_irq_alloc:
        if (ucb->irq_base > 0)
                irq_free_descs(ucb->irq_base, 16);
 err_no_irq:
        device_del(&ucb->dev);
 err_dev_add:
        put_device(&ucb->dev);
 out:
        if (pdata && pdata->reset)
                pdata->reset(UCB_RST_PROBE_FAIL);
        return ret;
}

static void ucb1x00_remove(struct mcp *mcp)
{
        struct ucb1x00_plat_data *pdata = mcp->attached_device.platform_data;
        struct ucb1x00 *ucb = mcp_get_drvdata(mcp);
        struct list_head *l, *n;

        mutex_lock(&ucb1x00_mutex);
        list_del(&ucb->node);
        list_for_each_safe(l, n, &ucb->devs) {
                struct ucb1x00_dev *dev = list_entry(l, struct ucb1x00_dev, dev_node);
                ucb1x00_remove_dev(dev);
        }
        mutex_unlock(&ucb1x00_mutex);

        if (ucb->gpio.base != -1)
                gpiochip_remove(&ucb->gpio);

        irq_set_chained_handler(ucb->irq, NULL);
        irq_free_descs(ucb->irq_base, 16);
        device_unregister(&ucb->dev);

        if (pdata && pdata->reset)
                pdata->reset(UCB_RST_REMOVE);
}

int ucb1x00_register_driver(struct ucb1x00_driver *drv)
{
        struct ucb1x00 *ucb;

        INIT_LIST_HEAD(&drv->devs);
        mutex_lock(&ucb1x00_mutex);
        list_add_tail(&drv->node, &ucb1x00_drivers);
        list_for_each_entry(ucb, &ucb1x00_devices, node) {
                ucb1x00_add_dev(ucb, drv);
        }
        mutex_unlock(&ucb1x00_mutex);
        return 0;
}

void ucb1x00_unregister_driver(struct ucb1x00_driver *drv)
{
        struct list_head *n, *l;

        mutex_lock(&ucb1x00_mutex);
        list_del(&drv->node);
        list_for_each_safe(l, n, &drv->devs) {
                struct ucb1x00_dev *dev = list_entry(l, struct ucb1x00_dev, drv_node);
                ucb1x00_remove_dev(dev);
        }
        mutex_unlock(&ucb1x00_mutex);
}

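/*
 * Illustrative sketch (not part of this driver): a minimal sub-driver
 * fills in a struct ucb1x00_driver; its add()/remove() callbacks are
 * invoked for every UCB1x00 device, whether the device or the driver
 * is registered first.  All names below are hypothetical.
 *
 *	static int my_add(struct ucb1x00_dev *dev)
 *	{
 *		// dev->ucb is the chip, dev->drv points back to my_driver
 *		return 0;
 *	}
 *
 *	static void my_remove(struct ucb1x00_dev *dev)
 *	{
 *	}
 *
 *	static struct ucb1x00_driver my_driver = {
 *		.add	= my_add,
 *		.remove	= my_remove,
 *	};
 *
 *	// module init/exit would call:
 *	//	ucb1x00_register_driver(&my_driver);
 *	//	ucb1x00_unregister_driver(&my_driver);
 */
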
static int ucb1x00_suspend(struct device *dev)
{
        struct ucb1x00_plat_data *pdata = dev_get_platdata(dev);
        struct ucb1x00 *ucb = dev_get_drvdata(dev);
        struct ucb1x00_dev *udev;

        mutex_lock(&ucb1x00_mutex);
        list_for_each_entry(udev, &ucb->devs, dev_node) {
                if (udev->drv->suspend)
                        udev->drv->suspend(udev);
        }
        mutex_unlock(&ucb1x00_mutex);

        if (ucb->irq_wake) {
                unsigned long flags;

                raw_spin_lock_irqsave(&ucb->irq_lock, flags);
                ucb1x00_enable(ucb);
                ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
                                  ucb->irq_wake);
                ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
                                  ucb->irq_wake);
                ucb1x00_disable(ucb);
                raw_spin_unlock_irqrestore(&ucb->irq_lock, flags);

                enable_irq_wake(ucb->irq);
        } else if (pdata && pdata->reset)
                pdata->reset(UCB_RST_SUSPEND);

        return 0;
}

static int ucb1x00_resume(struct device *dev)
{
        struct ucb1x00_plat_data *pdata = dev_get_platdata(dev);
        struct ucb1x00 *ucb = dev_get_drvdata(dev);
        struct ucb1x00_dev *udev;

        if (!ucb->irq_wake && pdata && pdata->reset)
                pdata->reset(UCB_RST_RESUME);

        ucb1x00_enable(ucb);
        ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
        ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);

        if (ucb->irq_wake) {
                unsigned long flags;

                raw_spin_lock_irqsave(&ucb->irq_lock, flags);
                ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
                                  ucb->irq_mask);
                ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
                                  ucb->irq_mask);
                raw_spin_unlock_irqrestore(&ucb->irq_lock, flags);

                disable_irq_wake(ucb->irq);
        }
        ucb1x00_disable(ucb);

        mutex_lock(&ucb1x00_mutex);
        list_for_each_entry(udev, &ucb->devs, dev_node) {
                if (udev->drv->resume)
                        udev->drv->resume(udev);
        }
        mutex_unlock(&ucb1x00_mutex);
        return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(ucb1x00_pm_ops,
                                ucb1x00_suspend, ucb1x00_resume);

static struct mcp_driver ucb1x00_driver = {
        .drv = {
                .name = "ucb1x00",
                .owner = THIS_MODULE,
                .pm = pm_sleep_ptr(&ucb1x00_pm_ops),
        },
        .probe = ucb1x00_probe,
        .remove = ucb1x00_remove,
};

static int __init ucb1x00_init(void)
{
        int ret = class_register(&ucb1x00_class);
        if (ret == 0) {
                ret = mcp_driver_register(&ucb1x00_driver);
                if (ret)
                        class_unregister(&ucb1x00_class);
        }
        return ret;
}

static void __exit ucb1x00_exit(void)
{
        mcp_driver_unregister(&ucb1x00_driver);
        class_unregister(&ucb1x00_class);
}

module_init(ucb1x00_init);
module_exit(ucb1x00_exit);

EXPORT_SYMBOL(ucb1x00_io_set_dir);
EXPORT_SYMBOL(ucb1x00_io_write);
EXPORT_SYMBOL(ucb1x00_io_read);

EXPORT_SYMBOL(ucb1x00_adc_enable);
EXPORT_SYMBOL(ucb1x00_adc_read);
EXPORT_SYMBOL(ucb1x00_adc_disable);

EXPORT_SYMBOL(ucb1x00_register_driver);
EXPORT_SYMBOL(ucb1x00_unregister_driver);

MODULE_ALIAS("mcp:ucb1x00");
MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
MODULE_DESCRIPTION("UCB1x00 core driver");
MODULE_LICENSE("GPL");