v6.8
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) 2007,2012 Texas Instruments, Inc.
  4 */
  5#include <linux/kernel.h>
  6#include <linux/module.h>
  7#include <linux/platform_device.h>
  8#include <linux/interrupt.h>
  9#include <linux/slab.h>
 10#include <linux/err.h>
 11#include <linux/io.h>
 12#include <linux/sched.h>
 13#include <linux/pm_runtime.h>
 14#include <linux/of.h>
 15
 16#include <linux/w1.h>
 17
 18#define	MOD_NAME	"OMAP_HDQ:"
 19
 20#define OMAP_HDQ_REVISION			0x00
 21#define OMAP_HDQ_TX_DATA			0x04
 22#define OMAP_HDQ_RX_DATA			0x08
 23#define OMAP_HDQ_CTRL_STATUS			0x0c
 24#define OMAP_HDQ_CTRL_STATUS_SINGLE		BIT(7)
 25#define OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK	BIT(6)
 26#define OMAP_HDQ_CTRL_STATUS_CLOCKENABLE	BIT(5)
 27#define OMAP_HDQ_CTRL_STATUS_GO                 BIT(4)
 28#define OMAP_HDQ_CTRL_STATUS_PRESENCE		BIT(3)
 29#define OMAP_HDQ_CTRL_STATUS_INITIALIZATION	BIT(2)
 30#define OMAP_HDQ_CTRL_STATUS_DIR		BIT(1)
 31#define OMAP_HDQ_INT_STATUS			0x10
 32#define OMAP_HDQ_INT_STATUS_TXCOMPLETE		BIT(2)
 33#define OMAP_HDQ_INT_STATUS_RXCOMPLETE		BIT(1)
 34#define OMAP_HDQ_INT_STATUS_TIMEOUT		BIT(0)
 35
 36#define OMAP_HDQ_FLAG_CLEAR			0
 37#define OMAP_HDQ_FLAG_SET			1
 38#define OMAP_HDQ_TIMEOUT			(HZ/5)
 39
 40#define OMAP_HDQ_MAX_USER			4
 41
 42static DECLARE_WAIT_QUEUE_HEAD(hdq_wait_queue);
 43
 44static int w1_id;
 45module_param(w1_id, int, 0400);
 46MODULE_PARM_DESC(w1_id, "1-wire id for the slave detection in HDQ mode");
 47
 48struct hdq_data {
 49	struct device		*dev;
 50	void __iomem		*hdq_base;
 51	/* lock read/write/break operations */
 52	struct  mutex		hdq_mutex;
 53	/* interrupt status and a lock for it */
 54	u8			hdq_irqstatus;
 55	spinlock_t		hdq_spinlock;
 56	/* mode: 0-HDQ 1-W1 */
 57	int                     mode;
 58
 59};
 60
 61/* HDQ register I/O routines */
 62static inline u8 hdq_reg_in(struct hdq_data *hdq_data, u32 offset)
 63{
 64	return __raw_readl(hdq_data->hdq_base + offset);
 65}
 66
 67static inline void hdq_reg_out(struct hdq_data *hdq_data, u32 offset, u8 val)
 68{
 69	__raw_writel(val, hdq_data->hdq_base + offset);
 70}
 71
 72static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset,
 73			u8 val, u8 mask)
 74{
 75	u8 new_val = (__raw_readl(hdq_data->hdq_base + offset) & ~mask)
 76			| (val & mask);
 77	__raw_writel(new_val, hdq_data->hdq_base + offset);
 78
 79	return new_val;
 80}
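/*
 * Illustrative sketch of how this helper is used elsewhere in this file:
 * a single read-modify-write of CTRL_STATUS that sets GO for a transmit
 * while clearing DIR, e.g.
 *
 *	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_GO,
 *		      OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
 *
 * Only the bits named in the mask argument are touched.
 */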
 81
 82/*
 83 * Wait for one or more bits in the flag to change.
 84 * HDQ_FLAG_SET: wait until any bit in the flag is set.
 85 * HDQ_FLAG_CLEAR: wait until all bits in the flag are cleared.
 86 * return 0 on success and -ETIMEDOUT in the case of timeout.
 87 */
 88static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset,
 89		u8 flag, u8 flag_set, u8 *status)
 90{
 91	int ret = 0;
 92	unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;
 93
 94	if (flag_set == OMAP_HDQ_FLAG_CLEAR) {
 95		/* wait for the flag clear */
 96		while (((*status = hdq_reg_in(hdq_data, offset)) & flag)
 97			&& time_before(jiffies, timeout)) {
 98			schedule_timeout_uninterruptible(1);
 99		}
100		if (*status & flag)
101			ret = -ETIMEDOUT;
102	} else if (flag_set == OMAP_HDQ_FLAG_SET) {
103		/* wait for the flag set */
104		while (!((*status = hdq_reg_in(hdq_data, offset)) & flag)
105			&& time_before(jiffies, timeout)) {
106			schedule_timeout_uninterruptible(1);
107		}
108		if (!(*status & flag))
109			ret = -ETIMEDOUT;
110	} else
111		return -EINVAL;
112
113	return ret;
114}
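/*
 * Typical caller pattern (see hdq_write_byte() below), polling until the GO
 * bit drops after a transfer:
 *
 *	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
 *			OMAP_HDQ_CTRL_STATUS_GO,
 *			OMAP_HDQ_FLAG_CLEAR, &tmp_status);
 *
 * which returns 0 once GO reads back as zero, or -ETIMEDOUT after
 * OMAP_HDQ_TIMEOUT jiffies.
 */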
115
116/* Clear saved irqstatus after using an interrupt */
117static u8 hdq_reset_irqstatus(struct hdq_data *hdq_data, u8 bits)
118{
119	unsigned long irqflags;
120	u8 status;
121
122	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
123	status = hdq_data->hdq_irqstatus;
124	/* this is a read-modify-write */
125	hdq_data->hdq_irqstatus &= ~bits;
126	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
127
128	return status;
129}
130
131/* write out a byte and fill *status with HDQ_INT_STATUS */
132static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
133{
134	int ret;
135	u8 tmp_status;
136
137	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
138	if (ret < 0) {
139		ret = -EINTR;
140		goto rtn;
141	}
142
143	if (hdq_data->hdq_irqstatus)
144		dev_err(hdq_data->dev, "TX irqstatus not cleared (%02x)\n",
145			hdq_data->hdq_irqstatus);
146
147	*status = 0;
148
149	hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val);
150
151	/* set the GO bit */
152	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_GO,
153		OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
154	/* wait for the TXCOMPLETE bit */
155	ret = wait_event_timeout(hdq_wait_queue,
156		(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_TXCOMPLETE),
157		OMAP_HDQ_TIMEOUT);
158	*status = hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_TXCOMPLETE);
159	if (ret == 0) {
160		dev_dbg(hdq_data->dev, "TX wait elapsed\n");
161		ret = -ETIMEDOUT;
162		goto out;
163	}
164
165	/* check irqstatus */
166	if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) {
167		dev_dbg(hdq_data->dev, "timeout waiting for"
168			" TXCOMPLETE/RXCOMPLETE, %x\n", *status);
169		ret = -ETIMEDOUT;
170		goto out;
171	}
172
173	/* wait for the GO bit to return to zero */
174	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
175			OMAP_HDQ_CTRL_STATUS_GO,
176			OMAP_HDQ_FLAG_CLEAR, &tmp_status);
177	if (ret) {
178		dev_dbg(hdq_data->dev, "timeout waiting GO bit"
179			" return to zero, %x\n", tmp_status);
180	}
181
182out:
183	mutex_unlock(&hdq_data->hdq_mutex);
184rtn:
185	return ret;
186}
187
188/* HDQ Interrupt service routine */
189static irqreturn_t hdq_isr(int irq, void *_hdq)
190{
191	struct hdq_data *hdq_data = _hdq;
192	unsigned long irqflags;
193
194	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
195	hdq_data->hdq_irqstatus |= hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
196	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
197	dev_dbg(hdq_data->dev, "hdq_isr: %x\n", hdq_data->hdq_irqstatus);
198
199	if (hdq_data->hdq_irqstatus &
200		(OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE
201		| OMAP_HDQ_INT_STATUS_TIMEOUT)) {
202		/* wake up sleeping process */
203		wake_up(&hdq_wait_queue);
204	}
205
206	return IRQ_HANDLED;
207}
208
209/* W1 search callback function in HDQ mode */
210static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
211		u8 search_type, w1_slave_found_callback slave_found)
212{
213	u64 module_id, rn_le, cs, id;
214
215	if (w1_id)
216		module_id = w1_id;
217	else
218		module_id = 0x1;
219
220	rn_le = cpu_to_le64(module_id);
221	/*
222	 * HDQ might not truly obey the 1-wire spec, so calculate
223	 * the CRC based on the module parameter.
224	 */
225	cs = w1_calc_crc8((u8 *)&rn_le, 7);
226	id = (cs << 56) | module_id;
227
228	slave_found(master_dev, id);
229}
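/*
 * Note: in HDQ mode the bus cannot really be searched, so a single fixed ID
 * is reported.  A different ID may be supplied at load time through the w1_id
 * module parameter declared above, e.g. (assumed usage)
 *
 *	modprobe omap_hdq w1_id=2
 *
 * otherwise the default ID 0x1 is used.
 */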
230
231/* Issue break pulse to the device */
232static int omap_hdq_break(struct hdq_data *hdq_data)
233{
234	int ret = 0;
235	u8 tmp_status;
236
237	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
238	if (ret < 0) {
239		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
240		ret = -EINTR;
241		goto rtn;
242	}
243
244	if (hdq_data->hdq_irqstatus)
245		dev_err(hdq_data->dev, "break irqstatus not cleared (%02x)\n",
246			hdq_data->hdq_irqstatus);
247
248	/* set the INIT and GO bit */
249	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
250		OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO,
251		OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
252		OMAP_HDQ_CTRL_STATUS_GO);
253
254	/* wait for the TIMEOUT bit */
255	ret = wait_event_timeout(hdq_wait_queue,
256		(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_TIMEOUT),
257		OMAP_HDQ_TIMEOUT);
258	tmp_status = hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_TIMEOUT);
259	if (ret == 0) {
260		dev_dbg(hdq_data->dev, "break wait elapsed\n");
261		ret = -EINTR;
262		goto out;
263	}
264
265	/* check irqstatus */
266	if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) {
267		dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x\n",
268			tmp_status);
269		ret = -ETIMEDOUT;
270		goto out;
271	}
272
273	/*
274	 * check that the presence-detect bit is set,
275	 * indicating that the slave is responding
276	 */
277	if (!(hdq_reg_in(hdq_data, OMAP_HDQ_CTRL_STATUS) &
278			OMAP_HDQ_CTRL_STATUS_PRESENCE)) {
279		dev_dbg(hdq_data->dev, "Presence bit not set\n");
280		ret = -ETIMEDOUT;
281		goto out;
282	}
283
284	/*
285	 * wait for both INIT and GO bits to return to zero.
286	 * zero wait time expected for interrupt mode.
287	 */
288	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
289			OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
290			OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_FLAG_CLEAR,
291			&tmp_status);
292	if (ret)
293		dev_dbg(hdq_data->dev, "timeout waiting INIT&GO bits"
294			" return to zero, %x\n", tmp_status);
295
296out:
297	mutex_unlock(&hdq_data->hdq_mutex);
298rtn:
299	return ret;
300}
301
302static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
303{
304	int ret = 0;
305	u8 status;
306
307	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
308	if (ret < 0) {
309		ret = -EINTR;
310		goto rtn;
311	}
312
313	if (pm_runtime_suspended(hdq_data->dev)) {
314		ret = -EINVAL;
315		goto out;
316	}
317
318	if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
319		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
320			OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO,
321			OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
322		/*
323		 * The RX comes immediately after TX.
324		 */
325		wait_event_timeout(hdq_wait_queue,
326				   (hdq_data->hdq_irqstatus
327				    & (OMAP_HDQ_INT_STATUS_RXCOMPLETE |
328				       OMAP_HDQ_INT_STATUS_TIMEOUT)),
329				   OMAP_HDQ_TIMEOUT);
330		status = hdq_reset_irqstatus(hdq_data,
331					     OMAP_HDQ_INT_STATUS_RXCOMPLETE |
332					     OMAP_HDQ_INT_STATUS_TIMEOUT);
333		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 0,
334			OMAP_HDQ_CTRL_STATUS_DIR);
335
336		/* check irqstatus */
337		if (!(status & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
338			dev_dbg(hdq_data->dev, "timeout waiting for"
339				" RXCOMPLETE, %x", status);
340			ret = -ETIMEDOUT;
341			goto out;
342		}
343	} else { /* an interrupt occurred before hdq_read_byte was called */
344		hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_RXCOMPLETE);
345	}
346	/* the data is ready. Read it in! */
347	*val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA);
348out:
349	mutex_unlock(&hdq_data->hdq_mutex);
350rtn:
351	return ret;
352
353}
354
355/*
356 * W1 triplet callback function - used for searching ROM addresses.
357 * Registered only when controller is in 1-wire mode.
358 */
359static u8 omap_w1_triplet(void *_hdq, u8 bdir)
360{
361	u8 id_bit, comp_bit;
362	int err;
363	u8 ret = 0x3; /* no slaves responded */
364	struct hdq_data *hdq_data = _hdq;
365	u8 ctrl = OMAP_HDQ_CTRL_STATUS_SINGLE | OMAP_HDQ_CTRL_STATUS_GO |
366		  OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK;
367	u8 mask = ctrl | OMAP_HDQ_CTRL_STATUS_DIR;
368
369	err = pm_runtime_get_sync(hdq_data->dev);
370	if (err < 0) {
371		pm_runtime_put_noidle(hdq_data->dev);
372
373		return err;
374	}
375
376	err = mutex_lock_interruptible(&hdq_data->hdq_mutex);
377	if (err < 0) {
378		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
379		goto rtn;
380	}
381
382	/* read id_bit */
383	hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS,
384		      ctrl | OMAP_HDQ_CTRL_STATUS_DIR, mask);
385	err = wait_event_timeout(hdq_wait_queue,
386				 (hdq_data->hdq_irqstatus
387				  & OMAP_HDQ_INT_STATUS_RXCOMPLETE),
388				 OMAP_HDQ_TIMEOUT);
389	/* Must clear irqstatus for another RXCOMPLETE interrupt */
390	hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_RXCOMPLETE);
391
392	if (err == 0) {
393		dev_dbg(hdq_data->dev, "RX wait elapsed\n");
394		goto out;
395	}
396	id_bit = (hdq_reg_in(_hdq, OMAP_HDQ_RX_DATA) & 0x01);
397
398	/* read comp_bit */
399	hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS,
400		      ctrl | OMAP_HDQ_CTRL_STATUS_DIR, mask);
401	err = wait_event_timeout(hdq_wait_queue,
402				 (hdq_data->hdq_irqstatus
403				  & OMAP_HDQ_INT_STATUS_RXCOMPLETE),
404				 OMAP_HDQ_TIMEOUT);
405	/* Must clear irqstatus for another RXCOMPLETE interrupt */
406	hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_RXCOMPLETE);
407
408	if (err == 0) {
409		dev_dbg(hdq_data->dev, "RX wait elapsed\n");
410		goto out;
411	}
412	comp_bit = (hdq_reg_in(_hdq, OMAP_HDQ_RX_DATA) & 0x01);
413
414	if (id_bit && comp_bit) {
415		ret = 0x03;  /* no slaves responded */
416		goto out;
417	}
418	if (!id_bit && !comp_bit) {
419		/* Both bits are valid, take the direction given */
420		ret = bdir ? 0x04 : 0;
421	} else {
422		/* Only one bit is valid, take that direction */
423		bdir = id_bit;
424		ret = id_bit ? 0x05 : 0x02;
425	}
426
427	/* write bdir bit */
428	hdq_reg_out(_hdq, OMAP_HDQ_TX_DATA, bdir);
429	hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS, ctrl, mask);
430	err = wait_event_timeout(hdq_wait_queue,
431				 (hdq_data->hdq_irqstatus
432				  & OMAP_HDQ_INT_STATUS_TXCOMPLETE),
433				 OMAP_HDQ_TIMEOUT);
434	/* Must clear irqstatus for another TXCOMPLETE interrupt */
435	hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_TXCOMPLETE);
436
437	if (err == 0) {
438		dev_dbg(hdq_data->dev, "TX wait elapsed\n");
439		goto out;
440	}
441
442	hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS, 0,
443		      OMAP_HDQ_CTRL_STATUS_SINGLE);
444
445out:
446	mutex_unlock(&hdq_data->hdq_mutex);
447rtn:
448	pm_runtime_mark_last_busy(hdq_data->dev);
449	pm_runtime_put_autosuspend(hdq_data->dev);
450
451	return ret;
452}
453
454/* reset callback */
455static u8 omap_w1_reset_bus(void *_hdq)
456{
457	struct hdq_data *hdq_data = _hdq;
458	int err;
459
460	err = pm_runtime_get_sync(hdq_data->dev);
461	if (err < 0) {
462		pm_runtime_put_noidle(hdq_data->dev);
463
464		return err;
465	}
466
467	omap_hdq_break(hdq_data);
468
469	pm_runtime_mark_last_busy(hdq_data->dev);
470	pm_runtime_put_autosuspend(hdq_data->dev);
471
472	return 0;
473}
474
475/* Read a byte of data from the device */
476static u8 omap_w1_read_byte(void *_hdq)
477{
478	struct hdq_data *hdq_data = _hdq;
479	u8 val = 0;
480	int ret;
481
482	ret = pm_runtime_get_sync(hdq_data->dev);
483	if (ret < 0) {
484		pm_runtime_put_noidle(hdq_data->dev);
485
486		return -1;
487	}
488
489	ret = hdq_read_byte(hdq_data, &val);
490	if (ret)
491		val = -1;
492
493	pm_runtime_mark_last_busy(hdq_data->dev);
494	pm_runtime_put_autosuspend(hdq_data->dev);
495
496	return val;
497}
498
499/* Write a byte of data to the device */
500static void omap_w1_write_byte(void *_hdq, u8 byte)
501{
502	struct hdq_data *hdq_data = _hdq;
503	int ret;
504	u8 status;
505
506	ret = pm_runtime_get_sync(hdq_data->dev);
507	if (ret < 0) {
508		pm_runtime_put_noidle(hdq_data->dev);
509
510		return;
511	}
512
513	/*
514	 * We need to reset the slave before
515	 * issuing the SKIP ROM command, else
516	 * the slave will not work.
517	 */
518	if (byte == W1_SKIP_ROM)
519		omap_hdq_break(hdq_data);
520
521	ret = hdq_write_byte(hdq_data, byte, &status);
522	if (ret < 0) {
523		dev_dbg(hdq_data->dev, "TX failure:Ctrl status %x\n", status);
524		goto out_err;
525	}
526
527out_err:
528	pm_runtime_mark_last_busy(hdq_data->dev);
529	pm_runtime_put_autosuspend(hdq_data->dev);
530}
531
532static struct w1_bus_master omap_w1_master = {
533	.read_byte	= omap_w1_read_byte,
534	.write_byte	= omap_w1_write_byte,
535	.reset_bus	= omap_w1_reset_bus,
536};
537
538static int __maybe_unused omap_hdq_runtime_suspend(struct device *dev)
539{
540	struct hdq_data *hdq_data = dev_get_drvdata(dev);
541
542	hdq_reg_out(hdq_data, 0, hdq_data->mode);
543	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
544
545	return 0;
546}
547
548static int __maybe_unused omap_hdq_runtime_resume(struct device *dev)
549{
550	struct hdq_data *hdq_data = dev_get_drvdata(dev);
551
552	/* select HDQ/1W mode & enable clocks */
553	hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
554		    OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
555		    OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK |
556		    hdq_data->mode);
557	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
558
559	return 0;
560}
561
562static const struct dev_pm_ops omap_hdq_pm_ops = {
563	SET_RUNTIME_PM_OPS(omap_hdq_runtime_suspend,
564			   omap_hdq_runtime_resume, NULL)
565};
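/*
 * Runtime PM pattern used by the w1 callbacks above: each callback takes a
 * reference with pm_runtime_get_sync(), performs its I/O, then calls
 * pm_runtime_mark_last_busy() and pm_runtime_put_autosuspend(), so the block
 * is idled again after the 300 ms autosuspend delay configured in probe below.
 */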
566
567static int omap_hdq_probe(struct platform_device *pdev)
568{
569	struct device *dev = &pdev->dev;
570	struct hdq_data *hdq_data;
571	int ret, irq;
572	u8 rev;
573	const char *mode;
574
575	hdq_data = devm_kzalloc(dev, sizeof(*hdq_data), GFP_KERNEL);
576	if (!hdq_data)
577		return -ENOMEM;
578
579	hdq_data->dev = dev;
580	platform_set_drvdata(pdev, hdq_data);
581
582	hdq_data->hdq_base = devm_platform_ioremap_resource(pdev, 0);
583	if (IS_ERR(hdq_data->hdq_base))
584		return PTR_ERR(hdq_data->hdq_base);
585
586	mutex_init(&hdq_data->hdq_mutex);
587
588	ret = of_property_read_string(pdev->dev.of_node, "ti,mode", &mode);
589	if (ret < 0 || !strcmp(mode, "hdq")) {
590		hdq_data->mode = 0;
591		omap_w1_master.search = omap_w1_search_bus;
592	} else {
593		hdq_data->mode = 1;
594		omap_w1_master.triplet = omap_w1_triplet;
595	}
596
597	pm_runtime_enable(&pdev->dev);
598	pm_runtime_use_autosuspend(&pdev->dev);
599	pm_runtime_set_autosuspend_delay(&pdev->dev, 300);
600	ret = pm_runtime_get_sync(&pdev->dev);
601	if (ret < 0) {
602		pm_runtime_put_noidle(&pdev->dev);
603		dev_dbg(&pdev->dev, "pm_runtime_get_sync failed\n");
604		goto err_w1;
605	}
606
607	rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
608	dev_info(&pdev->dev, "OMAP HDQ Hardware Rev %c.%c. Driver in %s mode\n",
609		(rev >> 4) + '0', (rev & 0x0f) + '0', "Interrupt");
610
611	spin_lock_init(&hdq_data->hdq_spinlock);
612
613	irq = platform_get_irq(pdev, 0);
614	if (irq	< 0) {
615		dev_dbg(&pdev->dev, "Failed to get IRQ: %d\n", irq);
616		ret = irq;
617		goto err_irq;
618	}
619
620	ret = devm_request_irq(dev, irq, hdq_isr, 0, "omap_hdq", hdq_data);
621	if (ret < 0) {
622		dev_dbg(&pdev->dev, "could not request irq\n");
623		goto err_irq;
624	}
625
626	omap_hdq_break(hdq_data);
627
628	pm_runtime_mark_last_busy(&pdev->dev);
629	pm_runtime_put_autosuspend(&pdev->dev);
630
631	omap_w1_master.data = hdq_data;
632
633	ret = w1_add_master_device(&omap_w1_master);
634	if (ret) {
635		dev_dbg(&pdev->dev, "Failure in registering w1 master\n");
636		goto err_w1;
637	}
638
639	return 0;
640
641err_irq:
642	pm_runtime_put_sync(&pdev->dev);
643err_w1:
644	pm_runtime_dont_use_autosuspend(&pdev->dev);
645	pm_runtime_disable(&pdev->dev);
646
647	return ret;
648}
649
650static int omap_hdq_remove(struct platform_device *pdev)
651{
652	int active;
653
654	active = pm_runtime_get_sync(&pdev->dev);
655	if (active < 0)
656		pm_runtime_put_noidle(&pdev->dev);
657
658	w1_remove_master_device(&omap_w1_master);
659
660	pm_runtime_dont_use_autosuspend(&pdev->dev);
661	if (active >= 0)
662		pm_runtime_put_sync(&pdev->dev);
663	pm_runtime_disable(&pdev->dev);
664
665	return 0;
666}
667
668static const struct of_device_id omap_hdq_dt_ids[] = {
669	{ .compatible = "ti,omap3-1w" },
670	{ .compatible = "ti,am4372-hdq" },
671	{}
672};
673MODULE_DEVICE_TABLE(of, omap_hdq_dt_ids);
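/*
 * A minimal, assumed devicetree fragment for this driver; the node name and
 * unit address are illustrative only.  The optional "ti,mode" property
 * defaults to HDQ mode when absent or set to "hdq"; any other value
 * (e.g. "1w") selects 1-Wire mode, as handled in probe above.
 *
 *	hdqw1w: 1w@480b2000 {
 *		compatible = "ti,omap3-1w";
 *		ti,mode = "1w";
 *	};
 */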
674
675static struct platform_driver omap_hdq_driver = {
676	.probe = omap_hdq_probe,
677	.remove = omap_hdq_remove,
678	.driver = {
679		.name =	"omap_hdq",
680		.of_match_table = omap_hdq_dt_ids,
681		.pm = &omap_hdq_pm_ops,
682	},
683};
684module_platform_driver(omap_hdq_driver);
685
686MODULE_AUTHOR("Texas Instruments");
687MODULE_DESCRIPTION("HDQ-1W driver Library");
688MODULE_LICENSE("GPL");
v3.1
 
  1/*
  2 * drivers/w1/masters/omap_hdq.c
  3 *
  4 * Copyright (C) 2007 Texas Instruments, Inc.
  5 *
  6 * This file is licensed under the terms of the GNU General Public License
  7 * version 2. This program is licensed "as is" without any warranty of any
  8 * kind, whether express or implied.
  9 *
 10 */
 11#include <linux/kernel.h>
 12#include <linux/module.h>
 13#include <linux/platform_device.h>
 14#include <linux/interrupt.h>
 15#include <linux/slab.h>
 16#include <linux/err.h>
 17#include <linux/clk.h>
 18#include <linux/io.h>
 19#include <linux/sched.h>
 20
 21#include <asm/irq.h>
 22#include <mach/hardware.h>
 23
 24#include "../w1.h"
 25#include "../w1_int.h"
 26
 27#define	MOD_NAME	"OMAP_HDQ:"
 28
 29#define OMAP_HDQ_REVISION			0x00
 30#define OMAP_HDQ_TX_DATA			0x04
 31#define OMAP_HDQ_RX_DATA			0x08
 32#define OMAP_HDQ_CTRL_STATUS			0x0c
 33#define OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK	(1<<6)
 34#define OMAP_HDQ_CTRL_STATUS_CLOCKENABLE	(1<<5)
 35#define OMAP_HDQ_CTRL_STATUS_GO			(1<<4)
 36#define OMAP_HDQ_CTRL_STATUS_INITIALIZATION	(1<<2)
 37#define OMAP_HDQ_CTRL_STATUS_DIR		(1<<1)
 38#define OMAP_HDQ_CTRL_STATUS_MODE		(1<<0)
 39#define OMAP_HDQ_INT_STATUS			0x10
 40#define OMAP_HDQ_INT_STATUS_TXCOMPLETE		(1<<2)
 41#define OMAP_HDQ_INT_STATUS_RXCOMPLETE		(1<<1)
 42#define OMAP_HDQ_INT_STATUS_TIMEOUT		(1<<0)
 43#define OMAP_HDQ_SYSCONFIG			0x14
 44#define OMAP_HDQ_SYSCONFIG_SOFTRESET		(1<<1)
 45#define OMAP_HDQ_SYSCONFIG_AUTOIDLE		(1<<0)
 46#define OMAP_HDQ_SYSSTATUS			0x18
 47#define OMAP_HDQ_SYSSTATUS_RESETDONE		(1<<0)
 48
 49#define OMAP_HDQ_FLAG_CLEAR			0
 50#define OMAP_HDQ_FLAG_SET			1
 51#define OMAP_HDQ_TIMEOUT			(HZ/5)
 52
 53#define OMAP_HDQ_MAX_USER			4
 54
 55static DECLARE_WAIT_QUEUE_HEAD(hdq_wait_queue);
 56static int w1_id;
 57
 58struct hdq_data {
 59	struct device		*dev;
 60	void __iomem		*hdq_base;
 61	/* lock status update */
 62	struct  mutex		hdq_mutex;
 63	int			hdq_usecount;
 64	struct	clk		*hdq_ick;
 65	struct	clk		*hdq_fck;
 66	u8			hdq_irqstatus;
 67	/* device lock */
 68	spinlock_t		hdq_spinlock;
 69	/*
 70	 * Used to control the call to omap_hdq_get and omap_hdq_put.
 71	 * HDQ Protocol: Write the CMD|REG_address first, followed by
 72	 * the data write or read.
 73	 */
 74	int			init_trans;
 75};
 76
 77static int __devinit omap_hdq_probe(struct platform_device *pdev);
 78static int omap_hdq_remove(struct platform_device *pdev);
 79
 80static struct platform_driver omap_hdq_driver = {
 81	.probe =	omap_hdq_probe,
 82	.remove =	omap_hdq_remove,
 83	.driver =	{
 84		.name =	"omap_hdq",
 85	},
 86};
 87
 88static u8 omap_w1_read_byte(void *_hdq);
 89static void omap_w1_write_byte(void *_hdq, u8 byte);
 90static u8 omap_w1_reset_bus(void *_hdq);
 91static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
 92		u8 search_type,	w1_slave_found_callback slave_found);
 93
 94
 95static struct w1_bus_master omap_w1_master = {
 96	.read_byte	= omap_w1_read_byte,
 97	.write_byte	= omap_w1_write_byte,
 98	.reset_bus	= omap_w1_reset_bus,
 99	.search		= omap_w1_search_bus,
100};
101
102/* HDQ register I/O routines */
103static inline u8 hdq_reg_in(struct hdq_data *hdq_data, u32 offset)
104{
105	return __raw_readb(hdq_data->hdq_base + offset);
106}
107
108static inline void hdq_reg_out(struct hdq_data *hdq_data, u32 offset, u8 val)
109{
110	__raw_writeb(val, hdq_data->hdq_base + offset);
111}
112
113static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset,
114			u8 val, u8 mask)
115{
116	u8 new_val = (__raw_readb(hdq_data->hdq_base + offset) & ~mask)
117			| (val & mask);
118	__raw_writeb(new_val, hdq_data->hdq_base + offset);
119
120	return new_val;
121}
122
123/*
124 * Wait for one or more bits in the flag to change.
125 * HDQ_FLAG_SET: wait until any bit in the flag is set.
126 * HDQ_FLAG_CLEAR: wait until all bits in the flag are cleared.
127 * return 0 on success and -ETIMEDOUT in the case of timeout.
128 */
129static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset,
130		u8 flag, u8 flag_set, u8 *status)
131{
132	int ret = 0;
133	unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;
134
135	if (flag_set == OMAP_HDQ_FLAG_CLEAR) {
136		/* wait for the flag clear */
137		while (((*status = hdq_reg_in(hdq_data, offset)) & flag)
138			&& time_before(jiffies, timeout)) {
139			schedule_timeout_uninterruptible(1);
140		}
141		if (*status & flag)
142			ret = -ETIMEDOUT;
143	} else if (flag_set == OMAP_HDQ_FLAG_SET) {
144		/* wait for the flag set */
145		while (!((*status = hdq_reg_in(hdq_data, offset)) & flag)
146			&& time_before(jiffies, timeout)) {
147			schedule_timeout_uninterruptible(1);
148		}
149		if (!(*status & flag))
150			ret = -ETIMEDOUT;
151	} else
152		return -EINVAL;
153
154	return ret;
155}
156
157/* write out a byte and fill *status with HDQ_INT_STATUS */
158static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
159{
160	int ret;
161	u8 tmp_status;
162	unsigned long irqflags;
163
164	*status = 0;
165
166	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
167	/* clear interrupt flags via a dummy read */
168	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
169	/* ISR loads it with new INT_STATUS */
170	hdq_data->hdq_irqstatus = 0;
171	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
172
173	hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val);
174
175	/* set the GO bit */
176	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_GO,
177		OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
178	/* wait for the TXCOMPLETE bit */
179	ret = wait_event_timeout(hdq_wait_queue,
180		hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
181	if (ret == 0) {
182		dev_dbg(hdq_data->dev, "TX wait elapsed\n");
183		goto out;
184	}
185
186	*status = hdq_data->hdq_irqstatus;
187	/* check irqstatus */
188	if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) {
189		dev_dbg(hdq_data->dev, "timeout waiting for"
190			"TXCOMPLETE/RXCOMPLETE, %x", *status);
191		ret = -ETIMEDOUT;
192		goto out;
193	}
194
195	/* wait for the GO bit to return to zero */
196	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
197			OMAP_HDQ_CTRL_STATUS_GO,
198			OMAP_HDQ_FLAG_CLEAR, &tmp_status);
199	if (ret) {
200		dev_dbg(hdq_data->dev, "timeout waiting GO bit"
201			"return to zero, %x", tmp_status);
202	}
203
204out:
205	return ret;
206}
207
208/* HDQ Interrupt service routine */
209static irqreturn_t hdq_isr(int irq, void *_hdq)
210{
211	struct hdq_data *hdq_data = _hdq;
212	unsigned long irqflags;
213
214	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
215	hdq_data->hdq_irqstatus = hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
216	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
217	dev_dbg(hdq_data->dev, "hdq_isr: %x", hdq_data->hdq_irqstatus);
218
219	if (hdq_data->hdq_irqstatus &
220		(OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE
221		| OMAP_HDQ_INT_STATUS_TIMEOUT)) {
222		/* wake up sleeping process */
223		wake_up(&hdq_wait_queue);
224	}
225
226	return IRQ_HANDLED;
227}
228
229/* HDQ Mode: always return success */
230static u8 omap_w1_reset_bus(void *_hdq)
231{
232	return 0;
233}
234
235/* W1 search callback function */
236static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
237		u8 search_type, w1_slave_found_callback slave_found)
238{
239	u64 module_id, rn_le, cs, id;
240
241	if (w1_id)
242		module_id = w1_id;
243	else
244		module_id = 0x1;
245
246	rn_le = cpu_to_le64(module_id);
247	/*
248	 * HDQ might not truly obey the 1-wire spec, so calculate
249	 * the CRC based on the module parameter.
250	 */
251	cs = w1_calc_crc8((u8 *)&rn_le, 7);
252	id = (cs << 56) | module_id;
253
254	slave_found(master_dev, id);
255}
256
257static int _omap_hdq_reset(struct hdq_data *hdq_data)
258{
259	int ret;
260	u8 tmp_status;
261
262	hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG, OMAP_HDQ_SYSCONFIG_SOFTRESET);
263	/*
264	 * Select HDQ mode & enable clocks.
265	 * It has been observed that the INT flags can't be cleared via a read and
266	 * that GO/INIT won't return to zero if the interrupt is disabled, so we
267	 * always enable the interrupt.
268	 */
269	hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
270		OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
271		OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
272
273	/* wait for reset to complete */
274	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_SYSSTATUS,
275		OMAP_HDQ_SYSSTATUS_RESETDONE, OMAP_HDQ_FLAG_SET, &tmp_status);
276	if (ret)
277		dev_dbg(hdq_data->dev, "timeout waiting HDQ reset, %x",
278				tmp_status);
279	else {
280		hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
281			OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
282			OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
283		hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
284			OMAP_HDQ_SYSCONFIG_AUTOIDLE);
285	}
286
287	return ret;
288}
289
290/* Issue break pulse to the device */
291static int omap_hdq_break(struct hdq_data *hdq_data)
292{
293	int ret = 0;
294	u8 tmp_status;
295	unsigned long irqflags;
296
297	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
298	if (ret < 0) {
299		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
300		ret = -EINTR;
301		goto rtn;
302	}
303
304	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
305	/* clear interrupt flags via a dummy read */
306	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
307	/* ISR loads it with new INT_STATUS */
308	hdq_data->hdq_irqstatus = 0;
309	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
310
311	/* set the INIT and GO bit */
312	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
313		OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO,
314		OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
315		OMAP_HDQ_CTRL_STATUS_GO);
316
317	/* wait for the TIMEOUT bit */
318	ret = wait_event_timeout(hdq_wait_queue,
319		hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
320	if (ret == 0) {
321		dev_dbg(hdq_data->dev, "break wait elapsed\n");
322		ret = -EINTR;
323		goto out;
324	}
325
326	tmp_status = hdq_data->hdq_irqstatus;
327	/* check irqstatus */
328	if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) {
329		dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x",
330				tmp_status);
331		ret = -ETIMEDOUT;
332		goto out;
333	}
334	/*
335	 * wait for both INIT and GO bits to return to zero.
336	 * zero wait time expected for interrupt mode.
337	 */
338	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
339			OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
340			OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_FLAG_CLEAR,
341			&tmp_status);
342	if (ret)
343		dev_dbg(hdq_data->dev, "timeout waiting INIT&GO bits"
344			"return to zero, %x", tmp_status);
345
346out:
347	mutex_unlock(&hdq_data->hdq_mutex);
348rtn:
349	return ret;
350}
351
352static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
353{
354	int ret = 0;
355	u8 status;
356	unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;
357
358	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
359	if (ret < 0) {
360		ret = -EINTR;
361		goto rtn;
362	}
363
364	if (!hdq_data->hdq_usecount) {
365		ret = -EINVAL;
366		goto out;
367	}
368
369	if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
370		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
371			OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO,
372			OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
373		/*
374		 * The RX comes immediately after TX. It
375		 * triggers another interrupt before we
376		 * sleep. So we have to wait for RXCOMPLETE bit.
377		 */
378		while (!(hdq_data->hdq_irqstatus
379			& OMAP_HDQ_INT_STATUS_RXCOMPLETE)
380			&& time_before(jiffies, timeout)) {
381			schedule_timeout_uninterruptible(1);
382		}
383		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 0,
384			OMAP_HDQ_CTRL_STATUS_DIR);
385		status = hdq_data->hdq_irqstatus;
386		/* check irqstatus */
387		if (!(status & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
388			dev_dbg(hdq_data->dev, "timeout waiting for"
389				"RXCOMPLETE, %x", status);
390			ret = -ETIMEDOUT;
391			goto out;
392		}
393	}
394	/* the data is ready. Read it in! */
395	*val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA);
396out:
397	mutex_unlock(&hdq_data->hdq_mutex);
398rtn:
399	return 0;
400
401}
402
403/* Enable clocks and set the controller to HDQ mode */
404static int omap_hdq_get(struct hdq_data *hdq_data)
405{
406	int ret = 0;
407
408	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
409	if (ret < 0) {
410		ret = -EINTR;
411		goto rtn;
412	}
413
414	if (OMAP_HDQ_MAX_USER == hdq_data->hdq_usecount) {
415		dev_dbg(hdq_data->dev, "attempt to exceed the max use count");
416		ret = -EINVAL;
417		goto out;
418	} else {
419		hdq_data->hdq_usecount++;
420		try_module_get(THIS_MODULE);
421		if (1 == hdq_data->hdq_usecount) {
422			if (clk_enable(hdq_data->hdq_ick)) {
423				dev_dbg(hdq_data->dev, "Can not enable ick\n");
424				ret = -ENODEV;
425				goto clk_err;
426			}
427			if (clk_enable(hdq_data->hdq_fck)) {
428				dev_dbg(hdq_data->dev, "Can not enable fck\n");
429				clk_disable(hdq_data->hdq_ick);
430				ret = -ENODEV;
431				goto clk_err;
432			}
433
434			/* make sure HDQ is out of reset */
435			if (!(hdq_reg_in(hdq_data, OMAP_HDQ_SYSSTATUS) &
436				OMAP_HDQ_SYSSTATUS_RESETDONE)) {
437				ret = _omap_hdq_reset(hdq_data);
438				if (ret)
439					/* back up the count */
440					hdq_data->hdq_usecount--;
441			} else {
442				/* select HDQ mode & enable clocks */
443				hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
444					OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
445					OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
446				hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
447					OMAP_HDQ_SYSCONFIG_AUTOIDLE);
448				hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
449			}
450		}
451	}
452
453clk_err:
454	clk_put(hdq_data->hdq_ick);
455	clk_put(hdq_data->hdq_fck);
456out:
457	mutex_unlock(&hdq_data->hdq_mutex);
458rtn:
459	return ret;
460}
461
462/* Disable clocks to the module */
463static int omap_hdq_put(struct hdq_data *hdq_data)
464{
465	int ret = 0;
466
467	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
468	if (ret < 0)
469		return -EINTR;
470
471	if (0 == hdq_data->hdq_usecount) {
472		dev_dbg(hdq_data->dev, "attempt to decrement use count"
473			"when it is zero");
474		ret = -EINVAL;
475	} else {
476		hdq_data->hdq_usecount--;
477		module_put(THIS_MODULE);
478		if (0 == hdq_data->hdq_usecount) {
479			clk_disable(hdq_data->hdq_ick);
480			clk_disable(hdq_data->hdq_fck);
481		}
482	}
483	mutex_unlock(&hdq_data->hdq_mutex);
484
485	return ret;
486}
487
488/* Read a byte of data from the device */
489static u8 omap_w1_read_byte(void *_hdq)
490{
491	struct hdq_data *hdq_data = _hdq;
492	u8 val = 0;
493	int ret;
494
495	ret = hdq_read_byte(hdq_data, &val);
496	if (ret) {
497		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
498		if (ret < 0) {
499			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
500			return -EINTR;
501		}
502		hdq_data->init_trans = 0;
503		mutex_unlock(&hdq_data->hdq_mutex);
504		omap_hdq_put(hdq_data);
505		return -1;
506	}
507
508	/* Write followed by a read, release the module */
509	if (hdq_data->init_trans) {
510		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
511		if (ret < 0) {
512			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
513			return -EINTR;
514		}
515		hdq_data->init_trans = 0;
516		mutex_unlock(&hdq_data->hdq_mutex);
517		omap_hdq_put(hdq_data);
518	}
519
520	return val;
521}
522
523/* Write a byte of data to the device */
524static void omap_w1_write_byte(void *_hdq, u8 byte)
525{
526	struct hdq_data *hdq_data = _hdq;
527	int ret;
528	u8 status;
529
530	/* First write to initialize the transfer */
531	if (hdq_data->init_trans == 0)
532		omap_hdq_get(hdq_data);
533
534	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
535	if (ret < 0) {
536		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
537		return;
538	}
539	hdq_data->init_trans++;
540	mutex_unlock(&hdq_data->hdq_mutex);
541
542	ret = hdq_write_byte(hdq_data, byte, &status);
543	if (ret == 0) {
544		dev_dbg(hdq_data->dev, "TX failure:Ctrl status %x\n", status);
545		return;
546	}
547
548	/* Second write, data transferred. Release the module */
549	if (hdq_data->init_trans > 1) {
550		omap_hdq_put(hdq_data);
551		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
552		if (ret < 0) {
553			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
554			return;
555		}
556		hdq_data->init_trans = 0;
557		mutex_unlock(&hdq_data->hdq_mutex);
558	}
559
560	return;
561}
562
563static int __devinit omap_hdq_probe(struct platform_device *pdev)
564{
565	struct hdq_data *hdq_data;
566	struct resource *res;
567	int ret, irq;
568	u8 rev;
569
570	hdq_data = kmalloc(sizeof(*hdq_data), GFP_KERNEL);
571	if (!hdq_data) {
572		dev_dbg(&pdev->dev, "unable to allocate memory\n");
573		ret = -ENOMEM;
574		goto err_kmalloc;
575	}
576
577	hdq_data->dev = &pdev->dev;
578	platform_set_drvdata(pdev, hdq_data);
579
580	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
581	if (!res) {
582		dev_dbg(&pdev->dev, "unable to get resource\n");
583		ret = -ENXIO;
584		goto err_resource;
585	}
586
587	hdq_data->hdq_base = ioremap(res->start, SZ_4K);
588	if (!hdq_data->hdq_base) {
589		dev_dbg(&pdev->dev, "ioremap failed\n");
590		ret = -EINVAL;
591		goto err_ioremap;
592	}
593
594	/* get interface & functional clock objects */
595	hdq_data->hdq_ick = clk_get(&pdev->dev, "ick");
596	if (IS_ERR(hdq_data->hdq_ick)) {
597		dev_dbg(&pdev->dev, "Can't get HDQ ick clock object\n");
598		ret = PTR_ERR(hdq_data->hdq_ick);
599		goto err_ick;
600	}
601
602	hdq_data->hdq_fck = clk_get(&pdev->dev, "fck");
603	if (IS_ERR(hdq_data->hdq_fck)) {
604		dev_dbg(&pdev->dev, "Can't get HDQ fck clock object\n");
605		ret = PTR_ERR(hdq_data->hdq_fck);
606		goto err_fck;
607	}
608
609	hdq_data->hdq_usecount = 0;
610	mutex_init(&hdq_data->hdq_mutex);
611
612	if (clk_enable(hdq_data->hdq_ick)) {
613		dev_dbg(&pdev->dev, "Can not enable ick\n");
614		ret = -ENODEV;
615		goto err_intfclk;
616	}
617
618	if (clk_enable(hdq_data->hdq_fck)) {
619		dev_dbg(&pdev->dev, "Can not enable fck\n");
620		ret = -ENODEV;
621		goto err_fnclk;
622	}
623
624	rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
625	dev_info(&pdev->dev, "OMAP HDQ Hardware Rev %c.%c. Driver in %s mode\n",
626		(rev >> 4) + '0', (rev & 0x0f) + '0', "Interrupt");
627
628	spin_lock_init(&hdq_data->hdq_spinlock);
629
630	irq = platform_get_irq(pdev, 0);
631	if (irq	< 0) {
632		ret = -ENXIO;
633		goto err_irq;
634	}
635
636	ret = request_irq(irq, hdq_isr, IRQF_DISABLED, "omap_hdq", hdq_data);
637	if (ret < 0) {
638		dev_dbg(&pdev->dev, "could not request irq\n");
639		goto err_irq;
640	}
641
642	omap_hdq_break(hdq_data);
643
644	/* don't clock the HDQ until it is needed */
645	clk_disable(hdq_data->hdq_ick);
646	clk_disable(hdq_data->hdq_fck);
647
648	omap_w1_master.data = hdq_data;
649
650	ret = w1_add_master_device(&omap_w1_master);
651	if (ret) {
652		dev_dbg(&pdev->dev, "Failure in registering w1 master\n");
653		goto err_w1;
654	}
655
656	return 0;
657
658err_w1:
659err_irq:
660	clk_disable(hdq_data->hdq_fck);
661
662err_fnclk:
663	clk_disable(hdq_data->hdq_ick);
664
665err_intfclk:
666	clk_put(hdq_data->hdq_fck);
667
668err_fck:
669	clk_put(hdq_data->hdq_ick);
670
671err_ick:
672	iounmap(hdq_data->hdq_base);
673
674err_ioremap:
675err_resource:
676	platform_set_drvdata(pdev, NULL);
677	kfree(hdq_data);
678
679err_kmalloc:
680	return ret;
681
682}
683
684static int omap_hdq_remove(struct platform_device *pdev)
685{
686	struct hdq_data *hdq_data = platform_get_drvdata(pdev);
687
688	mutex_lock(&hdq_data->hdq_mutex);
689
690	if (hdq_data->hdq_usecount) {
691		dev_dbg(&pdev->dev, "removed when use count is not zero\n");
692		mutex_unlock(&hdq_data->hdq_mutex);
693		return -EBUSY;
694	}
695
696	mutex_unlock(&hdq_data->hdq_mutex);
697
698	/* remove module dependency */
699	clk_put(hdq_data->hdq_ick);
700	clk_put(hdq_data->hdq_fck);
701	free_irq(INT_24XX_HDQ_IRQ, hdq_data);
702	platform_set_drvdata(pdev, NULL);
703	iounmap(hdq_data->hdq_base);
704	kfree(hdq_data);
705
706	return 0;
707}
708
709static int __init
710omap_hdq_init(void)
711{
712	return platform_driver_register(&omap_hdq_driver);
713}
714module_init(omap_hdq_init);
715
716static void __exit
717omap_hdq_exit(void)
718{
719	platform_driver_unregister(&omap_hdq_driver);
720}
721module_exit(omap_hdq_exit);
722
723module_param(w1_id, int, S_IRUSR);
724MODULE_PARM_DESC(w1_id, "1-wire id for the slave detection");
725
726MODULE_AUTHOR("Texas Instruments");
727MODULE_DESCRIPTION("HDQ driver Library");
728MODULE_LICENSE("GPL");