Loading...
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Synopsys DesignWare I2C adapter driver (master only).
4 *
5 * Based on the TI DAVINCI I2C adapter driver.
6 *
7 * Copyright (C) 2006 Texas Instruments.
8 * Copyright (C) 2007 MontaVista Software Inc.
9 * Copyright (C) 2009 Provigent Ltd.
10 */
11#include <linux/delay.h>
12#include <linux/err.h>
13#include <linux/errno.h>
14#include <linux/export.h>
15#include <linux/gpio/consumer.h>
16#include <linux/i2c.h>
17#include <linux/interrupt.h>
18#include <linux/io.h>
19#include <linux/module.h>
20#include <linux/pm_runtime.h>
21#include <linux/regmap.h>
22#include <linux/reset.h>
23
24#include "i2c-designware-core.h"
25
26#define AMD_TIMEOUT_MIN_US 25
27#define AMD_TIMEOUT_MAX_US 250
28#define AMD_MASTERCFG_MASK GENMASK(15, 0)
29
static void i2c_dw_configure_fifo_master(struct dw_i2c_dev *dev)
{
	/*
	 * Configure Tx/Rx FIFO threshold levels.
	 *
	 * Tx threshold at half depth lets the ISR refill before underrun;
	 * Rx threshold 0 presumably raises RX_FULL on the first received
	 * byte (DW databook convention) - TODO confirm.
	 */
	regmap_write(dev->map, DW_IC_TX_TL, dev->tx_fifo_depth / 2);
	regmap_write(dev->map, DW_IC_RX_TL, 0);

	/* Configure the I2C master */
	regmap_write(dev->map, DW_IC_CON, dev->master_cfg);
}
39
40static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
41{
42 u32 comp_param1;
43 u32 sda_falling_time, scl_falling_time;
44 struct i2c_timings *t = &dev->timings;
45 const char *fp_str = "";
46 u32 ic_clk;
47 int ret;
48
49 ret = i2c_dw_acquire_lock(dev);
50 if (ret)
51 return ret;
52
53 ret = regmap_read(dev->map, DW_IC_COMP_PARAM_1, &comp_param1);
54 i2c_dw_release_lock(dev);
55 if (ret)
56 return ret;
57
58 /* Set standard and fast speed dividers for high/low periods */
59 sda_falling_time = t->sda_fall_ns ?: 300; /* ns */
60 scl_falling_time = t->scl_fall_ns ?: 300; /* ns */
61
62 /* Calculate SCL timing parameters for standard mode if not set */
63 if (!dev->ss_hcnt || !dev->ss_lcnt) {
64 ic_clk = i2c_dw_clk_rate(dev);
65 dev->ss_hcnt =
66 i2c_dw_scl_hcnt(ic_clk,
67 4000, /* tHD;STA = tHIGH = 4.0 us */
68 sda_falling_time,
69 0, /* 0: DW default, 1: Ideal */
70 0); /* No offset */
71 dev->ss_lcnt =
72 i2c_dw_scl_lcnt(ic_clk,
73 4700, /* tLOW = 4.7 us */
74 scl_falling_time,
75 0); /* No offset */
76 }
77 dev_dbg(dev->dev, "Standard Mode HCNT:LCNT = %d:%d\n",
78 dev->ss_hcnt, dev->ss_lcnt);
79
80 /*
81 * Set SCL timing parameters for fast mode or fast mode plus. Only
82 * difference is the timing parameter values since the registers are
83 * the same.
84 */
85 if (t->bus_freq_hz == I2C_MAX_FAST_MODE_PLUS_FREQ) {
86 /*
87 * Check are Fast Mode Plus parameters available. Calculate
88 * SCL timing parameters for Fast Mode Plus if not set.
89 */
90 if (dev->fp_hcnt && dev->fp_lcnt) {
91 dev->fs_hcnt = dev->fp_hcnt;
92 dev->fs_lcnt = dev->fp_lcnt;
93 } else {
94 ic_clk = i2c_dw_clk_rate(dev);
95 dev->fs_hcnt =
96 i2c_dw_scl_hcnt(ic_clk,
97 260, /* tHIGH = 260 ns */
98 sda_falling_time,
99 0, /* DW default */
100 0); /* No offset */
101 dev->fs_lcnt =
102 i2c_dw_scl_lcnt(ic_clk,
103 500, /* tLOW = 500 ns */
104 scl_falling_time,
105 0); /* No offset */
106 }
107 fp_str = " Plus";
108 }
109 /*
110 * Calculate SCL timing parameters for fast mode if not set. They are
111 * needed also in high speed mode.
112 */
113 if (!dev->fs_hcnt || !dev->fs_lcnt) {
114 ic_clk = i2c_dw_clk_rate(dev);
115 dev->fs_hcnt =
116 i2c_dw_scl_hcnt(ic_clk,
117 600, /* tHD;STA = tHIGH = 0.6 us */
118 sda_falling_time,
119 0, /* 0: DW default, 1: Ideal */
120 0); /* No offset */
121 dev->fs_lcnt =
122 i2c_dw_scl_lcnt(ic_clk,
123 1300, /* tLOW = 1.3 us */
124 scl_falling_time,
125 0); /* No offset */
126 }
127 dev_dbg(dev->dev, "Fast Mode%s HCNT:LCNT = %d:%d\n",
128 fp_str, dev->fs_hcnt, dev->fs_lcnt);
129
130 /* Check is high speed possible and fall back to fast mode if not */
131 if ((dev->master_cfg & DW_IC_CON_SPEED_MASK) ==
132 DW_IC_CON_SPEED_HIGH) {
133 if ((comp_param1 & DW_IC_COMP_PARAM_1_SPEED_MODE_MASK)
134 != DW_IC_COMP_PARAM_1_SPEED_MODE_HIGH) {
135 dev_err(dev->dev, "High Speed not supported!\n");
136 t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ;
137 dev->master_cfg &= ~DW_IC_CON_SPEED_MASK;
138 dev->master_cfg |= DW_IC_CON_SPEED_FAST;
139 dev->hs_hcnt = 0;
140 dev->hs_lcnt = 0;
141 } else if (!dev->hs_hcnt || !dev->hs_lcnt) {
142 ic_clk = i2c_dw_clk_rate(dev);
143 dev->hs_hcnt =
144 i2c_dw_scl_hcnt(ic_clk,
145 160, /* tHIGH = 160 ns */
146 sda_falling_time,
147 0, /* DW default */
148 0); /* No offset */
149 dev->hs_lcnt =
150 i2c_dw_scl_lcnt(ic_clk,
151 320, /* tLOW = 320 ns */
152 scl_falling_time,
153 0); /* No offset */
154 }
155 dev_dbg(dev->dev, "High Speed Mode HCNT:LCNT = %d:%d\n",
156 dev->hs_hcnt, dev->hs_lcnt);
157 }
158
159 ret = i2c_dw_set_sda_hold(dev);
160 if (ret)
161 return ret;
162
163 dev_dbg(dev->dev, "Bus speed: %s\n", i2c_freq_mode_string(t->bus_freq_hz));
164 return 0;
165}
166
/**
 * i2c_dw_init_master() - Initialize the designware I2C master hardware
 * @dev: device private data
 *
 * This functions configures and enables the I2C master.
 * This function is called during I2C init function, and in case of timeout at
 * run time.
 *
 * Return: 0 on success, or a negative errno if the controller lock could
 * not be acquired.
 */
static int i2c_dw_init_master(struct dw_i2c_dev *dev)
{
	int ret;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	/*
	 * Disable the adapter. NOTE(review): the timing registers below are
	 * presumably only writable while the adapter is disabled - confirm
	 * against the DesignWare databook.
	 */
	__i2c_dw_disable(dev);

	/* Write standard speed timing parameters */
	regmap_write(dev->map, DW_IC_SS_SCL_HCNT, dev->ss_hcnt);
	regmap_write(dev->map, DW_IC_SS_SCL_LCNT, dev->ss_lcnt);

	/* Write fast mode/fast mode plus timing parameters */
	regmap_write(dev->map, DW_IC_FS_SCL_HCNT, dev->fs_hcnt);
	regmap_write(dev->map, DW_IC_FS_SCL_LCNT, dev->fs_lcnt);

	/* Write high speed timing parameters if supported */
	if (dev->hs_hcnt && dev->hs_lcnt) {
		regmap_write(dev->map, DW_IC_HS_SCL_HCNT, dev->hs_hcnt);
		regmap_write(dev->map, DW_IC_HS_SCL_LCNT, dev->hs_lcnt);
	}

	/* Write SDA hold time if supported */
	if (dev->sda_hold_time)
		regmap_write(dev->map, DW_IC_SDA_HOLD, dev->sda_hold_time);

	/* Set FIFO thresholds and IC_CON, leaving the adapter disabled */
	i2c_dw_configure_fifo_master(dev);
	i2c_dw_release_lock(dev);

	return 0;
}
209
/*
 * Program the target address/addressing mode for the message at
 * dev->msg_write_idx, enable the adapter and unmask the master
 * interrupts so the transfer proceeds from the ISR.
 */
static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 ic_con = 0, ic_tar = 0;
	u32 dummy;

	/* Disable the adapter */
	__i2c_dw_disable(dev);

	/* If the slave address is ten bit address, enable 10BITADDR */
	if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) {
		ic_con = DW_IC_CON_10BITADDR_MASTER;
		/*
		 * If I2C_DYNAMIC_TAR_UPDATE is set, the 10-bit addressing
		 * mode has to be enabled via bit 12 of IC_TAR register.
		 * We set it always as I2C_DYNAMIC_TAR_UPDATE can't be
		 * detected from registers.
		 */
		ic_tar = DW_IC_TAR_10BITADDR_MASTER;
	}

	/* Update only the addressing-mode bit; keep the rest of IC_CON */
	regmap_update_bits(dev->map, DW_IC_CON, DW_IC_CON_10BITADDR_MASTER,
			   ic_con);

	/*
	 * Set the slave (target) address and enable 10-bit addressing mode
	 * if applicable.
	 */
	regmap_write(dev->map, DW_IC_TAR,
		     msgs[dev->msg_write_idx].addr | ic_tar);

	/* Enforce disabled interrupts (due to HW issues) */
	regmap_write(dev->map, DW_IC_INTR_MASK, 0);

	/* Enable the adapter */
	__i2c_dw_enable(dev);

	/* Dummy read to avoid the register getting stuck on Bay Trail */
	regmap_read(dev->map, DW_IC_ENABLE_STATUS, &dummy);

	/* Clear and enable interrupts */
	regmap_read(dev->map, DW_IC_CLR_INTR, &dummy);
	regmap_write(dev->map, DW_IC_INTR_MASK, DW_IC_INTR_MASTER_MASK);
}
254
/*
 * Poll DW_IC_INTR_STAT until the STOP_DET bit deasserts, sleeping
 * ~1.1 ms between reads with an overall 20 ms budget. Used by the
 * polling-mode (AMD NAVI GPU) transfer path.
 *
 * Return: 0 when the bit cleared in time, otherwise the negative error
 * from regmap_read_poll_timeout() (logged here).
 */
static int i2c_dw_check_stopbit(struct dw_i2c_dev *dev)
{
	u32 val;
	int ret;

	ret = regmap_read_poll_timeout(dev->map, DW_IC_INTR_STAT, val,
				       !(val & DW_IC_INTR_STOP_DET),
				       1100, 20000);
	if (ret)
		dev_err(dev->dev, "i2c timeout error %d\n", ret);

	return ret;
}
268
/*
 * Wait for the bus to go idle, then for the STOP_DET bit to deassert.
 * Returns 0 on success or the first failing step's error code.
 */
static int i2c_dw_status(struct dw_i2c_dev *dev)
{
	int ret = i2c_dw_wait_bus_not_busy(dev);

	return ret ? ret : i2c_dw_check_stopbit(dev);
}
279
/*
 * Initiate and continue master read/write transaction with polling
 * based transfer routine afterward write messages into the Tx buffer.
 *
 * Polling-mode transfer path for the controller embedded in AMD NAVI
 * GPU cards; all interrupts stay masked and completion is detected by
 * polling the stop bit.
 *
 * Return: 0 on success, negative errno from bus-busy/stop-bit polling.
 */
static int amd_i2c_dw_xfer_quirk(struct i2c_adapter *adap, struct i2c_msg *msgs, int num_msgs)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
	int msg_wrt_idx, msg_itr_lmt, buf_len, data_idx;
	int cmd = 0, status;
	u8 *tx_buf;
	u32 val;

	/*
	 * In order to enable the interrupt for UCSI i.e. AMD NAVI GPU card,
	 * it is mandatory to set the right value in specific register
	 * (offset:0x474) as per the hardware IP specification.
	 */
	regmap_write(dev->map, AMD_UCSI_INTR_REG, AMD_UCSI_INTR_EN);

	dev->msgs = msgs;
	dev->msgs_num = num_msgs;
	i2c_dw_xfer_init(dev);
	/* This path polls; mask the interrupts xfer_init just enabled */
	regmap_write(dev->map, DW_IC_INTR_MASK, 0);

	/* Initiate messages read/write transaction */
	for (msg_wrt_idx = 0; msg_wrt_idx < num_msgs; msg_wrt_idx++) {
		tx_buf = msgs[msg_wrt_idx].buf;
		buf_len = msgs[msg_wrt_idx].len;

		if (!(msgs[msg_wrt_idx].flags & I2C_M_RD))
			regmap_write(dev->map, DW_IC_TX_TL, buf_len - 1);
		/*
		 * Initiate the i2c read/write transaction of buffer length,
		 * and poll for bus busy status. For the last message transfer,
		 * update the command with stopbit enable.
		 */
		for (msg_itr_lmt = buf_len; msg_itr_lmt > 0; msg_itr_lmt--) {
			/* BIT(9) of DATA_CMD: issue STOP after this byte */
			if (msg_wrt_idx == num_msgs - 1 && msg_itr_lmt == 1)
				cmd |= BIT(9);

			/* 0x100 in DATA_CMD requests a read cycle */
			if (msgs[msg_wrt_idx].flags & I2C_M_RD) {
				/* Due to hardware bug, need to write the same command twice. */
				regmap_write(dev->map, DW_IC_DATA_CMD, 0x100);
				regmap_write(dev->map, DW_IC_DATA_CMD, 0x100 | cmd);
				if (cmd) {
					regmap_write(dev->map, DW_IC_TX_TL, 2 * (buf_len - 1));
					regmap_write(dev->map, DW_IC_RX_TL, 2 * (buf_len - 1));
					/*
					 * Need to check the stop bit. However, it cannot be
					 * detected from the registers so we check it always
					 * when read/write the last byte.
					 */
					status = i2c_dw_status(dev);
					if (status)
						return status;

					/* Drain the full Rx payload into the caller's buffer */
					for (data_idx = 0; data_idx < buf_len; data_idx++) {
						regmap_read(dev->map, DW_IC_DATA_CMD, &val);
						tx_buf[data_idx] = val;
					}
					status = i2c_dw_check_stopbit(dev);
					if (status)
						return status;
				}
			} else {
				regmap_write(dev->map, DW_IC_DATA_CMD, *tx_buf++ | cmd);
				usleep_range(AMD_TIMEOUT_MIN_US, AMD_TIMEOUT_MAX_US);
			}
		}
		status = i2c_dw_check_stopbit(dev);
		if (status)
			return status;
	}

	return 0;
}
356
/*
 * Initiate (and continue) low level master read/write transaction.
 * This function is only called from i2c_dw_isr, and pumping i2c_msg
 * messages into the tx buffer. Even if the size of i2c_msg data is
 * longer than the size of the tx buffer, it handles everything.
 */
static void
i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 intr_mask;
	int tx_limit, rx_limit;
	u32 addr = msgs[dev->msg_write_idx].addr;
	u32 buf_len = dev->tx_buf_len;
	u8 *buf = dev->tx_buf;
	bool need_restart = false;
	unsigned int flr;

	intr_mask = DW_IC_INTR_MASTER_MASK;

	for (; dev->msg_write_idx < dev->msgs_num; dev->msg_write_idx++) {
		u32 flags = msgs[dev->msg_write_idx].flags;

		/*
		 * If target address has changed, we need to
		 * reprogram the target address in the I2C
		 * adapter when we are done with this transfer.
		 */
		if (msgs[dev->msg_write_idx].addr != addr) {
			dev_err(dev->dev,
				"%s: invalid target address\n", __func__);
			dev->msg_err = -EINVAL;
			break;
		}

		if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) {
			/* new i2c_msg */
			buf = msgs[dev->msg_write_idx].buf;
			buf_len = msgs[dev->msg_write_idx].len;

			/* If both IC_EMPTYFIFO_HOLD_MASTER_EN and
			 * IC_RESTART_EN are set, we must manually
			 * set restart bit between messages.
			 */
			if ((dev->master_cfg & DW_IC_CON_RESTART_EN) &&
			    (dev->msg_write_idx > 0))
				need_restart = true;
		}

		/* Free space in the Tx/Rx FIFOs bounds this refill round */
		regmap_read(dev->map, DW_IC_TXFLR, &flr);
		tx_limit = dev->tx_fifo_depth - flr;

		regmap_read(dev->map, DW_IC_RXFLR, &flr);
		rx_limit = dev->rx_fifo_depth - flr;

		while (buf_len > 0 && tx_limit > 0 && rx_limit > 0) {
			u32 cmd = 0;

			/*
			 * If IC_EMPTYFIFO_HOLD_MASTER_EN is set we must
			 * manually set the stop bit. However, it cannot be
			 * detected from the registers so we set it always
			 * when writing/reading the last byte.
			 */

			/*
			 * i2c-core always sets the buffer length of
			 * I2C_FUNC_SMBUS_BLOCK_DATA to 1. The length will
			 * be adjusted when receiving the first byte.
			 * Thus we can't stop the transaction here.
			 */
			/* BIT(9) of DATA_CMD: issue STOP after this byte */
			if (dev->msg_write_idx == dev->msgs_num - 1 &&
			    buf_len == 1 && !(flags & I2C_M_RECV_LEN))
				cmd |= BIT(9);

			/* BIT(10) of DATA_CMD: issue RESTART before this byte */
			if (need_restart) {
				cmd |= BIT(10);
				need_restart = false;
			}

			if (msgs[dev->msg_write_idx].flags & I2C_M_RD) {

				/* Avoid rx buffer overrun */
				if (dev->rx_outstanding >= dev->rx_fifo_depth)
					break;

				regmap_write(dev->map, DW_IC_DATA_CMD,
					     cmd | 0x100);
				rx_limit--;
				dev->rx_outstanding++;
			} else {
				regmap_write(dev->map, DW_IC_DATA_CMD,
					     cmd | *buf++);
			}
			tx_limit--; buf_len--;
		}

		/* Remember position in case the message didn't fit */
		dev->tx_buf = buf;
		dev->tx_buf_len = buf_len;

		/*
		 * Because we don't know the buffer length in the
		 * I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop
		 * the transaction here.
		 */
		if (buf_len > 0 || flags & I2C_M_RECV_LEN) {
			/* more bytes to be written */
			dev->status |= STATUS_WRITE_IN_PROGRESS;
			break;
		} else
			dev->status &= ~STATUS_WRITE_IN_PROGRESS;
	}

	/*
	 * If i2c_msg index search is completed, we don't need TX_EMPTY
	 * interrupt any more.
	 */
	if (dev->msg_write_idx == dev->msgs_num)
		intr_mask &= ~DW_IC_INTR_TX_EMPTY;

	if (dev->msg_err)
		intr_mask = 0;

	regmap_write(dev->map, DW_IC_INTR_MASK, intr_mask);
}
482
483static u8
484i2c_dw_recv_len(struct dw_i2c_dev *dev, u8 len)
485{
486 struct i2c_msg *msgs = dev->msgs;
487 u32 flags = msgs[dev->msg_read_idx].flags;
488
489 /*
490 * Adjust the buffer length and mask the flag
491 * after receiving the first byte.
492 */
493 len += (flags & I2C_CLIENT_PEC) ? 2 : 1;
494 dev->tx_buf_len = len - min_t(u8, len, dev->rx_outstanding);
495 msgs[dev->msg_read_idx].len = len;
496 msgs[dev->msg_read_idx].flags &= ~I2C_M_RECV_LEN;
497
498 return len;
499}
500
/*
 * Drain the Rx FIFO into the current read message(s). Called from the
 * ISR on RX_FULL. If a message cannot be completed with the data
 * available, progress is saved in dev->rx_buf/rx_buf_len and
 * STATUS_READ_IN_PROGRESS makes the next invocation resume from there.
 */
static void
i2c_dw_read(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	unsigned int rx_valid;

	for (; dev->msg_read_idx < dev->msgs_num; dev->msg_read_idx++) {
		u32 len, tmp;
		u8 *buf;

		if (!(msgs[dev->msg_read_idx].flags & I2C_M_RD))
			continue;

		/* Start a fresh message or resume a partially-read one */
		if (!(dev->status & STATUS_READ_IN_PROGRESS)) {
			len = msgs[dev->msg_read_idx].len;
			buf = msgs[dev->msg_read_idx].buf;
		} else {
			len = dev->rx_buf_len;
			buf = dev->rx_buf;
		}

		regmap_read(dev->map, DW_IC_RXFLR, &rx_valid);

		for (; len > 0 && rx_valid > 0; len--, rx_valid--) {
			u32 flags = msgs[dev->msg_read_idx].flags;

			regmap_read(dev->map, DW_IC_DATA_CMD, &tmp);
			/* Ensure length byte is a valid value */
			if (flags & I2C_M_RECV_LEN &&
			    (tmp & DW_IC_DATA_CMD_DAT) <= I2C_SMBUS_BLOCK_MAX && tmp > 0) {
				/* First byte of an SMBus block read: fix up length */
				len = i2c_dw_recv_len(dev, tmp);
			}
			*buf++ = tmp;
			dev->rx_outstanding--;
		}

		if (len > 0) {
			/* FIFO empty before message complete - save position */
			dev->status |= STATUS_READ_IN_PROGRESS;
			dev->rx_buf_len = len;
			dev->rx_buf = buf;
			return;
		} else
			dev->status &= ~STATUS_READ_IN_PROGRESS;
	}
}
546
/*
 * Prepare controller for a transaction and call i2c_dw_xfer_msg.
 *
 * The i2c_algorithm .master_xfer hook. Returns the number of messages
 * transferred on success, or a negative error code.
 */
static int
i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
	int ret;

	dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num);

	pm_runtime_get_sync(dev->dev);

	/*
	 * Initiate I2C message transfer when AMD NAVI GPU card is enabled,
	 * As it is polling based transfer mechanism, which does not support
	 * interrupt based functionalities of existing DesignWare driver.
	 */
	if ((dev->flags & MODEL_MASK) == MODEL_AMD_NAVI_GPU) {
		ret = amd_i2c_dw_xfer_quirk(adap, msgs, num);
		goto done_nolock;
	}

	/* Reset the per-transfer bookkeeping consumed by the ISR */
	reinit_completion(&dev->cmd_complete);
	dev->msgs = msgs;
	dev->msgs_num = num;
	dev->cmd_err = 0;
	dev->msg_write_idx = 0;
	dev->msg_read_idx = 0;
	dev->msg_err = 0;
	dev->status = 0;
	dev->abort_source = 0;
	dev->rx_outstanding = 0;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		goto done_nolock;

	ret = i2c_dw_wait_bus_not_busy(dev);
	if (ret < 0)
		goto done;

	/* Start the transfers */
	i2c_dw_xfer_init(dev);

	/* Wait for tx to complete */
	if (!wait_for_completion_timeout(&dev->cmd_complete, adap->timeout)) {
		dev_err(dev->dev, "controller timed out\n");
		/* i2c_dw_init implicitly disables the adapter */
		i2c_recover_bus(&dev->adapter);
		i2c_dw_init_master(dev);
		ret = -ETIMEDOUT;
		goto done;
	}

	/*
	 * We must disable the adapter before returning and signaling the end
	 * of the current transfer. Otherwise the hardware might continue
	 * generating interrupts which in turn causes a race condition with
	 * the following transfer. Needs some more investigation if the
	 * additional interrupts are a hardware bug or this driver doesn't
	 * handle them correctly yet.
	 */
	__i2c_dw_disable_nowait(dev);

	if (dev->msg_err) {
		ret = dev->msg_err;
		goto done;
	}

	/* No error */
	if (likely(!dev->cmd_err && !dev->status)) {
		ret = num;
		goto done;
	}

	/* We have an error */
	if (dev->cmd_err == DW_IC_ERR_TX_ABRT) {
		ret = i2c_dw_handle_tx_abort(dev);
		goto done;
	}

	/* Completion fired but the state machine didn't finish cleanly */
	if (dev->status)
		dev_err(dev->dev,
			"transfer terminated early - interrupt latency too high?\n");

	ret = -EIO;

done:
	i2c_dw_release_lock(dev);

done_nolock:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return ret;
}
644
/* Transfer ops exposed to the I2C core (master transfers only) */
static const struct i2c_algorithm i2c_dw_algo = {
	.master_xfer = i2c_dw_xfer,
	.functionality = i2c_dw_func,
};
649
/* The controller cannot generate zero-length transfers */
static const struct i2c_adapter_quirks i2c_dw_quirks = {
	.flags = I2C_AQ_NO_ZERO_LEN,
};
653
/*
 * Snapshot IC_INTR_STAT and acknowledge each asserted interrupt through
 * its dedicated IC_CLR_* register.
 *
 * Return: the IC_INTR_STAT value read before clearing.
 */
static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev)
{
	u32 stat, dummy;

	/*
	 * The IC_INTR_STAT register just indicates "enabled" interrupts.
	 * The unmasked raw version of interrupt status bits is available
	 * in the IC_RAW_INTR_STAT register.
	 *
	 * That is,
	 *   stat = readl(IC_INTR_STAT);
	 * equals to,
	 *   stat = readl(IC_RAW_INTR_STAT) & readl(IC_INTR_MASK);
	 *
	 * The raw version might be useful for debugging purposes.
	 */
	regmap_read(dev->map, DW_IC_INTR_STAT, &stat);

	/*
	 * Do not use the IC_CLR_INTR register to clear interrupts, or
	 * you'll miss some interrupts, triggered during the period from
	 * readl(IC_INTR_STAT) to readl(IC_CLR_INTR).
	 *
	 * Instead, use the separately-prepared IC_CLR_* registers.
	 */
	if (stat & DW_IC_INTR_RX_UNDER)
		regmap_read(dev->map, DW_IC_CLR_RX_UNDER, &dummy);
	if (stat & DW_IC_INTR_RX_OVER)
		regmap_read(dev->map, DW_IC_CLR_RX_OVER, &dummy);
	if (stat & DW_IC_INTR_TX_OVER)
		regmap_read(dev->map, DW_IC_CLR_TX_OVER, &dummy);
	if (stat & DW_IC_INTR_RD_REQ)
		regmap_read(dev->map, DW_IC_CLR_RD_REQ, &dummy);
	if (stat & DW_IC_INTR_TX_ABRT) {
		/*
		 * The IC_TX_ABRT_SOURCE register is cleared whenever
		 * the IC_CLR_TX_ABRT is read. Preserve it beforehand.
		 */
		regmap_read(dev->map, DW_IC_TX_ABRT_SOURCE, &dev->abort_source);
		regmap_read(dev->map, DW_IC_CLR_TX_ABRT, &dummy);
	}
	if (stat & DW_IC_INTR_RX_DONE)
		regmap_read(dev->map, DW_IC_CLR_RX_DONE, &dummy);
	if (stat & DW_IC_INTR_ACTIVITY)
		regmap_read(dev->map, DW_IC_CLR_ACTIVITY, &dummy);
	/*
	 * NOTE(review): STOP_DET is deliberately left pending while Rx data
	 * is still outstanding and RX_FULL is not asserted - presumably so
	 * the ISR sees it again after draining the FIFO; confirm against
	 * i2c_dw_isr()'s completion condition.
	 */
	if ((stat & DW_IC_INTR_STOP_DET) &&
	    ((dev->rx_outstanding == 0) || (stat & DW_IC_INTR_RX_FULL)))
		regmap_read(dev->map, DW_IC_CLR_STOP_DET, &dummy);
	if (stat & DW_IC_INTR_START_DET)
		regmap_read(dev->map, DW_IC_CLR_START_DET, &dummy);
	if (stat & DW_IC_INTR_GEN_CALL)
		regmap_read(dev->map, DW_IC_CLR_GEN_CALL, &dummy);

	return stat;
}
709
/*
 * Interrupt service routine. This gets called whenever an I2C master interrupt
 * occurs.
 */
static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id)
{
	struct dw_i2c_dev *dev = dev_id;
	u32 stat, enabled;

	/* Shared IRQ line: bail out when the interrupt isn't ours */
	regmap_read(dev->map, DW_IC_ENABLE, &enabled);
	regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &stat);
	if (!enabled || !(stat & ~DW_IC_INTR_ACTIVITY))
		return IRQ_NONE;
	/*
	 * NOTE(review): all-ones status presumably means the device is
	 * suspended or its MMIO is unreachable - treated as not ours.
	 * Confirm against the platform's PM behavior.
	 */
	if (pm_runtime_suspended(dev->dev) || stat == GENMASK(31, 0))
		return IRQ_NONE;
	dev_dbg(dev->dev, "enabled=%#x stat=%#x\n", enabled, stat);

	stat = i2c_dw_read_clear_intrbits(dev);

	if (!(dev->status & STATUS_ACTIVE)) {
		/*
		 * Unexpected interrupt in driver point of view. State
		 * variables are either unset or stale so acknowledge and
		 * disable interrupts for suppressing further interrupts if
		 * interrupt really came from this HW (E.g. firmware has left
		 * the HW active).
		 */
		regmap_write(dev->map, DW_IC_INTR_MASK, 0);
		return IRQ_HANDLED;
	}

	if (stat & DW_IC_INTR_TX_ABRT) {
		dev->cmd_err |= DW_IC_ERR_TX_ABRT;
		dev->status &= ~STATUS_MASK;
		dev->rx_outstanding = 0;

		/*
		 * Anytime TX_ABRT is set, the contents of the tx/rx
		 * buffers are flushed. Make sure to skip them.
		 */
		regmap_write(dev->map, DW_IC_INTR_MASK, 0);
		goto tx_aborted;
	}

	if (stat & DW_IC_INTR_RX_FULL)
		i2c_dw_read(dev);

	if (stat & DW_IC_INTR_TX_EMPTY)
		i2c_dw_xfer_msg(dev);

	/*
	 * No need to modify or disable the interrupt mask here.
	 * i2c_dw_xfer_msg() will take care of it according to
	 * the current transmit status.
	 */

tx_aborted:
	/* Complete only once all expected Rx data has been collected */
	if (((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err) &&
	    (dev->rx_outstanding == 0))
		complete(&dev->cmd_complete);
	else if (unlikely(dev->flags & ACCESS_INTR_MASK)) {
		/* Workaround to trigger pending interrupt */
		regmap_read(dev->map, DW_IC_INTR_MASK, &stat);
		regmap_write(dev->map, DW_IC_INTR_MASK, 0);
		regmap_write(dev->map, DW_IC_INTR_MASK, stat);
	}

	return IRQ_HANDLED;
}
779
780void i2c_dw_configure_master(struct dw_i2c_dev *dev)
781{
782 struct i2c_timings *t = &dev->timings;
783
784 dev->functionality = I2C_FUNC_10BIT_ADDR | DW_IC_DEFAULT_FUNCTIONALITY;
785
786 dev->master_cfg = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
787 DW_IC_CON_RESTART_EN;
788
789 dev->mode = DW_IC_MASTER;
790
791 switch (t->bus_freq_hz) {
792 case I2C_MAX_STANDARD_MODE_FREQ:
793 dev->master_cfg |= DW_IC_CON_SPEED_STD;
794 break;
795 case I2C_MAX_HIGH_SPEED_MODE_FREQ:
796 dev->master_cfg |= DW_IC_CON_SPEED_HIGH;
797 break;
798 default:
799 dev->master_cfg |= DW_IC_CON_SPEED_FAST;
800 }
801}
802EXPORT_SYMBOL_GPL(i2c_dw_configure_master);
803
/*
 * Quiesce the controller (disable, assert reset, gate the clock) before
 * GPIO-based bus recovery bit-bangs the SCL/SDA lines.
 */
static void i2c_dw_prepare_recovery(struct i2c_adapter *adap)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);

	i2c_dw_disable(dev);
	reset_control_assert(dev->rst);
	i2c_dw_prepare_clk(dev, false);
}
812
/*
 * Undo i2c_dw_prepare_recovery() in reverse order: ungate the clock,
 * release reset, then reprogram and re-enable the hardware.
 */
static void i2c_dw_unprepare_recovery(struct i2c_adapter *adap)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);

	i2c_dw_prepare_clk(dev, true);
	reset_control_deassert(dev->rst);
	i2c_dw_init_master(dev);
}
821
/*
 * Hook up GPIO-based bus recovery when an "scl" GPIO (and optionally an
 * "sda" GPIO) is described for the device. A missing "scl" GPIO is not
 * an error - recovery is simply left unconfigured.
 *
 * Return: 0 on success or when recovery is unavailable, negative errno
 * on GPIO lookup failure.
 */
static int i2c_dw_init_recovery_info(struct dw_i2c_dev *dev)
{
	struct i2c_bus_recovery_info *rinfo = &dev->rinfo;
	struct i2c_adapter *adap = &dev->adapter;
	struct gpio_desc *gpio;

	gpio = devm_gpiod_get_optional(dev->dev, "scl", GPIOD_OUT_HIGH);
	if (IS_ERR_OR_NULL(gpio))
		/* NULL (no GPIO described) maps to 0: recovery not set up */
		return PTR_ERR_OR_ZERO(gpio);

	rinfo->scl_gpiod = gpio;

	gpio = devm_gpiod_get_optional(dev->dev, "sda", GPIOD_IN);
	if (IS_ERR(gpio))
		return PTR_ERR(gpio);
	rinfo->sda_gpiod = gpio;

	rinfo->recover_bus = i2c_generic_scl_recovery;
	rinfo->prepare_recovery = i2c_dw_prepare_recovery;
	rinfo->unprepare_recovery = i2c_dw_unprepare_recovery;
	adap->bus_recovery_info = rinfo;

	dev_info(dev->dev, "running with gpio recovery mode! scl%s",
		 rinfo->sda_gpiod ? ",sda" : "");

	return 0;
}
849
/*
 * Adapter registration for the AMD NAVI GPU model (no IRQ setup needed).
 * The PM usage count is pinned across registration, mirroring
 * i2c_dw_probe_master(), to avoid a spurious runtime suspend while
 * slave devices probe.
 *
 * Return: 0 on success, negative errno from adapter registration.
 */
static int amd_i2c_adap_quirk(struct dw_i2c_dev *dev)
{
	struct i2c_adapter *adap = &dev->adapter;
	int ret;

	pm_runtime_get_noresume(dev->dev);
	ret = i2c_add_numbered_adapter(adap);
	if (ret)
		dev_err(dev->dev, "Failed to add adapter: %d\n", ret);
	pm_runtime_put_noidle(dev->dev);

	return ret;
}
863
864int i2c_dw_probe_master(struct dw_i2c_dev *dev)
865{
866 struct i2c_adapter *adap = &dev->adapter;
867 unsigned long irq_flags;
868 int ret;
869
870 init_completion(&dev->cmd_complete);
871
872 dev->init = i2c_dw_init_master;
873 dev->disable = i2c_dw_disable;
874
875 ret = i2c_dw_init_regmap(dev);
876 if (ret)
877 return ret;
878
879 ret = i2c_dw_set_timings_master(dev);
880 if (ret)
881 return ret;
882
883 ret = i2c_dw_set_fifo_size(dev);
884 if (ret)
885 return ret;
886
887 ret = dev->init(dev);
888 if (ret)
889 return ret;
890
891 snprintf(adap->name, sizeof(adap->name),
892 "Synopsys DesignWare I2C adapter");
893 adap->retries = 3;
894 adap->algo = &i2c_dw_algo;
895 adap->quirks = &i2c_dw_quirks;
896 adap->dev.parent = dev->dev;
897 i2c_set_adapdata(adap, dev);
898
899 if ((dev->flags & MODEL_MASK) == MODEL_AMD_NAVI_GPU)
900 return amd_i2c_adap_quirk(dev);
901
902 if (dev->flags & ACCESS_NO_IRQ_SUSPEND) {
903 irq_flags = IRQF_NO_SUSPEND;
904 } else {
905 irq_flags = IRQF_SHARED | IRQF_COND_SUSPEND;
906 }
907
908 ret = i2c_dw_acquire_lock(dev);
909 if (ret)
910 return ret;
911
912 regmap_write(dev->map, DW_IC_INTR_MASK, 0);
913 i2c_dw_release_lock(dev);
914
915 ret = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr, irq_flags,
916 dev_name(dev->dev), dev);
917 if (ret) {
918 dev_err(dev->dev, "failure requesting irq %i: %d\n",
919 dev->irq, ret);
920 return ret;
921 }
922
923 ret = i2c_dw_init_recovery_info(dev);
924 if (ret)
925 return ret;
926
927 /*
928 * Increment PM usage count during adapter registration in order to
929 * avoid possible spurious runtime suspend when adapter device is
930 * registered to the device core and immediate resume in case bus has
931 * registered I2C slaves that do I2C transfers in their probe.
932 */
933 pm_runtime_get_noresume(dev->dev);
934 ret = i2c_add_numbered_adapter(adap);
935 if (ret)
936 dev_err(dev->dev, "failure adding adapter: %d\n", ret);
937 pm_runtime_put_noidle(dev->dev);
938
939 return ret;
940}
941EXPORT_SYMBOL_GPL(i2c_dw_probe_master);
942
943MODULE_DESCRIPTION("Synopsys DesignWare I2C bus master adapter");
944MODULE_LICENSE("GPL");
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Synopsys DesignWare I2C adapter driver (master only).
4 *
5 * Based on the TI DAVINCI I2C adapter driver.
6 *
7 * Copyright (C) 2006 Texas Instruments.
8 * Copyright (C) 2007 MontaVista Software Inc.
9 * Copyright (C) 2009 Provigent Ltd.
10 */
11#include <linux/delay.h>
12#include <linux/err.h>
13#include <linux/errno.h>
14#include <linux/export.h>
15#include <linux/gpio/consumer.h>
16#include <linux/i2c.h>
17#include <linux/interrupt.h>
18#include <linux/io.h>
19#include <linux/module.h>
20#include <linux/pinctrl/consumer.h>
21#include <linux/pm_runtime.h>
22#include <linux/regmap.h>
23#include <linux/reset.h>
24
25#include "i2c-designware-core.h"
26
27#define AMD_TIMEOUT_MIN_US 25
28#define AMD_TIMEOUT_MAX_US 250
29#define AMD_MASTERCFG_MASK GENMASK(15, 0)
30
static void i2c_dw_configure_fifo_master(struct dw_i2c_dev *dev)
{
	/*
	 * Configure Tx/Rx FIFO threshold levels.
	 *
	 * Tx threshold at half depth lets the ISR refill before underrun;
	 * Rx threshold 0 presumably raises RX_FULL on the first received
	 * byte (DW databook convention) - TODO confirm.
	 */
	regmap_write(dev->map, DW_IC_TX_TL, dev->tx_fifo_depth / 2);
	regmap_write(dev->map, DW_IC_RX_TL, 0);

	/* Configure the I2C master */
	regmap_write(dev->map, DW_IC_CON, dev->master_cfg);
}
40
/*
 * Compute (or validate) the SCL high/low counts for every bus speed the
 * adapter may use and program the SDA hold time. Counts already provided
 * (e.g. by firmware) are kept; missing ones are derived from the current
 * controller clock rate. High speed falls back to fast mode when the IP
 * was not synthesized with high-speed support.
 *
 * Return: 0 on success, negative errno on register access failure.
 */
static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
{
	unsigned int comp_param1;
	u32 sda_falling_time, scl_falling_time;
	struct i2c_timings *t = &dev->timings;
	const char *fp_str = "";
	u32 ic_clk;
	int ret;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	ret = regmap_read(dev->map, DW_IC_COMP_PARAM_1, &comp_param1);
	i2c_dw_release_lock(dev);
	if (ret)
		return ret;

	/* Set standard and fast speed dividers for high/low periods */
	sda_falling_time = t->sda_fall_ns ?: 300; /* ns */
	scl_falling_time = t->scl_fall_ns ?: 300; /* ns */

	/* Calculate SCL timing parameters for standard mode if not set */
	if (!dev->ss_hcnt || !dev->ss_lcnt) {
		ic_clk = i2c_dw_clk_rate(dev);
		dev->ss_hcnt =
			i2c_dw_scl_hcnt(ic_clk,
					4000,	/* tHD;STA = tHIGH = 4.0 us */
					sda_falling_time,
					0,	/* 0: DW default, 1: Ideal */
					0);	/* No offset */
		dev->ss_lcnt =
			i2c_dw_scl_lcnt(ic_clk,
					4700,	/* tLOW = 4.7 us */
					scl_falling_time,
					0);	/* No offset */
	}
	dev_dbg(dev->dev, "Standard Mode HCNT:LCNT = %d:%d\n",
		dev->ss_hcnt, dev->ss_lcnt);

	/*
	 * Set SCL timing parameters for fast mode or fast mode plus. Only
	 * difference is the timing parameter values since the registers are
	 * the same.
	 */
	if (t->bus_freq_hz == I2C_MAX_FAST_MODE_PLUS_FREQ) {
		/*
		 * Check are Fast Mode Plus parameters available. Calculate
		 * SCL timing parameters for Fast Mode Plus if not set.
		 */
		if (dev->fp_hcnt && dev->fp_lcnt) {
			dev->fs_hcnt = dev->fp_hcnt;
			dev->fs_lcnt = dev->fp_lcnt;
		} else {
			ic_clk = i2c_dw_clk_rate(dev);
			dev->fs_hcnt =
				i2c_dw_scl_hcnt(ic_clk,
						260,	/* tHIGH = 260 ns */
						sda_falling_time,
						0,	/* DW default */
						0);	/* No offset */
			dev->fs_lcnt =
				i2c_dw_scl_lcnt(ic_clk,
						500,	/* tLOW = 500 ns */
						scl_falling_time,
						0);	/* No offset */
		}
		fp_str = " Plus";
	}
	/*
	 * Calculate SCL timing parameters for fast mode if not set. They are
	 * needed also in high speed mode.
	 */
	if (!dev->fs_hcnt || !dev->fs_lcnt) {
		ic_clk = i2c_dw_clk_rate(dev);
		dev->fs_hcnt =
			i2c_dw_scl_hcnt(ic_clk,
					600,	/* tHD;STA = tHIGH = 0.6 us */
					sda_falling_time,
					0,	/* 0: DW default, 1: Ideal */
					0);	/* No offset */
		dev->fs_lcnt =
			i2c_dw_scl_lcnt(ic_clk,
					1300,	/* tLOW = 1.3 us */
					scl_falling_time,
					0);	/* No offset */
	}
	dev_dbg(dev->dev, "Fast Mode%s HCNT:LCNT = %d:%d\n",
		fp_str, dev->fs_hcnt, dev->fs_lcnt);

	/* Check is high speed possible and fall back to fast mode if not */
	if ((dev->master_cfg & DW_IC_CON_SPEED_MASK) ==
		DW_IC_CON_SPEED_HIGH) {
		if ((comp_param1 & DW_IC_COMP_PARAM_1_SPEED_MODE_MASK)
			!= DW_IC_COMP_PARAM_1_SPEED_MODE_HIGH) {
			dev_err(dev->dev, "High Speed not supported!\n");
			t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ;
			dev->master_cfg &= ~DW_IC_CON_SPEED_MASK;
			dev->master_cfg |= DW_IC_CON_SPEED_FAST;
			dev->hs_hcnt = 0;
			dev->hs_lcnt = 0;
		} else if (!dev->hs_hcnt || !dev->hs_lcnt) {
			ic_clk = i2c_dw_clk_rate(dev);
			dev->hs_hcnt =
				i2c_dw_scl_hcnt(ic_clk,
						160,	/* tHIGH = 160 ns */
						sda_falling_time,
						0,	/* DW default */
						0);	/* No offset */
			dev->hs_lcnt =
				i2c_dw_scl_lcnt(ic_clk,
						320,	/* tLOW = 320 ns */
						scl_falling_time,
						0);	/* No offset */
		}
		dev_dbg(dev->dev, "High Speed Mode HCNT:LCNT = %d:%d\n",
			dev->hs_hcnt, dev->hs_lcnt);
	}

	ret = i2c_dw_set_sda_hold(dev);
	if (ret)
		return ret;

	dev_dbg(dev->dev, "Bus speed: %s\n", i2c_freq_mode_string(t->bus_freq_hz));
	return 0;
}
167
/**
 * i2c_dw_init_master() - Initialize the designware I2C master hardware
 * @dev: device private data
 *
 * This functions configures and enables the I2C master.
 * This function is called during I2C init function, and in case of timeout at
 * run time.
 *
 * Return: 0 on success, or a negative errno if the controller lock could
 * not be acquired.
 */
static int i2c_dw_init_master(struct dw_i2c_dev *dev)
{
	int ret;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	/*
	 * Disable the adapter. NOTE(review): the timing registers below are
	 * presumably only writable while the adapter is disabled - confirm
	 * against the DesignWare databook.
	 */
	__i2c_dw_disable(dev);

	/* Write standard speed timing parameters */
	regmap_write(dev->map, DW_IC_SS_SCL_HCNT, dev->ss_hcnt);
	regmap_write(dev->map, DW_IC_SS_SCL_LCNT, dev->ss_lcnt);

	/* Write fast mode/fast mode plus timing parameters */
	regmap_write(dev->map, DW_IC_FS_SCL_HCNT, dev->fs_hcnt);
	regmap_write(dev->map, DW_IC_FS_SCL_LCNT, dev->fs_lcnt);

	/* Write high speed timing parameters if supported */
	if (dev->hs_hcnt && dev->hs_lcnt) {
		regmap_write(dev->map, DW_IC_HS_SCL_HCNT, dev->hs_hcnt);
		regmap_write(dev->map, DW_IC_HS_SCL_LCNT, dev->hs_lcnt);
	}

	/* Write SDA hold time if supported */
	if (dev->sda_hold_time)
		regmap_write(dev->map, DW_IC_SDA_HOLD, dev->sda_hold_time);

	/* Set FIFO thresholds and IC_CON, leaving the adapter disabled */
	i2c_dw_configure_fifo_master(dev);
	i2c_dw_release_lock(dev);

	return 0;
}
210
/*
 * Program the target address and addressing mode for the upcoming transfer,
 * then enable the adapter and unmask the master interrupts. The register
 * access order below is deliberate; do not reorder.
 */
static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 ic_con = 0, ic_tar = 0;
	unsigned int dummy;

	/* Disable the adapter; IC_CON/IC_TAR are programmed while disabled */
	__i2c_dw_disable(dev);

	/* If the slave address is ten bit address, enable 10BITADDR */
	if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) {
		ic_con = DW_IC_CON_10BITADDR_MASTER;
		/*
		 * If I2C_DYNAMIC_TAR_UPDATE is set, the 10-bit addressing
		 * mode has to be enabled via bit 12 of IC_TAR register.
		 * We set it always as I2C_DYNAMIC_TAR_UPDATE can't be
		 * detected from registers.
		 */
		ic_tar = DW_IC_TAR_10BITADDR_MASTER;
	}

	regmap_update_bits(dev->map, DW_IC_CON, DW_IC_CON_10BITADDR_MASTER,
			   ic_con);

	/*
	 * Set the slave (target) address and enable 10-bit addressing mode
	 * if applicable.
	 */
	regmap_write(dev->map, DW_IC_TAR,
		     msgs[dev->msg_write_idx].addr | ic_tar);

	/* Enforce disabled interrupts (due to HW issues) */
	regmap_write(dev->map, DW_IC_INTR_MASK, 0);

	/* Enable the adapter */
	__i2c_dw_enable(dev);

	/* Dummy read to avoid the register getting stuck on Bay Trail */
	regmap_read(dev->map, DW_IC_ENABLE_STATUS, &dummy);

	/* Clear and enable interrupts */
	regmap_read(dev->map, DW_IC_CLR_INTR, &dummy);
	regmap_write(dev->map, DW_IC_INTR_MASK, DW_IC_INTR_MASTER_MASK);
}
255
256static int i2c_dw_check_stopbit(struct dw_i2c_dev *dev)
257{
258 u32 val;
259 int ret;
260
261 ret = regmap_read_poll_timeout(dev->map, DW_IC_INTR_STAT, val,
262 !(val & DW_IC_INTR_STOP_DET),
263 1100, 20000);
264 if (ret)
265 dev_err(dev->dev, "i2c timeout error %d\n", ret);
266
267 return ret;
268}
269
/* Wait for the bus to go idle, then for the STOP condition to be handled. */
static int i2c_dw_status(struct dw_i2c_dev *dev)
{
	int ret = i2c_dw_wait_bus_not_busy(dev);

	return ret ? ret : i2c_dw_check_stopbit(dev);
}
280
/*
 * Initiate and continue master read/write transaction with polling
 * based transfer routine afterward write messages into the Tx buffer.
 *
 * NOTE(review): on success this returns 0 rather than the number of
 * transferred messages (txgbe_i2c_dw_xfer_quirk() returns num_msgs) --
 * confirm the i2c core callers on this platform tolerate that.
 */
static int amd_i2c_dw_xfer_quirk(struct i2c_adapter *adap, struct i2c_msg *msgs, int num_msgs)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
	int msg_wrt_idx, msg_itr_lmt, buf_len, data_idx;
	int cmd = 0, status;
	u8 *tx_buf;
	unsigned int val;

	/*
	 * In order to enable the interrupt for UCSI i.e. AMD NAVI GPU card,
	 * it is mandatory to set the right value in specific register
	 * (offset:0x474) as per the hardware IP specification.
	 */
	regmap_write(dev->map, AMD_UCSI_INTR_REG, AMD_UCSI_INTR_EN);

	dev->msgs = msgs;
	dev->msgs_num = num_msgs;
	i2c_dw_xfer_init(dev);
	/* Polling mode: mask all interrupts for the whole transfer */
	regmap_write(dev->map, DW_IC_INTR_MASK, 0);

	/* Initiate messages read/write transaction */
	for (msg_wrt_idx = 0; msg_wrt_idx < num_msgs; msg_wrt_idx++) {
		tx_buf = msgs[msg_wrt_idx].buf;
		buf_len = msgs[msg_wrt_idx].len;

		if (!(msgs[msg_wrt_idx].flags & I2C_M_RD))
			regmap_write(dev->map, DW_IC_TX_TL, buf_len - 1);
		/*
		 * Initiate the i2c read/write transaction of buffer length,
		 * and poll for bus busy status. For the last message transfer,
		 * update the command with stopbit enable.
		 */
		for (msg_itr_lmt = buf_len; msg_itr_lmt > 0; msg_itr_lmt--) {
			/* BIT(9): STOP bit in DW_IC_DATA_CMD for the final byte */
			if (msg_wrt_idx == num_msgs - 1 && msg_itr_lmt == 1)
				cmd |= BIT(9);

			if (msgs[msg_wrt_idx].flags & I2C_M_RD) {
				/* Due to hardware bug, need to write the same command twice. */
				regmap_write(dev->map, DW_IC_DATA_CMD, 0x100);
				regmap_write(dev->map, DW_IC_DATA_CMD, 0x100 | cmd);
				if (cmd) {
					regmap_write(dev->map, DW_IC_TX_TL, 2 * (buf_len - 1));
					regmap_write(dev->map, DW_IC_RX_TL, 2 * (buf_len - 1));
					/*
					 * Need to check the stop bit. However, it cannot be
					 * detected from the registers so we check it always
					 * when read/write the last byte.
					 */
					status = i2c_dw_status(dev);
					if (status)
						return status;

					/* Drain the whole read buffer from the RX FIFO */
					for (data_idx = 0; data_idx < buf_len; data_idx++) {
						regmap_read(dev->map, DW_IC_DATA_CMD, &val);
						tx_buf[data_idx] = val;
					}
					status = i2c_dw_check_stopbit(dev);
					if (status)
						return status;
				}
			} else {
				regmap_write(dev->map, DW_IC_DATA_CMD, *tx_buf++ | cmd);
				usleep_range(AMD_TIMEOUT_MIN_US, AMD_TIMEOUT_MAX_US);
			}
		}
		status = i2c_dw_check_stopbit(dev);
		if (status)
			return status;
	}

	return 0;
}
357
358static int i2c_dw_poll_tx_empty(struct dw_i2c_dev *dev)
359{
360 u32 val;
361
362 return regmap_read_poll_timeout(dev->map, DW_IC_RAW_INTR_STAT, val,
363 val & DW_IC_INTR_TX_EMPTY,
364 100, 1000);
365}
366
367static int i2c_dw_poll_rx_full(struct dw_i2c_dev *dev)
368{
369 u32 val;
370
371 return regmap_read_poll_timeout(dev->map, DW_IC_RAW_INTR_STAT, val,
372 val & DW_IC_INTR_RX_FULL,
373 100, 1000);
374}
375
/*
 * Polling-based per-byte transfer for Wangxun SP (txgbe) controllers,
 * which do not use the interrupt-driven path.
 *
 * Returns the number of messages transferred on success, or a negative
 * errno if a FIFO poll times out.
 */
static int txgbe_i2c_dw_xfer_quirk(struct i2c_adapter *adap, struct i2c_msg *msgs,
				   int num_msgs)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
	int msg_idx, buf_len, data_idx, ret;
	unsigned int val, stop = 0;
	u8 *buf;

	dev->msgs = msgs;
	dev->msgs_num = num_msgs;
	i2c_dw_xfer_init(dev);
	/* Polling mode: mask all interrupts for the whole transfer */
	regmap_write(dev->map, DW_IC_INTR_MASK, 0);

	for (msg_idx = 0; msg_idx < num_msgs; msg_idx++) {
		buf = msgs[msg_idx].buf;
		buf_len = msgs[msg_idx].len;

		for (data_idx = 0; data_idx < buf_len; data_idx++) {
			/* BIT(9): STOP bit in DW_IC_DATA_CMD on the very last byte */
			if (msg_idx == num_msgs - 1 && data_idx == buf_len - 1)
				stop |= BIT(9);

			if (msgs[msg_idx].flags & I2C_M_RD) {
				/* 0x100: read command bit of DW_IC_DATA_CMD */
				regmap_write(dev->map, DW_IC_DATA_CMD, 0x100 | stop);

				ret = i2c_dw_poll_rx_full(dev);
				if (ret)
					return ret;

				regmap_read(dev->map, DW_IC_DATA_CMD, &val);
				buf[data_idx] = val;
			} else {
				ret = i2c_dw_poll_tx_empty(dev);
				if (ret)
					return ret;

				regmap_write(dev->map, DW_IC_DATA_CMD,
					     buf[data_idx] | stop);
			}
		}
	}

	return num_msgs;
}
419
/*
 * Initiate (and continue) low level master read/write transaction.
 * This function is only called from i2c_dw_isr, and pumping i2c_msg
 * messages into the tx buffer. Even if the size of i2c_msg data is
 * longer than the size of the tx buffer, it handles everything.
 */
static void
i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 intr_mask;
	int tx_limit, rx_limit;
	u32 addr = msgs[dev->msg_write_idx].addr;
	u32 buf_len = dev->tx_buf_len;
	u8 *buf = dev->tx_buf;
	bool need_restart = false;
	unsigned int flr;

	intr_mask = DW_IC_INTR_MASTER_MASK;

	for (; dev->msg_write_idx < dev->msgs_num; dev->msg_write_idx++) {
		u32 flags = msgs[dev->msg_write_idx].flags;

		/*
		 * If target address has changed, we need to
		 * reprogram the target address in the I2C
		 * adapter when we are done with this transfer.
		 */
		if (msgs[dev->msg_write_idx].addr != addr) {
			dev_err(dev->dev,
				"%s: invalid target address\n", __func__);
			dev->msg_err = -EINVAL;
			break;
		}

		if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) {
			/* new i2c_msg */
			buf = msgs[dev->msg_write_idx].buf;
			buf_len = msgs[dev->msg_write_idx].len;

			/* If both IC_EMPTYFIFO_HOLD_MASTER_EN and
			 * IC_RESTART_EN are set, we must manually
			 * set restart bit between messages.
			 */
			if ((dev->master_cfg & DW_IC_CON_RESTART_EN) &&
			    (dev->msg_write_idx > 0))
				need_restart = true;
		}

		/* Remaining space in the TX FIFO */
		regmap_read(dev->map, DW_IC_TXFLR, &flr);
		tx_limit = dev->tx_fifo_depth - flr;

		/* Remaining space in the RX FIFO */
		regmap_read(dev->map, DW_IC_RXFLR, &flr);
		rx_limit = dev->rx_fifo_depth - flr;

		while (buf_len > 0 && tx_limit > 0 && rx_limit > 0) {
			u32 cmd = 0;

			/*
			 * If IC_EMPTYFIFO_HOLD_MASTER_EN is set we must
			 * manually set the stop bit. However, it cannot be
			 * detected from the registers so we set it always
			 * when writing/reading the last byte.
			 */

			/*
			 * i2c-core always sets the buffer length of
			 * I2C_FUNC_SMBUS_BLOCK_DATA to 1. The length will
			 * be adjusted when receiving the first byte.
			 * Thus we can't stop the transaction here.
			 */
			if (dev->msg_write_idx == dev->msgs_num - 1 &&
			    buf_len == 1 && !(flags & I2C_M_RECV_LEN))
				cmd |= BIT(9); /* STOP bit of DW_IC_DATA_CMD */

			if (need_restart) {
				cmd |= BIT(10); /* RESTART bit of DW_IC_DATA_CMD */
				need_restart = false;
			}

			if (msgs[dev->msg_write_idx].flags & I2C_M_RD) {

				/* Avoid rx buffer overrun */
				if (dev->rx_outstanding >= dev->rx_fifo_depth)
					break;

				/* 0x100: read command bit */
				regmap_write(dev->map, DW_IC_DATA_CMD,
					     cmd | 0x100);
				rx_limit--;
				dev->rx_outstanding++;
			} else {
				regmap_write(dev->map, DW_IC_DATA_CMD,
					     cmd | *buf++);
			}
			tx_limit--; buf_len--;
		}

		/* Remember where we stopped so the next TX_EMPTY resumes here */
		dev->tx_buf = buf;
		dev->tx_buf_len = buf_len;

		/*
		 * Because we don't know the buffer length in the
		 * I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop the
		 * transaction here. Also disable the TX_EMPTY IRQ
		 * while waiting for the data length byte to avoid the
		 * bogus interrupts flood.
		 */
		if (flags & I2C_M_RECV_LEN) {
			dev->status |= STATUS_WRITE_IN_PROGRESS;
			intr_mask &= ~DW_IC_INTR_TX_EMPTY;
			break;
		} else if (buf_len > 0) {
			/* more bytes to be written */
			dev->status |= STATUS_WRITE_IN_PROGRESS;
			break;
		} else
			dev->status &= ~STATUS_WRITE_IN_PROGRESS;
	}

	/*
	 * If i2c_msg index search is completed, we don't need TX_EMPTY
	 * interrupt any more.
	 */
	if (dev->msg_write_idx == dev->msgs_num)
		intr_mask &= ~DW_IC_INTR_TX_EMPTY;

	if (dev->msg_err)
		intr_mask = 0;

	regmap_write(dev->map, DW_IC_INTR_MASK, intr_mask);
}
551
/*
 * Handle the first received byte of an SMBus block read (I2C_M_RECV_LEN):
 * fix up the message length now that the slave has reported it, and
 * re-arm TX_EMPTY so i2c_dw_xfer_msg() can queue the remaining reads.
 *
 * @len: block length byte received from the slave
 * Returns the adjusted total message length.
 */
static u8
i2c_dw_recv_len(struct dw_i2c_dev *dev, u8 len)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 flags = msgs[dev->msg_read_idx].flags;

	/*
	 * Adjust the buffer length and mask the flag
	 * after receiving the first byte.
	 */
	len += (flags & I2C_CLIENT_PEC) ? 2 : 1;
	/* Account for read commands already queued to the controller */
	dev->tx_buf_len = len - min_t(u8, len, dev->rx_outstanding);
	msgs[dev->msg_read_idx].len = len;
	msgs[dev->msg_read_idx].flags &= ~I2C_M_RECV_LEN;

	/*
	 * Received buffer length, re-enable TX_EMPTY interrupt
	 * to resume the SMBUS transaction.
	 */
	regmap_update_bits(dev->map, DW_IC_INTR_MASK, DW_IC_INTR_TX_EMPTY,
			   DW_IC_INTR_TX_EMPTY);

	return len;
}
576
/*
 * Drain received bytes from the RX FIFO into the current read message(s).
 * Called from the ISR on RX_FULL; resumes a partially-read message via
 * STATUS_READ_IN_PROGRESS and dev->rx_buf/rx_buf_len.
 */
static void
i2c_dw_read(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	unsigned int rx_valid;

	for (; dev->msg_read_idx < dev->msgs_num; dev->msg_read_idx++) {
		unsigned int tmp;
		u32 len;
		u8 *buf;

		if (!(msgs[dev->msg_read_idx].flags & I2C_M_RD))
			continue;

		/* Fresh message, or resume where the last RX_FULL left off */
		if (!(dev->status & STATUS_READ_IN_PROGRESS)) {
			len = msgs[dev->msg_read_idx].len;
			buf = msgs[dev->msg_read_idx].buf;
		} else {
			len = dev->rx_buf_len;
			buf = dev->rx_buf;
		}

		/* Number of valid entries currently in the RX FIFO */
		regmap_read(dev->map, DW_IC_RXFLR, &rx_valid);

		for (; len > 0 && rx_valid > 0; len--, rx_valid--) {
			u32 flags = msgs[dev->msg_read_idx].flags;

			regmap_read(dev->map, DW_IC_DATA_CMD, &tmp);
			tmp &= DW_IC_DATA_CMD_DAT;
			/* Ensure length byte is a valid value */
			if (flags & I2C_M_RECV_LEN) {
				/*
				 * if IC_EMPTYFIFO_HOLD_MASTER_EN is set, which cannot be
				 * detected from the registers, the controller can be
				 * disabled if the STOP bit is set. But it is only set
				 * after receiving block data response length in
				 * I2C_FUNC_SMBUS_BLOCK_DATA case. That needs to read
				 * another byte with STOP bit set when the block data
				 * response length is invalid to complete the transaction.
				 */
				if (!tmp || tmp > I2C_SMBUS_BLOCK_MAX)
					tmp = 1;

				len = i2c_dw_recv_len(dev, tmp);
			}
			*buf++ = tmp;
			dev->rx_outstanding--;
		}

		if (len > 0) {
			/* FIFO drained before the message completed; save state */
			dev->status |= STATUS_READ_IN_PROGRESS;
			dev->rx_buf_len = len;
			dev->rx_buf = buf;
			return;
		} else
			dev->status &= ~STATUS_READ_IN_PROGRESS;
	}
}
635
/*
 * Prepare controller for a transaction and call i2c_dw_xfer_msg.
 *
 * Returns the number of messages transferred on success, or a negative
 * errno (-ETIMEDOUT on controller timeout, the abort-derived code on
 * TX abort, -EIO otherwise).
 */
static int
i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
	int ret;

	dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num);

	pm_runtime_get_sync(dev->dev);

	/*
	 * Initiate I2C message transfer when polling mode is enabled,
	 * As it is polling based transfer mechanism, which does not support
	 * interrupt based functionalities of existing DesignWare driver.
	 */
	switch (dev->flags & MODEL_MASK) {
	case MODEL_AMD_NAVI_GPU:
		ret = amd_i2c_dw_xfer_quirk(adap, msgs, num);
		goto done_nolock;
	case MODEL_WANGXUN_SP:
		ret = txgbe_i2c_dw_xfer_quirk(adap, msgs, num);
		goto done_nolock;
	default:
		break;
	}

	/* Reset per-transfer state consumed by the ISR */
	reinit_completion(&dev->cmd_complete);
	dev->msgs = msgs;
	dev->msgs_num = num;
	dev->cmd_err = 0;
	dev->msg_write_idx = 0;
	dev->msg_read_idx = 0;
	dev->msg_err = 0;
	dev->status = 0;
	dev->abort_source = 0;
	dev->rx_outstanding = 0;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		goto done_nolock;

	ret = i2c_dw_wait_bus_not_busy(dev);
	if (ret < 0)
		goto done;

	/* Start the transfers */
	i2c_dw_xfer_init(dev);

	/* Wait for tx to complete */
	if (!wait_for_completion_timeout(&dev->cmd_complete, adap->timeout)) {
		dev_err(dev->dev, "controller timed out\n");
		/* i2c_dw_init implicitly disables the adapter */
		i2c_recover_bus(&dev->adapter);
		i2c_dw_init_master(dev);
		ret = -ETIMEDOUT;
		goto done;
	}

	/*
	 * We must disable the adapter before returning and signaling the end
	 * of the current transfer. Otherwise the hardware might continue
	 * generating interrupts which in turn causes a race condition with
	 * the following transfer. Needs some more investigation if the
	 * additional interrupts are a hardware bug or this driver doesn't
	 * handle them correctly yet.
	 */
	__i2c_dw_disable_nowait(dev);

	if (dev->msg_err) {
		ret = dev->msg_err;
		goto done;
	}

	/* No error */
	if (likely(!dev->cmd_err && !dev->status)) {
		ret = num;
		goto done;
	}

	/* We have an error */
	if (dev->cmd_err == DW_IC_ERR_TX_ABRT) {
		ret = i2c_dw_handle_tx_abort(dev);
		goto done;
	}

	if (dev->status)
		dev_err(dev->dev,
			"transfer terminated early - interrupt latency too high?\n");

	ret = -EIO;

done:
	i2c_dw_release_lock(dev);

done_nolock:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return ret;
}
739
/* I2C algorithm callbacks for the master-mode transfer path */
static const struct i2c_algorithm i2c_dw_algo = {
	.master_xfer = i2c_dw_func ? i2c_dw_xfer : i2c_dw_xfer, /* see note below */
	.functionality = i2c_dw_func,
};

/* Tell the I2C core to reject zero-length messages on this adapter */
static const struct i2c_adapter_quirks i2c_dw_quirks = {
	.flags = I2C_AQ_NO_ZERO_LEN,
};
748
/*
 * Read the enabled interrupt status and acknowledge each asserted source
 * via its dedicated IC_CLR_* register. Returns the status read, so the
 * caller sees exactly the interrupts that were cleared.
 */
static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev)
{
	unsigned int stat, dummy;

	/*
	 * The IC_INTR_STAT register just indicates "enabled" interrupts.
	 * The unmasked raw version of interrupt status bits is available
	 * in the IC_RAW_INTR_STAT register.
	 *
	 * That is,
	 *   stat = readl(IC_INTR_STAT);
	 * equals to,
	 *   stat = readl(IC_RAW_INTR_STAT) & readl(IC_INTR_MASK);
	 *
	 * The raw version might be useful for debugging purposes.
	 */
	regmap_read(dev->map, DW_IC_INTR_STAT, &stat);

	/*
	 * Do not use the IC_CLR_INTR register to clear interrupts, or
	 * you'll miss some interrupts, triggered during the period from
	 * readl(IC_INTR_STAT) to readl(IC_CLR_INTR).
	 *
	 * Instead, use the separately-prepared IC_CLR_* registers.
	 */
	if (stat & DW_IC_INTR_RX_UNDER)
		regmap_read(dev->map, DW_IC_CLR_RX_UNDER, &dummy);
	if (stat & DW_IC_INTR_RX_OVER)
		regmap_read(dev->map, DW_IC_CLR_RX_OVER, &dummy);
	if (stat & DW_IC_INTR_TX_OVER)
		regmap_read(dev->map, DW_IC_CLR_TX_OVER, &dummy);
	if (stat & DW_IC_INTR_RD_REQ)
		regmap_read(dev->map, DW_IC_CLR_RD_REQ, &dummy);
	if (stat & DW_IC_INTR_TX_ABRT) {
		/*
		 * The IC_TX_ABRT_SOURCE register is cleared whenever
		 * the IC_CLR_TX_ABRT is read. Preserve it beforehand.
		 */
		regmap_read(dev->map, DW_IC_TX_ABRT_SOURCE, &dev->abort_source);
		regmap_read(dev->map, DW_IC_CLR_TX_ABRT, &dummy);
	}
	if (stat & DW_IC_INTR_RX_DONE)
		regmap_read(dev->map, DW_IC_CLR_RX_DONE, &dummy);
	if (stat & DW_IC_INTR_ACTIVITY)
		regmap_read(dev->map, DW_IC_CLR_ACTIVITY, &dummy);
	/*
	 * Only ack STOP_DET once all outstanding reads are drained (or an
	 * RX_FULL is pending), so a read transfer is not completed early.
	 */
	if ((stat & DW_IC_INTR_STOP_DET) &&
	    ((dev->rx_outstanding == 0) || (stat & DW_IC_INTR_RX_FULL)))
		regmap_read(dev->map, DW_IC_CLR_STOP_DET, &dummy);
	if (stat & DW_IC_INTR_START_DET)
		regmap_read(dev->map, DW_IC_CLR_START_DET, &dummy);
	if (stat & DW_IC_INTR_GEN_CALL)
		regmap_read(dev->map, DW_IC_CLR_GEN_CALL, &dummy);

	return stat;
}
804
/*
 * Interrupt service routine. This gets called whenever an I2C master interrupt
 * occurs.
 */
static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id)
{
	struct dw_i2c_dev *dev = dev_id;
	unsigned int stat, enabled;

	regmap_read(dev->map, DW_IC_ENABLE, &enabled);
	regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &stat);
	if (!enabled || !(stat & ~DW_IC_INTR_ACTIVITY))
		return IRQ_NONE;
	/*
	 * NOTE(review): an all-ones status read presumably indicates the
	 * device is inaccessible (e.g. powered down on a shared IRQ line) --
	 * confirm against the platform this check was added for.
	 */
	if (pm_runtime_suspended(dev->dev) || stat == GENMASK(31, 0))
		return IRQ_NONE;
	dev_dbg(dev->dev, "enabled=%#x stat=%#x\n", enabled, stat);

	stat = i2c_dw_read_clear_intrbits(dev);

	if (!(dev->status & STATUS_ACTIVE)) {
		/*
		 * Unexpected interrupt in driver point of view. State
		 * variables are either unset or stale so acknowledge and
		 * disable interrupts for suppressing further interrupts if
		 * interrupt really came from this HW (E.g. firmware has left
		 * the HW active).
		 */
		regmap_write(dev->map, DW_IC_INTR_MASK, 0);
		return IRQ_HANDLED;
	}

	if (stat & DW_IC_INTR_TX_ABRT) {
		dev->cmd_err |= DW_IC_ERR_TX_ABRT;
		dev->status &= ~STATUS_MASK;
		dev->rx_outstanding = 0;

		/*
		 * Anytime TX_ABRT is set, the contents of the tx/rx
		 * buffers are flushed. Make sure to skip them.
		 */
		regmap_write(dev->map, DW_IC_INTR_MASK, 0);
		goto tx_aborted;
	}

	if (stat & DW_IC_INTR_RX_FULL)
		i2c_dw_read(dev);

	if (stat & DW_IC_INTR_TX_EMPTY)
		i2c_dw_xfer_msg(dev);

	/*
	 * No need to modify or disable the interrupt mask here.
	 * i2c_dw_xfer_msg() will take care of it according to
	 * the current transmit status.
	 */

tx_aborted:
	/* Complete only when no read commands are still outstanding */
	if (((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err) &&
	    (dev->rx_outstanding == 0))
		complete(&dev->cmd_complete);
	else if (unlikely(dev->flags & ACCESS_INTR_MASK)) {
		/* Workaround to trigger pending interrupt */
		regmap_read(dev->map, DW_IC_INTR_MASK, &stat);
		regmap_write(dev->map, DW_IC_INTR_MASK, 0);
		regmap_write(dev->map, DW_IC_INTR_MASK, stat);
	}

	return IRQ_HANDLED;
}
874
875void i2c_dw_configure_master(struct dw_i2c_dev *dev)
876{
877 struct i2c_timings *t = &dev->timings;
878
879 dev->functionality = I2C_FUNC_10BIT_ADDR | DW_IC_DEFAULT_FUNCTIONALITY;
880
881 dev->master_cfg = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
882 DW_IC_CON_RESTART_EN;
883
884 dev->mode = DW_IC_MASTER;
885
886 switch (t->bus_freq_hz) {
887 case I2C_MAX_STANDARD_MODE_FREQ:
888 dev->master_cfg |= DW_IC_CON_SPEED_STD;
889 break;
890 case I2C_MAX_HIGH_SPEED_MODE_FREQ:
891 dev->master_cfg |= DW_IC_CON_SPEED_HIGH;
892 break;
893 default:
894 dev->master_cfg |= DW_IC_CON_SPEED_FAST;
895 }
896}
897EXPORT_SYMBOL_GPL(i2c_dw_configure_master);
898
/*
 * Quiesce the controller before GPIO bus recovery: disable the adapter,
 * assert its reset line and gate the clock. Mirrored by
 * i2c_dw_unprepare_recovery() in the reverse order.
 */
static void i2c_dw_prepare_recovery(struct i2c_adapter *adap)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);

	i2c_dw_disable(dev);
	reset_control_assert(dev->rst);
	i2c_dw_prepare_clk(dev, false);
}
907
/*
 * Bring the controller back after GPIO bus recovery: ungate the clock,
 * deassert reset and re-run the master init sequence.
 */
static void i2c_dw_unprepare_recovery(struct i2c_adapter *adap)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);

	i2c_dw_prepare_clk(dev, true);
	reset_control_deassert(dev->rst);
	i2c_dw_init_master(dev);
}
916
/*
 * Set up generic SCL/SDA GPIO bus recovery if the platform provides the
 * GPIOs. An absent "scl" GPIO means no recovery (returns 0 without
 * registering anything); an absent "sda" GPIO still allows SCL-only
 * recovery. Missing pinctrl is reported but not fatal.
 *
 * Return: 0 on success or when recovery is unavailable, negative errno
 * on GPIO/pinctrl lookup failure.
 */
static int i2c_dw_init_recovery_info(struct dw_i2c_dev *dev)
{
	struct i2c_bus_recovery_info *rinfo = &dev->rinfo;
	struct i2c_adapter *adap = &dev->adapter;
	struct gpio_desc *gpio;

	gpio = devm_gpiod_get_optional(dev->dev, "scl", GPIOD_OUT_HIGH);
	if (IS_ERR_OR_NULL(gpio))
		return PTR_ERR_OR_ZERO(gpio);

	rinfo->scl_gpiod = gpio;

	gpio = devm_gpiod_get_optional(dev->dev, "sda", GPIOD_IN);
	if (IS_ERR(gpio))
		return PTR_ERR(gpio);
	rinfo->sda_gpiod = gpio;

	rinfo->pinctrl = devm_pinctrl_get(dev->dev);
	if (IS_ERR(rinfo->pinctrl)) {
		if (PTR_ERR(rinfo->pinctrl) == -EPROBE_DEFER)
			return PTR_ERR(rinfo->pinctrl);

		rinfo->pinctrl = NULL;
		dev_err(dev->dev, "getting pinctrl info failed: bus recovery might not work\n");
	} else if (!rinfo->pinctrl) {
		dev_dbg(dev->dev, "pinctrl is disabled, bus recovery might not work\n");
	}

	rinfo->recover_bus = i2c_generic_scl_recovery;
	rinfo->prepare_recovery = i2c_dw_prepare_recovery;
	rinfo->unprepare_recovery = i2c_dw_unprepare_recovery;
	adap->bus_recovery_info = rinfo;

	dev_info(dev->dev, "running with gpio recovery mode! scl%s",
		 rinfo->sda_gpiod ? ",sda" : "");

	return 0;
}
955
956static int i2c_dw_poll_adap_quirk(struct dw_i2c_dev *dev)
957{
958 struct i2c_adapter *adap = &dev->adapter;
959 int ret;
960
961 pm_runtime_get_noresume(dev->dev);
962 ret = i2c_add_numbered_adapter(adap);
963 if (ret)
964 dev_err(dev->dev, "Failed to add adapter: %d\n", ret);
965 pm_runtime_put_noidle(dev->dev);
966
967 return ret;
968}
969
970static bool i2c_dw_is_model_poll(struct dw_i2c_dev *dev)
971{
972 switch (dev->flags & MODEL_MASK) {
973 case MODEL_AMD_NAVI_GPU:
974 case MODEL_WANGXUN_SP:
975 return true;
976 default:
977 return false;
978 }
979}
980
/**
 * i2c_dw_probe_master() - Probe and register a DesignWare I2C master adapter
 * @dev: device private data
 *
 * Sets up regmap access, timings, FIFO sizes and the hardware, then
 * registers the i2c adapter. Polling-only models skip the IRQ and bus
 * recovery setup.
 *
 * Return: 0 on success, negative errno on failure.
 */
int i2c_dw_probe_master(struct dw_i2c_dev *dev)
{
	struct i2c_adapter *adap = &dev->adapter;
	unsigned long irq_flags;
	unsigned int ic_con;
	int ret;

	init_completion(&dev->cmd_complete);

	dev->init = i2c_dw_init_master;
	dev->disable = i2c_dw_disable;

	ret = i2c_dw_init_regmap(dev);
	if (ret)
		return ret;

	ret = i2c_dw_set_timings_master(dev);
	if (ret)
		return ret;

	ret = i2c_dw_set_fifo_size(dev);
	if (ret)
		return ret;

	/* Lock the bus for accessing DW_IC_CON */
	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	/*
	 * On AMD platforms BIOS advertises the bus clear feature
	 * and enables the SCL/SDA stuck low. SMU FW does the
	 * bus recovery process. Driver should not ignore this BIOS
	 * advertisement of bus clear feature.
	 */
	ret = regmap_read(dev->map, DW_IC_CON, &ic_con);
	i2c_dw_release_lock(dev);
	if (ret)
		return ret;

	if (ic_con & DW_IC_CON_BUS_CLEAR_CTRL)
		dev->master_cfg |= DW_IC_CON_BUS_CLEAR_CTRL;

	ret = dev->init(dev);
	if (ret)
		return ret;

	snprintf(adap->name, sizeof(adap->name),
		 "Synopsys DesignWare I2C adapter");
	adap->retries = 3;
	adap->algo = &i2c_dw_algo;
	adap->quirks = &i2c_dw_quirks;
	adap->dev.parent = dev->dev;
	i2c_set_adapdata(adap, dev);

	/* Polling-only models register the adapter without an IRQ handler */
	if (i2c_dw_is_model_poll(dev))
		return i2c_dw_poll_adap_quirk(dev);

	if (dev->flags & ACCESS_NO_IRQ_SUSPEND) {
		irq_flags = IRQF_NO_SUSPEND;
	} else {
		irq_flags = IRQF_SHARED | IRQF_COND_SUSPEND;
	}

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	/* Mask all interrupts before the handler can be invoked */
	regmap_write(dev->map, DW_IC_INTR_MASK, 0);
	i2c_dw_release_lock(dev);

	ret = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr, irq_flags,
			       dev_name(dev->dev), dev);
	if (ret) {
		dev_err(dev->dev, "failure requesting irq %i: %d\n",
			dev->irq, ret);
		return ret;
	}

	ret = i2c_dw_init_recovery_info(dev);
	if (ret)
		return ret;

	/*
	 * Increment PM usage count during adapter registration in order to
	 * avoid possible spurious runtime suspend when adapter device is
	 * registered to the device core and immediate resume in case bus has
	 * registered I2C slaves that do I2C transfers in their probe.
	 */
	pm_runtime_get_noresume(dev->dev);
	ret = i2c_add_numbered_adapter(adap);
	if (ret)
		dev_err(dev->dev, "failure adding adapter: %d\n", ret);
	pm_runtime_put_noidle(dev->dev);

	return ret;
}
EXPORT_SYMBOL_GPL(i2c_dw_probe_master);
1079
1080MODULE_DESCRIPTION("Synopsys DesignWare I2C bus master adapter");
1081MODULE_LICENSE("GPL");