// SPDX-License-Identifier: GPL-2.0
/*
 * DMA support for Internal DMAC with SDHI SD/SDIO controller
 *
 * Copyright (C) 2016-19 Renesas Electronics Corporation
 * Copyright (C) 2016-17 Horms Solutions, Simon Horman
 * Copyright (C) 2018-19 Sang Engineering, Wolfram Sang
 */

#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/mmc/host.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pagemap.h>
#include <linux/platform_data/tmio.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/sys_soc.h>

#include "renesas_sdhi.h"
#include "tmio_mmc.h"

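/* Internal DMAC register offsets, relative to host->ctl */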
#define DM_CM_DTRAN_MODE 0x820
#define DM_CM_DTRAN_CTRL 0x828
#define DM_CM_RST 0x830
#define DM_CM_INFO1 0x840
#define DM_CM_INFO1_MASK 0x848
#define DM_CM_INFO2 0x850
#define DM_CM_INFO2_MASK 0x858
#define DM_DTRAN_ADDR 0x880

/* DM_CM_DTRAN_MODE */
#define DTRAN_MODE_CH_NUM_CH0 0	/* "downstream" = for write commands */
#define DTRAN_MODE_CH_NUM_CH1 BIT(16)	/* "upstream" = for read commands */
#define DTRAN_MODE_BUS_WIDTH (BIT(5) | BIT(4))
#define DTRAN_MODE_ADDR_MODE BIT(0)	/* 1 = Increment address, 0 = Fixed */

/* DM_CM_DTRAN_CTRL */
#define DTRAN_CTRL_DM_START BIT(0)

/* DM_CM_RST */
#define RST_DTRANRST1 BIT(9)
#define RST_DTRANRST0 BIT(8)
#define RST_RESERVED_BITS GENMASK_ULL(31, 0)

/* DM_CM_INFO1 and DM_CM_INFO1_MASK */
#define INFO1_MASK_CLEAR GENMASK_ULL(31, 0)
#define INFO1_DTRANEND1 BIT(20)
#define INFO1_DTRANEND1_OLD BIT(17)
#define INFO1_DTRANEND0 BIT(16)

/* DM_CM_INFO2 and DM_CM_INFO2_MASK */
#define INFO2_MASK_CLEAR GENMASK_ULL(31, 0)
#define INFO2_DTRANERR1 BIT(17)
#define INFO2_DTRANERR0 BIT(16)

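/*
 * host_cookie values used to track the mapping state of an mmc_data's
 * scatterlist: .pre_req() maps with COOKIE_PRE_MAPPED, start_dma() maps on
 * demand with COOKIE_MAPPED, and unmapping resets it to COOKIE_UNMAPPED.
 */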
enum renesas_sdhi_dma_cookie {
	COOKIE_UNMAPPED,
	COOKIE_PRE_MAPPED,
	COOKIE_MAPPED,
};

/*
 * Specification of this driver:
 * - host->chan_{rx,tx} are used as flags for enabling/disabling DMA
 * - since this SDHI DMAC register set is 32-bit wide rather than 16-bit
 *   like the regular SD registers, we need custom accessors
 */

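/* Driver-global state bits, currently only SDHI_INTERNAL_DMAC_RX_IN_USE below */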
static unsigned long global_flags;
/*
 * Workaround to avoid using the RX DMAC from multiple channels. On R-Car M3-W
 * ES1.0, when multiple SDHI channels use the RX DMAC simultaneously, sometimes
 * hundreds of data bytes are not stored into system memory even though the
 * DMAC interrupt fired. Therefore, this driver allows only one RX DMAC user at
 * a time.
 */
#define SDHI_INTERNAL_DMAC_RX_IN_USE 0

/* Definitions for sampling clocks */
static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = {
	{
		.clk_rate = 0,
		.tap = 0x00000300,
		.tap_hs400_4tap = 0x00000100,
	},
};

static const struct renesas_sdhi_of_data of_data_rza2 = {
	.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL |
		      TMIO_MMC_HAVE_CBSY,
	.tmio_ocr_mask = MMC_VDD_32_33,
	.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
			MMC_CAP_CMD23 | MMC_CAP_WAIT_WHILE_BUSY,
	.bus_shift = 2,
	.scc_offset = 0 - 0x1000,
	.taps = rcar_gen3_scc_taps,
	.taps_num = ARRAY_SIZE(rcar_gen3_scc_taps),
	/* DMAC can handle 32bit blk count but only 1 segment */
	.max_blk_count = UINT_MAX / TMIO_MAX_BLK_SIZE,
	.max_segs = 1,
};

static const struct renesas_sdhi_of_data of_data_rcar_gen3 = {
	.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL |
		      TMIO_MMC_HAVE_CBSY | TMIO_MMC_MIN_RCAR2,
	.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
			MMC_CAP_CMD23 | MMC_CAP_WAIT_WHILE_BUSY,
	.capabilities2 = MMC_CAP2_NO_WRITE_PROTECT | MMC_CAP2_MERGE_CAPABLE,
	.bus_shift = 2,
	.scc_offset = 0x1000,
	.taps = rcar_gen3_scc_taps,
	.taps_num = ARRAY_SIZE(rcar_gen3_scc_taps),
	/* DMAC can handle 32bit blk count but only 1 segment */
	.max_blk_count = UINT_MAX / TMIO_MAX_BLK_SIZE,
	.max_segs = 1,
	.sdhi_flags = SDHI_FLAG_NEED_CLKH_FALLBACK,
};

static const struct renesas_sdhi_of_data of_data_rcar_gen3_no_sdh_fallback = {
	.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL |
		      TMIO_MMC_HAVE_CBSY | TMIO_MMC_MIN_RCAR2,
	.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
			MMC_CAP_CMD23 | MMC_CAP_WAIT_WHILE_BUSY,
	.capabilities2 = MMC_CAP2_NO_WRITE_PROTECT | MMC_CAP2_MERGE_CAPABLE,
	.bus_shift = 2,
	.scc_offset = 0x1000,
	.taps = rcar_gen3_scc_taps,
	.taps_num = ARRAY_SIZE(rcar_gen3_scc_taps),
	/* DMAC can handle 32bit blk count but only 1 segment */
	.max_blk_count = UINT_MAX / TMIO_MAX_BLK_SIZE,
	.max_segs = 1,
};

static const u8 r8a7796_es13_calib_table[2][SDHI_CALIB_TABLE_MAX] = {
	{ 3, 3, 3, 3, 3, 3, 3, 4, 4, 5, 6, 7, 8, 9, 10, 15,
	  16, 16, 16, 16, 16, 16, 17, 18, 18, 19, 20, 21, 22, 23, 24, 25 },
	{ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 7, 8, 11,
	  12, 17, 18, 18, 18, 18, 18, 18, 18, 19, 20, 21, 22, 23, 25, 25 }
};

static const u8 r8a77965_calib_table[2][SDHI_CALIB_TABLE_MAX] = {
	{ 1, 2, 6, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 15, 15, 16,
	  17, 18, 19, 20, 21, 22, 23, 24, 25, 25, 26, 27, 28, 29, 30, 31 },
	{ 2, 3, 4, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 16, 17,
	  17, 17, 20, 21, 22, 23, 24, 25, 27, 28, 29, 30, 31, 31, 31, 31 }
};

static const u8 r8a77990_calib_table[2][SDHI_CALIB_TABLE_MAX] = {
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
	{ 0, 0, 0, 1, 2, 3, 3, 4, 4, 4, 5, 5, 6, 8, 9, 10,
	  11, 12, 13, 15, 16, 17, 17, 18, 18, 19, 20, 22, 24, 25, 26, 26 }
};

static const struct renesas_sdhi_quirks sdhi_quirks_4tap_nohs400 = {
	.hs400_disabled = true,
	.hs400_4taps = true,
};

static const struct renesas_sdhi_quirks sdhi_quirks_4tap_nohs400_one_rx = {
	.hs400_disabled = true,
	.hs400_4taps = true,
	.dma_one_rx_only = true,
	.old_info1_layout = true,
};

static const struct renesas_sdhi_quirks sdhi_quirks_4tap = {
	.hs400_4taps = true,
	.hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
	.manual_tap_correction = true,
};

static const struct renesas_sdhi_quirks sdhi_quirks_nohs400 = {
	.hs400_disabled = true,
};

static const struct renesas_sdhi_quirks sdhi_quirks_fixed_addr = {
	.fixed_addr_mode = true,
};

static const struct renesas_sdhi_quirks sdhi_quirks_bad_taps1357 = {
	.hs400_bad_taps = BIT(1) | BIT(3) | BIT(5) | BIT(7),
	.manual_tap_correction = true,
};

static const struct renesas_sdhi_quirks sdhi_quirks_bad_taps2367 = {
	.hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
	.manual_tap_correction = true,
};

static const struct renesas_sdhi_quirks sdhi_quirks_r8a7796_es13 = {
	.hs400_4taps = true,
	.hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
	.hs400_calib_table = r8a7796_es13_calib_table,
	.manual_tap_correction = true,
};

static const struct renesas_sdhi_quirks sdhi_quirks_r8a77965 = {
	.hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
	.hs400_calib_table = r8a77965_calib_table,
	.manual_tap_correction = true,
};

static const struct renesas_sdhi_quirks sdhi_quirks_r8a77990 = {
	.hs400_calib_table = r8a77990_calib_table,
	.manual_tap_correction = true,
};

static const struct renesas_sdhi_quirks sdhi_quirks_rzg2l = {
	.fixed_addr_mode = true,
	.hs400_disabled = true,
};

/*
 * Note for r8a7796 / r8a774a1: we cannot distinguish ES1.1 and ES1.2 at the
 * moment, so we treat them equally. We still keep an explicit match for ES1.2
 * to enforce this should a way to distinguish ES1.2 ever become available.
 */
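/*
 * SoC-revision-specific quirk overrides: when an entry below matches at probe
 * time, it takes precedence over the quirks selected via the OF match data.
 */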
static const struct soc_device_attribute sdhi_quirks_match[] = {
	{ .soc_id = "r8a774a1", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
	{ .soc_id = "r8a7795", .revision = "ES2.0", .data = &sdhi_quirks_4tap },
	{ .soc_id = "r8a7796", .revision = "ES1.0", .data = &sdhi_quirks_4tap_nohs400_one_rx },
	{ .soc_id = "r8a7796", .revision = "ES1.[12]", .data = &sdhi_quirks_4tap_nohs400 },
	{ .soc_id = "r8a7796", .revision = "ES1.*", .data = &sdhi_quirks_r8a7796_es13 },
	{ .soc_id = "r8a77980", .revision = "ES1.*", .data = &sdhi_quirks_nohs400 },
	{ /* Sentinel. */ }
};

static const struct renesas_sdhi_of_data_with_quirks of_r8a7795_compatible = {
	.of_data = &of_data_rcar_gen3,
	.quirks = &sdhi_quirks_bad_taps2367,
};

static const struct renesas_sdhi_of_data_with_quirks of_r8a77961_compatible = {
	.of_data = &of_data_rcar_gen3,
	.quirks = &sdhi_quirks_bad_taps1357,
};

static const struct renesas_sdhi_of_data_with_quirks of_r8a77965_compatible = {
	.of_data = &of_data_rcar_gen3,
	.quirks = &sdhi_quirks_r8a77965,
};

static const struct renesas_sdhi_of_data_with_quirks of_r8a77970_compatible = {
	.of_data = &of_data_rcar_gen3_no_sdh_fallback,
	.quirks = &sdhi_quirks_nohs400,
};

static const struct renesas_sdhi_of_data_with_quirks of_r8a77990_compatible = {
	.of_data = &of_data_rcar_gen3,
	.quirks = &sdhi_quirks_r8a77990,
};

static const struct renesas_sdhi_of_data_with_quirks of_rzg2l_compatible = {
	.of_data = &of_data_rcar_gen3,
	.quirks = &sdhi_quirks_rzg2l,
};

static const struct renesas_sdhi_of_data_with_quirks of_rcar_gen3_compatible = {
	.of_data = &of_data_rcar_gen3,
};

static const struct renesas_sdhi_of_data_with_quirks of_rcar_gen3_nohs400_compatible = {
	.of_data = &of_data_rcar_gen3,
	.quirks = &sdhi_quirks_nohs400,
};

static const struct renesas_sdhi_of_data_with_quirks of_rza2_compatible = {
	.of_data = &of_data_rza2,
	.quirks = &sdhi_quirks_fixed_addr,
};

static const struct of_device_id renesas_sdhi_internal_dmac_of_match[] = {
	{ .compatible = "renesas,sdhi-r7s9210", .data = &of_rza2_compatible, },
	{ .compatible = "renesas,sdhi-mmc-r8a77470", .data = &of_rcar_gen3_compatible, },
	{ .compatible = "renesas,sdhi-r8a7795", .data = &of_r8a7795_compatible, },
	{ .compatible = "renesas,sdhi-r8a77961", .data = &of_r8a77961_compatible, },
	{ .compatible = "renesas,sdhi-r8a77965", .data = &of_r8a77965_compatible, },
	{ .compatible = "renesas,sdhi-r8a77970", .data = &of_r8a77970_compatible, },
	{ .compatible = "renesas,sdhi-r8a77990", .data = &of_r8a77990_compatible, },
	{ .compatible = "renesas,sdhi-r8a77995", .data = &of_rcar_gen3_nohs400_compatible, },
	{ .compatible = "renesas,sdhi-r9a09g011", .data = &of_rzg2l_compatible, },
	{ .compatible = "renesas,sdhi-r9a09g057", .data = &of_rzg2l_compatible, },
	{ .compatible = "renesas,rzg2l-sdhi", .data = &of_rzg2l_compatible, },
	{ .compatible = "renesas,rcar-gen3-sdhi", .data = &of_rcar_gen3_compatible, },
	{ .compatible = "renesas,rcar-gen4-sdhi", .data = &of_rcar_gen3_compatible, },
	{},
};
MODULE_DEVICE_TABLE(of, renesas_sdhi_internal_dmac_of_match);

static void
renesas_sdhi_internal_dmac_enable_dma(struct tmio_mmc_host *host, bool enable)
{
	struct renesas_sdhi *priv = host_to_priv(host);
	u32 dma_irqs = INFO1_DTRANEND0 |
		       (sdhi_has_quirk(priv, old_info1_layout) ?
			INFO1_DTRANEND1_OLD : INFO1_DTRANEND1);

	if (!host->chan_tx || !host->chan_rx)
		return;

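	/* When enabling, unmask only the DTRANEND interrupts; otherwise mask all DMAC interrupts */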
	writel(enable ? ~dma_irqs : INFO1_MASK_CLEAR, host->ctl + DM_CM_INFO1_MASK);

	if (priv->dma_priv.enable)
		priv->dma_priv.enable(host, enable);
}

static void
renesas_sdhi_internal_dmac_abort_dma(struct tmio_mmc_host *host)
{
	u64 val = RST_DTRANRST1 | RST_DTRANRST0;

	renesas_sdhi_internal_dmac_enable_dma(host, false);

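	/* Reset both DMAC channels by toggling the DTRANRST bits while keeping the reserved bits set */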
	writel(RST_RESERVED_BITS & ~val, host->ctl + DM_CM_RST);
	writel(RST_RESERVED_BITS | val, host->ctl + DM_CM_RST);

	clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags);

	renesas_sdhi_internal_dmac_enable_dma(host, true);
}

static bool renesas_sdhi_internal_dmac_dma_irq(struct tmio_mmc_host *host)
{
	struct renesas_sdhi *priv = host_to_priv(host);
	struct renesas_sdhi_dma *dma_priv = &priv->dma_priv;

	u32 dma_irqs = INFO1_DTRANEND0 |
		       (sdhi_has_quirk(priv, old_info1_layout) ?
			INFO1_DTRANEND1_OLD : INFO1_DTRANEND1);
	u32 status = readl(host->ctl + DM_CM_INFO1);

	if (status & dma_irqs) {
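		/* Acknowledge only the DTRANEND bits, leaving the other INFO1 bits untouched */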
		writel(status ^ dma_irqs, host->ctl + DM_CM_INFO1);
		set_bit(SDHI_DMA_END_FLAG_DMA, &dma_priv->end_flags);
		if (test_bit(SDHI_DMA_END_FLAG_ACCESS, &dma_priv->end_flags))
			queue_work(system_bh_wq, &dma_priv->dma_complete);
	}

	return status & dma_irqs;
}

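/*
 * Completion is only scheduled once both the DMAC transfer end (DMA flag, set
 * in the irq handler above) and the access end from the SDHI core (ACCESS
 * flag, set here) have been seen, or when the data transfer reported an error.
 */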
static void
renesas_sdhi_internal_dmac_dataend_dma(struct tmio_mmc_host *host)
{
	struct renesas_sdhi *priv = host_to_priv(host);
	struct renesas_sdhi_dma *dma_priv = &priv->dma_priv;

	set_bit(SDHI_DMA_END_FLAG_ACCESS, &dma_priv->end_flags);
	if (test_bit(SDHI_DMA_END_FLAG_DMA, &dma_priv->end_flags) ||
	    host->data->error)
		queue_work(system_bh_wq, &dma_priv->dma_complete);
}

/*
 * renesas_sdhi_internal_dmac_map() may be called by .pre_req() with two
 * different sg pointers in two mmc_data structures, but the tmio host can
 * hold only a single sg_ptr. Therefore, renesas_sdhi_internal_dmac_{un}map()
 * must use the sg pointer in the mmc_data rather than host->sg_ptr.
 */
static void
renesas_sdhi_internal_dmac_unmap(struct tmio_mmc_host *host,
				 struct mmc_data *data,
				 enum renesas_sdhi_dma_cookie cookie)
{
	bool unmap = cookie == COOKIE_UNMAPPED ? (data->host_cookie != cookie) :
						 (data->host_cookie == cookie);

	if (unmap) {
		dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
			     mmc_get_dma_dir(data));
		data->host_cookie = COOKIE_UNMAPPED;
	}
}

static bool
renesas_sdhi_internal_dmac_map(struct tmio_mmc_host *host,
			       struct mmc_data *data,
			       enum renesas_sdhi_dma_cookie cookie)
{
	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return true;

	if (!dma_map_sg(&host->pdev->dev, data->sg, data->sg_len,
			mmc_get_dma_dir(data)))
		return false;

	data->host_cookie = cookie;

	/* This DMAC needs buffers to be 128-byte aligned */
	if (!IS_ALIGNED(sg_dma_address(data->sg), 128)) {
		renesas_sdhi_internal_dmac_unmap(host, data, cookie);
		return false;
	}

	return true;
}

static void
renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
				     struct mmc_data *data)
{
	struct renesas_sdhi *priv = host_to_priv(host);
	struct scatterlist *sg = host->sg_ptr;
	u32 dtran_mode = DTRAN_MODE_BUS_WIDTH;

	if (!sdhi_has_quirk(priv, fixed_addr_mode))
		dtran_mode |= DTRAN_MODE_ADDR_MODE;

	if (!renesas_sdhi_internal_dmac_map(host, data, COOKIE_MAPPED))
		goto force_pio;

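	/* Reads use the "upstream" channel 1, writes the "downstream" channel 0 */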
	if (data->flags & MMC_DATA_READ) {
		dtran_mode |= DTRAN_MODE_CH_NUM_CH1;
		if (sdhi_has_quirk(priv, dma_one_rx_only) &&
		    test_and_set_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags))
			goto force_pio_with_unmap;
	} else {
		dtran_mode |= DTRAN_MODE_CH_NUM_CH0;
	}

	priv->dma_priv.end_flags = 0;
	renesas_sdhi_internal_dmac_enable_dma(host, true);

	/* set dma parameters */
	writel(dtran_mode, host->ctl + DM_CM_DTRAN_MODE);
	writel(sg_dma_address(sg), host->ctl + DM_DTRAN_ADDR);

	host->dma_on = true;

	return;

force_pio_with_unmap:
	renesas_sdhi_internal_dmac_unmap(host, data, COOKIE_UNMAPPED);

force_pio:
	renesas_sdhi_internal_dmac_enable_dma(host, false);
}

static void renesas_sdhi_internal_dmac_issue_work_fn(struct work_struct *work)
{
	struct tmio_mmc_host *host = from_work(host, work, dma_issue);
	struct renesas_sdhi *priv = host_to_priv(host);

	tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);

	if (!host->cmd->error) {
		/* start the DMAC */
		writel(DTRAN_CTRL_DM_START, host->ctl + DM_CM_DTRAN_CTRL);
	} else {
		/* on CMD errors, simulate DMA end immediately */
		set_bit(SDHI_DMA_END_FLAG_DMA, &priv->dma_priv.end_flags);
		if (test_bit(SDHI_DMA_END_FLAG_ACCESS, &priv->dma_priv.end_flags))
			queue_work(system_bh_wq, &priv->dma_priv.dma_complete);
	}
}

static bool renesas_sdhi_internal_dmac_complete(struct tmio_mmc_host *host)
{
	enum dma_data_direction dir;

	if (!host->dma_on)
		return false;

	if (!host->data)
		return false;

	if (host->data->flags & MMC_DATA_READ)
		dir = DMA_FROM_DEVICE;
	else
		dir = DMA_TO_DEVICE;

	renesas_sdhi_internal_dmac_enable_dma(host, false);
	renesas_sdhi_internal_dmac_unmap(host, host->data, COOKIE_MAPPED);

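	/* Release the shared RX DMAC again, see SDHI_INTERNAL_DMAC_RX_IN_USE */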
	if (dir == DMA_FROM_DEVICE)
		clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags);

	host->dma_on = false;

	return true;
}

static void renesas_sdhi_internal_dmac_complete_work_fn(struct work_struct *work)
{
	struct renesas_sdhi_dma *dma_priv = from_work(dma_priv, work, dma_complete);
	struct renesas_sdhi *priv = container_of(dma_priv, typeof(*priv), dma_priv);
	struct tmio_mmc_host *host = priv->host;

	spin_lock_irq(&host->lock);
	if (!renesas_sdhi_internal_dmac_complete(host))
		goto out;

	tmio_mmc_do_data_irq(host);
out:
	spin_unlock_irq(&host->lock);
}

static void renesas_sdhi_internal_dmac_end_dma(struct tmio_mmc_host *host)
{
	if (host->data)
		renesas_sdhi_internal_dmac_complete(host);
}

static void renesas_sdhi_internal_dmac_post_req(struct mmc_host *mmc,
						struct mmc_request *mrq,
						int err)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!data)
		return;

	renesas_sdhi_internal_dmac_unmap(host, data, COOKIE_UNMAPPED);
}

static void renesas_sdhi_internal_dmac_pre_req(struct mmc_host *mmc,
					       struct mmc_request *mrq)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!data)
		return;

	data->host_cookie = COOKIE_UNMAPPED;
	renesas_sdhi_internal_dmac_map(host, data, COOKIE_PRE_MAPPED);
}

static void
renesas_sdhi_internal_dmac_request_dma(struct tmio_mmc_host *host,
				       struct tmio_mmc_data *pdata)
{
	struct renesas_sdhi *priv = host_to_priv(host);

	/* Disable DMAC interrupts initially */
	writel(INFO1_MASK_CLEAR, host->ctl + DM_CM_INFO1_MASK);
	writel(INFO2_MASK_CLEAR, host->ctl + DM_CM_INFO2_MASK);
	writel(0, host->ctl + DM_CM_INFO1);
	writel(0, host->ctl + DM_CM_INFO2);

	/* Set to a dummy non-NULL value to mark DMA as enabled for each direction */
	host->chan_rx = host->chan_tx = (void *)0xdeadbeaf;

	INIT_WORK(&priv->dma_priv.dma_complete,
		  renesas_sdhi_internal_dmac_complete_work_fn);
	INIT_WORK(&host->dma_issue,
		  renesas_sdhi_internal_dmac_issue_work_fn);

	/* Add pre_req and post_req */
	host->ops.pre_req = renesas_sdhi_internal_dmac_pre_req;
	host->ops.post_req = renesas_sdhi_internal_dmac_post_req;
}

static void
renesas_sdhi_internal_dmac_release_dma(struct tmio_mmc_host *host)
{
	/* Set back to NULL to mark DMA as disabled for each direction */
	host->chan_rx = host->chan_tx = NULL;
}

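/* DMA ops passed to the SDHI core via renesas_sdhi_probe() below */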
static const struct tmio_mmc_dma_ops renesas_sdhi_internal_dmac_dma_ops = {
	.start = renesas_sdhi_internal_dmac_start_dma,
	.enable = renesas_sdhi_internal_dmac_enable_dma,
	.request = renesas_sdhi_internal_dmac_request_dma,
	.release = renesas_sdhi_internal_dmac_release_dma,
	.abort = renesas_sdhi_internal_dmac_abort_dma,
	.dataend = renesas_sdhi_internal_dmac_dataend_dma,
	.end = renesas_sdhi_internal_dmac_end_dma,
	.dma_irq = renesas_sdhi_internal_dmac_dma_irq,
};

static int renesas_sdhi_internal_dmac_probe(struct platform_device *pdev)
{
	const struct soc_device_attribute *attr;
	const struct renesas_sdhi_of_data_with_quirks *of_data_quirks;
	const struct renesas_sdhi_quirks *quirks;
	struct device *dev = &pdev->dev;

	of_data_quirks = of_device_get_match_data(&pdev->dev);
	quirks = of_data_quirks->quirks;

	attr = soc_device_match(sdhi_quirks_match);
	if (attr)
		quirks = attr->data;

	/* value is max of SD_SECCNT. Confirmed by HW engineers */
	dma_set_max_seg_size(dev, 0xffffffff);

	return renesas_sdhi_probe(pdev, &renesas_sdhi_internal_dmac_dma_ops,
				  of_data_quirks->of_data, quirks);
}

static const struct dev_pm_ops renesas_sdhi_internal_dmac_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(tmio_mmc_host_runtime_suspend,
			   tmio_mmc_host_runtime_resume,
			   NULL)
};

static struct platform_driver renesas_internal_dmac_sdhi_driver = {
	.driver = {
		.name = "renesas_sdhi_internal_dmac",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.pm = &renesas_sdhi_internal_dmac_dev_pm_ops,
		.of_match_table = renesas_sdhi_internal_dmac_of_match,
	},
	.probe = renesas_sdhi_internal_dmac_probe,
	.remove = renesas_sdhi_remove,
};

module_platform_driver(renesas_internal_dmac_sdhi_driver);

MODULE_DESCRIPTION("Renesas SDHI driver for internal DMAC");
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_LICENSE("GPL v2");