1// SPDX-License-Identifier: GPL-2.0-or-later
2/* Driver for Realtek PCI-Express card reader
3 *
4 * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
5 *
6 * Author:
7 * Wei WANG <wei_wang@realsil.com.cn>
8 */
9
10#include <linux/pci.h>
11#include <linux/module.h>
12#include <linux/slab.h>
13#include <linux/dma-mapping.h>
14#include <linux/highmem.h>
15#include <linux/interrupt.h>
16#include <linux/delay.h>
17#include <linux/idr.h>
18#include <linux/platform_device.h>
19#include <linux/mfd/core.h>
20#include <linux/rtsx_pci.h>
21#include <linux/mmc/card.h>
22#include <asm/unaligned.h>
23#include <linux/pm.h>
24#include <linux/pm_runtime.h>
25
26#include "rtsx_pcr.h"
27#include "rts5261.h"
28#include "rts5228.h"
29#include "rts5264.h"
30
31static bool msi_en = true;
32module_param(msi_en, bool, S_IRUGO | S_IWUSR);
33MODULE_PARM_DESC(msi_en, "Enable MSI");
34
35static DEFINE_IDR(rtsx_pci_idr);
36static DEFINE_SPINLOCK(rtsx_pci_lock);
37
38static struct mfd_cell rtsx_pcr_cells[] = {
39 [RTSX_SD_CARD] = {
40 .name = DRV_NAME_RTSX_PCI_SDMMC,
41 },
42};
43
44static const struct pci_device_id rtsx_pci_ids[] = {
45 { PCI_DEVICE(0x10EC, 0x5209), PCI_CLASS_OTHERS << 16, 0xFF0000 },
46 { PCI_DEVICE(0x10EC, 0x5229), PCI_CLASS_OTHERS << 16, 0xFF0000 },
47 { PCI_DEVICE(0x10EC, 0x5289), PCI_CLASS_OTHERS << 16, 0xFF0000 },
48 { PCI_DEVICE(0x10EC, 0x5227), PCI_CLASS_OTHERS << 16, 0xFF0000 },
49 { PCI_DEVICE(0x10EC, 0x522A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
50 { PCI_DEVICE(0x10EC, 0x5249), PCI_CLASS_OTHERS << 16, 0xFF0000 },
51 { PCI_DEVICE(0x10EC, 0x5287), PCI_CLASS_OTHERS << 16, 0xFF0000 },
52 { PCI_DEVICE(0x10EC, 0x5286), PCI_CLASS_OTHERS << 16, 0xFF0000 },
53 { PCI_DEVICE(0x10EC, 0x524A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
54 { PCI_DEVICE(0x10EC, 0x525A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
55 { PCI_DEVICE(0x10EC, 0x5260), PCI_CLASS_OTHERS << 16, 0xFF0000 },
56 { PCI_DEVICE(0x10EC, 0x5261), PCI_CLASS_OTHERS << 16, 0xFF0000 },
57 { PCI_DEVICE(0x10EC, 0x5228), PCI_CLASS_OTHERS << 16, 0xFF0000 },
58 { PCI_DEVICE(0x10EC, 0x5264), PCI_CLASS_OTHERS << 16, 0xFF0000 },
59 { 0, }
60};
61
62MODULE_DEVICE_TABLE(pci, rtsx_pci_ids);
63
64static int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
65{
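	/* Program the 32-bit LTR latency value byte by byte into the message
	 * TX data registers, then enable software-controlled LTR transmission.
	 */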
66 rtsx_pci_write_register(pcr, MSGTXDATA0,
67 MASK_8_BIT_DEF, (u8) (latency & 0xFF));
68 rtsx_pci_write_register(pcr, MSGTXDATA1,
69 MASK_8_BIT_DEF, (u8)((latency >> 8) & 0xFF));
70 rtsx_pci_write_register(pcr, MSGTXDATA2,
71 MASK_8_BIT_DEF, (u8)((latency >> 16) & 0xFF));
72 rtsx_pci_write_register(pcr, MSGTXDATA3,
73 MASK_8_BIT_DEF, (u8)((latency >> 24) & 0xFF));
74 rtsx_pci_write_register(pcr, LTR_CTL, LTR_TX_EN_MASK |
75 LTR_LATENCY_MODE_MASK, LTR_TX_EN_1 | LTR_LATENCY_MODE_SW);
76
77 return 0;
78}
79
80int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
81{
82 return rtsx_comm_set_ltr_latency(pcr, latency);
83}
84
85static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable)
86{
87 if (pcr->aspm_enabled == enable)
88 return;
89
90 if (pcr->aspm_mode == ASPM_MODE_CFG) {
91 pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
92 PCI_EXP_LNKCTL_ASPMC,
93 enable ? pcr->aspm_en : 0);
94 } else if (pcr->aspm_mode == ASPM_MODE_REG) {
95 if (pcr->aspm_en & 0x02)
96 rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
97 FORCE_ASPM_CTL1, enable ? 0 : FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
98 else
99 rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
100 FORCE_ASPM_CTL1, FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
101 }
102
103 if (!enable && (pcr->aspm_en & 0x02))
104 mdelay(10);
105
106 pcr->aspm_enabled = enable;
107}
108
109static void rtsx_disable_aspm(struct rtsx_pcr *pcr)
110{
111 if (pcr->ops->set_aspm)
112 pcr->ops->set_aspm(pcr, false);
113 else
114 rtsx_comm_set_aspm(pcr, false);
115}
116
117int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val)
118{
119 rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, val);
120
121 return 0;
122}
123
124static void rtsx_set_l1off_sub_cfg_d0(struct rtsx_pcr *pcr, int active)
125{
126 if (pcr->ops->set_l1off_cfg_sub_d0)
127 pcr->ops->set_l1off_cfg_sub_d0(pcr, active);
128}
129
130static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr)
131{
132 struct rtsx_cr_option *option = &pcr->option;
133
134 rtsx_disable_aspm(pcr);
135
136 /* Fixes DMA transfer timeout issue after disabling ASPM on RTS5260 */
137 msleep(1);
138
139 if (option->ltr_enabled)
140 rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
141
142 if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
143 rtsx_set_l1off_sub_cfg_d0(pcr, 1);
144}
145
146static void rtsx_pm_full_on(struct rtsx_pcr *pcr)
147{
148 rtsx_comm_pm_full_on(pcr);
149}
150
151void rtsx_pci_start_run(struct rtsx_pcr *pcr)
152{
153 /* If the PCI device has been removed, don't queue any more work */
154 if (pcr->remove_pci)
155 return;
156
157 if (pcr->state != PDEV_STAT_RUN) {
158 pcr->state = PDEV_STAT_RUN;
159 if (pcr->ops->enable_auto_blink)
160 pcr->ops->enable_auto_blink(pcr);
161 rtsx_pm_full_on(pcr);
162 }
163}
164EXPORT_SYMBOL_GPL(rtsx_pci_start_run);
165
166int rtsx_pci_write_register(struct rtsx_pcr *pcr, u16 addr, u8 mask, u8 data)
167{
168 int i;
169 u32 val = HAIMR_WRITE_START;
170
171 val |= (u32)(addr & 0x3FFF) << 16;
172 val |= (u32)mask << 8;
173 val |= (u32)data;
174
175 rtsx_pci_writel(pcr, RTSX_HAIMR, val);
176
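	/* The controller clears HAIMR_TRANS_END once the access completes; the
	 * low byte then reflects the register content and is compared against
	 * the data just written to detect a failed write.
	 */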
177 for (i = 0; i < MAX_RW_REG_CNT; i++) {
178 val = rtsx_pci_readl(pcr, RTSX_HAIMR);
179 if ((val & HAIMR_TRANS_END) == 0) {
180 if (data != (u8)val)
181 return -EIO;
182 return 0;
183 }
184 }
185
186 return -ETIMEDOUT;
187}
188EXPORT_SYMBOL_GPL(rtsx_pci_write_register);
189
190int rtsx_pci_read_register(struct rtsx_pcr *pcr, u16 addr, u8 *data)
191{
192 u32 val = HAIMR_READ_START;
193 int i;
194
195 val |= (u32)(addr & 0x3FFF) << 16;
196 rtsx_pci_writel(pcr, RTSX_HAIMR, val);
197
198 for (i = 0; i < MAX_RW_REG_CNT; i++) {
199 val = rtsx_pci_readl(pcr, RTSX_HAIMR);
200 if ((val & HAIMR_TRANS_END) == 0)
201 break;
202 }
203
204 if (i >= MAX_RW_REG_CNT)
205 return -ETIMEDOUT;
206
207 if (data)
208 *data = (u8)(val & 0xFF);
209
210 return 0;
211}
212EXPORT_SYMBOL_GPL(rtsx_pci_read_register);
213
214int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
215{
216 int err, i, finished = 0;
217 u8 tmp;
218
219 rtsx_pci_write_register(pcr, PHYDATA0, 0xFF, (u8)val);
220 rtsx_pci_write_register(pcr, PHYDATA1, 0xFF, (u8)(val >> 8));
221 rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr);
222 rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x81);
223
224 for (i = 0; i < 100000; i++) {
225 err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
226 if (err < 0)
227 return err;
228
229 if (!(tmp & 0x80)) {
230 finished = 1;
231 break;
232 }
233 }
234
235 if (!finished)
236 return -ETIMEDOUT;
237
238 return 0;
239}
240
241int rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
242{
243 if (pcr->ops->write_phy)
244 return pcr->ops->write_phy(pcr, addr, val);
245
246 return __rtsx_pci_write_phy_register(pcr, addr, val);
247}
248EXPORT_SYMBOL_GPL(rtsx_pci_write_phy_register);
249
250int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
251{
252 int err, i, finished = 0;
253 u16 data;
254 u8 tmp, val1, val2;
255
256 rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr);
257 rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x80);
258
259 for (i = 0; i < 100000; i++) {
260 err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
261 if (err < 0)
262 return err;
263
264 if (!(tmp & 0x80)) {
265 finished = 1;
266 break;
267 }
268 }
269
270 if (!finished)
271 return -ETIMEDOUT;
272
273 rtsx_pci_read_register(pcr, PHYDATA0, &val1);
274 rtsx_pci_read_register(pcr, PHYDATA1, &val2);
275 data = val1 | (val2 << 8);
276
277 if (val)
278 *val = data;
279
280 return 0;
281}
282
283int rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
284{
285 if (pcr->ops->read_phy)
286 return pcr->ops->read_phy(pcr, addr, val);
287
288 return __rtsx_pci_read_phy_register(pcr, addr, val);
289}
290EXPORT_SYMBOL_GPL(rtsx_pci_read_phy_register);
291
292void rtsx_pci_stop_cmd(struct rtsx_pcr *pcr)
293{
294 if (pcr->ops->stop_cmd)
295 return pcr->ops->stop_cmd(pcr);
296
297 rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
298 rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
299
300 rtsx_pci_write_register(pcr, DMACTL, 0x80, 0x80);
301 rtsx_pci_write_register(pcr, RBCTL, 0x80, 0x80);
302}
303EXPORT_SYMBOL_GPL(rtsx_pci_stop_cmd);
304
305void rtsx_pci_add_cmd(struct rtsx_pcr *pcr,
306 u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
307{
308 unsigned long flags;
309 u32 val = 0;
310 u32 *ptr = (u32 *)(pcr->host_cmds_ptr);
311
312 val |= (u32)(cmd_type & 0x03) << 30;
313 val |= (u32)(reg_addr & 0x3FFF) << 16;
314 val |= (u32)mask << 8;
315 val |= (u32)data;
316
317 spin_lock_irqsave(&pcr->lock, flags);
318 ptr += pcr->ci;
319 if (pcr->ci < (HOST_CMDS_BUF_LEN / 4)) {
320 put_unaligned_le32(val, ptr);
321 ptr++;
322 pcr->ci++;
323 }
324 spin_unlock_irqrestore(&pcr->lock, flags);
325}
326EXPORT_SYMBOL_GPL(rtsx_pci_add_cmd);
327
328void rtsx_pci_send_cmd_no_wait(struct rtsx_pcr *pcr)
329{
330 u32 val = 1 << 31;
331
332 rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
333
334 val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
335 /* Hardware Auto Response */
336 val |= 0x40000000;
337 rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
338}
339EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd_no_wait);
340
341int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout)
342{
343 struct completion trans_done;
344 u32 val = 1 << 31;
345 long timeleft;
346 unsigned long flags;
347 int err = 0;
348
349 spin_lock_irqsave(&pcr->lock, flags);
350
351 /* set up data structures for the wakeup system */
352 pcr->done = &trans_done;
353 pcr->trans_result = TRANS_NOT_READY;
354 init_completion(&trans_done);
355
356 rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
357
358 val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
359 /* Hardware Auto Response */
360 val |= 0x40000000;
361 rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
362
363 spin_unlock_irqrestore(&pcr->lock, flags);
364
365 /* Wait for TRANS_OK_INT */
366 timeleft = wait_for_completion_interruptible_timeout(
367 &trans_done, msecs_to_jiffies(timeout));
368 if (timeleft <= 0) {
369 pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
370 err = -ETIMEDOUT;
371 goto finish_send_cmd;
372 }
373
374 spin_lock_irqsave(&pcr->lock, flags);
375 if (pcr->trans_result == TRANS_RESULT_FAIL)
376 err = -EINVAL;
377 else if (pcr->trans_result == TRANS_RESULT_OK)
378 err = 0;
379 else if (pcr->trans_result == TRANS_NO_DEVICE)
380 err = -ENODEV;
381 spin_unlock_irqrestore(&pcr->lock, flags);
382
383finish_send_cmd:
384 spin_lock_irqsave(&pcr->lock, flags);
385 pcr->done = NULL;
386 spin_unlock_irqrestore(&pcr->lock, flags);
387
388 if ((err < 0) && (err != -ENODEV))
389 rtsx_pci_stop_cmd(pcr);
390
391 if (pcr->finish_me)
392 complete(pcr->finish_me);
393
394 return err;
395}
396EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd);
397
398static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr,
399 dma_addr_t addr, unsigned int len, int end)
400{
401 u64 *ptr = (u64 *)(pcr->host_sg_tbl_ptr) + pcr->sgi;
402 u64 val;
403 u8 option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;
404
405 pcr_dbg(pcr, "DMA addr: 0x%x, Len: 0x%x\n", (unsigned int)addr, len);
406
407 if (end)
408 option |= RTSX_SG_END;
409
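	/* Each descriptor packs the DMA address in the upper 32 bits and the
	 * length plus option flags in the lower 32 bits. RTS5261/RTS5228 place
	 * the low 16 bits of the length at bit 16 (overflow bits at bit 6);
	 * older controllers place the whole length at bit 12.
	 */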
410 if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5228)) {
411 if (len > 0xFFFF)
412 val = ((u64)addr << 32) | (((u64)len & 0xFFFF) << 16)
413 | (((u64)len >> 16) << 6) | option;
414 else
415 val = ((u64)addr << 32) | ((u64)len << 16) | option;
416 } else {
417 val = ((u64)addr << 32) | ((u64)len << 12) | option;
418 }
419 put_unaligned_le64(val, ptr);
420 pcr->sgi++;
421}
422
423int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
424 int num_sg, bool read, int timeout)
425{
426 int err = 0, count;
427
428 pcr_dbg(pcr, "--> %s: num_sg = %d\n", __func__, num_sg);
429 count = rtsx_pci_dma_map_sg(pcr, sglist, num_sg, read);
430 if (count < 1)
431 return -EINVAL;
432 pcr_dbg(pcr, "DMA mapping count: %d\n", count);
433
434 err = rtsx_pci_dma_transfer(pcr, sglist, count, read, timeout);
435
436 rtsx_pci_dma_unmap_sg(pcr, sglist, num_sg, read);
437
438 return err;
439}
440EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data);
441
442int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
443 int num_sg, bool read)
444{
445 enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
446
447 if (pcr->remove_pci)
448 return -EINVAL;
449
450 if ((sglist == NULL) || (num_sg <= 0))
451 return -EINVAL;
452
453 return dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dir);
454}
455EXPORT_SYMBOL_GPL(rtsx_pci_dma_map_sg);
456
457void rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
458 int num_sg, bool read)
459{
460 enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
461
462 dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dir);
463}
464EXPORT_SYMBOL_GPL(rtsx_pci_dma_unmap_sg);
465
466int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist,
467 int count, bool read, int timeout)
468{
469 struct completion trans_done;
470 struct scatterlist *sg;
471 dma_addr_t addr;
472 long timeleft;
473 unsigned long flags;
474 unsigned int len;
475 int i, err = 0;
476 u32 val;
477 u8 dir = read ? DEVICE_TO_HOST : HOST_TO_DEVICE;
478
479 if (pcr->remove_pci)
480 return -ENODEV;
481
482 if ((sglist == NULL) || (count < 1))
483 return -EINVAL;
484
485 val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE;
486 pcr->sgi = 0;
487 for_each_sg(sglist, sg, count, i) {
488 addr = sg_dma_address(sg);
489 len = sg_dma_len(sg);
490 rtsx_pci_add_sg_tbl(pcr, addr, len, i == count - 1);
491 }
492
493 spin_lock_irqsave(&pcr->lock, flags);
494
495 pcr->done = &trans_done;
496 pcr->trans_result = TRANS_NOT_READY;
497 init_completion(&trans_done);
498 rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr);
499 rtsx_pci_writel(pcr, RTSX_HDBCTLR, val);
500
501 spin_unlock_irqrestore(&pcr->lock, flags);
502
503 timeleft = wait_for_completion_interruptible_timeout(
504 &trans_done, msecs_to_jiffies(timeout));
505 if (timeleft <= 0) {
506 pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
507 err = -ETIMEDOUT;
508 goto out;
509 }
510
511 spin_lock_irqsave(&pcr->lock, flags);
512 if (pcr->trans_result == TRANS_RESULT_FAIL) {
513 err = -EILSEQ;
514 if (pcr->dma_error_count < RTS_MAX_TIMES_FREQ_REDUCTION)
515 pcr->dma_error_count++;
516 }
517
518 else if (pcr->trans_result == TRANS_NO_DEVICE)
519 err = -ENODEV;
520 spin_unlock_irqrestore(&pcr->lock, flags);
521
522out:
523 spin_lock_irqsave(&pcr->lock, flags);
524 pcr->done = NULL;
525 spin_unlock_irqrestore(&pcr->lock, flags);
526
527 if ((err < 0) && (err != -ENODEV))
528 rtsx_pci_stop_cmd(pcr);
529
530 if (pcr->finish_me)
531 complete(pcr->finish_me);
532
533 return err;
534}
535EXPORT_SYMBOL_GPL(rtsx_pci_dma_transfer);
536
537int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
538{
539 int err;
540 int i, j;
541 u16 reg;
542 u8 *ptr;
543
544 if (buf_len > 512)
545 buf_len = 512;
546
547 ptr = buf;
548 reg = PPBUF_BASE2;
549 for (i = 0; i < buf_len / 256; i++) {
550 rtsx_pci_init_cmd(pcr);
551
552 for (j = 0; j < 256; j++)
553 rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
554
555 err = rtsx_pci_send_cmd(pcr, 250);
556 if (err < 0)
557 return err;
558
559 memcpy(ptr, rtsx_pci_get_cmd_data(pcr), 256);
560 ptr += 256;
561 }
562
563 if (buf_len % 256) {
564 rtsx_pci_init_cmd(pcr);
565
566 for (j = 0; j < buf_len % 256; j++)
567 rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
568
569 err = rtsx_pci_send_cmd(pcr, 250);
570 if (err < 0)
571 return err;
572 }
573
574 memcpy(ptr, rtsx_pci_get_cmd_data(pcr), buf_len % 256);
575
576 return 0;
577}
578EXPORT_SYMBOL_GPL(rtsx_pci_read_ppbuf);
579
580int rtsx_pci_write_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
581{
582 int err;
583 int i, j;
584 u16 reg;
585 u8 *ptr;
586
587 if (buf_len > 512)
588 buf_len = 512;
589
590 ptr = buf;
591 reg = PPBUF_BASE2;
592 for (i = 0; i < buf_len / 256; i++) {
593 rtsx_pci_init_cmd(pcr);
594
595 for (j = 0; j < 256; j++) {
596 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
597 reg++, 0xFF, *ptr);
598 ptr++;
599 }
600
601 err = rtsx_pci_send_cmd(pcr, 250);
602 if (err < 0)
603 return err;
604 }
605
606 if (buf_len % 256) {
607 rtsx_pci_init_cmd(pcr);
608
609 for (j = 0; j < buf_len % 256; j++) {
610 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
611 reg++, 0xFF, *ptr);
612 ptr++;
613 }
614
615 err = rtsx_pci_send_cmd(pcr, 250);
616 if (err < 0)
617 return err;
618 }
619
620 return 0;
621}
622EXPORT_SYMBOL_GPL(rtsx_pci_write_ppbuf);
623
624static int rtsx_pci_set_pull_ctl(struct rtsx_pcr *pcr, const u32 *tbl)
625{
626 rtsx_pci_init_cmd(pcr);
627
628 while (*tbl & 0xFFFF0000) {
629 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
630 (u16)(*tbl >> 16), 0xFF, (u8)(*tbl));
631 tbl++;
632 }
633
634 return rtsx_pci_send_cmd(pcr, 100);
635}
636
637int rtsx_pci_card_pull_ctl_enable(struct rtsx_pcr *pcr, int card)
638{
639 const u32 *tbl;
640
641 if (card == RTSX_SD_CARD)
642 tbl = pcr->sd_pull_ctl_enable_tbl;
643 else if (card == RTSX_MS_CARD)
644 tbl = pcr->ms_pull_ctl_enable_tbl;
645 else
646 return -EINVAL;
647
648 return rtsx_pci_set_pull_ctl(pcr, tbl);
649}
650EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_enable);
651
652int rtsx_pci_card_pull_ctl_disable(struct rtsx_pcr *pcr, int card)
653{
654 const u32 *tbl;
655
656 if (card == RTSX_SD_CARD)
657 tbl = pcr->sd_pull_ctl_disable_tbl;
658 else if (card == RTSX_MS_CARD)
659 tbl = pcr->ms_pull_ctl_disable_tbl;
660 else
661 return -EINVAL;
662
663 return rtsx_pci_set_pull_ctl(pcr, tbl);
664}
665EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_disable);
666
667static void rtsx_pci_enable_bus_int(struct rtsx_pcr *pcr)
668{
669 struct rtsx_hw_param *hw_param = &pcr->hw_param;
670
671 pcr->bier = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN | SD_INT_EN
672 | hw_param->interrupt_en;
673
674 if (pcr->num_slots > 1)
675 pcr->bier |= MS_INT_EN;
676
677 /* Enable Bus Interrupt */
678 rtsx_pci_writel(pcr, RTSX_BIER, pcr->bier);
679
680 pcr_dbg(pcr, "RTSX_BIER: 0x%08x\n", pcr->bier);
681}
682
683static inline u8 double_ssc_depth(u8 depth)
684{
685 return ((depth > 1) ? (depth - 1) : depth);
686}
687
688static u8 revise_ssc_depth(u8 ssc_depth, u8 div)
689{
690 if (div > CLK_DIV_1) {
691 if (ssc_depth > (div - 1))
692 ssc_depth -= (div - 1);
693 else
694 ssc_depth = SSC_DEPTH_4M;
695 }
696
697 return ssc_depth;
698}
699
700int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
701 u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
702{
703 int err, clk;
704 u8 n, clk_divider, mcu_cnt, div;
705 static const u8 depth[] = {
706 [RTSX_SSC_DEPTH_4M] = SSC_DEPTH_4M,
707 [RTSX_SSC_DEPTH_2M] = SSC_DEPTH_2M,
708 [RTSX_SSC_DEPTH_1M] = SSC_DEPTH_1M,
709 [RTSX_SSC_DEPTH_500K] = SSC_DEPTH_500K,
710 [RTSX_SSC_DEPTH_250K] = SSC_DEPTH_250K,
711 };
712
713 if (PCI_PID(pcr) == PID_5261)
714 return rts5261_pci_switch_clock(pcr, card_clock,
715 ssc_depth, initial_mode, double_clk, vpclk);
716 if (PCI_PID(pcr) == PID_5228)
717 return rts5228_pci_switch_clock(pcr, card_clock,
718 ssc_depth, initial_mode, double_clk, vpclk);
719 if (PCI_PID(pcr) == PID_5264)
720 return rts5264_pci_switch_clock(pcr, card_clock,
721 ssc_depth, initial_mode, double_clk, vpclk);
722
723 if (initial_mode) {
724 /* Use a clock of around 250 kHz in the initial stage */
725 clk_divider = SD_CLK_DIVIDE_128;
726 card_clock = 30000000;
727 } else {
728 clk_divider = SD_CLK_DIVIDE_0;
729 }
730 err = rtsx_pci_write_register(pcr, SD_CFG1,
731 SD_CLK_DIVIDE_MASK, clk_divider);
732 if (err < 0)
733 return err;
734
735 /* Reduce card clock by 20MHz each time a DMA transfer error occurs */
736 if (card_clock == UHS_SDR104_MAX_DTR &&
737 pcr->dma_error_count &&
738 PCI_PID(pcr) == RTS5227_DEVICE_ID)
739 card_clock = UHS_SDR104_MAX_DTR -
740 (pcr->dma_error_count * 20000000);
741
742 card_clock /= 1000000;
743 pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock);
744
745 clk = card_clock;
746 if (!initial_mode && double_clk)
747 clk = card_clock * 2;
748 pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n",
749 clk, pcr->cur_clock);
750
751 if (clk == pcr->cur_clock)
752 return 0;
753
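	/* With the default mapping, SSC divider N yields an output clock of (N + 2) MHz */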
754 if (pcr->ops->conv_clk_and_div_n)
755 n = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
756 else
757 n = (u8)(clk - 2);
758 if ((clk <= 2) || (n > MAX_DIV_N_PCR))
759 return -EINVAL;
760
761 mcu_cnt = (u8)(125/clk + 3);
762 if (mcu_cnt > 15)
763 mcu_cnt = 15;
764
765 /* Make sure that the SSC clock div_n is not less than MIN_DIV_N_PCR */
766 div = CLK_DIV_1;
767 while ((n < MIN_DIV_N_PCR) && (div < CLK_DIV_8)) {
768 if (pcr->ops->conv_clk_and_div_n) {
769 int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
770 DIV_N_TO_CLK) * 2;
771 n = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk,
772 CLK_TO_DIV_N);
773 } else {
774 n = (n + 2) * 2 - 2;
775 }
776 div++;
777 }
778 pcr_dbg(pcr, "n = %d, div = %d\n", n, div);
779
780 ssc_depth = depth[ssc_depth];
781 if (double_clk)
782 ssc_depth = double_ssc_depth(ssc_depth);
783
784 ssc_depth = revise_ssc_depth(ssc_depth, div);
785 pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth);
786
787 rtsx_pci_init_cmd(pcr);
788 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
789 CLK_LOW_FREQ, CLK_LOW_FREQ);
790 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV,
791 0xFF, (div << 4) | mcu_cnt);
792 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
793 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2,
794 SSC_DEPTH_MASK, ssc_depth);
795 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
796 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
797 if (vpclk) {
798 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
799 PHASE_NOT_RESET, 0);
800 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
801 PHASE_NOT_RESET, PHASE_NOT_RESET);
802 }
803
804 err = rtsx_pci_send_cmd(pcr, 2000);
805 if (err < 0)
806 return err;
807
808 /* Wait for the SSC clock to become stable */
809 udelay(SSC_CLOCK_STABLE_WAIT);
810 err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
811 if (err < 0)
812 return err;
813
814 pcr->cur_clock = clk;
815 return 0;
816}
817EXPORT_SYMBOL_GPL(rtsx_pci_switch_clock);
818
819int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card)
820{
821 if (pcr->ops->card_power_on)
822 return pcr->ops->card_power_on(pcr, card);
823
824 return 0;
825}
826EXPORT_SYMBOL_GPL(rtsx_pci_card_power_on);
827
828int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card)
829{
830 if (pcr->ops->card_power_off)
831 return pcr->ops->card_power_off(pcr, card);
832
833 return 0;
834}
835EXPORT_SYMBOL_GPL(rtsx_pci_card_power_off);
836
837int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card)
838{
839 static const unsigned int cd_mask[] = {
840 [RTSX_SD_CARD] = SD_EXIST,
841 [RTSX_MS_CARD] = MS_EXIST
842 };
843
844 if (!(pcr->flags & PCR_MS_PMOS)) {
845 /* When a single PMOS is shared, accessing the card is not
846 * permitted if the card currently present is not the designated one.
847 */
848 if (pcr->card_exist & (~cd_mask[card]))
849 return -EIO;
850 }
851
852 return 0;
853}
854EXPORT_SYMBOL_GPL(rtsx_pci_card_exclusive_check);
855
856int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
857{
858 if (pcr->ops->switch_output_voltage)
859 return pcr->ops->switch_output_voltage(pcr, voltage);
860
861 return 0;
862}
863EXPORT_SYMBOL_GPL(rtsx_pci_switch_output_voltage);
864
865unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr)
866{
867 unsigned int val;
868
869 val = rtsx_pci_readl(pcr, RTSX_BIPR);
870 if (pcr->ops->cd_deglitch)
871 val = pcr->ops->cd_deglitch(pcr);
872
873 return val;
874}
875EXPORT_SYMBOL_GPL(rtsx_pci_card_exist);
876
877void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr)
878{
879 struct completion finish;
880
881 pcr->finish_me = &finish;
882 init_completion(&finish);
883
884 if (pcr->done)
885 complete(pcr->done);
886
887 if (!pcr->remove_pci)
888 rtsx_pci_stop_cmd(pcr);
889
890 wait_for_completion_interruptible_timeout(&finish,
891 msecs_to_jiffies(2));
892 pcr->finish_me = NULL;
893}
894EXPORT_SYMBOL_GPL(rtsx_pci_complete_unfinished_transfer);
895
896static void rtsx_pci_card_detect(struct work_struct *work)
897{
898 struct delayed_work *dwork;
899 struct rtsx_pcr *pcr;
900 unsigned long flags;
901 unsigned int card_detect = 0, card_inserted, card_removed;
902 u32 irq_status;
903
904 dwork = to_delayed_work(work);
905 pcr = container_of(dwork, struct rtsx_pcr, carddet_work);
906
907 pcr_dbg(pcr, "--> %s\n", __func__);
908
909 mutex_lock(&pcr->pcr_mutex);
910 spin_lock_irqsave(&pcr->lock, flags);
911
912 irq_status = rtsx_pci_readl(pcr, RTSX_BIPR);
913 pcr_dbg(pcr, "irq_status: 0x%08x\n", irq_status);
914
915 irq_status &= CARD_EXIST;
916 card_inserted = pcr->card_inserted & irq_status;
917 card_removed = pcr->card_removed;
918 pcr->card_inserted = 0;
919 pcr->card_removed = 0;
920
921 spin_unlock_irqrestore(&pcr->lock, flags);
922
923 if (card_inserted || card_removed) {
924 pcr_dbg(pcr, "card_inserted: 0x%x, card_removed: 0x%x\n",
925 card_inserted, card_removed);
926
927 if (pcr->ops->cd_deglitch)
928 card_inserted = pcr->ops->cd_deglitch(pcr);
929
930 card_detect = card_inserted | card_removed;
931
932 pcr->card_exist |= card_inserted;
933 pcr->card_exist &= ~card_removed;
934 }
935
936 mutex_unlock(&pcr->pcr_mutex);
937
938 if ((card_detect & SD_EXIST) && pcr->slots[RTSX_SD_CARD].card_event)
939 pcr->slots[RTSX_SD_CARD].card_event(
940 pcr->slots[RTSX_SD_CARD].p_dev);
941 if ((card_detect & MS_EXIST) && pcr->slots[RTSX_MS_CARD].card_event)
942 pcr->slots[RTSX_MS_CARD].card_event(
943 pcr->slots[RTSX_MS_CARD].p_dev);
944}
945
946static void rtsx_pci_process_ocp(struct rtsx_pcr *pcr)
947{
948 if (pcr->ops->process_ocp) {
949 pcr->ops->process_ocp(pcr);
950 } else {
951 if (!pcr->option.ocp_en)
952 return;
953 rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
954 if (pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
955 rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
956 rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
957 rtsx_pci_clear_ocpstat(pcr);
958 pcr->ocp_stat = 0;
959 }
960 }
961}
962
963static int rtsx_pci_process_ocp_interrupt(struct rtsx_pcr *pcr)
964{
965 if (pcr->option.ocp_en)
966 rtsx_pci_process_ocp(pcr);
967
968 return 0;
969}
970
971static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
972{
973 struct rtsx_pcr *pcr = dev_id;
974 u32 int_reg;
975
976 if (!pcr)
977 return IRQ_NONE;
978
979 spin_lock(&pcr->lock);
980
981 int_reg = rtsx_pci_readl(pcr, RTSX_BIPR);
982 /* Clear interrupt flag */
983 rtsx_pci_writel(pcr, RTSX_BIPR, int_reg);
984 if ((int_reg & pcr->bier) == 0) {
985 spin_unlock(&pcr->lock);
986 return IRQ_NONE;
987 }
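	/* An all-ones readback typically means the device is no longer
	 * accessible on the bus (e.g. surprise removal); just treat the
	 * interrupt as handled.
	 */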
988 if (int_reg == 0xFFFFFFFF) {
989 spin_unlock(&pcr->lock);
990 return IRQ_HANDLED;
991 }
992
993 int_reg &= (pcr->bier | 0x7FFFFF);
994
995 if ((int_reg & SD_OC_INT) ||
996 ((int_reg & SD_OVP_INT) && (PCI_PID(pcr) == PID_5264)))
997 rtsx_pci_process_ocp_interrupt(pcr);
998
999 if (int_reg & SD_INT) {
1000 if (int_reg & SD_EXIST) {
1001 pcr->card_inserted |= SD_EXIST;
1002 } else {
1003 pcr->card_removed |= SD_EXIST;
1004 pcr->card_inserted &= ~SD_EXIST;
1005 if (PCI_PID(pcr) == PID_5261) {
1006 rtsx_pci_write_register(pcr, RTS5261_FW_STATUS,
1007 RTS5261_EXPRESS_LINK_FAIL_MASK, 0);
1008 pcr->extra_caps |= EXTRA_CAPS_SD_EXPRESS;
1009 }
1010 }
1011 pcr->dma_error_count = 0;
1012 }
1013
1014 if (int_reg & MS_INT) {
1015 if (int_reg & MS_EXIST) {
1016 pcr->card_inserted |= MS_EXIST;
1017 } else {
1018 pcr->card_removed |= MS_EXIST;
1019 pcr->card_inserted &= ~MS_EXIST;
1020 }
1021 }
1022
1023 if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) {
1024 if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) {
1025 pcr->trans_result = TRANS_RESULT_FAIL;
1026 if (pcr->done)
1027 complete(pcr->done);
1028 } else if (int_reg & TRANS_OK_INT) {
1029 pcr->trans_result = TRANS_RESULT_OK;
1030 if (pcr->done)
1031 complete(pcr->done);
1032 }
1033 }
1034
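	/* Defer card-detect handling; the 200 ms delay also acts as a simple
	 * debounce for insertion/removal events.
	 */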
1035 if ((pcr->card_inserted || pcr->card_removed) && !(int_reg & SD_OC_INT))
1036 schedule_delayed_work(&pcr->carddet_work,
1037 msecs_to_jiffies(200));
1038
1039 spin_unlock(&pcr->lock);
1040 return IRQ_HANDLED;
1041}
1042
1043static int rtsx_pci_acquire_irq(struct rtsx_pcr *pcr)
1044{
1045 pcr_dbg(pcr, "%s: pcr->msi_en = %d, pci->irq = %d\n",
1046 __func__, pcr->msi_en, pcr->pci->irq);
1047
1048 if (request_irq(pcr->pci->irq, rtsx_pci_isr,
1049 pcr->msi_en ? 0 : IRQF_SHARED,
1050 DRV_NAME_RTSX_PCI, pcr)) {
1051 dev_err(&(pcr->pci->dev),
1052 "rtsx_sdmmc: unable to grab IRQ %d, disabling device\n",
1053 pcr->pci->irq);
1054 return -1;
1055 }
1056
1057 pcr->irq = pcr->pci->irq;
1058 pci_intx(pcr->pci, !pcr->msi_en);
1059
1060 return 0;
1061}
1062
1063static void rtsx_base_force_power_down(struct rtsx_pcr *pcr)
1064{
1065 /* Set relink_time to 0 */
1066 rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0);
1067 rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0);
1068 rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3,
1069 RELINK_TIME_MASK, 0);
1070
1071 rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
1072 D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
1073
1074 rtsx_pci_write_register(pcr, FPDCTL, ALL_POWER_DOWN, ALL_POWER_DOWN);
1075}
1076
1077static void __maybe_unused rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state, bool runtime)
1078{
1079 if (pcr->ops->turn_off_led)
1080 pcr->ops->turn_off_led(pcr);
1081
1082 rtsx_pci_writel(pcr, RTSX_BIER, 0);
1083 pcr->bier = 0;
1084
1085 rtsx_pci_write_register(pcr, PETXCFG, 0x08, 0x08);
1086 rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, pm_state);
1087
1088 if (pcr->ops->force_power_down)
1089 pcr->ops->force_power_down(pcr, pm_state, runtime);
1090 else
1091 rtsx_base_force_power_down(pcr);
1092}
1093
1094void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr)
1095{
1096 u8 val = SD_OCP_INT_EN | SD_DETECT_EN;
1097
1098 if (pcr->ops->enable_ocp) {
1099 pcr->ops->enable_ocp(pcr);
1100 } else {
1101 rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
1102 rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
1103 }
1104
1105}
1106
1107void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr)
1108{
1109 u8 mask = SD_OCP_INT_EN | SD_DETECT_EN;
1110
1111 if (pcr->ops->disable_ocp) {
1112 pcr->ops->disable_ocp(pcr);
1113 } else {
1114 rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
1115 rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
1116 OC_POWER_DOWN);
1117 }
1118}
1119
1120void rtsx_pci_init_ocp(struct rtsx_pcr *pcr)
1121{
1122 if (pcr->ops->init_ocp) {
1123 pcr->ops->init_ocp(pcr);
1124 } else {
1125 struct rtsx_cr_option *option = &(pcr->option);
1126
1127 if (option->ocp_en) {
1128 u8 val = option->sd_800mA_ocp_thd;
1129
1130 rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
1131 rtsx_pci_write_register(pcr, REG_OCPPARA1,
1132 SD_OCP_TIME_MASK, SD_OCP_TIME_800);
1133 rtsx_pci_write_register(pcr, REG_OCPPARA2,
1134 SD_OCP_THD_MASK, val);
1135 rtsx_pci_write_register(pcr, REG_OCPGLITCH,
1136 SD_OCP_GLITCH_MASK, pcr->hw_param.ocp_glitch);
1137 rtsx_pci_enable_ocp(pcr);
1138 }
1139 }
1140}
1141
1142int rtsx_pci_get_ocpstat(struct rtsx_pcr *pcr, u8 *val)
1143{
1144 if (pcr->ops->get_ocpstat)
1145 return pcr->ops->get_ocpstat(pcr, val);
1146 else
1147 return rtsx_pci_read_register(pcr, REG_OCPSTAT, val);
1148}
1149
1150void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr)
1151{
1152 if (pcr->ops->clear_ocpstat) {
1153 pcr->ops->clear_ocpstat(pcr);
1154 } else {
1155 u8 mask = SD_OCP_INT_CLR | SD_OC_CLR;
1156 u8 val = SD_OCP_INT_CLR | SD_OC_CLR;
1157
1158 rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
1159 udelay(100);
1160 rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
1161 }
1162}
1163
1164void rtsx_pci_enable_oobs_polling(struct rtsx_pcr *pcr)
1165{
1166 u16 val;
1167
1168 if ((PCI_PID(pcr) != PID_525A) &&
1169 (PCI_PID(pcr) != PID_5260) &&
1170 (PCI_PID(pcr) != PID_5264)) {
1171 rtsx_pci_read_phy_register(pcr, 0x01, &val);
1172 val |= 1<<9;
1173 rtsx_pci_write_phy_register(pcr, 0x01, val);
1174 }
1175 rtsx_pci_write_register(pcr, REG_CFG_OOBS_OFF_TIMER, 0xFF, 0x32);
1176 rtsx_pci_write_register(pcr, REG_CFG_OOBS_ON_TIMER, 0xFF, 0x05);
1177 rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x83);
1178 rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0xDE);
1179
1180}
1181
1182void rtsx_pci_disable_oobs_polling(struct rtsx_pcr *pcr)
1183{
1184 u16 val;
1185
1186 if ((PCI_PID(pcr) != PID_525A) &&
1187 (PCI_PID(pcr) != PID_5260) &&
1188 (PCI_PID(pcr) != PID_5264)) {
1189 rtsx_pci_read_phy_register(pcr, 0x01, &val);
1190 val &= ~(1<<9);
1191 rtsx_pci_write_phy_register(pcr, 0x01, val);
1192 }
1193 rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x03);
1194 rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0x00);
1195
1196}
1197
1198int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr)
1199{
1200 rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
1201 MS_CLK_EN | SD40_CLK_EN, 0);
1202 rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
1203 rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
1204
1205 msleep(50);
1206
1207 rtsx_pci_card_pull_ctl_disable(pcr, RTSX_SD_CARD);
1208
1209 return 0;
1210}
1211
1212int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr)
1213{
1214 rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
1215 MS_CLK_EN | SD40_CLK_EN, 0);
1216
1217 rtsx_pci_card_pull_ctl_disable(pcr, RTSX_MS_CARD);
1218
1219 rtsx_pci_write_register(pcr, CARD_OE, MS_OUTPUT_EN, 0);
1220 rtsx_pci_card_power_off(pcr, RTSX_MS_CARD);
1221
1222 return 0;
1223}
1224
1225static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
1226{
1227 struct pci_dev *pdev = pcr->pci;
1228 int err;
1229
1230 if (PCI_PID(pcr) == PID_5228)
1231 rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG1, RTS5228_LDO1_SR_TIME_MASK,
1232 RTS5228_LDO1_SR_0_5);
1233
1234 rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
1235
1236 rtsx_pci_enable_bus_int(pcr);
1237
1238 /* Power on SSC */
1239 if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5264)) {
1240 /* Gating real mcu clock */
1241 err = rtsx_pci_write_register(pcr, RTS5261_FW_CFG1,
1242 RTS5261_MCU_CLOCK_GATING, 0);
1243 err = rtsx_pci_write_register(pcr, RTS5261_REG_FPDCTL,
1244 SSC_POWER_DOWN, 0);
1245 } else {
1246 err = rtsx_pci_write_register(pcr, FPDCTL, SSC_POWER_DOWN, 0);
1247 }
1248 if (err < 0)
1249 return err;
1250
1251 /* Wait for SSC power to become stable */
1252 udelay(200);
1253
1254 rtsx_disable_aspm(pcr);
1255 if (pcr->ops->optimize_phy) {
1256 err = pcr->ops->optimize_phy(pcr);
1257 if (err < 0)
1258 return err;
1259 }
1260
1261 rtsx_pci_init_cmd(pcr);
1262
1263 /* Set mcu_cnt to 7 to ensure data can be sampled properly */
1264 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV, 0x07, 0x07);
1265
1266 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, HOST_SLEEP_STATE, 0x03, 0x00);
1267 /* Disable card clock */
1268 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, 0x1E, 0);
1269 /* Reset delink mode */
1270 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x0A, 0);
1271 /* Card driving select */
1272 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DRIVE_SEL,
1273 0xFF, pcr->card_drive_sel);
1274 /* Enable SSC Clock */
1275 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1,
1276 0xFF, SSC_8X_EN | SSC_SEL_4M);
1277 if (PCI_PID(pcr) == PID_5261)
1278 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
1279 RTS5261_SSC_DEPTH_2M);
1280 else if (PCI_PID(pcr) == PID_5228)
1281 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
1282 RTS5228_SSC_DEPTH_2M);
1283 else if (is_version(pcr, 0x5264, IC_VER_A))
1284 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
1285 else if (PCI_PID(pcr) == PID_5264)
1286 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
1287 RTS5264_SSC_DEPTH_2M);
1288 else
1289 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);
1290
1291 /* Disable cd_pwr_save */
1292 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x16, 0x10);
1293 /* Clear Link Ready Interrupt */
1294 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
1295 LINK_RDY_INT, LINK_RDY_INT);
1296 /* Enlarge the estimation window for PERST# glitches
1297 * to reduce the chance of spurious card interrupts
1298 */
1299 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PERST_GLITCH_WIDTH, 0xFF, 0x80);
1300 /* Update RC oscillator to 400k
1301 * bit[0] F_HIGH: for RC oscillator, Rst_value is 1'b1
1302 * 1: 2M 0: 400k
1303 */
1304 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RCCTL, 0x01, 0x00);
1305 /* Set interrupt write clear
1306 * bit 1: U_elbi_if_rd_clr_en
1307 * 1: Enable ELBI interrupt[31:22] & [7:0] flag read clear
1308 * 0: ELBI interrupt flag[31:22] & [7:0] only can be write clear
1309 */
1310 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, NFTS_TX_CTRL, 0x02, 0);
1311
1312 err = rtsx_pci_send_cmd(pcr, 100);
1313 if (err < 0)
1314 return err;
1315
1316 switch (PCI_PID(pcr)) {
1317 case PID_5250:
1318 case PID_524A:
1319 case PID_525A:
1320 case PID_5260:
1321 case PID_5261:
1322 case PID_5228:
1323 case PID_5264:
1324 rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
1325 break;
1326 default:
1327 break;
1328 }
1329
1330 /* Initialize over-current protection (OCP) */
1331 rtsx_pci_init_ocp(pcr);
1332
1333 /* Enable clk_request_n to enable clock power management */
1334 pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
1335 0, PCI_EXP_LNKCTL_CLKREQ_EN);
1336 /* Enter L1 when host tx idle */
1337 pci_write_config_byte(pdev, 0x70F, 0x5B);
1338
1339 if (pcr->ops->extra_init_hw) {
1340 err = pcr->ops->extra_init_hw(pcr);
1341 if (err < 0)
1342 return err;
1343 }
1344
1345 if (pcr->aspm_mode == ASPM_MODE_REG)
1346 rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30);
1347
1348 /* No card-detect interrupt is generated if the driver is probed with a
1349 * card already inserted, so pcr->card_exist must be initialized here.
1350 */
1351 if (pcr->ops->cd_deglitch)
1352 pcr->card_exist = pcr->ops->cd_deglitch(pcr);
1353 else
1354 pcr->card_exist = rtsx_pci_readl(pcr, RTSX_BIPR) & CARD_EXIST;
1355
1356 return 0;
1357}
1358
1359static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
1360{
1361 struct rtsx_cr_option *option = &(pcr->option);
1362 int err, l1ss;
1363 u32 lval;
1364 u16 cfg_val;
1365 u8 val;
1366
1367 spin_lock_init(&pcr->lock);
1368 mutex_init(&pcr->pcr_mutex);
1369
1370 switch (PCI_PID(pcr)) {
1371 default:
1372 case 0x5209:
1373 rts5209_init_params(pcr);
1374 break;
1375
1376 case 0x5229:
1377 rts5229_init_params(pcr);
1378 break;
1379
1380 case 0x5289:
1381 rtl8411_init_params(pcr);
1382 break;
1383
1384 case 0x5227:
1385 rts5227_init_params(pcr);
1386 break;
1387
1388 case 0x522A:
1389 rts522a_init_params(pcr);
1390 break;
1391
1392 case 0x5249:
1393 rts5249_init_params(pcr);
1394 break;
1395
1396 case 0x524A:
1397 rts524a_init_params(pcr);
1398 break;
1399
1400 case 0x525A:
1401 rts525a_init_params(pcr);
1402 break;
1403
1404 case 0x5287:
1405 rtl8411b_init_params(pcr);
1406 break;
1407
1408 case 0x5286:
1409 rtl8402_init_params(pcr);
1410 break;
1411
1412 case 0x5260:
1413 rts5260_init_params(pcr);
1414 break;
1415
1416 case 0x5261:
1417 rts5261_init_params(pcr);
1418 break;
1419
1420 case 0x5228:
1421 rts5228_init_params(pcr);
1422 break;
1423
1424 case 0x5264:
1425 rts5264_init_params(pcr);
1426 break;
1427 }
1428
1429 pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
1430 PCI_PID(pcr), pcr->ic_version);
1431
1432 pcr->slots = kcalloc(pcr->num_slots, sizeof(struct rtsx_slot),
1433 GFP_KERNEL);
1434 if (!pcr->slots)
1435 return -ENOMEM;
1436
1437 if (pcr->aspm_mode == ASPM_MODE_CFG) {
1438 pcie_capability_read_word(pcr->pci, PCI_EXP_LNKCTL, &cfg_val);
1439 if (cfg_val & PCI_EXP_LNKCTL_ASPM_L1)
1440 pcr->aspm_enabled = true;
1441 else
1442 pcr->aspm_enabled = false;
1443
1444 } else if (pcr->aspm_mode == ASPM_MODE_REG) {
1445 rtsx_pci_read_register(pcr, ASPM_FORCE_CTL, &val);
1446 if (val & FORCE_ASPM_CTL0 && val & FORCE_ASPM_CTL1)
1447 pcr->aspm_enabled = false;
1448 else
1449 pcr->aspm_enabled = true;
1450 }
1451
1452 l1ss = pci_find_ext_capability(pcr->pci, PCI_EXT_CAP_ID_L1SS);
1453 if (l1ss) {
1454 pci_read_config_dword(pcr->pci, l1ss + PCI_L1SS_CTL1, &lval);
1455
1456 if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
1457 rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
1458 else
1459 rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN);
1460
1461 if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
1462 rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
1463 else
1464 rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN);
1465
1466 if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
1467 rtsx_set_dev_flag(pcr, PM_L1_1_EN);
1468 else
1469 rtsx_clear_dev_flag(pcr, PM_L1_1_EN);
1470
1471 if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
1472 rtsx_set_dev_flag(pcr, PM_L1_2_EN);
1473 else
1474 rtsx_clear_dev_flag(pcr, PM_L1_2_EN);
1475
1476 pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &cfg_val);
1477 if (cfg_val & PCI_EXP_DEVCTL2_LTR_EN) {
1478 option->ltr_enabled = true;
1479 option->ltr_active = true;
1480 } else {
1481 option->ltr_enabled = false;
1482 }
1483
1484 if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
1485 | PM_L1_1_EN | PM_L1_2_EN))
1486 option->force_clkreq_0 = false;
1487 else
1488 option->force_clkreq_0 = true;
1489 } else {
1490 option->ltr_enabled = false;
1491 option->force_clkreq_0 = true;
1492 }
1493
1494 if (pcr->ops->fetch_vendor_settings)
1495 pcr->ops->fetch_vendor_settings(pcr);
1496
1497 pcr_dbg(pcr, "pcr->aspm_en = 0x%x\n", pcr->aspm_en);
1498 pcr_dbg(pcr, "pcr->sd30_drive_sel_1v8 = 0x%x\n",
1499 pcr->sd30_drive_sel_1v8);
1500 pcr_dbg(pcr, "pcr->sd30_drive_sel_3v3 = 0x%x\n",
1501 pcr->sd30_drive_sel_3v3);
1502 pcr_dbg(pcr, "pcr->card_drive_sel = 0x%x\n",
1503 pcr->card_drive_sel);
1504 pcr_dbg(pcr, "pcr->flags = 0x%x\n", pcr->flags);
1505
1506 pcr->state = PDEV_STAT_IDLE;
1507 err = rtsx_pci_init_hw(pcr);
1508 if (err < 0) {
1509 kfree(pcr->slots);
1510 return err;
1511 }
1512
1513 return 0;
1514}
1515
1516static int rtsx_pci_probe(struct pci_dev *pcidev,
1517 const struct pci_device_id *id)
1518{
1519 struct rtsx_pcr *pcr;
1520 struct pcr_handle *handle;
1521 u32 base, len;
1522 int ret, i, bar = 0;
1523
1524 dev_dbg(&(pcidev->dev),
1525 ": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
1526 pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
1527 (int)pcidev->revision);
1528
1529 ret = dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32));
1530 if (ret < 0)
1531 return ret;
1532
1533 ret = pci_enable_device(pcidev);
1534 if (ret)
1535 return ret;
1536
1537 ret = pci_request_regions(pcidev, DRV_NAME_RTSX_PCI);
1538 if (ret)
1539 goto disable;
1540
1541 pcr = kzalloc(sizeof(*pcr), GFP_KERNEL);
1542 if (!pcr) {
1543 ret = -ENOMEM;
1544 goto release_pci;
1545 }
1546
1547 handle = kzalloc(sizeof(*handle), GFP_KERNEL);
1548 if (!handle) {
1549 ret = -ENOMEM;
1550 goto free_pcr;
1551 }
1552 handle->pcr = pcr;
1553
1554 idr_preload(GFP_KERNEL);
1555 spin_lock(&rtsx_pci_lock);
1556 ret = idr_alloc(&rtsx_pci_idr, pcr, 0, 0, GFP_NOWAIT);
1557 if (ret >= 0)
1558 pcr->id = ret;
1559 spin_unlock(&rtsx_pci_lock);
1560 idr_preload_end();
1561 if (ret < 0)
1562 goto free_handle;
1563
1564 pcr->pci = pcidev;
1565 dev_set_drvdata(&pcidev->dev, handle);
1566
1567 if ((CHK_PCI_PID(pcr, 0x525A)) || (CHK_PCI_PID(pcr, 0x5264)))
1568 bar = 1;
1569 len = pci_resource_len(pcidev, bar);
1570 base = pci_resource_start(pcidev, bar);
1571 pcr->remap_addr = ioremap(base, len);
1572 if (!pcr->remap_addr) {
1573 ret = -ENOMEM;
1574 goto free_idr;
1575 }
1576
1577 pcr->rtsx_resv_buf = dma_alloc_coherent(&(pcidev->dev),
1578 RTSX_RESV_BUF_LEN, &(pcr->rtsx_resv_buf_addr),
1579 GFP_KERNEL);
1580 if (pcr->rtsx_resv_buf == NULL) {
1581 ret = -ENXIO;
1582 goto unmap;
1583 }
1584 pcr->host_cmds_ptr = pcr->rtsx_resv_buf;
1585 pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
1586 pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
1587 pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;
1588 pcr->card_inserted = 0;
1589 pcr->card_removed = 0;
1590 INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
1591
1592 pcr->msi_en = msi_en;
1593 if (pcr->msi_en) {
1594 ret = pci_enable_msi(pcidev);
1595 if (ret)
1596 pcr->msi_en = false;
1597 }
1598
1599 ret = rtsx_pci_acquire_irq(pcr);
1600 if (ret < 0)
1601 goto disable_msi;
1602
1603 pci_set_master(pcidev);
1604 synchronize_irq(pcr->irq);
1605
1606 ret = rtsx_pci_init_chip(pcr);
1607 if (ret < 0)
1608 goto disable_irq;
1609
1610 for (i = 0; i < ARRAY_SIZE(rtsx_pcr_cells); i++) {
1611 rtsx_pcr_cells[i].platform_data = handle;
1612 rtsx_pcr_cells[i].pdata_size = sizeof(*handle);
1613 }
1614
1615
1616 ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells,
1617 ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL);
1618 if (ret < 0)
1619 goto free_slots;
1620
1621 pm_runtime_allow(&pcidev->dev);
1622 pm_runtime_put(&pcidev->dev);
1623
1624 return 0;
1625
1626free_slots:
1627 kfree(pcr->slots);
1628disable_irq:
1629 free_irq(pcr->irq, (void *)pcr);
1630disable_msi:
1631 if (pcr->msi_en)
1632 pci_disable_msi(pcr->pci);
1633 dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
1634 pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
1635unmap:
1636 iounmap(pcr->remap_addr);
1637free_idr:
1638 spin_lock(&rtsx_pci_lock);
1639 idr_remove(&rtsx_pci_idr, pcr->id);
1640 spin_unlock(&rtsx_pci_lock);
1641free_handle:
1642 kfree(handle);
1643free_pcr:
1644 kfree(pcr);
1645release_pci:
1646 pci_release_regions(pcidev);
1647disable:
1648 pci_disable_device(pcidev);
1649
1650 return ret;
1651}
1652
1653static void rtsx_pci_remove(struct pci_dev *pcidev)
1654{
1655 struct pcr_handle *handle = pci_get_drvdata(pcidev);
1656 struct rtsx_pcr *pcr = handle->pcr;
1657
1658 pcr->remove_pci = true;
1659
1660 pm_runtime_get_sync(&pcidev->dev);
1661 pm_runtime_forbid(&pcidev->dev);
1662
1663 /* Disable interrupts at the pcr level */
1664 spin_lock_irq(&pcr->lock);
1665 rtsx_pci_writel(pcr, RTSX_BIER, 0);
1666 pcr->bier = 0;
1667 spin_unlock_irq(&pcr->lock);
1668
1669 cancel_delayed_work_sync(&pcr->carddet_work);
1670
1671 mfd_remove_devices(&pcidev->dev);
1672
1673 dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
1674 pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
1675 free_irq(pcr->irq, (void *)pcr);
1676 if (pcr->msi_en)
1677 pci_disable_msi(pcr->pci);
1678 iounmap(pcr->remap_addr);
1679
1680 pci_release_regions(pcidev);
1681 pci_disable_device(pcidev);
1682
1683 spin_lock(&rtsx_pci_lock);
1684 idr_remove(&rtsx_pci_idr, pcr->id);
1685 spin_unlock(&rtsx_pci_lock);
1686
1687 kfree(pcr->slots);
1688 kfree(pcr);
1689 kfree(handle);
1690
1691 dev_dbg(&(pcidev->dev),
1692 ": Realtek PCI-E Card Reader at %s [%04x:%04x] has been removed\n",
1693 pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device);
1694}
1695
1696static int __maybe_unused rtsx_pci_suspend(struct device *dev_d)
1697{
1698 struct pci_dev *pcidev = to_pci_dev(dev_d);
1699 struct pcr_handle *handle = pci_get_drvdata(pcidev);
1700 struct rtsx_pcr *pcr = handle->pcr;
1701
1702 dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1703
1704 cancel_delayed_work_sync(&pcr->carddet_work);
1705
1706 mutex_lock(&pcr->pcr_mutex);
1707
1708 rtsx_pci_power_off(pcr, HOST_ENTER_S3, false);
1709
1710 mutex_unlock(&pcr->pcr_mutex);
1711 return 0;
1712}
1713
1714static int __maybe_unused rtsx_pci_resume(struct device *dev_d)
1715{
1716 struct pci_dev *pcidev = to_pci_dev(dev_d);
1717 struct pcr_handle *handle = pci_get_drvdata(pcidev);
1718 struct rtsx_pcr *pcr = handle->pcr;
1719 int ret = 0;
1720
1721 dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1722
1723 mutex_lock(&pcr->pcr_mutex);
1724
1725 ret = rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
1726 if (ret)
1727 goto out;
1728
1729 ret = rtsx_pci_init_hw(pcr);
1730 if (ret)
1731 goto out;
1732
1733out:
1734 mutex_unlock(&pcr->pcr_mutex);
1735 return ret;
1736}
1737
1738#ifdef CONFIG_PM
1739
1740static void rtsx_enable_aspm(struct rtsx_pcr *pcr)
1741{
1742 if (pcr->ops->set_aspm)
1743 pcr->ops->set_aspm(pcr, true);
1744 else
1745 rtsx_comm_set_aspm(pcr, true);
1746}
1747
1748static void rtsx_comm_pm_power_saving(struct rtsx_pcr *pcr)
1749{
1750 struct rtsx_cr_option *option = &pcr->option;
1751
1752 if (option->ltr_enabled) {
1753 u32 latency = option->ltr_l1off_latency;
1754
1755 if (rtsx_check_dev_flag(pcr, L1_SNOOZE_TEST_EN))
1756 mdelay(option->l1_snooze_delay);
1757
1758 rtsx_set_ltr_latency(pcr, latency);
1759 }
1760
1761 if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
1762 rtsx_set_l1off_sub_cfg_d0(pcr, 0);
1763
1764 rtsx_enable_aspm(pcr);
1765}
1766
1767static void rtsx_pm_power_saving(struct rtsx_pcr *pcr)
1768{
1769 rtsx_comm_pm_power_saving(pcr);
1770}
1771
1772static void rtsx_pci_shutdown(struct pci_dev *pcidev)
1773{
1774 struct pcr_handle *handle = pci_get_drvdata(pcidev);
1775 struct rtsx_pcr *pcr = handle->pcr;
1776
1777 dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1778
1779 rtsx_pci_power_off(pcr, HOST_ENTER_S1, false);
1780
1781 pci_disable_device(pcidev);
1782 free_irq(pcr->irq, (void *)pcr);
1783 if (pcr->msi_en)
1784 pci_disable_msi(pcr->pci);
1785}
1786
1787static int rtsx_pci_runtime_idle(struct device *device)
1788{
1789 struct pci_dev *pcidev = to_pci_dev(device);
1790 struct pcr_handle *handle = pci_get_drvdata(pcidev);
1791 struct rtsx_pcr *pcr = handle->pcr;
1792
1793 dev_dbg(device, "--> %s\n", __func__);
1794
1795 mutex_lock(&pcr->pcr_mutex);
1796
1797 pcr->state = PDEV_STAT_IDLE;
1798
1799 if (pcr->ops->disable_auto_blink)
1800 pcr->ops->disable_auto_blink(pcr);
1801 if (pcr->ops->turn_off_led)
1802 pcr->ops->turn_off_led(pcr);
1803
1804 rtsx_pm_power_saving(pcr);
1805
1806 mutex_unlock(&pcr->pcr_mutex);
1807
1808 if (pcr->rtd3_en)
1809 pm_schedule_suspend(device, 10000);
1810
1811 return -EBUSY;
1812}
1813
1814static int rtsx_pci_runtime_suspend(struct device *device)
1815{
1816 struct pci_dev *pcidev = to_pci_dev(device);
1817 struct pcr_handle *handle = pci_get_drvdata(pcidev);
1818 struct rtsx_pcr *pcr = handle->pcr;
1819
1820 dev_dbg(device, "--> %s\n", __func__);
1821
1822 cancel_delayed_work_sync(&pcr->carddet_work);
1823
1824 mutex_lock(&pcr->pcr_mutex);
1825 rtsx_pci_power_off(pcr, HOST_ENTER_S3, true);
1826
1827 mutex_unlock(&pcr->pcr_mutex);
1828
1829 return 0;
1830}
1831
1832static int rtsx_pci_runtime_resume(struct device *device)
1833{
1834 struct pci_dev *pcidev = to_pci_dev(device);
1835 struct pcr_handle *handle = pci_get_drvdata(pcidev);
1836 struct rtsx_pcr *pcr = handle->pcr;
1837
1838 dev_dbg(device, "--> %s\n", __func__);
1839
1840 mutex_lock(&pcr->pcr_mutex);
1841
1842 rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
1843
1844 rtsx_pci_init_hw(pcr);
1845
1846 if (pcr->slots[RTSX_SD_CARD].p_dev != NULL) {
1847 pcr->slots[RTSX_SD_CARD].card_event(
1848 pcr->slots[RTSX_SD_CARD].p_dev);
1849 }
1850
1851 mutex_unlock(&pcr->pcr_mutex);
1852 return 0;
1853}
1854
1855#else /* CONFIG_PM */
1856
1857#define rtsx_pci_shutdown NULL
1858#define rtsx_pci_runtime_suspend NULL
1859#define rtsx_pci_runtime_resume NULL
1860
1861#endif /* CONFIG_PM */
1862
1863static const struct dev_pm_ops rtsx_pci_pm_ops = {
1864 SET_SYSTEM_SLEEP_PM_OPS(rtsx_pci_suspend, rtsx_pci_resume)
1865 SET_RUNTIME_PM_OPS(rtsx_pci_runtime_suspend, rtsx_pci_runtime_resume, rtsx_pci_runtime_idle)
1866};
1867
1868static struct pci_driver rtsx_pci_driver = {
1869 .name = DRV_NAME_RTSX_PCI,
1870 .id_table = rtsx_pci_ids,
1871 .probe = rtsx_pci_probe,
1872 .remove = rtsx_pci_remove,
1873 .driver.pm = &rtsx_pci_pm_ops,
1874 .shutdown = rtsx_pci_shutdown,
1875};
1876module_pci_driver(rtsx_pci_driver);
1877
1878MODULE_LICENSE("GPL");
1879MODULE_AUTHOR("Wei WANG <wei_wang@realsil.com.cn>");
1880MODULE_DESCRIPTION("Realtek PCI-E Card Reader Driver");
1// SPDX-License-Identifier: GPL-2.0-or-later
2/* Driver for Realtek PCI-Express card reader
3 *
4 * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
5 *
6 * Author:
7 * Wei WANG <wei_wang@realsil.com.cn>
8 */
9
10#include <linux/pci.h>
11#include <linux/module.h>
12#include <linux/slab.h>
13#include <linux/dma-mapping.h>
14#include <linux/highmem.h>
15#include <linux/interrupt.h>
16#include <linux/delay.h>
17#include <linux/idr.h>
18#include <linux/platform_device.h>
19#include <linux/mfd/core.h>
20#include <linux/rtsx_pci.h>
21#include <linux/mmc/card.h>
22#include <asm/unaligned.h>
23
24#include "rtsx_pcr.h"
25#include "rts5261.h"
26#include "rts5228.h"
27
28static bool msi_en = true;
29module_param(msi_en, bool, S_IRUGO | S_IWUSR);
30MODULE_PARM_DESC(msi_en, "Enable MSI");
31
32static DEFINE_IDR(rtsx_pci_idr);
33static DEFINE_SPINLOCK(rtsx_pci_lock);
34
35static struct mfd_cell rtsx_pcr_cells[] = {
36 [RTSX_SD_CARD] = {
37 .name = DRV_NAME_RTSX_PCI_SDMMC,
38 },
39};
40
41static const struct pci_device_id rtsx_pci_ids[] = {
42 { PCI_DEVICE(0x10EC, 0x5209), PCI_CLASS_OTHERS << 16, 0xFF0000 },
43 { PCI_DEVICE(0x10EC, 0x5229), PCI_CLASS_OTHERS << 16, 0xFF0000 },
44 { PCI_DEVICE(0x10EC, 0x5289), PCI_CLASS_OTHERS << 16, 0xFF0000 },
45 { PCI_DEVICE(0x10EC, 0x5227), PCI_CLASS_OTHERS << 16, 0xFF0000 },
46 { PCI_DEVICE(0x10EC, 0x522A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
47 { PCI_DEVICE(0x10EC, 0x5249), PCI_CLASS_OTHERS << 16, 0xFF0000 },
48 { PCI_DEVICE(0x10EC, 0x5287), PCI_CLASS_OTHERS << 16, 0xFF0000 },
49 { PCI_DEVICE(0x10EC, 0x5286), PCI_CLASS_OTHERS << 16, 0xFF0000 },
50 { PCI_DEVICE(0x10EC, 0x524A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
51 { PCI_DEVICE(0x10EC, 0x525A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
52 { PCI_DEVICE(0x10EC, 0x5260), PCI_CLASS_OTHERS << 16, 0xFF0000 },
53 { PCI_DEVICE(0x10EC, 0x5261), PCI_CLASS_OTHERS << 16, 0xFF0000 },
54 { PCI_DEVICE(0x10EC, 0x5228), PCI_CLASS_OTHERS << 16, 0xFF0000 },
55 { 0, }
56};
57
58MODULE_DEVICE_TABLE(pci, rtsx_pci_ids);
59
60static inline void rtsx_pci_disable_aspm(struct rtsx_pcr *pcr)
61{
62 pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
63 PCI_EXP_LNKCTL_ASPMC, 0);
64}
65
66static int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
67{
68 rtsx_pci_write_register(pcr, MSGTXDATA0,
69 MASK_8_BIT_DEF, (u8) (latency & 0xFF));
70 rtsx_pci_write_register(pcr, MSGTXDATA1,
71 MASK_8_BIT_DEF, (u8)((latency >> 8) & 0xFF));
72 rtsx_pci_write_register(pcr, MSGTXDATA2,
73 MASK_8_BIT_DEF, (u8)((latency >> 16) & 0xFF));
74 rtsx_pci_write_register(pcr, MSGTXDATA3,
75 MASK_8_BIT_DEF, (u8)((latency >> 24) & 0xFF));
76 rtsx_pci_write_register(pcr, LTR_CTL, LTR_TX_EN_MASK |
77 LTR_LATENCY_MODE_MASK, LTR_TX_EN_1 | LTR_LATENCY_MODE_SW);
78
79 return 0;
80}
81
82int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
83{
84 return rtsx_comm_set_ltr_latency(pcr, latency);
85}
86
87static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable)
88{
89 if (pcr->aspm_enabled == enable)
90 return;
91
92 pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
93 PCI_EXP_LNKCTL_ASPMC,
94 enable ? pcr->aspm_en : 0);
95
96 pcr->aspm_enabled = enable;
97}
98
99static void rtsx_disable_aspm(struct rtsx_pcr *pcr)
100{
101 if (pcr->ops->set_aspm)
102 pcr->ops->set_aspm(pcr, false);
103 else
104 rtsx_comm_set_aspm(pcr, false);
105}
106
107int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val)
108{
109 rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, val);
110
111 return 0;
112}
113
114static void rtsx_set_l1off_sub_cfg_d0(struct rtsx_pcr *pcr, int active)
115{
116 if (pcr->ops->set_l1off_cfg_sub_d0)
117 pcr->ops->set_l1off_cfg_sub_d0(pcr, active);
118}
119
120static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr)
121{
122 struct rtsx_cr_option *option = &pcr->option;
123
124 rtsx_disable_aspm(pcr);
125
126 /* Fixes DMA transfer timout issue after disabling ASPM on RTS5260 */
127 msleep(1);
128
129 if (option->ltr_enabled)
130 rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
131
132 if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
133 rtsx_set_l1off_sub_cfg_d0(pcr, 1);
134}
135
136static void rtsx_pm_full_on(struct rtsx_pcr *pcr)
137{
138 rtsx_comm_pm_full_on(pcr);
139}
140
141void rtsx_pci_start_run(struct rtsx_pcr *pcr)
142{
143 /* If pci device removed, don't queue idle work any more */
144 if (pcr->remove_pci)
145 return;
146
147 if (pcr->state != PDEV_STAT_RUN) {
148 pcr->state = PDEV_STAT_RUN;
149 if (pcr->ops->enable_auto_blink)
150 pcr->ops->enable_auto_blink(pcr);
151 rtsx_pm_full_on(pcr);
152 }
153
154 mod_delayed_work(system_wq, &pcr->idle_work, msecs_to_jiffies(200));
155}
156EXPORT_SYMBOL_GPL(rtsx_pci_start_run);
157
158int rtsx_pci_write_register(struct rtsx_pcr *pcr, u16 addr, u8 mask, u8 data)
159{
160 int i;
161 u32 val = HAIMR_WRITE_START;
162
163 val |= (u32)(addr & 0x3FFF) << 16;
164 val |= (u32)mask << 8;
165 val |= (u32)data;
166
167 rtsx_pci_writel(pcr, RTSX_HAIMR, val);
168
169 for (i = 0; i < MAX_RW_REG_CNT; i++) {
170 val = rtsx_pci_readl(pcr, RTSX_HAIMR);
171 if ((val & HAIMR_TRANS_END) == 0) {
172 if (data != (u8)val)
173 return -EIO;
174 return 0;
175 }
176 }
177
178 return -ETIMEDOUT;
179}
180EXPORT_SYMBOL_GPL(rtsx_pci_write_register);
181
182int rtsx_pci_read_register(struct rtsx_pcr *pcr, u16 addr, u8 *data)
183{
184 u32 val = HAIMR_READ_START;
185 int i;
186
187 val |= (u32)(addr & 0x3FFF) << 16;
188 rtsx_pci_writel(pcr, RTSX_HAIMR, val);
189
190 for (i = 0; i < MAX_RW_REG_CNT; i++) {
191 val = rtsx_pci_readl(pcr, RTSX_HAIMR);
192 if ((val & HAIMR_TRANS_END) == 0)
193 break;
194 }
195
196 if (i >= MAX_RW_REG_CNT)
197 return -ETIMEDOUT;
198
199 if (data)
200 *data = (u8)(val & 0xFF);
201
202 return 0;
203}
204EXPORT_SYMBOL_GPL(rtsx_pci_read_register);
205
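/*
 * Generic PHY register write: the 16-bit value is loaded into
 * PHYDATA0/PHYDATA1, the target address into PHYADDR, and writing 0x81
 * to PHYRWCTL starts the transfer.  PHYRWCTL is then polled until bit
 * 0x80 clears; if it never does, -ETIMEDOUT is returned.  Chips with
 * their own PHY access method override this through pcr->ops->write_phy
 * (see rtsx_pci_write_phy_register() below).
 */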
206int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
207{
208 int err, i, finished = 0;
209 u8 tmp;
210
211 rtsx_pci_write_register(pcr, PHYDATA0, 0xFF, (u8)val);
212 rtsx_pci_write_register(pcr, PHYDATA1, 0xFF, (u8)(val >> 8));
213 rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr);
214 rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x81);
215
216 for (i = 0; i < 100000; i++) {
217 err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
218 if (err < 0)
219 return err;
220
221 if (!(tmp & 0x80)) {
222 finished = 1;
223 break;
224 }
225 }
226
227 if (!finished)
228 return -ETIMEDOUT;
229
230 return 0;
231}
232
233int rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
234{
235 if (pcr->ops->write_phy)
236 return pcr->ops->write_phy(pcr, addr, val);
237
238 return __rtsx_pci_write_phy_register(pcr, addr, val);
239}
240EXPORT_SYMBOL_GPL(rtsx_pci_write_phy_register);
241
242int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
243{
244 int err, i, finished = 0;
245 u16 data;
246 u8 tmp, val1, val2;
247
248 rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr);
249 rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x80);
250
251 for (i = 0; i < 100000; i++) {
252 err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
253 if (err < 0)
254 return err;
255
256 if (!(tmp & 0x80)) {
257 finished = 1;
258 break;
259 }
260 }
261
262 if (!finished)
263 return -ETIMEDOUT;
264
265 rtsx_pci_read_register(pcr, PHYDATA0, &val1);
266 rtsx_pci_read_register(pcr, PHYDATA1, &val2);
267 data = val1 | (val2 << 8);
268
269 if (val)
270 *val = data;
271
272 return 0;
273}
274
275int rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
276{
277 if (pcr->ops->read_phy)
278 return pcr->ops->read_phy(pcr, addr, val);
279
280 return __rtsx_pci_read_phy_register(pcr, addr, val);
281}
282EXPORT_SYMBOL_GPL(rtsx_pci_read_phy_register);
283
284void rtsx_pci_stop_cmd(struct rtsx_pcr *pcr)
285{
286 if (pcr->ops->stop_cmd)
287 return pcr->ops->stop_cmd(pcr);
288
289 rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
290 rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
291
292 rtsx_pci_write_register(pcr, DMACTL, 0x80, 0x80);
293 rtsx_pci_write_register(pcr, RBCTL, 0x80, 0x80);
294}
295EXPORT_SYMBOL_GPL(rtsx_pci_stop_cmd);
296
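/*
 * Append one entry to the host command buffer.  Each entry is a
 * little-endian 32-bit word: bits 31:30 hold the command type, bits
 * 29:16 the register address, bits 15:8 the mask and bits 7:0 the data.
 * Entries are added at index pcr->ci under pcr->lock; once the buffer
 * (HOST_CMDS_BUF_LEN bytes) is full, further entries are silently
 * dropped.
 */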
297void rtsx_pci_add_cmd(struct rtsx_pcr *pcr,
298 u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
299{
300 unsigned long flags;
301 u32 val = 0;
302 u32 *ptr = (u32 *)(pcr->host_cmds_ptr);
303
304 val |= (u32)(cmd_type & 0x03) << 30;
305 val |= (u32)(reg_addr & 0x3FFF) << 16;
306 val |= (u32)mask << 8;
307 val |= (u32)data;
308
309 spin_lock_irqsave(&pcr->lock, flags);
310 ptr += pcr->ci;
311 if (pcr->ci < (HOST_CMDS_BUF_LEN / 4)) {
312 put_unaligned_le32(val, ptr);
313 ptr++;
314 pcr->ci++;
315 }
316 spin_unlock_irqrestore(&pcr->lock, flags);
317}
318EXPORT_SYMBOL_GPL(rtsx_pci_add_cmd);
319
320void rtsx_pci_send_cmd_no_wait(struct rtsx_pcr *pcr)
321{
322	u32 val = 1U << 31;
323
324 rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
325
326 val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
327 /* Hardware Auto Response */
328 val |= 0x40000000;
329 rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
330}
331EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd_no_wait);
332
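/*
 * Issue the queued host commands and wait for completion.  The command
 * buffer address and byte count are programmed into RTSX_HCBAR/HCBCTLR
 * (with the hardware auto-response bit set), and the caller then sleeps
 * on a completion that the interrupt handler signals via pcr->done.
 * The result is translated from pcr->trans_result; on timeout or any
 * failure other than -ENODEV, outstanding command/DMA activity is
 * stopped with rtsx_pci_stop_cmd().
 */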
333int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout)
334{
335 struct completion trans_done;
336	u32 val = 1U << 31;
337 long timeleft;
338 unsigned long flags;
339 int err = 0;
340
341 spin_lock_irqsave(&pcr->lock, flags);
342
343 /* set up data structures for the wakeup system */
344 pcr->done = &trans_done;
345 pcr->trans_result = TRANS_NOT_READY;
346 init_completion(&trans_done);
347
348 rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
349
350 val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
351 /* Hardware Auto Response */
352 val |= 0x40000000;
353 rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
354
355 spin_unlock_irqrestore(&pcr->lock, flags);
356
357 /* Wait for TRANS_OK_INT */
358 timeleft = wait_for_completion_interruptible_timeout(
359 &trans_done, msecs_to_jiffies(timeout));
360 if (timeleft <= 0) {
361 pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
362 err = -ETIMEDOUT;
363 goto finish_send_cmd;
364 }
365
366 spin_lock_irqsave(&pcr->lock, flags);
367 if (pcr->trans_result == TRANS_RESULT_FAIL)
368 err = -EINVAL;
369 else if (pcr->trans_result == TRANS_RESULT_OK)
370 err = 0;
371 else if (pcr->trans_result == TRANS_NO_DEVICE)
372 err = -ENODEV;
373 spin_unlock_irqrestore(&pcr->lock, flags);
374
375finish_send_cmd:
376 spin_lock_irqsave(&pcr->lock, flags);
377 pcr->done = NULL;
378 spin_unlock_irqrestore(&pcr->lock, flags);
379
380 if ((err < 0) && (err != -ENODEV))
381 rtsx_pci_stop_cmd(pcr);
382
383 if (pcr->finish_me)
384 complete(pcr->finish_me);
385
386 return err;
387}
388EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd);
389
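/*
 * Append one descriptor to the host scatter/gather table.  Each entry
 * is a little-endian 64-bit word with the DMA address in the upper 32
 * bits and the length plus option flags (RTSX_SG_VALID/TRANS_DATA, and
 * RTSX_SG_END on the last entry) in the lower 32 bits.  RTS5261 and
 * RTS5228 use a wider length encoding, splitting lengths above 64 KiB
 * across two fields; other chips shift the length by 12 bits.
 */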
390static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr,
391 dma_addr_t addr, unsigned int len, int end)
392{
393 u64 *ptr = (u64 *)(pcr->host_sg_tbl_ptr) + pcr->sgi;
394 u64 val;
395 u8 option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;
396
397 pcr_dbg(pcr, "DMA addr: 0x%x, Len: 0x%x\n", (unsigned int)addr, len);
398
399 if (end)
400 option |= RTSX_SG_END;
401
402 if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5228)) {
403 if (len > 0xFFFF)
404 val = ((u64)addr << 32) | (((u64)len & 0xFFFF) << 16)
405 | (((u64)len >> 16) << 6) | option;
406 else
407 val = ((u64)addr << 32) | ((u64)len << 16) | option;
408 } else {
409 val = ((u64)addr << 32) | ((u64)len << 12) | option;
410 }
411 put_unaligned_le64(val, ptr);
412 pcr->sgi++;
413}
414
415int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
416 int num_sg, bool read, int timeout)
417{
418 int err = 0, count;
419
420 pcr_dbg(pcr, "--> %s: num_sg = %d\n", __func__, num_sg);
421 count = rtsx_pci_dma_map_sg(pcr, sglist, num_sg, read);
422 if (count < 1)
423 return -EINVAL;
424 pcr_dbg(pcr, "DMA mapping count: %d\n", count);
425
426 err = rtsx_pci_dma_transfer(pcr, sglist, count, read, timeout);
427
428 rtsx_pci_dma_unmap_sg(pcr, sglist, num_sg, read);
429
430 return err;
431}
432EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data);
433
434int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
435 int num_sg, bool read)
436{
437 enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
438
439 if (pcr->remove_pci)
440 return -EINVAL;
441
442 if ((sglist == NULL) || (num_sg <= 0))
443 return -EINVAL;
444
445 return dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dir);
446}
447EXPORT_SYMBOL_GPL(rtsx_pci_dma_map_sg);
448
449void rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
450 int num_sg, bool read)
451{
452 enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
453
454 dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dir);
455}
456EXPORT_SYMBOL_GPL(rtsx_pci_dma_unmap_sg);
457
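/*
 * Run an ADMA transfer over an already-mapped scatterlist: build the
 * S/G descriptor table, program its DMA address into RTSX_HDBAR, and
 * trigger the transfer through RTSX_HDBCTLR with the direction bit
 * derived from @read.  The caller then sleeps until the ISR reports the
 * result; TRANS_RESULT_FAIL is mapped to -EILSEQ and also bumps
 * pcr->dma_error_count, which rtsx_pci_switch_clock() uses to back the
 * card clock off on RTS5227.
 */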
458int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist,
459 int count, bool read, int timeout)
460{
461 struct completion trans_done;
462 struct scatterlist *sg;
463 dma_addr_t addr;
464 long timeleft;
465 unsigned long flags;
466 unsigned int len;
467 int i, err = 0;
468 u32 val;
469 u8 dir = read ? DEVICE_TO_HOST : HOST_TO_DEVICE;
470
471 if (pcr->remove_pci)
472 return -ENODEV;
473
474 if ((sglist == NULL) || (count < 1))
475 return -EINVAL;
476
477 val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE;
478 pcr->sgi = 0;
479 for_each_sg(sglist, sg, count, i) {
480 addr = sg_dma_address(sg);
481 len = sg_dma_len(sg);
482 rtsx_pci_add_sg_tbl(pcr, addr, len, i == count - 1);
483 }
484
485 spin_lock_irqsave(&pcr->lock, flags);
486
487 pcr->done = &trans_done;
488 pcr->trans_result = TRANS_NOT_READY;
489 init_completion(&trans_done);
490 rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr);
491 rtsx_pci_writel(pcr, RTSX_HDBCTLR, val);
492
493 spin_unlock_irqrestore(&pcr->lock, flags);
494
495 timeleft = wait_for_completion_interruptible_timeout(
496 &trans_done, msecs_to_jiffies(timeout));
497 if (timeleft <= 0) {
498 pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
499 err = -ETIMEDOUT;
500 goto out;
501 }
502
503 spin_lock_irqsave(&pcr->lock, flags);
504 if (pcr->trans_result == TRANS_RESULT_FAIL) {
505 err = -EILSEQ;
506 if (pcr->dma_error_count < RTS_MAX_TIMES_FREQ_REDUCTION)
507 pcr->dma_error_count++;
508	} else if (pcr->trans_result == TRANS_NO_DEVICE)
511 err = -ENODEV;
512 spin_unlock_irqrestore(&pcr->lock, flags);
513
514out:
515 spin_lock_irqsave(&pcr->lock, flags);
516 pcr->done = NULL;
517 spin_unlock_irqrestore(&pcr->lock, flags);
518
519 if ((err < 0) && (err != -ENODEV))
520 rtsx_pci_stop_cmd(pcr);
521
522 if (pcr->finish_me)
523 complete(pcr->finish_me);
524
525 return err;
526}
527EXPORT_SYMBOL_GPL(rtsx_pci_dma_transfer);
528
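/*
 * Read up to 512 bytes from the chip's ping-pong buffer, starting at
 * PPBUF_BASE2.  The buffer is fetched in batches of at most 256
 * register-read commands per rtsx_pci_send_cmd() round, presumably
 * because that is all the command response buffer returns at once.
 */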
529int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
530{
531 int err;
532 int i, j;
533 u16 reg;
534 u8 *ptr;
535
536 if (buf_len > 512)
537 buf_len = 512;
538
539 ptr = buf;
540 reg = PPBUF_BASE2;
541 for (i = 0; i < buf_len / 256; i++) {
542 rtsx_pci_init_cmd(pcr);
543
544 for (j = 0; j < 256; j++)
545 rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
546
547 err = rtsx_pci_send_cmd(pcr, 250);
548 if (err < 0)
549 return err;
550
551 memcpy(ptr, rtsx_pci_get_cmd_data(pcr), 256);
552 ptr += 256;
553 }
554
555 if (buf_len % 256) {
556 rtsx_pci_init_cmd(pcr);
557
558 for (j = 0; j < buf_len % 256; j++)
559 rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
560
561 err = rtsx_pci_send_cmd(pcr, 250);
562 if (err < 0)
563 return err;
564 }
565
566 memcpy(ptr, rtsx_pci_get_cmd_data(pcr), buf_len % 256);
567
568 return 0;
569}
570EXPORT_SYMBOL_GPL(rtsx_pci_read_ppbuf);
571
572int rtsx_pci_write_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
573{
574 int err;
575 int i, j;
576 u16 reg;
577 u8 *ptr;
578
579 if (buf_len > 512)
580 buf_len = 512;
581
582 ptr = buf;
583 reg = PPBUF_BASE2;
584 for (i = 0; i < buf_len / 256; i++) {
585 rtsx_pci_init_cmd(pcr);
586
587 for (j = 0; j < 256; j++) {
588 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
589 reg++, 0xFF, *ptr);
590 ptr++;
591 }
592
593 err = rtsx_pci_send_cmd(pcr, 250);
594 if (err < 0)
595 return err;
596 }
597
598 if (buf_len % 256) {
599 rtsx_pci_init_cmd(pcr);
600
601 for (j = 0; j < buf_len % 256; j++) {
602 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
603 reg++, 0xFF, *ptr);
604 ptr++;
605 }
606
607 err = rtsx_pci_send_cmd(pcr, 250);
608 if (err < 0)
609 return err;
610 }
611
612 return 0;
613}
614EXPORT_SYMBOL_GPL(rtsx_pci_write_ppbuf);
615
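/*
 * Program a pull-control table: each 32-bit entry packs a register
 * address in the upper 16 bits and the value to write in the low byte,
 * and the table is terminated by an entry whose upper 16 bits are zero.
 * All writes are queued and sent as one host command batch.
 */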
616static int rtsx_pci_set_pull_ctl(struct rtsx_pcr *pcr, const u32 *tbl)
617{
618 rtsx_pci_init_cmd(pcr);
619
620 while (*tbl & 0xFFFF0000) {
621 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
622 (u16)(*tbl >> 16), 0xFF, (u8)(*tbl));
623 tbl++;
624 }
625
626 return rtsx_pci_send_cmd(pcr, 100);
627}
628
629int rtsx_pci_card_pull_ctl_enable(struct rtsx_pcr *pcr, int card)
630{
631 const u32 *tbl;
632
633 if (card == RTSX_SD_CARD)
634 tbl = pcr->sd_pull_ctl_enable_tbl;
635 else if (card == RTSX_MS_CARD)
636 tbl = pcr->ms_pull_ctl_enable_tbl;
637 else
638 return -EINVAL;
639
640 return rtsx_pci_set_pull_ctl(pcr, tbl);
641}
642EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_enable);
643
644int rtsx_pci_card_pull_ctl_disable(struct rtsx_pcr *pcr, int card)
645{
646 const u32 *tbl;
647
648 if (card == RTSX_SD_CARD)
649 tbl = pcr->sd_pull_ctl_disable_tbl;
650 else if (card == RTSX_MS_CARD)
651 tbl = pcr->ms_pull_ctl_disable_tbl;
652 else
653 return -EINVAL;
654
655 return rtsx_pci_set_pull_ctl(pcr, tbl);
656}
657EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_disable);
658
659static void rtsx_pci_enable_bus_int(struct rtsx_pcr *pcr)
660{
661 struct rtsx_hw_param *hw_param = &pcr->hw_param;
662
663 pcr->bier = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN | SD_INT_EN
664 | hw_param->interrupt_en;
665
666 if (pcr->num_slots > 1)
667 pcr->bier |= MS_INT_EN;
668
669 /* Enable Bus Interrupt */
670 rtsx_pci_writel(pcr, RTSX_BIER, pcr->bier);
671
672 pcr_dbg(pcr, "RTSX_BIER: 0x%08x\n", pcr->bier);
673}
674
675static inline u8 double_ssc_depth(u8 depth)
676{
677 return ((depth > 1) ? (depth - 1) : depth);
678}
679
680static u8 revise_ssc_depth(u8 ssc_depth, u8 div)
681{
682 if (div > CLK_DIV_1) {
683 if (ssc_depth > (div - 1))
684 ssc_depth -= (div - 1);
685 else
686 ssc_depth = SSC_DEPTH_4M;
687 }
688
689 return ssc_depth;
690}
691
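/*
 * Switch the card clock.  RTS5261 and RTS5228 delegate to their own
 * implementations.  For the other chips the internal SSC clock is
 * clk = N + 2 (in MHz), where N comes from the chip-specific
 * conv_clk_and_div_n() hook or is simply clk - 2.  If N would fall
 * below MIN_DIV_N_PCR, N is doubled and an output divider
 * (CLK_DIV_2/4/8) is raised instead, with the SSC depth reduced
 * accordingly.  In initial mode the SD clock divider is set to 128,
 * giving roughly 250 kHz at the card.
 */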
692int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
693 u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
694{
695 int err, clk;
696 u8 n, clk_divider, mcu_cnt, div;
697 static const u8 depth[] = {
698 [RTSX_SSC_DEPTH_4M] = SSC_DEPTH_4M,
699 [RTSX_SSC_DEPTH_2M] = SSC_DEPTH_2M,
700 [RTSX_SSC_DEPTH_1M] = SSC_DEPTH_1M,
701 [RTSX_SSC_DEPTH_500K] = SSC_DEPTH_500K,
702 [RTSX_SSC_DEPTH_250K] = SSC_DEPTH_250K,
703 };
704
705 if (PCI_PID(pcr) == PID_5261)
706 return rts5261_pci_switch_clock(pcr, card_clock,
707 ssc_depth, initial_mode, double_clk, vpclk);
708 if (PCI_PID(pcr) == PID_5228)
709 return rts5228_pci_switch_clock(pcr, card_clock,
710 ssc_depth, initial_mode, double_clk, vpclk);
711
712 if (initial_mode) {
713		/* Use a low clock (around 250 kHz) in the initial stage */
714 clk_divider = SD_CLK_DIVIDE_128;
715 card_clock = 30000000;
716 } else {
717 clk_divider = SD_CLK_DIVIDE_0;
718 }
719 err = rtsx_pci_write_register(pcr, SD_CFG1,
720 SD_CLK_DIVIDE_MASK, clk_divider);
721 if (err < 0)
722 return err;
723
724 /* Reduce card clock by 20MHz each time a DMA transfer error occurs */
725 if (card_clock == UHS_SDR104_MAX_DTR &&
726 pcr->dma_error_count &&
727 PCI_PID(pcr) == RTS5227_DEVICE_ID)
728 card_clock = UHS_SDR104_MAX_DTR -
729 (pcr->dma_error_count * 20000000);
730
731 card_clock /= 1000000;
732 pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock);
733
734 clk = card_clock;
735 if (!initial_mode && double_clk)
736 clk = card_clock * 2;
737 pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n",
738 clk, pcr->cur_clock);
739
740 if (clk == pcr->cur_clock)
741 return 0;
742
743 if (pcr->ops->conv_clk_and_div_n)
744 n = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
745 else
746 n = (u8)(clk - 2);
747 if ((clk <= 2) || (n > MAX_DIV_N_PCR))
748 return -EINVAL;
749
750 mcu_cnt = (u8)(125/clk + 3);
751 if (mcu_cnt > 15)
752 mcu_cnt = 15;
753
754 /* Make sure that the SSC clock div_n is not less than MIN_DIV_N_PCR */
755 div = CLK_DIV_1;
756 while ((n < MIN_DIV_N_PCR) && (div < CLK_DIV_8)) {
757 if (pcr->ops->conv_clk_and_div_n) {
758 int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
759 DIV_N_TO_CLK) * 2;
760 n = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk,
761 CLK_TO_DIV_N);
762 } else {
763 n = (n + 2) * 2 - 2;
764 }
765 div++;
766 }
767 pcr_dbg(pcr, "n = %d, div = %d\n", n, div);
768
769 ssc_depth = depth[ssc_depth];
770 if (double_clk)
771 ssc_depth = double_ssc_depth(ssc_depth);
772
773 ssc_depth = revise_ssc_depth(ssc_depth, div);
774 pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth);
775
776 rtsx_pci_init_cmd(pcr);
777 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
778 CLK_LOW_FREQ, CLK_LOW_FREQ);
779 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV,
780 0xFF, (div << 4) | mcu_cnt);
781 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
782 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2,
783 SSC_DEPTH_MASK, ssc_depth);
784 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
785 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
786 if (vpclk) {
787 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
788 PHASE_NOT_RESET, 0);
789 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
790 PHASE_NOT_RESET, PHASE_NOT_RESET);
791 }
792
793 err = rtsx_pci_send_cmd(pcr, 2000);
794 if (err < 0)
795 return err;
796
797	/* Wait for the SSC clock to stabilize */
798 udelay(SSC_CLOCK_STABLE_WAIT);
799 err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
800 if (err < 0)
801 return err;
802
803 pcr->cur_clock = clk;
804 return 0;
805}
806EXPORT_SYMBOL_GPL(rtsx_pci_switch_clock);
807
808int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card)
809{
810 if (pcr->ops->card_power_on)
811 return pcr->ops->card_power_on(pcr, card);
812
813 return 0;
814}
815EXPORT_SYMBOL_GPL(rtsx_pci_card_power_on);
816
817int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card)
818{
819 if (pcr->ops->card_power_off)
820 return pcr->ops->card_power_off(pcr, card);
821
822 return 0;
823}
824EXPORT_SYMBOL_GPL(rtsx_pci_card_power_off);
825
826int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card)
827{
828 static const unsigned int cd_mask[] = {
829 [RTSX_SD_CARD] = SD_EXIST,
830 [RTSX_MS_CARD] = MS_EXIST
831 };
832
833 if (!(pcr->flags & PCR_MS_PMOS)) {
834		/* With a single PMOS, the card may not be accessed
835		 * if the card currently present is not the designated one.
836 */
837 if (pcr->card_exist & (~cd_mask[card]))
838 return -EIO;
839 }
840
841 return 0;
842}
843EXPORT_SYMBOL_GPL(rtsx_pci_card_exclusive_check);
844
845int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
846{
847 if (pcr->ops->switch_output_voltage)
848 return pcr->ops->switch_output_voltage(pcr, voltage);
849
850 return 0;
851}
852EXPORT_SYMBOL_GPL(rtsx_pci_switch_output_voltage);
853
854unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr)
855{
856 unsigned int val;
857
858 val = rtsx_pci_readl(pcr, RTSX_BIPR);
859 if (pcr->ops->cd_deglitch)
860 val = pcr->ops->cd_deglitch(pcr);
861
862 return val;
863}
864EXPORT_SYMBOL_GPL(rtsx_pci_card_exist);
865
866void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr)
867{
868 struct completion finish;
869
870 pcr->finish_me = &finish;
871 init_completion(&finish);
872
873 if (pcr->done)
874 complete(pcr->done);
875
876 if (!pcr->remove_pci)
877 rtsx_pci_stop_cmd(pcr);
878
879 wait_for_completion_interruptible_timeout(&finish,
880 msecs_to_jiffies(2));
881 pcr->finish_me = NULL;
882}
883EXPORT_SYMBOL_GPL(rtsx_pci_complete_unfinished_transfer);
884
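/*
 * Delayed work scheduled from the ISR on card-detect interrupts.  It
 * re-reads RTSX_BIPR, folds the insert/remove bits recorded by the
 * interrupt handler into pcr->card_exist (after an optional
 * chip-specific deglitch), and then notifies the SD and/or MS slot
 * drivers through their card_event callbacks.
 */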
885static void rtsx_pci_card_detect(struct work_struct *work)
886{
887 struct delayed_work *dwork;
888 struct rtsx_pcr *pcr;
889 unsigned long flags;
890 unsigned int card_detect = 0, card_inserted, card_removed;
891 u32 irq_status;
892
893 dwork = to_delayed_work(work);
894 pcr = container_of(dwork, struct rtsx_pcr, carddet_work);
895
896 pcr_dbg(pcr, "--> %s\n", __func__);
897
898 mutex_lock(&pcr->pcr_mutex);
899 spin_lock_irqsave(&pcr->lock, flags);
900
901 irq_status = rtsx_pci_readl(pcr, RTSX_BIPR);
902 pcr_dbg(pcr, "irq_status: 0x%08x\n", irq_status);
903
904 irq_status &= CARD_EXIST;
905 card_inserted = pcr->card_inserted & irq_status;
906 card_removed = pcr->card_removed;
907 pcr->card_inserted = 0;
908 pcr->card_removed = 0;
909
910 spin_unlock_irqrestore(&pcr->lock, flags);
911
912 if (card_inserted || card_removed) {
913 pcr_dbg(pcr, "card_inserted: 0x%x, card_removed: 0x%x\n",
914 card_inserted, card_removed);
915
916 if (pcr->ops->cd_deglitch)
917 card_inserted = pcr->ops->cd_deglitch(pcr);
918
919 card_detect = card_inserted | card_removed;
920
921 pcr->card_exist |= card_inserted;
922 pcr->card_exist &= ~card_removed;
923 }
924
925 mutex_unlock(&pcr->pcr_mutex);
926
927 if ((card_detect & SD_EXIST) && pcr->slots[RTSX_SD_CARD].card_event)
928 pcr->slots[RTSX_SD_CARD].card_event(
929 pcr->slots[RTSX_SD_CARD].p_dev);
930 if ((card_detect & MS_EXIST) && pcr->slots[RTSX_MS_CARD].card_event)
931 pcr->slots[RTSX_MS_CARD].card_event(
932 pcr->slots[RTSX_MS_CARD].p_dev);
933}
934
935static void rtsx_pci_process_ocp(struct rtsx_pcr *pcr)
936{
937 if (pcr->ops->process_ocp) {
938 pcr->ops->process_ocp(pcr);
939 } else {
940 if (!pcr->option.ocp_en)
941 return;
942 rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
943 if (pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
944 rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
945 rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
946 rtsx_pci_clear_ocpstat(pcr);
947 pcr->ocp_stat = 0;
948 }
949 }
950}
951
952static int rtsx_pci_process_ocp_interrupt(struct rtsx_pcr *pcr)
953{
954 if (pcr->option.ocp_en)
955 rtsx_pci_process_ocp(pcr);
956
957 return 0;
958}
959
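/*
 * Interrupt handler.  It reads and acknowledges RTSX_BIPR, ignores
 * interrupts not enabled in pcr->bier, records SD/MS insert/remove
 * events for the card-detect worker, handles over-current interrupts,
 * and completes pcr->done with the transfer result when a
 * TRANS_OK/TRANS_FAIL/DELINK interrupt arrives.  A value of 0xFFFFFFFF
 * (device gone) is simply acknowledged as handled.
 */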
960static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
961{
962 struct rtsx_pcr *pcr = dev_id;
963 u32 int_reg;
964
965 if (!pcr)
966 return IRQ_NONE;
967
968 spin_lock(&pcr->lock);
969
970 int_reg = rtsx_pci_readl(pcr, RTSX_BIPR);
971 /* Clear interrupt flag */
972 rtsx_pci_writel(pcr, RTSX_BIPR, int_reg);
973 if ((int_reg & pcr->bier) == 0) {
974 spin_unlock(&pcr->lock);
975 return IRQ_NONE;
976 }
977 if (int_reg == 0xFFFFFFFF) {
978 spin_unlock(&pcr->lock);
979 return IRQ_HANDLED;
980 }
981
982 int_reg &= (pcr->bier | 0x7FFFFF);
983
984 if (int_reg & SD_OC_INT)
985 rtsx_pci_process_ocp_interrupt(pcr);
986
987 if (int_reg & SD_INT) {
988 if (int_reg & SD_EXIST) {
989 pcr->card_inserted |= SD_EXIST;
990 } else {
991 pcr->card_removed |= SD_EXIST;
992 pcr->card_inserted &= ~SD_EXIST;
993 }
994 pcr->dma_error_count = 0;
995 }
996
997 if (int_reg & MS_INT) {
998 if (int_reg & MS_EXIST) {
999 pcr->card_inserted |= MS_EXIST;
1000 } else {
1001 pcr->card_removed |= MS_EXIST;
1002 pcr->card_inserted &= ~MS_EXIST;
1003 }
1004 }
1005
1006 if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) {
1007 if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) {
1008 pcr->trans_result = TRANS_RESULT_FAIL;
1009 if (pcr->done)
1010 complete(pcr->done);
1011 } else if (int_reg & TRANS_OK_INT) {
1012 pcr->trans_result = TRANS_RESULT_OK;
1013 if (pcr->done)
1014 complete(pcr->done);
1015 }
1016 }
1017
1018 if ((pcr->card_inserted || pcr->card_removed) && !(int_reg & SD_OC_INT))
1019 schedule_delayed_work(&pcr->carddet_work,
1020 msecs_to_jiffies(200));
1021
1022 spin_unlock(&pcr->lock);
1023 return IRQ_HANDLED;
1024}
1025
1026static int rtsx_pci_acquire_irq(struct rtsx_pcr *pcr)
1027{
1028 pcr_dbg(pcr, "%s: pcr->msi_en = %d, pci->irq = %d\n",
1029 __func__, pcr->msi_en, pcr->pci->irq);
1030
1031 if (request_irq(pcr->pci->irq, rtsx_pci_isr,
1032 pcr->msi_en ? 0 : IRQF_SHARED,
1033 DRV_NAME_RTSX_PCI, pcr)) {
1034 dev_err(&(pcr->pci->dev),
1035 "rtsx_sdmmc: unable to grab IRQ %d, disabling device\n",
1036 pcr->pci->irq);
1037 return -1;
1038 }
1039
1040 pcr->irq = pcr->pci->irq;
1041 pci_intx(pcr->pci, !pcr->msi_en);
1042
1043 return 0;
1044}
1045
1046static void rtsx_enable_aspm(struct rtsx_pcr *pcr)
1047{
1048 if (pcr->ops->set_aspm)
1049 pcr->ops->set_aspm(pcr, true);
1050 else
1051 rtsx_comm_set_aspm(pcr, true);
1052}
1053
1054static void rtsx_comm_pm_power_saving(struct rtsx_pcr *pcr)
1055{
1056 struct rtsx_cr_option *option = &pcr->option;
1057
1058 if (option->ltr_enabled) {
1059 u32 latency = option->ltr_l1off_latency;
1060
1061 if (rtsx_check_dev_flag(pcr, L1_SNOOZE_TEST_EN))
1062 mdelay(option->l1_snooze_delay);
1063
1064 rtsx_set_ltr_latency(pcr, latency);
1065 }
1066
1067 if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
1068 rtsx_set_l1off_sub_cfg_d0(pcr, 0);
1069
1070 rtsx_enable_aspm(pcr);
1071}
1072
1073static void rtsx_pm_power_saving(struct rtsx_pcr *pcr)
1074{
1075 rtsx_comm_pm_power_saving(pcr);
1076}
1077
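/*
 * Idle work, (re)armed for 200 ms by rtsx_pci_start_run() and at
 * probe/resume.  When it fires, the reader is put into power-saving
 * mode: LED blinking is stopped, LTR/L1 substate settings are switched
 * to their idle values and ASPM is re-enabled.
 */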
1078static void rtsx_pci_idle_work(struct work_struct *work)
1079{
1080 struct delayed_work *dwork = to_delayed_work(work);
1081 struct rtsx_pcr *pcr = container_of(dwork, struct rtsx_pcr, idle_work);
1082
1083 pcr_dbg(pcr, "--> %s\n", __func__);
1084
1085 mutex_lock(&pcr->pcr_mutex);
1086
1087 pcr->state = PDEV_STAT_IDLE;
1088
1089 if (pcr->ops->disable_auto_blink)
1090 pcr->ops->disable_auto_blink(pcr);
1091 if (pcr->ops->turn_off_led)
1092 pcr->ops->turn_off_led(pcr);
1093
1094 rtsx_pm_power_saving(pcr);
1095
1096 mutex_unlock(&pcr->pcr_mutex);
1097}
1098
1099static void __maybe_unused rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
1100{
1101 if (pcr->ops->turn_off_led)
1102 pcr->ops->turn_off_led(pcr);
1103
1104 rtsx_pci_writel(pcr, RTSX_BIER, 0);
1105 pcr->bier = 0;
1106
1107 rtsx_pci_write_register(pcr, PETXCFG, 0x08, 0x08);
1108 rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, pm_state);
1109
1110 if (pcr->ops->force_power_down)
1111 pcr->ops->force_power_down(pcr, pm_state);
1112}
1113
1114void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr)
1115{
1116 u8 val = SD_OCP_INT_EN | SD_DETECT_EN;
1117
1118 if (pcr->ops->enable_ocp) {
1119 pcr->ops->enable_ocp(pcr);
1120 } else {
1121 rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
1122 rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
1123 }
1125}
1126
1127void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr)
1128{
1129 u8 mask = SD_OCP_INT_EN | SD_DETECT_EN;
1130
1131 if (pcr->ops->disable_ocp) {
1132 pcr->ops->disable_ocp(pcr);
1133 } else {
1134 rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
1135 rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
1136 OC_POWER_DOWN);
1137 }
1138}
1139
1140void rtsx_pci_init_ocp(struct rtsx_pcr *pcr)
1141{
1142 if (pcr->ops->init_ocp) {
1143 pcr->ops->init_ocp(pcr);
1144 } else {
1145 struct rtsx_cr_option *option = &(pcr->option);
1146
1147 if (option->ocp_en) {
1148 u8 val = option->sd_800mA_ocp_thd;
1149
1150 rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
1151 rtsx_pci_write_register(pcr, REG_OCPPARA1,
1152 SD_OCP_TIME_MASK, SD_OCP_TIME_800);
1153 rtsx_pci_write_register(pcr, REG_OCPPARA2,
1154 SD_OCP_THD_MASK, val);
1155 rtsx_pci_write_register(pcr, REG_OCPGLITCH,
1156 SD_OCP_GLITCH_MASK, pcr->hw_param.ocp_glitch);
1157 rtsx_pci_enable_ocp(pcr);
1158 } else {
1159 /* OC power down */
1160 rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
1161 OC_POWER_DOWN);
1162 }
1163 }
1164}
1165
1166int rtsx_pci_get_ocpstat(struct rtsx_pcr *pcr, u8 *val)
1167{
1168 if (pcr->ops->get_ocpstat)
1169 return pcr->ops->get_ocpstat(pcr, val);
1170 else
1171 return rtsx_pci_read_register(pcr, REG_OCPSTAT, val);
1172}
1173
1174void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr)
1175{
1176 if (pcr->ops->clear_ocpstat) {
1177 pcr->ops->clear_ocpstat(pcr);
1178 } else {
1179 u8 mask = SD_OCP_INT_CLR | SD_OC_CLR;
1180 u8 val = SD_OCP_INT_CLR | SD_OC_CLR;
1181
1182 rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
1183 udelay(100);
1184 rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
1185 }
1186}
1187
1188void rtsx_pci_enable_oobs_polling(struct rtsx_pcr *pcr)
1189{
1190 u16 val;
1191
1192 if ((PCI_PID(pcr) != PID_525A) && (PCI_PID(pcr) != PID_5260)) {
1193 rtsx_pci_read_phy_register(pcr, 0x01, &val);
1194 val |= 1<<9;
1195 rtsx_pci_write_phy_register(pcr, 0x01, val);
1196 }
1197 rtsx_pci_write_register(pcr, REG_CFG_OOBS_OFF_TIMER, 0xFF, 0x32);
1198 rtsx_pci_write_register(pcr, REG_CFG_OOBS_ON_TIMER, 0xFF, 0x05);
1199 rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x83);
1200 rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0xDE);
1202}
1203
1204void rtsx_pci_disable_oobs_polling(struct rtsx_pcr *pcr)
1205{
1206 u16 val;
1207
1208 if ((PCI_PID(pcr) != PID_525A) && (PCI_PID(pcr) != PID_5260)) {
1209 rtsx_pci_read_phy_register(pcr, 0x01, &val);
1210 val &= ~(1<<9);
1211 rtsx_pci_write_phy_register(pcr, 0x01, val);
1212 }
1213 rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x03);
1214 rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0x00);
1216}
1217
1218int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr)
1219{
1220 rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
1221 MS_CLK_EN | SD40_CLK_EN, 0);
1222 rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
1223 rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
1224
1225 msleep(50);
1226
1227 rtsx_pci_card_pull_ctl_disable(pcr, RTSX_SD_CARD);
1228
1229 return 0;
1230}
1231
1232int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr)
1233{
1234 rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
1235 MS_CLK_EN | SD40_CLK_EN, 0);
1236
1237 rtsx_pci_card_pull_ctl_disable(pcr, RTSX_MS_CARD);
1238
1239 rtsx_pci_write_register(pcr, CARD_OE, MS_OUTPUT_EN, 0);
1240 rtsx_pci_card_power_off(pcr, RTSX_MS_CARD);
1241
1242 return 0;
1243}
1244
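/*
 * Bring the controller to a known state: power up the SSC clock,
 * disable ASPM during initialization, run any chip-specific PHY
 * optimization, and issue one command batch that programs the clock
 * dividers, link-state behaviour, SSC defaults and interrupt quirks.
 * Finally OCP is initialized, CLKREQ#-based clock power management is
 * enabled and pcr->card_exist is seeded, since no card-detect interrupt
 * is generated for a card already inserted at probe time.
 */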
1245static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
1246{
1247 struct pci_dev *pdev = pcr->pci;
1248 int err;
1249
1250 if (PCI_PID(pcr) == PID_5228)
1251 rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG1, RTS5228_LDO1_SR_TIME_MASK,
1252 RTS5228_LDO1_SR_0_5);
1253
1254 rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
1255
1256 rtsx_pci_enable_bus_int(pcr);
1257
1258 /* Power on SSC */
1259 if (PCI_PID(pcr) == PID_5261) {
1260 /* Gating real mcu clock */
1261 err = rtsx_pci_write_register(pcr, RTS5261_FW_CFG1,
1262 RTS5261_MCU_CLOCK_GATING, 0);
1263 err = rtsx_pci_write_register(pcr, RTS5261_REG_FPDCTL,
1264 SSC_POWER_DOWN, 0);
1265 } else {
1266 err = rtsx_pci_write_register(pcr, FPDCTL, SSC_POWER_DOWN, 0);
1267 }
1268 if (err < 0)
1269 return err;
1270
1271 /* Wait SSC power stable */
1272 udelay(200);
1273
1274 rtsx_pci_disable_aspm(pcr);
1275 if (pcr->ops->optimize_phy) {
1276 err = pcr->ops->optimize_phy(pcr);
1277 if (err < 0)
1278 return err;
1279 }
1280
1281 rtsx_pci_init_cmd(pcr);
1282
1283 /* Set mcu_cnt to 7 to ensure data can be sampled properly */
1284 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV, 0x07, 0x07);
1285
1286 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, HOST_SLEEP_STATE, 0x03, 0x00);
1287 /* Disable card clock */
1288 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, 0x1E, 0);
1289 /* Reset delink mode */
1290 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x0A, 0);
1291 /* Card driving select */
1292 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DRIVE_SEL,
1293 0xFF, pcr->card_drive_sel);
1294 /* Enable SSC Clock */
1295 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1,
1296 0xFF, SSC_8X_EN | SSC_SEL_4M);
1297 if (PCI_PID(pcr) == PID_5261)
1298 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
1299 RTS5261_SSC_DEPTH_2M);
1300 else if (PCI_PID(pcr) == PID_5228)
1301 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
1302 RTS5228_SSC_DEPTH_2M);
1303 else
1304 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);
1305
1306 /* Disable cd_pwr_save */
1307 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x16, 0x10);
1308 /* Clear Link Ready Interrupt */
1309 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
1310 LINK_RDY_INT, LINK_RDY_INT);
1311 /* Enlarge the estimation window of PERST# glitch
1312 * to reduce the chance of invalid card interrupt
1313 */
1314 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PERST_GLITCH_WIDTH, 0xFF, 0x80);
1315 /* Update RC oscillator to 400k
1316 * bit[0] F_HIGH: for RC oscillator, Rst_value is 1'b1
1317 * 1: 2M 0: 400k
1318 */
1319 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RCCTL, 0x01, 0x00);
1320 /* Set interrupt write clear
1321 * bit 1: U_elbi_if_rd_clr_en
1322 * 1: Enable ELBI interrupt[31:22] & [7:0] flag read clear
1323 * 0: ELBI interrupt flag[31:22] & [7:0] only can be write clear
1324 */
1325 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, NFTS_TX_CTRL, 0x02, 0);
1326
1327 err = rtsx_pci_send_cmd(pcr, 100);
1328 if (err < 0)
1329 return err;
1330
1331 switch (PCI_PID(pcr)) {
1332 case PID_5250:
1333 case PID_524A:
1334 case PID_525A:
1335 case PID_5260:
1336 case PID_5261:
1337 case PID_5228:
1338 rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
1339 break;
1340 default:
1341 break;
1342 }
1343
1344	/* Initialize over-current protection (OCP) */
1345 rtsx_pci_init_ocp(pcr);
1346
1347 /* Enable clk_request_n to enable clock power management */
1348	pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
1349					0, PCI_EXP_LNKCTL_CLKREQ_EN);
1350 /* Enter L1 when host tx idle */
1351 pci_write_config_byte(pdev, 0x70F, 0x5B);
1352
1353 if (pcr->ops->extra_init_hw) {
1354 err = pcr->ops->extra_init_hw(pcr);
1355 if (err < 0)
1356 return err;
1357 }
1358
1359	/* No CD interrupt is generated when the driver is probed with a card
1360	 * already inserted, so pcr->card_exist must be initialized here.
1361 */
1362 if (pcr->ops->cd_deglitch)
1363 pcr->card_exist = pcr->ops->cd_deglitch(pcr);
1364 else
1365 pcr->card_exist = rtsx_pci_readl(pcr, RTSX_BIPR) & CARD_EXIST;
1366
1367 return 0;
1368}
1369
1370static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
1371{
1372 int err;
1373
1374 spin_lock_init(&pcr->lock);
1375 mutex_init(&pcr->pcr_mutex);
1376
1377 switch (PCI_PID(pcr)) {
1378 default:
1379 case 0x5209:
1380 rts5209_init_params(pcr);
1381 break;
1382
1383 case 0x5229:
1384 rts5229_init_params(pcr);
1385 break;
1386
1387 case 0x5289:
1388 rtl8411_init_params(pcr);
1389 break;
1390
1391 case 0x5227:
1392 rts5227_init_params(pcr);
1393 break;
1394
1395 case 0x522A:
1396 rts522a_init_params(pcr);
1397 break;
1398
1399 case 0x5249:
1400 rts5249_init_params(pcr);
1401 break;
1402
1403 case 0x524A:
1404 rts524a_init_params(pcr);
1405 break;
1406
1407 case 0x525A:
1408 rts525a_init_params(pcr);
1409 break;
1410
1411 case 0x5287:
1412 rtl8411b_init_params(pcr);
1413 break;
1414
1415 case 0x5286:
1416 rtl8402_init_params(pcr);
1417 break;
1418
1419 case 0x5260:
1420 rts5260_init_params(pcr);
1421 break;
1422
1423 case 0x5261:
1424 rts5261_init_params(pcr);
1425 break;
1426
1427 case 0x5228:
1428 rts5228_init_params(pcr);
1429 break;
1430 }
1431
1432 pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
1433 PCI_PID(pcr), pcr->ic_version);
1434
1435 pcr->slots = kcalloc(pcr->num_slots, sizeof(struct rtsx_slot),
1436 GFP_KERNEL);
1437 if (!pcr->slots)
1438 return -ENOMEM;
1439
1440 if (pcr->ops->fetch_vendor_settings)
1441 pcr->ops->fetch_vendor_settings(pcr);
1442
1443 pcr_dbg(pcr, "pcr->aspm_en = 0x%x\n", pcr->aspm_en);
1444 pcr_dbg(pcr, "pcr->sd30_drive_sel_1v8 = 0x%x\n",
1445 pcr->sd30_drive_sel_1v8);
1446 pcr_dbg(pcr, "pcr->sd30_drive_sel_3v3 = 0x%x\n",
1447 pcr->sd30_drive_sel_3v3);
1448 pcr_dbg(pcr, "pcr->card_drive_sel = 0x%x\n",
1449 pcr->card_drive_sel);
1450 pcr_dbg(pcr, "pcr->flags = 0x%x\n", pcr->flags);
1451
1452 pcr->state = PDEV_STAT_IDLE;
1453 err = rtsx_pci_init_hw(pcr);
1454 if (err < 0) {
1455 kfree(pcr->slots);
1456 return err;
1457 }
1458
1459 return 0;
1460}
1461
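/*
 * PCI probe: map the register BAR (BAR 1 on RTS525A, BAR 0 otherwise),
 * allocate the shared command/S-G DMA buffer, set up the card-detect
 * and idle workers, request the interrupt (MSI when available, falling
 * back to a shared legacy IRQ), initialize the chip and finally
 * register the child MFD cell(s) for the SD/MMC host.
 */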
1462static int rtsx_pci_probe(struct pci_dev *pcidev,
1463 const struct pci_device_id *id)
1464{
1465 struct rtsx_pcr *pcr;
1466 struct pcr_handle *handle;
1467 u32 base, len;
1468 int ret, i, bar = 0;
1469
1470 dev_dbg(&(pcidev->dev),
1471 ": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
1472 pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
1473 (int)pcidev->revision);
1474
1475 ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
1476 if (ret < 0)
1477 return ret;
1478
1479 ret = pci_enable_device(pcidev);
1480 if (ret)
1481 return ret;
1482
1483 ret = pci_request_regions(pcidev, DRV_NAME_RTSX_PCI);
1484 if (ret)
1485 goto disable;
1486
1487 pcr = kzalloc(sizeof(*pcr), GFP_KERNEL);
1488 if (!pcr) {
1489 ret = -ENOMEM;
1490 goto release_pci;
1491 }
1492
1493 handle = kzalloc(sizeof(*handle), GFP_KERNEL);
1494 if (!handle) {
1495 ret = -ENOMEM;
1496 goto free_pcr;
1497 }
1498 handle->pcr = pcr;
1499
1500 idr_preload(GFP_KERNEL);
1501 spin_lock(&rtsx_pci_lock);
1502 ret = idr_alloc(&rtsx_pci_idr, pcr, 0, 0, GFP_NOWAIT);
1503 if (ret >= 0)
1504 pcr->id = ret;
1505 spin_unlock(&rtsx_pci_lock);
1506 idr_preload_end();
1507 if (ret < 0)
1508 goto free_handle;
1509
1510 pcr->pci = pcidev;
1511 dev_set_drvdata(&pcidev->dev, handle);
1512
1513 if (CHK_PCI_PID(pcr, 0x525A))
1514 bar = 1;
1515 len = pci_resource_len(pcidev, bar);
1516 base = pci_resource_start(pcidev, bar);
1517 pcr->remap_addr = ioremap(base, len);
1518 if (!pcr->remap_addr) {
1519 ret = -ENOMEM;
1520 goto free_handle;
1521 }
1522
1523 pcr->rtsx_resv_buf = dma_alloc_coherent(&(pcidev->dev),
1524 RTSX_RESV_BUF_LEN, &(pcr->rtsx_resv_buf_addr),
1525 GFP_KERNEL);
1526 if (pcr->rtsx_resv_buf == NULL) {
1527 ret = -ENXIO;
1528 goto unmap;
1529 }
1530 pcr->host_cmds_ptr = pcr->rtsx_resv_buf;
1531 pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
1532 pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
1533 pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;
1534
1535 pcr->card_inserted = 0;
1536 pcr->card_removed = 0;
1537 INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
1538 INIT_DELAYED_WORK(&pcr->idle_work, rtsx_pci_idle_work);
1539
1540 pcr->msi_en = msi_en;
1541 if (pcr->msi_en) {
1542 ret = pci_enable_msi(pcidev);
1543 if (ret)
1544 pcr->msi_en = false;
1545 }
1546
1547 ret = rtsx_pci_acquire_irq(pcr);
1548 if (ret < 0)
1549 goto disable_msi;
1550
1551 pci_set_master(pcidev);
1552 synchronize_irq(pcr->irq);
1553
1554 ret = rtsx_pci_init_chip(pcr);
1555 if (ret < 0)
1556 goto disable_irq;
1557
1558 for (i = 0; i < ARRAY_SIZE(rtsx_pcr_cells); i++) {
1559 rtsx_pcr_cells[i].platform_data = handle;
1560 rtsx_pcr_cells[i].pdata_size = sizeof(*handle);
1561 }
1562 ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells,
1563 ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL);
1564 if (ret < 0)
1565 goto disable_irq;
1566
1567 schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));
1568
1569 return 0;
1570
1571disable_irq:
1572 free_irq(pcr->irq, (void *)pcr);
1573disable_msi:
1574 if (pcr->msi_en)
1575 pci_disable_msi(pcr->pci);
1576 dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
1577 pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
1578unmap:
1579 iounmap(pcr->remap_addr);
1580free_handle:
1581 kfree(handle);
1582free_pcr:
1583 kfree(pcr);
1584release_pci:
1585 pci_release_regions(pcidev);
1586disable:
1587 pci_disable_device(pcidev);
1588
1589 return ret;
1590}
1591
1592static void rtsx_pci_remove(struct pci_dev *pcidev)
1593{
1594 struct pcr_handle *handle = pci_get_drvdata(pcidev);
1595 struct rtsx_pcr *pcr = handle->pcr;
1596
1597 pcr->remove_pci = true;
1598
1599 /* Disable interrupts at the pcr level */
1600 spin_lock_irq(&pcr->lock);
1601 rtsx_pci_writel(pcr, RTSX_BIER, 0);
1602 pcr->bier = 0;
1603 spin_unlock_irq(&pcr->lock);
1604
1605 cancel_delayed_work_sync(&pcr->carddet_work);
1606 cancel_delayed_work_sync(&pcr->idle_work);
1607
1608 mfd_remove_devices(&pcidev->dev);
1609
1610 dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
1611 pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
1612 free_irq(pcr->irq, (void *)pcr);
1613 if (pcr->msi_en)
1614 pci_disable_msi(pcr->pci);
1615 iounmap(pcr->remap_addr);
1616
1617 pci_release_regions(pcidev);
1618 pci_disable_device(pcidev);
1619
1620 spin_lock(&rtsx_pci_lock);
1621 idr_remove(&rtsx_pci_idr, pcr->id);
1622 spin_unlock(&rtsx_pci_lock);
1623
1624 kfree(pcr->slots);
1625 kfree(pcr);
1626 kfree(handle);
1627
1628 dev_dbg(&(pcidev->dev),
1629 ": Realtek PCI-E Card Reader at %s [%04x:%04x] has been removed\n",
1630 pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device);
1631}
1632
1633static int __maybe_unused rtsx_pci_suspend(struct device *dev_d)
1634{
1635 struct pci_dev *pcidev = to_pci_dev(dev_d);
1636 struct pcr_handle *handle;
1637 struct rtsx_pcr *pcr;
1638
1639 dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1640
1641 handle = pci_get_drvdata(pcidev);
1642 pcr = handle->pcr;
1643
1644 cancel_delayed_work(&pcr->carddet_work);
1645 cancel_delayed_work(&pcr->idle_work);
1646
1647 mutex_lock(&pcr->pcr_mutex);
1648
1649 rtsx_pci_power_off(pcr, HOST_ENTER_S3);
1650
1651 device_wakeup_disable(dev_d);
1652
1653 mutex_unlock(&pcr->pcr_mutex);
1654 return 0;
1655}
1656
1657static int __maybe_unused rtsx_pci_resume(struct device *dev_d)
1658{
1659 struct pci_dev *pcidev = to_pci_dev(dev_d);
1660 struct pcr_handle *handle;
1661 struct rtsx_pcr *pcr;
1662 int ret = 0;
1663
1664 dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1665
1666 handle = pci_get_drvdata(pcidev);
1667 pcr = handle->pcr;
1668
1669 mutex_lock(&pcr->pcr_mutex);
1670
1671 ret = rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
1672 if (ret)
1673 goto out;
1674
1675 ret = rtsx_pci_init_hw(pcr);
1676 if (ret)
1677 goto out;
1678
1679 schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));
1680
1681out:
1682 mutex_unlock(&pcr->pcr_mutex);
1683 return ret;
1684}
1685
1686#ifdef CONFIG_PM
1687
1688static void rtsx_pci_shutdown(struct pci_dev *pcidev)
1689{
1690 struct pcr_handle *handle;
1691 struct rtsx_pcr *pcr;
1692
1693 dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1694
1695 handle = pci_get_drvdata(pcidev);
1696 pcr = handle->pcr;
1697 rtsx_pci_power_off(pcr, HOST_ENTER_S1);
1698
1699 pci_disable_device(pcidev);
1700 free_irq(pcr->irq, (void *)pcr);
1701 if (pcr->msi_en)
1702 pci_disable_msi(pcr->pci);
1703}
1704
1705#else /* CONFIG_PM */
1706
1707#define rtsx_pci_shutdown NULL
1708
1709#endif /* CONFIG_PM */
1710
1711static SIMPLE_DEV_PM_OPS(rtsx_pci_pm_ops, rtsx_pci_suspend, rtsx_pci_resume);
1712
1713static struct pci_driver rtsx_pci_driver = {
1714 .name = DRV_NAME_RTSX_PCI,
1715 .id_table = rtsx_pci_ids,
1716 .probe = rtsx_pci_probe,
1717 .remove = rtsx_pci_remove,
1718 .driver.pm = &rtsx_pci_pm_ops,
1719 .shutdown = rtsx_pci_shutdown,
1720};
1721module_pci_driver(rtsx_pci_driver);
1722
1723MODULE_LICENSE("GPL");
1724MODULE_AUTHOR("Wei WANG <wei_wang@realsil.com.cn>");
1725MODULE_DESCRIPTION("Realtek PCI-E Card Reader Driver");