1// SPDX-License-Identifier: GPL-2.0-or-later
2/* Driver for Realtek PCI-Express card reader
3 *
4 * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
5 *
6 * Author:
7 * Wei WANG <wei_wang@realsil.com.cn>
8 */
9
10#include <linux/pci.h>
11#include <linux/module.h>
12#include <linux/slab.h>
13#include <linux/dma-mapping.h>
14#include <linux/highmem.h>
15#include <linux/interrupt.h>
16#include <linux/delay.h>
17#include <linux/idr.h>
18#include <linux/platform_device.h>
19#include <linux/mfd/core.h>
20#include <linux/rtsx_pci.h>
21#include <linux/mmc/card.h>
22#include <linux/unaligned.h>
23#include <linux/pm.h>
24#include <linux/pm_runtime.h>
25
26#include "rtsx_pcr.h"
27#include "rts5261.h"
28#include "rts5228.h"
29#include "rts5264.h"
30
31static bool msi_en = true;
32module_param(msi_en, bool, S_IRUGO | S_IWUSR);
33MODULE_PARM_DESC(msi_en, "Enable MSI");
34
35static DEFINE_IDR(rtsx_pci_idr);
36static DEFINE_SPINLOCK(rtsx_pci_lock);
37
38static struct mfd_cell rtsx_pcr_cells[] = {
39 [RTSX_SD_CARD] = {
40 .name = DRV_NAME_RTSX_PCI_SDMMC,
41 },
42};
43
44static const struct pci_device_id rtsx_pci_ids[] = {
45 { PCI_DEVICE(0x10EC, 0x5209), PCI_CLASS_OTHERS << 16, 0xFF0000 },
46 { PCI_DEVICE(0x10EC, 0x5229), PCI_CLASS_OTHERS << 16, 0xFF0000 },
47 { PCI_DEVICE(0x10EC, 0x5289), PCI_CLASS_OTHERS << 16, 0xFF0000 },
48 { PCI_DEVICE(0x10EC, 0x5227), PCI_CLASS_OTHERS << 16, 0xFF0000 },
49 { PCI_DEVICE(0x10EC, 0x522A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
50 { PCI_DEVICE(0x10EC, 0x5249), PCI_CLASS_OTHERS << 16, 0xFF0000 },
51 { PCI_DEVICE(0x10EC, 0x5287), PCI_CLASS_OTHERS << 16, 0xFF0000 },
52 { PCI_DEVICE(0x10EC, 0x5286), PCI_CLASS_OTHERS << 16, 0xFF0000 },
53 { PCI_DEVICE(0x10EC, 0x524A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
54 { PCI_DEVICE(0x10EC, 0x525A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
55 { PCI_DEVICE(0x10EC, 0x5260), PCI_CLASS_OTHERS << 16, 0xFF0000 },
56 { PCI_DEVICE(0x10EC, 0x5261), PCI_CLASS_OTHERS << 16, 0xFF0000 },
57 { PCI_DEVICE(0x10EC, 0x5228), PCI_CLASS_OTHERS << 16, 0xFF0000 },
58 { PCI_DEVICE(0x10EC, 0x5264), PCI_CLASS_OTHERS << 16, 0xFF0000 },
59 { 0, }
60};
61
62MODULE_DEVICE_TABLE(pci, rtsx_pci_ids);
63
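/*
 * The LTR latency value is handed to the card reader one byte at a time
 * through the MSGTXDATA0-3 registers and is then latched by switching
 * LTR_CTL into software latency mode with LTR TX enabled.
 */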
64static int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
65{
66 rtsx_pci_write_register(pcr, MSGTXDATA0,
67 MASK_8_BIT_DEF, (u8) (latency & 0xFF));
68 rtsx_pci_write_register(pcr, MSGTXDATA1,
69 MASK_8_BIT_DEF, (u8)((latency >> 8) & 0xFF));
70 rtsx_pci_write_register(pcr, MSGTXDATA2,
71 MASK_8_BIT_DEF, (u8)((latency >> 16) & 0xFF));
72 rtsx_pci_write_register(pcr, MSGTXDATA3,
73 MASK_8_BIT_DEF, (u8)((latency >> 24) & 0xFF));
74 rtsx_pci_write_register(pcr, LTR_CTL, LTR_TX_EN_MASK |
75 LTR_LATENCY_MODE_MASK, LTR_TX_EN_1 | LTR_LATENCY_MODE_SW);
76
77 return 0;
78}
79
80int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
81{
82 return rtsx_comm_set_ltr_latency(pcr, latency);
83}
84
85static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable)
86{
87 if (pcr->aspm_enabled == enable)
88 return;
89
90 if (pcr->aspm_mode == ASPM_MODE_CFG) {
91 pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
92 PCI_EXP_LNKCTL_ASPMC,
93 enable ? pcr->aspm_en : 0);
94 } else if (pcr->aspm_mode == ASPM_MODE_REG) {
95 if (pcr->aspm_en & 0x02)
96 rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
97 FORCE_ASPM_CTL1, enable ? 0 : FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
98 else
99 rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
100 FORCE_ASPM_CTL1, FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
101 }
102
103 if (!enable && (pcr->aspm_en & 0x02))
104 mdelay(10);
105
106 pcr->aspm_enabled = enable;
107}
108
109static void rtsx_disable_aspm(struct rtsx_pcr *pcr)
110{
111 if (pcr->ops->set_aspm)
112 pcr->ops->set_aspm(pcr, false);
113 else
114 rtsx_comm_set_aspm(pcr, false);
115}
116
117int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val)
118{
119 rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, val);
120
121 return 0;
122}
123
124static void rtsx_set_l1off_sub_cfg_d0(struct rtsx_pcr *pcr, int active)
125{
126 if (pcr->ops->set_l1off_cfg_sub_d0)
127 pcr->ops->set_l1off_cfg_sub_d0(pcr, active);
128}
129
130static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr)
131{
132 struct rtsx_cr_option *option = &pcr->option;
133
134 rtsx_disable_aspm(pcr);
135
136 /* Fixes DMA transfer timeout issue after disabling ASPM on RTS5260 */
137 msleep(1);
138
139 if (option->ltr_enabled)
140 rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
141
142 if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
143 rtsx_set_l1off_sub_cfg_d0(pcr, 1);
144}
145
146static void rtsx_pm_full_on(struct rtsx_pcr *pcr)
147{
148 rtsx_comm_pm_full_on(pcr);
149}
150
151void rtsx_pci_start_run(struct rtsx_pcr *pcr)
152{
153	/* If the PCI device has been removed, don't touch the hardware any more */
154 if (pcr->remove_pci)
155 return;
156
157 if (pcr->state != PDEV_STAT_RUN) {
158 pcr->state = PDEV_STAT_RUN;
159 if (pcr->ops->enable_auto_blink)
160 pcr->ops->enable_auto_blink(pcr);
161 rtsx_pm_full_on(pcr);
162 }
163}
164EXPORT_SYMBOL_GPL(rtsx_pci_start_run);
165
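/*
 * Single internal register accesses go through the HAIMR window: the
 * 14-bit register address, the mask and the data byte are packed into
 * one 32-bit MMIO write, and the access is then polled until the
 * HAIMR_TRANS_END bit clears.  On writes the controller echoes the
 * value back, which is checked below to detect a failed update.
 */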
166int rtsx_pci_write_register(struct rtsx_pcr *pcr, u16 addr, u8 mask, u8 data)
167{
168 int i;
169 u32 val = HAIMR_WRITE_START;
170
171 val |= (u32)(addr & 0x3FFF) << 16;
172 val |= (u32)mask << 8;
173 val |= (u32)data;
174
175 rtsx_pci_writel(pcr, RTSX_HAIMR, val);
176
177 for (i = 0; i < MAX_RW_REG_CNT; i++) {
178 val = rtsx_pci_readl(pcr, RTSX_HAIMR);
179 if ((val & HAIMR_TRANS_END) == 0) {
180 if (data != (u8)val)
181 return -EIO;
182 return 0;
183 }
184 }
185
186 return -ETIMEDOUT;
187}
188EXPORT_SYMBOL_GPL(rtsx_pci_write_register);
189
190int rtsx_pci_read_register(struct rtsx_pcr *pcr, u16 addr, u8 *data)
191{
192 u32 val = HAIMR_READ_START;
193 int i;
194
195 val |= (u32)(addr & 0x3FFF) << 16;
196 rtsx_pci_writel(pcr, RTSX_HAIMR, val);
197
198 for (i = 0; i < MAX_RW_REG_CNT; i++) {
199 val = rtsx_pci_readl(pcr, RTSX_HAIMR);
200 if ((val & HAIMR_TRANS_END) == 0)
201 break;
202 }
203
204 if (i >= MAX_RW_REG_CNT)
205 return -ETIMEDOUT;
206
207 if (data)
208 *data = (u8)(val & 0xFF);
209
210 return 0;
211}
212EXPORT_SYMBOL_GPL(rtsx_pci_read_register);
213
214int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
215{
216 int err, i, finished = 0;
217 u8 tmp;
218
219 rtsx_pci_write_register(pcr, PHYDATA0, 0xFF, (u8)val);
220 rtsx_pci_write_register(pcr, PHYDATA1, 0xFF, (u8)(val >> 8));
221 rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr);
222 rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x81);
223
224 for (i = 0; i < 100000; i++) {
225 err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
226 if (err < 0)
227 return err;
228
229 if (!(tmp & 0x80)) {
230 finished = 1;
231 break;
232 }
233 }
234
235 if (!finished)
236 return -ETIMEDOUT;
237
238 return 0;
239}
240
241int rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
242{
243 if (pcr->ops->write_phy)
244 return pcr->ops->write_phy(pcr, addr, val);
245
246 return __rtsx_pci_write_phy_register(pcr, addr, val);
247}
248EXPORT_SYMBOL_GPL(rtsx_pci_write_phy_register);
249
250int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
251{
252 int err, i, finished = 0;
253 u16 data;
254 u8 tmp, val1, val2;
255
256 rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr);
257 rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x80);
258
259 for (i = 0; i < 100000; i++) {
260 err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
261 if (err < 0)
262 return err;
263
264 if (!(tmp & 0x80)) {
265 finished = 1;
266 break;
267 }
268 }
269
270 if (!finished)
271 return -ETIMEDOUT;
272
273 rtsx_pci_read_register(pcr, PHYDATA0, &val1);
274 rtsx_pci_read_register(pcr, PHYDATA1, &val2);
275 data = val1 | (val2 << 8);
276
277 if (val)
278 *val = data;
279
280 return 0;
281}
282
283int rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
284{
285 if (pcr->ops->read_phy)
286 return pcr->ops->read_phy(pcr, addr, val);
287
288 return __rtsx_pci_read_phy_register(pcr, addr, val);
289}
290EXPORT_SYMBOL_GPL(rtsx_pci_read_phy_register);
291
292void rtsx_pci_stop_cmd(struct rtsx_pcr *pcr)
293{
294 if (pcr->ops->stop_cmd)
295 return pcr->ops->stop_cmd(pcr);
296
297 rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
298 rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
299
300 rtsx_pci_write_register(pcr, DMACTL, 0x80, 0x80);
301 rtsx_pci_write_register(pcr, RBCTL, 0x80, 0x80);
302}
303EXPORT_SYMBOL_GPL(rtsx_pci_stop_cmd);
304
305void rtsx_pci_add_cmd(struct rtsx_pcr *pcr,
306 u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
307{
308 unsigned long flags;
309 u32 val = 0;
310 u32 *ptr = (u32 *)(pcr->host_cmds_ptr);
311
312 val |= (u32)(cmd_type & 0x03) << 30;
313 val |= (u32)(reg_addr & 0x3FFF) << 16;
314 val |= (u32)mask << 8;
315 val |= (u32)data;
316
317 spin_lock_irqsave(&pcr->lock, flags);
318 ptr += pcr->ci;
319 if (pcr->ci < (HOST_CMDS_BUF_LEN / 4)) {
320 put_unaligned_le32(val, ptr);
321 ptr++;
322 pcr->ci++;
323 }
324 spin_unlock_irqrestore(&pcr->lock, flags);
325}
326EXPORT_SYMBOL_GPL(rtsx_pci_add_cmd);
327
328void rtsx_pci_send_cmd_no_wait(struct rtsx_pcr *pcr)
329{
330 u32 val = 1 << 31;
331
332 rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
333
334 val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
335 /* Hardware Auto Response */
336 val |= 0x40000000;
337 rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
338}
339EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd_no_wait);
340
341int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout)
342{
343 struct completion trans_done;
344 u32 val = 1 << 31;
345 long timeleft;
346 unsigned long flags;
347 int err = 0;
348
349 spin_lock_irqsave(&pcr->lock, flags);
350
351 /* set up data structures for the wakeup system */
352 pcr->done = &trans_done;
353 pcr->trans_result = TRANS_NOT_READY;
354 init_completion(&trans_done);
355
356 rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
357
358 val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
359 /* Hardware Auto Response */
360 val |= 0x40000000;
361 rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
362
363 spin_unlock_irqrestore(&pcr->lock, flags);
364
365 /* Wait for TRANS_OK_INT */
366 timeleft = wait_for_completion_interruptible_timeout(
367 &trans_done, msecs_to_jiffies(timeout));
368 if (timeleft <= 0) {
369 pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
370 err = -ETIMEDOUT;
371 goto finish_send_cmd;
372 }
373
374 spin_lock_irqsave(&pcr->lock, flags);
375 if (pcr->trans_result == TRANS_RESULT_FAIL)
376 err = -EINVAL;
377 else if (pcr->trans_result == TRANS_RESULT_OK)
378 err = 0;
379 else if (pcr->trans_result == TRANS_NO_DEVICE)
380 err = -ENODEV;
381 spin_unlock_irqrestore(&pcr->lock, flags);
382
383finish_send_cmd:
384 spin_lock_irqsave(&pcr->lock, flags);
385 pcr->done = NULL;
386 spin_unlock_irqrestore(&pcr->lock, flags);
387
388 if ((err < 0) && (err != -ENODEV))
389 rtsx_pci_stop_cmd(pcr);
390
391 if (pcr->finish_me)
392 complete(pcr->finish_me);
393
394 return err;
395}
396EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd);
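/*
 * Typical use of the batched register interface (a minimal sketch; the
 * PPBUF helpers further down follow the same pattern):
 *
 *	rtsx_pci_init_cmd(pcr);
 *	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, 0x1E, 0);
 *	rtsx_pci_add_cmd(pcr, READ_REG_CMD, CARD_CLK_EN, 0, 0);
 *	err = rtsx_pci_send_cmd(pcr, 100);
 */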
397
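/*
 * Each scatter-gather table entry is a 64-bit descriptor: the DMA
 * address lives in the upper 32 bits, while the segment length and the
 * VALID/TRANS_DATA/END option bits are packed into the lower 32 bits.
 * RTS5261 and RTS5228 use a wider length encoding so that segments
 * larger than 64 KiB can be described.
 */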
398static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr,
399 dma_addr_t addr, unsigned int len, int end)
400{
401 u64 *ptr = (u64 *)(pcr->host_sg_tbl_ptr) + pcr->sgi;
402 u64 val;
403 u8 option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;
404
405 pcr_dbg(pcr, "DMA addr: 0x%x, Len: 0x%x\n", (unsigned int)addr, len);
406
407 if (end)
408 option |= RTSX_SG_END;
409
410 if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5228)) {
411 if (len > 0xFFFF)
412 val = ((u64)addr << 32) | (((u64)len & 0xFFFF) << 16)
413 | (((u64)len >> 16) << 6) | option;
414 else
415 val = ((u64)addr << 32) | ((u64)len << 16) | option;
416 } else {
417 val = ((u64)addr << 32) | ((u64)len << 12) | option;
418 }
419 put_unaligned_le64(val, ptr);
420 pcr->sgi++;
421}
422
423int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
424 int num_sg, bool read, int timeout)
425{
426 int err = 0, count;
427
428 pcr_dbg(pcr, "--> %s: num_sg = %d\n", __func__, num_sg);
429 count = rtsx_pci_dma_map_sg(pcr, sglist, num_sg, read);
430 if (count < 1)
431 return -EINVAL;
432 pcr_dbg(pcr, "DMA mapping count: %d\n", count);
433
434 err = rtsx_pci_dma_transfer(pcr, sglist, count, read, timeout);
435
436 rtsx_pci_dma_unmap_sg(pcr, sglist, num_sg, read);
437
438 return err;
439}
440EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data);
441
442int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
443 int num_sg, bool read)
444{
445 enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
446
447 if (pcr->remove_pci)
448 return -EINVAL;
449
450 if ((sglist == NULL) || (num_sg <= 0))
451 return -EINVAL;
452
453 return dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dir);
454}
455EXPORT_SYMBOL_GPL(rtsx_pci_dma_map_sg);
456
457void rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
458 int num_sg, bool read)
459{
460 enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
461
462 dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dir);
463}
464EXPORT_SYMBOL_GPL(rtsx_pci_dma_unmap_sg);
465
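/*
 * rtsx_pci_dma_transfer() builds the ADMA descriptor table for the
 * mapped scatterlist, programs HDBAR/HDBCTLR to start the transfer and
 * then sleeps until the interrupt handler reports TRANS_OK or
 * TRANS_FAIL.  Repeated failures bump dma_error_count, which
 * rtsx_pci_switch_clock() uses to step the card clock down on RTS5227.
 */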
466int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist,
467 int count, bool read, int timeout)
468{
469 struct completion trans_done;
470 struct scatterlist *sg;
471 dma_addr_t addr;
472 long timeleft;
473 unsigned long flags;
474 unsigned int len;
475 int i, err = 0;
476 u32 val;
477 u8 dir = read ? DEVICE_TO_HOST : HOST_TO_DEVICE;
478
479 if (pcr->remove_pci)
480 return -ENODEV;
481
482 if ((sglist == NULL) || (count < 1))
483 return -EINVAL;
484
485 val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE;
486 pcr->sgi = 0;
487 for_each_sg(sglist, sg, count, i) {
488 addr = sg_dma_address(sg);
489 len = sg_dma_len(sg);
490 rtsx_pci_add_sg_tbl(pcr, addr, len, i == count - 1);
491 }
492
493 spin_lock_irqsave(&pcr->lock, flags);
494
495 pcr->done = &trans_done;
496 pcr->trans_result = TRANS_NOT_READY;
497 init_completion(&trans_done);
498 rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr);
499 rtsx_pci_writel(pcr, RTSX_HDBCTLR, val);
500
501 spin_unlock_irqrestore(&pcr->lock, flags);
502
503 timeleft = wait_for_completion_interruptible_timeout(
504 &trans_done, msecs_to_jiffies(timeout));
505 if (timeleft <= 0) {
506 pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
507 err = -ETIMEDOUT;
508 goto out;
509 }
510
511 spin_lock_irqsave(&pcr->lock, flags);
512 if (pcr->trans_result == TRANS_RESULT_FAIL) {
513 err = -EILSEQ;
514 if (pcr->dma_error_count < RTS_MAX_TIMES_FREQ_REDUCTION)
515 pcr->dma_error_count++;
516 }
517
518 else if (pcr->trans_result == TRANS_NO_DEVICE)
519 err = -ENODEV;
520 spin_unlock_irqrestore(&pcr->lock, flags);
521
522out:
523 spin_lock_irqsave(&pcr->lock, flags);
524 pcr->done = NULL;
525 spin_unlock_irqrestore(&pcr->lock, flags);
526
527 if ((err < 0) && (err != -ENODEV))
528 rtsx_pci_stop_cmd(pcr);
529
530 if (pcr->finish_me)
531 complete(pcr->finish_me);
532
533 return err;
534}
535EXPORT_SYMBOL_GPL(rtsx_pci_dma_transfer);
536
537int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
538{
539 int err;
540 int i, j;
541 u16 reg;
542 u8 *ptr;
543
544 if (buf_len > 512)
545 buf_len = 512;
546
547 ptr = buf;
548 reg = PPBUF_BASE2;
549 for (i = 0; i < buf_len / 256; i++) {
550 rtsx_pci_init_cmd(pcr);
551
552 for (j = 0; j < 256; j++)
553 rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
554
555 err = rtsx_pci_send_cmd(pcr, 250);
556 if (err < 0)
557 return err;
558
559 memcpy(ptr, rtsx_pci_get_cmd_data(pcr), 256);
560 ptr += 256;
561 }
562
563 if (buf_len % 256) {
564 rtsx_pci_init_cmd(pcr);
565
566 for (j = 0; j < buf_len % 256; j++)
567 rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
568
569 err = rtsx_pci_send_cmd(pcr, 250);
570 if (err < 0)
571 return err;
572 }
573
574 memcpy(ptr, rtsx_pci_get_cmd_data(pcr), buf_len % 256);
575
576 return 0;
577}
578EXPORT_SYMBOL_GPL(rtsx_pci_read_ppbuf);
579
580int rtsx_pci_write_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
581{
582 int err;
583 int i, j;
584 u16 reg;
585 u8 *ptr;
586
587 if (buf_len > 512)
588 buf_len = 512;
589
590 ptr = buf;
591 reg = PPBUF_BASE2;
592 for (i = 0; i < buf_len / 256; i++) {
593 rtsx_pci_init_cmd(pcr);
594
595 for (j = 0; j < 256; j++) {
596 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
597 reg++, 0xFF, *ptr);
598 ptr++;
599 }
600
601 err = rtsx_pci_send_cmd(pcr, 250);
602 if (err < 0)
603 return err;
604 }
605
606 if (buf_len % 256) {
607 rtsx_pci_init_cmd(pcr);
608
609 for (j = 0; j < buf_len % 256; j++) {
610 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
611 reg++, 0xFF, *ptr);
612 ptr++;
613 }
614
615 err = rtsx_pci_send_cmd(pcr, 250);
616 if (err < 0)
617 return err;
618 }
619
620 return 0;
621}
622EXPORT_SYMBOL_GPL(rtsx_pci_write_ppbuf);
623
624static int rtsx_pci_set_pull_ctl(struct rtsx_pcr *pcr, const u32 *tbl)
625{
626 rtsx_pci_init_cmd(pcr);
627
628 while (*tbl & 0xFFFF0000) {
629 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
630 (u16)(*tbl >> 16), 0xFF, (u8)(*tbl));
631 tbl++;
632 }
633
634 return rtsx_pci_send_cmd(pcr, 100);
635}
636
637int rtsx_pci_card_pull_ctl_enable(struct rtsx_pcr *pcr, int card)
638{
639 const u32 *tbl;
640
641 if (card == RTSX_SD_CARD)
642 tbl = pcr->sd_pull_ctl_enable_tbl;
643 else if (card == RTSX_MS_CARD)
644 tbl = pcr->ms_pull_ctl_enable_tbl;
645 else
646 return -EINVAL;
647
648 return rtsx_pci_set_pull_ctl(pcr, tbl);
649}
650EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_enable);
651
652int rtsx_pci_card_pull_ctl_disable(struct rtsx_pcr *pcr, int card)
653{
654 const u32 *tbl;
655
656 if (card == RTSX_SD_CARD)
657 tbl = pcr->sd_pull_ctl_disable_tbl;
658 else if (card == RTSX_MS_CARD)
659 tbl = pcr->ms_pull_ctl_disable_tbl;
660 else
661 return -EINVAL;
662
663 return rtsx_pci_set_pull_ctl(pcr, tbl);
664}
665EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_disable);
666
667static void rtsx_pci_enable_bus_int(struct rtsx_pcr *pcr)
668{
669 struct rtsx_hw_param *hw_param = &pcr->hw_param;
670
671 pcr->bier = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN | SD_INT_EN
672 | hw_param->interrupt_en;
673
674 if (pcr->num_slots > 1)
675 pcr->bier |= MS_INT_EN;
676
677 /* Enable Bus Interrupt */
678 rtsx_pci_writel(pcr, RTSX_BIER, pcr->bier);
679
680 pcr_dbg(pcr, "RTSX_BIER: 0x%08x\n", pcr->bier);
681}
682
683static inline u8 double_ssc_depth(u8 depth)
684{
685 return ((depth > 1) ? (depth - 1) : depth);
686}
687
688static u8 revise_ssc_depth(u8 ssc_depth, u8 div)
689{
690 if (div > CLK_DIV_1) {
691 if (ssc_depth > (div - 1))
692 ssc_depth -= (div - 1);
693 else
694 ssc_depth = SSC_DEPTH_4M;
695 }
696
697 return ssc_depth;
698}
699
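/*
 * Card clock switching: the SSC divider N is derived from the target
 * clock (N = clk - 2 unless the chip supplies conv_clk_and_div_n) and
 * is doubled together with the CLK_DIV post-divider until it reaches
 * MIN_DIV_N_PCR.  The SSC depth is then scaled to match before the new
 * settings are programmed in a single command batch.
 */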
700int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
701 u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
702{
703 int err, clk;
704 u8 n, clk_divider, mcu_cnt, div;
705 static const u8 depth[] = {
706 [RTSX_SSC_DEPTH_4M] = SSC_DEPTH_4M,
707 [RTSX_SSC_DEPTH_2M] = SSC_DEPTH_2M,
708 [RTSX_SSC_DEPTH_1M] = SSC_DEPTH_1M,
709 [RTSX_SSC_DEPTH_500K] = SSC_DEPTH_500K,
710 [RTSX_SSC_DEPTH_250K] = SSC_DEPTH_250K,
711 };
712
713 if (PCI_PID(pcr) == PID_5261)
714 return rts5261_pci_switch_clock(pcr, card_clock,
715 ssc_depth, initial_mode, double_clk, vpclk);
716 if (PCI_PID(pcr) == PID_5228)
717 return rts5228_pci_switch_clock(pcr, card_clock,
718 ssc_depth, initial_mode, double_clk, vpclk);
719 if (PCI_PID(pcr) == PID_5264)
720 return rts5264_pci_switch_clock(pcr, card_clock,
721 ssc_depth, initial_mode, double_clk, vpclk);
722
723 if (initial_mode) {
724		/* Use a card clock of around 250 kHz in the initial stage */
725 clk_divider = SD_CLK_DIVIDE_128;
726 card_clock = 30000000;
727 } else {
728 clk_divider = SD_CLK_DIVIDE_0;
729 }
730 err = rtsx_pci_write_register(pcr, SD_CFG1,
731 SD_CLK_DIVIDE_MASK, clk_divider);
732 if (err < 0)
733 return err;
734
735 /* Reduce card clock by 20MHz each time a DMA transfer error occurs */
736 if (card_clock == UHS_SDR104_MAX_DTR &&
737 pcr->dma_error_count &&
738 PCI_PID(pcr) == RTS5227_DEVICE_ID)
739 card_clock = UHS_SDR104_MAX_DTR -
740 (pcr->dma_error_count * 20000000);
741
742 card_clock /= 1000000;
743 pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock);
744
745 clk = card_clock;
746 if (!initial_mode && double_clk)
747 clk = card_clock * 2;
748 pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n",
749 clk, pcr->cur_clock);
750
751 if (clk == pcr->cur_clock)
752 return 0;
753
754 if (pcr->ops->conv_clk_and_div_n)
755 n = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
756 else
757 n = (u8)(clk - 2);
758 if ((clk <= 2) || (n > MAX_DIV_N_PCR))
759 return -EINVAL;
760
761 mcu_cnt = (u8)(125/clk + 3);
762 if (mcu_cnt > 15)
763 mcu_cnt = 15;
764
765 /* Make sure that the SSC clock div_n is not less than MIN_DIV_N_PCR */
766 div = CLK_DIV_1;
767 while ((n < MIN_DIV_N_PCR) && (div < CLK_DIV_8)) {
768 if (pcr->ops->conv_clk_and_div_n) {
769 int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
770 DIV_N_TO_CLK) * 2;
771 n = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk,
772 CLK_TO_DIV_N);
773 } else {
774 n = (n + 2) * 2 - 2;
775 }
776 div++;
777 }
778 pcr_dbg(pcr, "n = %d, div = %d\n", n, div);
779
780 ssc_depth = depth[ssc_depth];
781 if (double_clk)
782 ssc_depth = double_ssc_depth(ssc_depth);
783
784 ssc_depth = revise_ssc_depth(ssc_depth, div);
785 pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth);
786
787 rtsx_pci_init_cmd(pcr);
788 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
789 CLK_LOW_FREQ, CLK_LOW_FREQ);
790 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV,
791 0xFF, (div << 4) | mcu_cnt);
792 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
793 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2,
794 SSC_DEPTH_MASK, ssc_depth);
795 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
796 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
797 if (vpclk) {
798 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
799 PHASE_NOT_RESET, 0);
800 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
801 PHASE_NOT_RESET, PHASE_NOT_RESET);
802 }
803
804 err = rtsx_pci_send_cmd(pcr, 2000);
805 if (err < 0)
806 return err;
807
808 /* Wait SSC clock stable */
809 udelay(SSC_CLOCK_STABLE_WAIT);
810 err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
811 if (err < 0)
812 return err;
813
814 pcr->cur_clock = clk;
815 return 0;
816}
817EXPORT_SYMBOL_GPL(rtsx_pci_switch_clock);
818
819int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card)
820{
821 if (pcr->ops->card_power_on)
822 return pcr->ops->card_power_on(pcr, card);
823
824 return 0;
825}
826EXPORT_SYMBOL_GPL(rtsx_pci_card_power_on);
827
828int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card)
829{
830 if (pcr->ops->card_power_off)
831 return pcr->ops->card_power_off(pcr, card);
832
833 return 0;
834}
835EXPORT_SYMBOL_GPL(rtsx_pci_card_power_off);
836
837int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card)
838{
839 static const unsigned int cd_mask[] = {
840 [RTSX_SD_CARD] = SD_EXIST,
841 [RTSX_MS_CARD] = MS_EXIST
842 };
843
844 if (!(pcr->flags & PCR_MS_PMOS)) {
845		/* With a single shared PMOS switch, accessing the card is not
846		 * permitted while a card other than the designated one is present.
847		 */
848 if (pcr->card_exist & (~cd_mask[card]))
849 return -EIO;
850 }
851
852 return 0;
853}
854EXPORT_SYMBOL_GPL(rtsx_pci_card_exclusive_check);
855
856int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
857{
858 if (pcr->ops->switch_output_voltage)
859 return pcr->ops->switch_output_voltage(pcr, voltage);
860
861 return 0;
862}
863EXPORT_SYMBOL_GPL(rtsx_pci_switch_output_voltage);
864
865unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr)
866{
867 unsigned int val;
868
869 val = rtsx_pci_readl(pcr, RTSX_BIPR);
870 if (pcr->ops->cd_deglitch)
871 val = pcr->ops->cd_deglitch(pcr);
872
873 return val;
874}
875EXPORT_SYMBOL_GPL(rtsx_pci_card_exist);
876
877void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr)
878{
879 struct completion finish;
880
881 pcr->finish_me = &finish;
882 init_completion(&finish);
883
884 if (pcr->done)
885 complete(pcr->done);
886
887 if (!pcr->remove_pci)
888 rtsx_pci_stop_cmd(pcr);
889
890 wait_for_completion_interruptible_timeout(&finish,
891 msecs_to_jiffies(2));
892 pcr->finish_me = NULL;
893}
894EXPORT_SYMBOL_GPL(rtsx_pci_complete_unfinished_transfer);
895
896static void rtsx_pci_card_detect(struct work_struct *work)
897{
898 struct delayed_work *dwork;
899 struct rtsx_pcr *pcr;
900 unsigned long flags;
901 unsigned int card_detect = 0, card_inserted, card_removed;
902 u32 irq_status;
903
904 dwork = to_delayed_work(work);
905 pcr = container_of(dwork, struct rtsx_pcr, carddet_work);
906
907 pcr_dbg(pcr, "--> %s\n", __func__);
908
909 mutex_lock(&pcr->pcr_mutex);
910 spin_lock_irqsave(&pcr->lock, flags);
911
912 irq_status = rtsx_pci_readl(pcr, RTSX_BIPR);
913 pcr_dbg(pcr, "irq_status: 0x%08x\n", irq_status);
914
915 irq_status &= CARD_EXIST;
916 card_inserted = pcr->card_inserted & irq_status;
917 card_removed = pcr->card_removed;
918 pcr->card_inserted = 0;
919 pcr->card_removed = 0;
920
921 spin_unlock_irqrestore(&pcr->lock, flags);
922
923 if (card_inserted || card_removed) {
924 pcr_dbg(pcr, "card_inserted: 0x%x, card_removed: 0x%x\n",
925 card_inserted, card_removed);
926
927 if (pcr->ops->cd_deglitch)
928 card_inserted = pcr->ops->cd_deglitch(pcr);
929
930 card_detect = card_inserted | card_removed;
931
932 pcr->card_exist |= card_inserted;
933 pcr->card_exist &= ~card_removed;
934 }
935
936 mutex_unlock(&pcr->pcr_mutex);
937
938 if ((card_detect & SD_EXIST) && pcr->slots[RTSX_SD_CARD].card_event)
939 pcr->slots[RTSX_SD_CARD].card_event(
940 pcr->slots[RTSX_SD_CARD].p_dev);
941 if ((card_detect & MS_EXIST) && pcr->slots[RTSX_MS_CARD].card_event)
942 pcr->slots[RTSX_MS_CARD].card_event(
943 pcr->slots[RTSX_MS_CARD].p_dev);
944}
945
946static void rtsx_pci_process_ocp(struct rtsx_pcr *pcr)
947{
948 if (pcr->ops->process_ocp) {
949 pcr->ops->process_ocp(pcr);
950 } else {
951 if (!pcr->option.ocp_en)
952 return;
953 rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
954 if (pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
955 rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
956 rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
957 rtsx_pci_clear_ocpstat(pcr);
958 pcr->ocp_stat = 0;
959 }
960 }
961}
962
963static int rtsx_pci_process_ocp_interrupt(struct rtsx_pcr *pcr)
964{
965 if (pcr->option.ocp_en)
966 rtsx_pci_process_ocp(pcr);
967
968 return 0;
969}
970
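/*
 * The interrupt handler reads and clears RTSX_BIPR, forwards
 * over-current events, records SD/MS card insertion and removal, and
 * completes any pending command or DMA transfer.  Card-detect
 * processing itself is deferred to the carddet_work delayed work.
 */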
971static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
972{
973 struct rtsx_pcr *pcr = dev_id;
974 u32 int_reg;
975
976 if (!pcr)
977 return IRQ_NONE;
978
979 spin_lock(&pcr->lock);
980
981 int_reg = rtsx_pci_readl(pcr, RTSX_BIPR);
982 /* Clear interrupt flag */
983 rtsx_pci_writel(pcr, RTSX_BIPR, int_reg);
984 if ((int_reg & pcr->bier) == 0) {
985 spin_unlock(&pcr->lock);
986 return IRQ_NONE;
987 }
988 if (int_reg == 0xFFFFFFFF) {
989 spin_unlock(&pcr->lock);
990 return IRQ_HANDLED;
991 }
992
993 int_reg &= (pcr->bier | 0x7FFFFF);
994
995 if ((int_reg & SD_OC_INT) ||
996 ((int_reg & SD_OVP_INT) && (PCI_PID(pcr) == PID_5264)))
997 rtsx_pci_process_ocp_interrupt(pcr);
998
999 if (int_reg & SD_INT) {
1000 if (int_reg & SD_EXIST) {
1001 pcr->card_inserted |= SD_EXIST;
1002 } else {
1003 pcr->card_removed |= SD_EXIST;
1004 pcr->card_inserted &= ~SD_EXIST;
1005 }
1006
1007 if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5264)) {
1008 rtsx_pci_write_register(pcr, RTS5261_FW_STATUS,
1009 RTS5261_EXPRESS_LINK_FAIL_MASK, 0);
1010 pcr->extra_caps |= EXTRA_CAPS_SD_EXPRESS;
1011 }
1012
1013 pcr->dma_error_count = 0;
1014 }
1015
1016 if (int_reg & MS_INT) {
1017 if (int_reg & MS_EXIST) {
1018 pcr->card_inserted |= MS_EXIST;
1019 } else {
1020 pcr->card_removed |= MS_EXIST;
1021 pcr->card_inserted &= ~MS_EXIST;
1022 }
1023 }
1024
1025 if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) {
1026 if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) {
1027 pcr->trans_result = TRANS_RESULT_FAIL;
1028 if (pcr->done)
1029 complete(pcr->done);
1030 } else if (int_reg & TRANS_OK_INT) {
1031 pcr->trans_result = TRANS_RESULT_OK;
1032 if (pcr->done)
1033 complete(pcr->done);
1034 }
1035 }
1036
1037 if ((pcr->card_inserted || pcr->card_removed) && !(int_reg & SD_OC_INT))
1038 schedule_delayed_work(&pcr->carddet_work,
1039 msecs_to_jiffies(200));
1040
1041 spin_unlock(&pcr->lock);
1042 return IRQ_HANDLED;
1043}
1044
1045static int rtsx_pci_acquire_irq(struct rtsx_pcr *pcr)
1046{
1047 pcr_dbg(pcr, "%s: pcr->msi_en = %d, pci->irq = %d\n",
1048 __func__, pcr->msi_en, pcr->pci->irq);
1049
1050 if (request_irq(pcr->pci->irq, rtsx_pci_isr,
1051 pcr->msi_en ? 0 : IRQF_SHARED,
1052 DRV_NAME_RTSX_PCI, pcr)) {
1053 dev_err(&(pcr->pci->dev),
1054 "rtsx_sdmmc: unable to grab IRQ %d, disabling device\n",
1055 pcr->pci->irq);
1056 return -1;
1057 }
1058
1059 pcr->irq = pcr->pci->irq;
1060 pci_intx(pcr->pci, !pcr->msi_en);
1061
1062 return 0;
1063}
1064
1065static void rtsx_base_force_power_down(struct rtsx_pcr *pcr)
1066{
1067 /* Set relink_time to 0 */
1068 rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0);
1069 rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0);
1070 rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3,
1071 RELINK_TIME_MASK, 0);
1072
1073 rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
1074 D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
1075
1076 rtsx_pci_write_register(pcr, FPDCTL, ALL_POWER_DOWN, ALL_POWER_DOWN);
1077}
1078
1079static void __maybe_unused rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state, bool runtime)
1080{
1081 if (pcr->ops->turn_off_led)
1082 pcr->ops->turn_off_led(pcr);
1083
1084 rtsx_pci_writel(pcr, RTSX_BIER, 0);
1085 pcr->bier = 0;
1086
1087 rtsx_pci_write_register(pcr, PETXCFG, 0x08, 0x08);
1088 rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, pm_state);
1089
1090 if (pcr->ops->force_power_down)
1091 pcr->ops->force_power_down(pcr, pm_state, runtime);
1092 else
1093 rtsx_base_force_power_down(pcr);
1094}
1095
1096void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr)
1097{
1098 u8 val = SD_OCP_INT_EN | SD_DETECT_EN;
1099
1100 if (pcr->ops->enable_ocp) {
1101 pcr->ops->enable_ocp(pcr);
1102 } else {
1103 rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
1104 rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
1105 }
1106
1107}
1108
1109void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr)
1110{
1111 u8 mask = SD_OCP_INT_EN | SD_DETECT_EN;
1112
1113 if (pcr->ops->disable_ocp) {
1114 pcr->ops->disable_ocp(pcr);
1115 } else {
1116 rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
1117 rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
1118 OC_POWER_DOWN);
1119 }
1120}
1121
1122void rtsx_pci_init_ocp(struct rtsx_pcr *pcr)
1123{
1124 if (pcr->ops->init_ocp) {
1125 pcr->ops->init_ocp(pcr);
1126 } else {
1127 struct rtsx_cr_option *option = &(pcr->option);
1128
1129 if (option->ocp_en) {
1130 u8 val = option->sd_800mA_ocp_thd;
1131
1132 rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
1133 rtsx_pci_write_register(pcr, REG_OCPPARA1,
1134 SD_OCP_TIME_MASK, SD_OCP_TIME_800);
1135 rtsx_pci_write_register(pcr, REG_OCPPARA2,
1136 SD_OCP_THD_MASK, val);
1137 rtsx_pci_write_register(pcr, REG_OCPGLITCH,
1138 SD_OCP_GLITCH_MASK, pcr->hw_param.ocp_glitch);
1139 rtsx_pci_enable_ocp(pcr);
1140 }
1141 }
1142}
1143
1144int rtsx_pci_get_ocpstat(struct rtsx_pcr *pcr, u8 *val)
1145{
1146 if (pcr->ops->get_ocpstat)
1147 return pcr->ops->get_ocpstat(pcr, val);
1148 else
1149 return rtsx_pci_read_register(pcr, REG_OCPSTAT, val);
1150}
1151
1152void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr)
1153{
1154 if (pcr->ops->clear_ocpstat) {
1155 pcr->ops->clear_ocpstat(pcr);
1156 } else {
1157 u8 mask = SD_OCP_INT_CLR | SD_OC_CLR;
1158 u8 val = SD_OCP_INT_CLR | SD_OC_CLR;
1159
1160 rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
1161 udelay(100);
1162 rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
1163 }
1164}
1165
1166void rtsx_pci_enable_oobs_polling(struct rtsx_pcr *pcr)
1167{
1168 u16 val;
1169
1170 if ((PCI_PID(pcr) != PID_525A) &&
1171 (PCI_PID(pcr) != PID_5260) &&
1172 (PCI_PID(pcr) != PID_5264)) {
1173 rtsx_pci_read_phy_register(pcr, 0x01, &val);
1174 val |= 1<<9;
1175 rtsx_pci_write_phy_register(pcr, 0x01, val);
1176 }
1177 rtsx_pci_write_register(pcr, REG_CFG_OOBS_OFF_TIMER, 0xFF, 0x32);
1178 rtsx_pci_write_register(pcr, REG_CFG_OOBS_ON_TIMER, 0xFF, 0x05);
1179 rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x83);
1180 rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0xDE);
1181
1182}
1183
1184void rtsx_pci_disable_oobs_polling(struct rtsx_pcr *pcr)
1185{
1186 u16 val;
1187
1188 if ((PCI_PID(pcr) != PID_525A) &&
1189 (PCI_PID(pcr) != PID_5260) &&
1190 (PCI_PID(pcr) != PID_5264)) {
1191 rtsx_pci_read_phy_register(pcr, 0x01, &val);
1192 val &= ~(1<<9);
1193 rtsx_pci_write_phy_register(pcr, 0x01, val);
1194 }
1195 rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x03);
1196 rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0x00);
1197
1198}
1199
1200int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr)
1201{
1202 rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
1203 MS_CLK_EN | SD40_CLK_EN, 0);
1204 rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
1205 rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
1206
1207 msleep(50);
1208
1209 rtsx_pci_card_pull_ctl_disable(pcr, RTSX_SD_CARD);
1210
1211 return 0;
1212}
1213
1214int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr)
1215{
1216 rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
1217 MS_CLK_EN | SD40_CLK_EN, 0);
1218
1219 rtsx_pci_card_pull_ctl_disable(pcr, RTSX_MS_CARD);
1220
1221 rtsx_pci_write_register(pcr, CARD_OE, MS_OUTPUT_EN, 0);
1222 rtsx_pci_card_power_off(pcr, RTSX_MS_CARD);
1223
1224 return 0;
1225}
1226
1227static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
1228{
1229 struct pci_dev *pdev = pcr->pci;
1230 int err;
1231
1232 if (PCI_PID(pcr) == PID_5228)
1233 rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG1, RTS5228_LDO1_SR_TIME_MASK,
1234 RTS5228_LDO1_SR_0_5);
1235
1236 rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
1237
1238 rtsx_pci_enable_bus_int(pcr);
1239
1240 /* Power on SSC */
1241 if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5264)) {
1242 /* Gating real mcu clock */
1243 err = rtsx_pci_write_register(pcr, RTS5261_FW_CFG1,
1244 RTS5261_MCU_CLOCK_GATING, 0);
1245 err = rtsx_pci_write_register(pcr, RTS5261_REG_FPDCTL,
1246 SSC_POWER_DOWN, 0);
1247 } else {
1248 err = rtsx_pci_write_register(pcr, FPDCTL, SSC_POWER_DOWN, 0);
1249 }
1250 if (err < 0)
1251 return err;
1252
1253 /* Wait SSC power stable */
1254 udelay(200);
1255
1256 rtsx_disable_aspm(pcr);
1257 if (pcr->ops->optimize_phy) {
1258 err = pcr->ops->optimize_phy(pcr);
1259 if (err < 0)
1260 return err;
1261 }
1262
1263 rtsx_pci_init_cmd(pcr);
1264
1265 /* Set mcu_cnt to 7 to ensure data can be sampled properly */
1266 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV, 0x07, 0x07);
1267
1268 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, HOST_SLEEP_STATE, 0x03, 0x00);
1269 /* Disable card clock */
1270 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, 0x1E, 0);
1271 /* Reset delink mode */
1272 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x0A, 0);
1273 /* Card driving select */
1274 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DRIVE_SEL,
1275 0xFF, pcr->card_drive_sel);
1276 /* Enable SSC Clock */
1277 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1,
1278 0xFF, SSC_8X_EN | SSC_SEL_4M);
1279 if (PCI_PID(pcr) == PID_5261)
1280 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
1281 RTS5261_SSC_DEPTH_2M);
1282 else if (PCI_PID(pcr) == PID_5228)
1283 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
1284 RTS5228_SSC_DEPTH_2M);
1285 else if (is_version(pcr, 0x5264, IC_VER_A))
1286 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
1287 else if (PCI_PID(pcr) == PID_5264)
1288 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
1289 RTS5264_SSC_DEPTH_2M);
1290 else
1291 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);
1292
1293 /* Disable cd_pwr_save */
1294 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x16, 0x10);
1295 /* Clear Link Ready Interrupt */
1296 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
1297 LINK_RDY_INT, LINK_RDY_INT);
1298 /* Enlarge the estimation window of PERST# glitch
1299 * to reduce the chance of invalid card interrupt
1300 */
1301 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PERST_GLITCH_WIDTH, 0xFF, 0x80);
1302 /* Update RC oscillator to 400k
1303 * bit[0] F_HIGH: for RC oscillator, Rst_value is 1'b1
1304 * 1: 2M 0: 400k
1305 */
1306 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RCCTL, 0x01, 0x00);
1307 /* Set interrupt write clear
1308 * bit 1: U_elbi_if_rd_clr_en
1309 * 1: Enable ELBI interrupt[31:22] & [7:0] flag read clear
1310 * 0: ELBI interrupt flag[31:22] & [7:0] only can be write clear
1311 */
1312 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, NFTS_TX_CTRL, 0x02, 0);
1313
1314 err = rtsx_pci_send_cmd(pcr, 100);
1315 if (err < 0)
1316 return err;
1317
1318 switch (PCI_PID(pcr)) {
1319 case PID_5250:
1320 case PID_524A:
1321 case PID_525A:
1322 case PID_5260:
1323 case PID_5261:
1324 case PID_5228:
1325 case PID_5264:
1326 rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
1327 break;
1328 default:
1329 break;
1330 }
1331
1332	/* Initialize over-current protection (OCP) */
1333 rtsx_pci_init_ocp(pcr);
1334
1335 /* Enable clk_request_n to enable clock power management */
1336 pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
1337 0, PCI_EXP_LNKCTL_CLKREQ_EN);
1338 /* Enter L1 when host tx idle */
1339 pci_write_config_byte(pdev, 0x70F, 0x5B);
1340
1341 if (pcr->ops->extra_init_hw) {
1342 err = pcr->ops->extra_init_hw(pcr);
1343 if (err < 0)
1344 return err;
1345 }
1346
1347 if (pcr->aspm_mode == ASPM_MODE_REG)
1348 rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30);
1349
1350	/* No card-detect interrupt is generated when the driver is probed
1351	 * with a card already inserted, so initialize pcr->card_exist here.
1352	 */
1353 if (pcr->ops->cd_deglitch)
1354 pcr->card_exist = pcr->ops->cd_deglitch(pcr);
1355 else
1356 pcr->card_exist = rtsx_pci_readl(pcr, RTSX_BIPR) & CARD_EXIST;
1357
1358 return 0;
1359}
1360
1361static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
1362{
1363 struct rtsx_cr_option *option = &(pcr->option);
1364 int err, l1ss;
1365 u32 lval;
1366 u16 cfg_val;
1367 u8 val;
1368
1369 spin_lock_init(&pcr->lock);
1370 mutex_init(&pcr->pcr_mutex);
1371
1372 switch (PCI_PID(pcr)) {
1373 default:
1374 case 0x5209:
1375 rts5209_init_params(pcr);
1376 break;
1377
1378 case 0x5229:
1379 rts5229_init_params(pcr);
1380 break;
1381
1382 case 0x5289:
1383 rtl8411_init_params(pcr);
1384 break;
1385
1386 case 0x5227:
1387 rts5227_init_params(pcr);
1388 break;
1389
1390 case 0x522A:
1391 rts522a_init_params(pcr);
1392 break;
1393
1394 case 0x5249:
1395 rts5249_init_params(pcr);
1396 break;
1397
1398 case 0x524A:
1399 rts524a_init_params(pcr);
1400 break;
1401
1402 case 0x525A:
1403 rts525a_init_params(pcr);
1404 break;
1405
1406 case 0x5287:
1407 rtl8411b_init_params(pcr);
1408 break;
1409
1410 case 0x5286:
1411 rtl8402_init_params(pcr);
1412 break;
1413
1414 case 0x5260:
1415 rts5260_init_params(pcr);
1416 break;
1417
1418 case 0x5261:
1419 rts5261_init_params(pcr);
1420 break;
1421
1422 case 0x5228:
1423 rts5228_init_params(pcr);
1424 break;
1425
1426 case 0x5264:
1427 rts5264_init_params(pcr);
1428 break;
1429 }
1430
1431 pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
1432 PCI_PID(pcr), pcr->ic_version);
1433
1434 pcr->slots = kcalloc(pcr->num_slots, sizeof(struct rtsx_slot),
1435 GFP_KERNEL);
1436 if (!pcr->slots)
1437 return -ENOMEM;
1438
1439 if (pcr->aspm_mode == ASPM_MODE_CFG) {
1440 pcie_capability_read_word(pcr->pci, PCI_EXP_LNKCTL, &cfg_val);
1441 if (cfg_val & PCI_EXP_LNKCTL_ASPM_L1)
1442 pcr->aspm_enabled = true;
1443 else
1444 pcr->aspm_enabled = false;
1445
1446 } else if (pcr->aspm_mode == ASPM_MODE_REG) {
1447 rtsx_pci_read_register(pcr, ASPM_FORCE_CTL, &val);
1448 if (val & FORCE_ASPM_CTL0 && val & FORCE_ASPM_CTL1)
1449 pcr->aspm_enabled = false;
1450 else
1451 pcr->aspm_enabled = true;
1452 }
1453
1454 l1ss = pci_find_ext_capability(pcr->pci, PCI_EXT_CAP_ID_L1SS);
1455 if (l1ss) {
1456 pci_read_config_dword(pcr->pci, l1ss + PCI_L1SS_CTL1, &lval);
1457
1458 if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
1459 rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
1460 else
1461 rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN);
1462
1463 if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
1464 rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
1465 else
1466 rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN);
1467
1468 if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
1469 rtsx_set_dev_flag(pcr, PM_L1_1_EN);
1470 else
1471 rtsx_clear_dev_flag(pcr, PM_L1_1_EN);
1472
1473 if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
1474 rtsx_set_dev_flag(pcr, PM_L1_2_EN);
1475 else
1476 rtsx_clear_dev_flag(pcr, PM_L1_2_EN);
1477
1478 pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &cfg_val);
1479 if (cfg_val & PCI_EXP_DEVCTL2_LTR_EN) {
1480 option->ltr_enabled = true;
1481 option->ltr_active = true;
1482 } else {
1483 option->ltr_enabled = false;
1484 }
1485
1486 if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
1487 | PM_L1_1_EN | PM_L1_2_EN))
1488 option->force_clkreq_0 = false;
1489 else
1490 option->force_clkreq_0 = true;
1491 } else {
1492 option->ltr_enabled = false;
1493 option->force_clkreq_0 = true;
1494 }
1495
1496 if (pcr->ops->fetch_vendor_settings)
1497 pcr->ops->fetch_vendor_settings(pcr);
1498
1499 pcr_dbg(pcr, "pcr->aspm_en = 0x%x\n", pcr->aspm_en);
1500 pcr_dbg(pcr, "pcr->sd30_drive_sel_1v8 = 0x%x\n",
1501 pcr->sd30_drive_sel_1v8);
1502 pcr_dbg(pcr, "pcr->sd30_drive_sel_3v3 = 0x%x\n",
1503 pcr->sd30_drive_sel_3v3);
1504 pcr_dbg(pcr, "pcr->card_drive_sel = 0x%x\n",
1505 pcr->card_drive_sel);
1506 pcr_dbg(pcr, "pcr->flags = 0x%x\n", pcr->flags);
1507
1508 pcr->state = PDEV_STAT_IDLE;
1509 err = rtsx_pci_init_hw(pcr);
1510 if (err < 0) {
1511 kfree(pcr->slots);
1512 return err;
1513 }
1514
1515 return 0;
1516}
1517
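/*
 * Probe sequence: enable the PCI device, map the register BAR (BAR 1 on
 * RTS525A/RTS5264, BAR 0 otherwise), carve the coherent rtsx_resv_buf
 * into the host command buffer and the scatter-gather table, set up
 * MSI/IRQ handling, initialize the chip and finally register the
 * SD/MMC function as an MFD cell.
 */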
1518static int rtsx_pci_probe(struct pci_dev *pcidev,
1519 const struct pci_device_id *id)
1520{
1521 struct rtsx_pcr *pcr;
1522 struct pcr_handle *handle;
1523 u32 base, len;
1524 int ret, i, bar = 0;
1525
1526 dev_dbg(&(pcidev->dev),
1527 ": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
1528 pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
1529 (int)pcidev->revision);
1530
1531 ret = dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32));
1532 if (ret < 0)
1533 return ret;
1534
1535 ret = pci_enable_device(pcidev);
1536 if (ret)
1537 return ret;
1538
1539 ret = pci_request_regions(pcidev, DRV_NAME_RTSX_PCI);
1540 if (ret)
1541 goto disable;
1542
1543 pcr = kzalloc(sizeof(*pcr), GFP_KERNEL);
1544 if (!pcr) {
1545 ret = -ENOMEM;
1546 goto release_pci;
1547 }
1548
1549 handle = kzalloc(sizeof(*handle), GFP_KERNEL);
1550 if (!handle) {
1551 ret = -ENOMEM;
1552 goto free_pcr;
1553 }
1554 handle->pcr = pcr;
1555
1556 idr_preload(GFP_KERNEL);
1557 spin_lock(&rtsx_pci_lock);
1558 ret = idr_alloc(&rtsx_pci_idr, pcr, 0, 0, GFP_NOWAIT);
1559 if (ret >= 0)
1560 pcr->id = ret;
1561 spin_unlock(&rtsx_pci_lock);
1562 idr_preload_end();
1563 if (ret < 0)
1564 goto free_handle;
1565
1566 pcr->pci = pcidev;
1567 dev_set_drvdata(&pcidev->dev, handle);
1568
1569 if ((CHK_PCI_PID(pcr, 0x525A)) || (CHK_PCI_PID(pcr, 0x5264)))
1570 bar = 1;
1571 len = pci_resource_len(pcidev, bar);
1572 base = pci_resource_start(pcidev, bar);
1573 pcr->remap_addr = ioremap(base, len);
1574 if (!pcr->remap_addr) {
1575 ret = -ENOMEM;
1576 goto free_idr;
1577 }
1578
1579 pcr->rtsx_resv_buf = dma_alloc_coherent(&(pcidev->dev),
1580 RTSX_RESV_BUF_LEN, &(pcr->rtsx_resv_buf_addr),
1581 GFP_KERNEL);
1582 if (pcr->rtsx_resv_buf == NULL) {
1583 ret = -ENXIO;
1584 goto unmap;
1585 }
1586 pcr->host_cmds_ptr = pcr->rtsx_resv_buf;
1587 pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
1588 pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
1589 pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;
1590 pcr->card_inserted = 0;
1591 pcr->card_removed = 0;
1592 INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
1593
1594 pcr->msi_en = msi_en;
1595 if (pcr->msi_en) {
1596 ret = pci_enable_msi(pcidev);
1597 if (ret)
1598 pcr->msi_en = false;
1599 }
1600
1601 ret = rtsx_pci_acquire_irq(pcr);
1602 if (ret < 0)
1603 goto disable_msi;
1604
1605 pci_set_master(pcidev);
1606 synchronize_irq(pcr->irq);
1607
1608 ret = rtsx_pci_init_chip(pcr);
1609 if (ret < 0)
1610 goto disable_irq;
1611
1612 for (i = 0; i < ARRAY_SIZE(rtsx_pcr_cells); i++) {
1613 rtsx_pcr_cells[i].platform_data = handle;
1614 rtsx_pcr_cells[i].pdata_size = sizeof(*handle);
1615 }
1616
1617
1618 ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells,
1619 ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL);
1620 if (ret < 0)
1621 goto free_slots;
1622
1623 pm_runtime_allow(&pcidev->dev);
1624 pm_runtime_put(&pcidev->dev);
1625
1626 return 0;
1627
1628free_slots:
1629 kfree(pcr->slots);
1630disable_irq:
1631 free_irq(pcr->irq, (void *)pcr);
1632disable_msi:
1633 if (pcr->msi_en)
1634 pci_disable_msi(pcr->pci);
1635 dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
1636 pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
1637unmap:
1638 iounmap(pcr->remap_addr);
1639free_idr:
1640 spin_lock(&rtsx_pci_lock);
1641 idr_remove(&rtsx_pci_idr, pcr->id);
1642 spin_unlock(&rtsx_pci_lock);
1643free_handle:
1644 kfree(handle);
1645free_pcr:
1646 kfree(pcr);
1647release_pci:
1648 pci_release_regions(pcidev);
1649disable:
1650 pci_disable_device(pcidev);
1651
1652 return ret;
1653}
1654
1655static void rtsx_pci_remove(struct pci_dev *pcidev)
1656{
1657 struct pcr_handle *handle = pci_get_drvdata(pcidev);
1658 struct rtsx_pcr *pcr = handle->pcr;
1659
1660 pcr->remove_pci = true;
1661
1662 pm_runtime_get_sync(&pcidev->dev);
1663 pm_runtime_forbid(&pcidev->dev);
1664
1665 /* Disable interrupts at the pcr level */
1666 spin_lock_irq(&pcr->lock);
1667 rtsx_pci_writel(pcr, RTSX_BIER, 0);
1668 pcr->bier = 0;
1669 spin_unlock_irq(&pcr->lock);
1670
1671 cancel_delayed_work_sync(&pcr->carddet_work);
1672
1673 mfd_remove_devices(&pcidev->dev);
1674
1675 dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
1676 pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
1677 free_irq(pcr->irq, (void *)pcr);
1678 if (pcr->msi_en)
1679 pci_disable_msi(pcr->pci);
1680 iounmap(pcr->remap_addr);
1681
1682 pci_release_regions(pcidev);
1683 pci_disable_device(pcidev);
1684
1685 spin_lock(&rtsx_pci_lock);
1686 idr_remove(&rtsx_pci_idr, pcr->id);
1687 spin_unlock(&rtsx_pci_lock);
1688
1689 kfree(pcr->slots);
1690 kfree(pcr);
1691 kfree(handle);
1692
1693 dev_dbg(&(pcidev->dev),
1694 ": Realtek PCI-E Card Reader at %s [%04x:%04x] has been removed\n",
1695 pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device);
1696}
1697
1698static int __maybe_unused rtsx_pci_suspend(struct device *dev_d)
1699{
1700 struct pci_dev *pcidev = to_pci_dev(dev_d);
1701 struct pcr_handle *handle = pci_get_drvdata(pcidev);
1702 struct rtsx_pcr *pcr = handle->pcr;
1703
1704 dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1705
1706 cancel_delayed_work_sync(&pcr->carddet_work);
1707
1708 mutex_lock(&pcr->pcr_mutex);
1709
1710 rtsx_pci_power_off(pcr, HOST_ENTER_S3, false);
1711
1712 mutex_unlock(&pcr->pcr_mutex);
1713 return 0;
1714}
1715
1716static int __maybe_unused rtsx_pci_resume(struct device *dev_d)
1717{
1718 struct pci_dev *pcidev = to_pci_dev(dev_d);
1719 struct pcr_handle *handle = pci_get_drvdata(pcidev);
1720 struct rtsx_pcr *pcr = handle->pcr;
1721 int ret = 0;
1722
1723 dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1724
1725 mutex_lock(&pcr->pcr_mutex);
1726
1727 ret = rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
1728 if (ret)
1729 goto out;
1730
1731 ret = rtsx_pci_init_hw(pcr);
1732 if (ret)
1733 goto out;
1734
1735out:
1736 mutex_unlock(&pcr->pcr_mutex);
1737 return ret;
1738}
1739
1740#ifdef CONFIG_PM
1741
1742static void rtsx_enable_aspm(struct rtsx_pcr *pcr)
1743{
1744 if (pcr->ops->set_aspm)
1745 pcr->ops->set_aspm(pcr, true);
1746 else
1747 rtsx_comm_set_aspm(pcr, true);
1748}
1749
1750static void rtsx_comm_pm_power_saving(struct rtsx_pcr *pcr)
1751{
1752 struct rtsx_cr_option *option = &pcr->option;
1753
1754 if (option->ltr_enabled) {
1755 u32 latency = option->ltr_l1off_latency;
1756
1757 if (rtsx_check_dev_flag(pcr, L1_SNOOZE_TEST_EN))
1758 mdelay(option->l1_snooze_delay);
1759
1760 rtsx_set_ltr_latency(pcr, latency);
1761 }
1762
1763 if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
1764 rtsx_set_l1off_sub_cfg_d0(pcr, 0);
1765
1766 rtsx_enable_aspm(pcr);
1767}
1768
1769static void rtsx_pm_power_saving(struct rtsx_pcr *pcr)
1770{
1771 rtsx_comm_pm_power_saving(pcr);
1772}
1773
1774static void rtsx_pci_shutdown(struct pci_dev *pcidev)
1775{
1776 struct pcr_handle *handle = pci_get_drvdata(pcidev);
1777 struct rtsx_pcr *pcr = handle->pcr;
1778
1779 dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1780
1781 rtsx_pci_power_off(pcr, HOST_ENTER_S1, false);
1782
1783 pci_disable_device(pcidev);
1784 free_irq(pcr->irq, (void *)pcr);
1785 if (pcr->msi_en)
1786 pci_disable_msi(pcr->pci);
1787}
1788
1789static int rtsx_pci_runtime_idle(struct device *device)
1790{
1791 struct pci_dev *pcidev = to_pci_dev(device);
1792 struct pcr_handle *handle = pci_get_drvdata(pcidev);
1793 struct rtsx_pcr *pcr = handle->pcr;
1794
1795 dev_dbg(device, "--> %s\n", __func__);
1796
1797 mutex_lock(&pcr->pcr_mutex);
1798
1799 pcr->state = PDEV_STAT_IDLE;
1800
1801 if (pcr->ops->disable_auto_blink)
1802 pcr->ops->disable_auto_blink(pcr);
1803 if (pcr->ops->turn_off_led)
1804 pcr->ops->turn_off_led(pcr);
1805
1806 rtsx_pm_power_saving(pcr);
1807
1808 mutex_unlock(&pcr->pcr_mutex);
1809
1810 if (pcr->rtd3_en)
1811 pm_schedule_suspend(device, 10000);
1812
1813 return -EBUSY;
1814}
1815
1816static int rtsx_pci_runtime_suspend(struct device *device)
1817{
1818 struct pci_dev *pcidev = to_pci_dev(device);
1819 struct pcr_handle *handle = pci_get_drvdata(pcidev);
1820 struct rtsx_pcr *pcr = handle->pcr;
1821
1822 dev_dbg(device, "--> %s\n", __func__);
1823
1824 cancel_delayed_work_sync(&pcr->carddet_work);
1825
1826 mutex_lock(&pcr->pcr_mutex);
1827 rtsx_pci_power_off(pcr, HOST_ENTER_S3, true);
1828
1829 mutex_unlock(&pcr->pcr_mutex);
1830
1831 return 0;
1832}
1833
1834static int rtsx_pci_runtime_resume(struct device *device)
1835{
1836 struct pci_dev *pcidev = to_pci_dev(device);
1837 struct pcr_handle *handle = pci_get_drvdata(pcidev);
1838 struct rtsx_pcr *pcr = handle->pcr;
1839
1840 dev_dbg(device, "--> %s\n", __func__);
1841
1842 mutex_lock(&pcr->pcr_mutex);
1843
1844 rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
1845
1846 rtsx_pci_init_hw(pcr);
1847
1848 if (pcr->slots[RTSX_SD_CARD].p_dev != NULL) {
1849 pcr->slots[RTSX_SD_CARD].card_event(
1850 pcr->slots[RTSX_SD_CARD].p_dev);
1851 }
1852
1853 mutex_unlock(&pcr->pcr_mutex);
1854 return 0;
1855}
1856
1857#else /* CONFIG_PM */
1858
1859#define rtsx_pci_shutdown NULL
1860#define rtsx_pci_runtime_suspend NULL
1861#define rtsx_pci_runtime_resume NULL
1862
1863#endif /* CONFIG_PM */
1864
1865static const struct dev_pm_ops rtsx_pci_pm_ops = {
1866 SET_SYSTEM_SLEEP_PM_OPS(rtsx_pci_suspend, rtsx_pci_resume)
1867 SET_RUNTIME_PM_OPS(rtsx_pci_runtime_suspend, rtsx_pci_runtime_resume, rtsx_pci_runtime_idle)
1868};
1869
1870static struct pci_driver rtsx_pci_driver = {
1871 .name = DRV_NAME_RTSX_PCI,
1872 .id_table = rtsx_pci_ids,
1873 .probe = rtsx_pci_probe,
1874 .remove = rtsx_pci_remove,
1875 .driver.pm = &rtsx_pci_pm_ops,
1876 .shutdown = rtsx_pci_shutdown,
1877};
1878module_pci_driver(rtsx_pci_driver);
1879
1880MODULE_LICENSE("GPL");
1881MODULE_AUTHOR("Wei WANG <wei_wang@realsil.com.cn>");
1882MODULE_DESCRIPTION("Realtek PCI-E Card Reader Driver");
1// SPDX-License-Identifier: GPL-2.0-or-later
2/* Driver for Realtek PCI-Express card reader
3 *
4 * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
5 *
6 * Author:
7 * Wei WANG <wei_wang@realsil.com.cn>
8 */
9
10#include <linux/pci.h>
11#include <linux/module.h>
12#include <linux/slab.h>
13#include <linux/dma-mapping.h>
14#include <linux/highmem.h>
15#include <linux/interrupt.h>
16#include <linux/delay.h>
17#include <linux/idr.h>
18#include <linux/platform_device.h>
19#include <linux/mfd/core.h>
20#include <linux/rtsx_pci.h>
21#include <linux/mmc/card.h>
22#include <asm/unaligned.h>
23#include <linux/pm.h>
24#include <linux/pm_runtime.h>
25
26#include "rtsx_pcr.h"
27#include "rts5261.h"
28#include "rts5228.h"
29
30static bool msi_en = true;
31module_param(msi_en, bool, S_IRUGO | S_IWUSR);
32MODULE_PARM_DESC(msi_en, "Enable MSI");
33
34static DEFINE_IDR(rtsx_pci_idr);
35static DEFINE_SPINLOCK(rtsx_pci_lock);
36
37static struct mfd_cell rtsx_pcr_cells[] = {
38 [RTSX_SD_CARD] = {
39 .name = DRV_NAME_RTSX_PCI_SDMMC,
40 },
41};
42
43static const struct pci_device_id rtsx_pci_ids[] = {
44 { PCI_DEVICE(0x10EC, 0x5209), PCI_CLASS_OTHERS << 16, 0xFF0000 },
45 { PCI_DEVICE(0x10EC, 0x5229), PCI_CLASS_OTHERS << 16, 0xFF0000 },
46 { PCI_DEVICE(0x10EC, 0x5289), PCI_CLASS_OTHERS << 16, 0xFF0000 },
47 { PCI_DEVICE(0x10EC, 0x5227), PCI_CLASS_OTHERS << 16, 0xFF0000 },
48 { PCI_DEVICE(0x10EC, 0x522A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
49 { PCI_DEVICE(0x10EC, 0x5249), PCI_CLASS_OTHERS << 16, 0xFF0000 },
50 { PCI_DEVICE(0x10EC, 0x5287), PCI_CLASS_OTHERS << 16, 0xFF0000 },
51 { PCI_DEVICE(0x10EC, 0x5286), PCI_CLASS_OTHERS << 16, 0xFF0000 },
52 { PCI_DEVICE(0x10EC, 0x524A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
53 { PCI_DEVICE(0x10EC, 0x525A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
54 { PCI_DEVICE(0x10EC, 0x5260), PCI_CLASS_OTHERS << 16, 0xFF0000 },
55 { PCI_DEVICE(0x10EC, 0x5261), PCI_CLASS_OTHERS << 16, 0xFF0000 },
56 { PCI_DEVICE(0x10EC, 0x5228), PCI_CLASS_OTHERS << 16, 0xFF0000 },
57 { 0, }
58};
59
60MODULE_DEVICE_TABLE(pci, rtsx_pci_ids);
61
62static int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
63{
64 rtsx_pci_write_register(pcr, MSGTXDATA0,
65 MASK_8_BIT_DEF, (u8) (latency & 0xFF));
66 rtsx_pci_write_register(pcr, MSGTXDATA1,
67 MASK_8_BIT_DEF, (u8)((latency >> 8) & 0xFF));
68 rtsx_pci_write_register(pcr, MSGTXDATA2,
69 MASK_8_BIT_DEF, (u8)((latency >> 16) & 0xFF));
70 rtsx_pci_write_register(pcr, MSGTXDATA3,
71 MASK_8_BIT_DEF, (u8)((latency >> 24) & 0xFF));
72 rtsx_pci_write_register(pcr, LTR_CTL, LTR_TX_EN_MASK |
73 LTR_LATENCY_MODE_MASK, LTR_TX_EN_1 | LTR_LATENCY_MODE_SW);
74
75 return 0;
76}
77
78int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
79{
80 return rtsx_comm_set_ltr_latency(pcr, latency);
81}
82
83static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable)
84{
85 if (pcr->aspm_enabled == enable)
86 return;
87
88 if (pcr->aspm_mode == ASPM_MODE_CFG) {
89 pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
90 PCI_EXP_LNKCTL_ASPMC,
91 enable ? pcr->aspm_en : 0);
92 } else if (pcr->aspm_mode == ASPM_MODE_REG) {
93 if (pcr->aspm_en & 0x02)
94 rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
95 FORCE_ASPM_CTL1, enable ? 0 : FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
96 else
97 rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
98 FORCE_ASPM_CTL1, FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
99 }
100
101 if (!enable && (pcr->aspm_en & 0x02))
102 mdelay(10);
103
104 pcr->aspm_enabled = enable;
105}
106
107static void rtsx_disable_aspm(struct rtsx_pcr *pcr)
108{
109 if (pcr->ops->set_aspm)
110 pcr->ops->set_aspm(pcr, false);
111 else
112 rtsx_comm_set_aspm(pcr, false);
113}
114
115int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val)
116{
117 rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, val);
118
119 return 0;
120}
121
122static void rtsx_set_l1off_sub_cfg_d0(struct rtsx_pcr *pcr, int active)
123{
124 if (pcr->ops->set_l1off_cfg_sub_d0)
125 pcr->ops->set_l1off_cfg_sub_d0(pcr, active);
126}
127
128static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr)
129{
130 struct rtsx_cr_option *option = &pcr->option;
131
132 rtsx_disable_aspm(pcr);
133
134 /* Fixes DMA transfer timout issue after disabling ASPM on RTS5260 */
135 msleep(1);
136
137 if (option->ltr_enabled)
138 rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
139
140 if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
141 rtsx_set_l1off_sub_cfg_d0(pcr, 1);
142}
143
144static void rtsx_pm_full_on(struct rtsx_pcr *pcr)
145{
146 rtsx_comm_pm_full_on(pcr);
147}
148
149void rtsx_pci_start_run(struct rtsx_pcr *pcr)
150{
151 /* If pci device removed, don't queue idle work any more */
152 if (pcr->remove_pci)
153 return;
154
155 if (pcr->rtd3_en)
156 if (pcr->is_runtime_suspended) {
157 pm_runtime_get(&(pcr->pci->dev));
158 pcr->is_runtime_suspended = false;
159 }
160
161 if (pcr->state != PDEV_STAT_RUN) {
162 pcr->state = PDEV_STAT_RUN;
163 if (pcr->ops->enable_auto_blink)
164 pcr->ops->enable_auto_blink(pcr);
165 rtsx_pm_full_on(pcr);
166 }
167
168 mod_delayed_work(system_wq, &pcr->idle_work, msecs_to_jiffies(200));
169}
170EXPORT_SYMBOL_GPL(rtsx_pci_start_run);
171
172int rtsx_pci_write_register(struct rtsx_pcr *pcr, u16 addr, u8 mask, u8 data)
173{
174 int i;
175 u32 val = HAIMR_WRITE_START;
176
177 val |= (u32)(addr & 0x3FFF) << 16;
178 val |= (u32)mask << 8;
179 val |= (u32)data;
180
181 rtsx_pci_writel(pcr, RTSX_HAIMR, val);
182
183 for (i = 0; i < MAX_RW_REG_CNT; i++) {
184 val = rtsx_pci_readl(pcr, RTSX_HAIMR);
185 if ((val & HAIMR_TRANS_END) == 0) {
186 if (data != (u8)val)
187 return -EIO;
188 return 0;
189 }
190 }
191
192 return -ETIMEDOUT;
193}
194EXPORT_SYMBOL_GPL(rtsx_pci_write_register);
195
196int rtsx_pci_read_register(struct rtsx_pcr *pcr, u16 addr, u8 *data)
197{
198 u32 val = HAIMR_READ_START;
199 int i;
200
201 val |= (u32)(addr & 0x3FFF) << 16;
202 rtsx_pci_writel(pcr, RTSX_HAIMR, val);
203
204 for (i = 0; i < MAX_RW_REG_CNT; i++) {
205 val = rtsx_pci_readl(pcr, RTSX_HAIMR);
206 if ((val & HAIMR_TRANS_END) == 0)
207 break;
208 }
209
210 if (i >= MAX_RW_REG_CNT)
211 return -ETIMEDOUT;
212
213 if (data)
214 *data = (u8)(val & 0xFF);
215
216 return 0;
217}
218EXPORT_SYMBOL_GPL(rtsx_pci_read_register);
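
/*
 * Illustrative sketch, not part of the driver flow: the mask/data pair
 * gives read-modify-write semantics, so callers can flip individual bits
 * of an internal register without disturbing the rest. PETXCFG and bit
 * 0x08 are simply examples already used elsewhere in this file.
 *
 *	u8 val;
 *
 *	err = rtsx_pci_write_register(pcr, PETXCFG, 0x08, 0x08);  // set bit 3
 *	err = rtsx_pci_write_register(pcr, PETXCFG, 0x08, 0x00);  // clear it
 *	err = rtsx_pci_read_register(pcr, PETXCFG, &val);         // read back
 */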
219
220int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
221{
222 int err, i, finished = 0;
223 u8 tmp;
224
225 rtsx_pci_write_register(pcr, PHYDATA0, 0xFF, (u8)val);
226 rtsx_pci_write_register(pcr, PHYDATA1, 0xFF, (u8)(val >> 8));
227 rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr);
228 rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x81);
229
230 for (i = 0; i < 100000; i++) {
231 err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
232 if (err < 0)
233 return err;
234
235 if (!(tmp & 0x80)) {
236 finished = 1;
237 break;
238 }
239 }
240
241 if (!finished)
242 return -ETIMEDOUT;
243
244 return 0;
245}
246
247int rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
248{
249 if (pcr->ops->write_phy)
250 return pcr->ops->write_phy(pcr, addr, val);
251
252 return __rtsx_pci_write_phy_register(pcr, addr, val);
253}
254EXPORT_SYMBOL_GPL(rtsx_pci_write_phy_register);
255
256int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
257{
258 int err, i, finished = 0;
259 u16 data;
260 u8 tmp, val1, val2;
261
262 rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr);
263 rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x80);
264
265 for (i = 0; i < 100000; i++) {
266 err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
267 if (err < 0)
268 return err;
269
270 if (!(tmp & 0x80)) {
271 finished = 1;
272 break;
273 }
274 }
275
276 if (!finished)
277 return -ETIMEDOUT;
278
279 rtsx_pci_read_register(pcr, PHYDATA0, &val1);
280 rtsx_pci_read_register(pcr, PHYDATA1, &val2);
281 data = val1 | (val2 << 8);
282
283 if (val)
284 *val = data;
285
286 return 0;
287}
288
289int rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
290{
291 if (pcr->ops->read_phy)
292 return pcr->ops->read_phy(pcr, addr, val);
293
294 return __rtsx_pci_read_phy_register(pcr, addr, val);
295}
296EXPORT_SYMBOL_GPL(rtsx_pci_read_phy_register);
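
/*
 * Illustrative sketch, not part of the driver flow: PHY registers are
 * 16 bits wide and are normally updated read-modify-write, much like
 * rtsx_pci_enable_oobs_polling() below does for bit 9 of PHY register 0x01:
 *
 *	u16 val;
 *
 *	err = rtsx_pci_read_phy_register(pcr, 0x01, &val);
 *	if (!err)
 *		err = rtsx_pci_write_phy_register(pcr, 0x01, val | (1 << 9));
 */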
297
298void rtsx_pci_stop_cmd(struct rtsx_pcr *pcr)
299{
300 if (pcr->ops->stop_cmd)
301 return pcr->ops->stop_cmd(pcr);
302
303 rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
304 rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
305
306 rtsx_pci_write_register(pcr, DMACTL, 0x80, 0x80);
307 rtsx_pci_write_register(pcr, RBCTL, 0x80, 0x80);
308}
309EXPORT_SYMBOL_GPL(rtsx_pci_stop_cmd);
310
311void rtsx_pci_add_cmd(struct rtsx_pcr *pcr,
312 u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
313{
314 unsigned long flags;
315 u32 val = 0;
316 u32 *ptr = (u32 *)(pcr->host_cmds_ptr);
317
318 val |= (u32)(cmd_type & 0x03) << 30;
319 val |= (u32)(reg_addr & 0x3FFF) << 16;
320 val |= (u32)mask << 8;
321 val |= (u32)data;
322
323 spin_lock_irqsave(&pcr->lock, flags);
324 ptr += pcr->ci;
325 if (pcr->ci < (HOST_CMDS_BUF_LEN / 4)) {
326 put_unaligned_le32(val, ptr);
327 ptr++;
328 pcr->ci++;
329 }
330 spin_unlock_irqrestore(&pcr->lock, flags);
331}
332EXPORT_SYMBOL_GPL(rtsx_pci_add_cmd);
333
334void rtsx_pci_send_cmd_no_wait(struct rtsx_pcr *pcr)
335{
336	u32 val = 1U << 31;
337
338 rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
339
340 val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
341 /* Hardware Auto Response */
342 val |= 0x40000000;
343 rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
344}
345EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd_no_wait);
346
347int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout)
348{
349 struct completion trans_done;
350	u32 val = 1U << 31;
351 long timeleft;
352 unsigned long flags;
353 int err = 0;
354
355 spin_lock_irqsave(&pcr->lock, flags);
356
357 /* set up data structures for the wakeup system */
358 pcr->done = &trans_done;
359 pcr->trans_result = TRANS_NOT_READY;
360 init_completion(&trans_done);
361
362 rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
363
364 val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
365 /* Hardware Auto Response */
366 val |= 0x40000000;
367 rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
368
369 spin_unlock_irqrestore(&pcr->lock, flags);
370
371 /* Wait for TRANS_OK_INT */
372 timeleft = wait_for_completion_interruptible_timeout(
373 &trans_done, msecs_to_jiffies(timeout));
374 if (timeleft <= 0) {
375 pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
376 err = -ETIMEDOUT;
377 goto finish_send_cmd;
378 }
379
380 spin_lock_irqsave(&pcr->lock, flags);
381 if (pcr->trans_result == TRANS_RESULT_FAIL)
382 err = -EINVAL;
383 else if (pcr->trans_result == TRANS_RESULT_OK)
384 err = 0;
385 else if (pcr->trans_result == TRANS_NO_DEVICE)
386 err = -ENODEV;
387 spin_unlock_irqrestore(&pcr->lock, flags);
388
389finish_send_cmd:
390 spin_lock_irqsave(&pcr->lock, flags);
391 pcr->done = NULL;
392 spin_unlock_irqrestore(&pcr->lock, flags);
393
394 if ((err < 0) && (err != -ENODEV))
395 rtsx_pci_stop_cmd(pcr);
396
397 if (pcr->finish_me)
398 complete(pcr->finish_me);
399
400 return err;
401}
402EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd);
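
/*
 * Illustrative sketch, mirroring how this file itself drives the host
 * command buffer: rtsx_pci_init_cmd() resets the command index,
 * rtsx_pci_add_cmd() queues 4-byte command words, and rtsx_pci_send_cmd()
 * kicks the hardware and waits up to the given timeout in milliseconds.
 * For batches of READ_REG_CMD entries, the results are fetched afterwards
 * via rtsx_pci_get_cmd_data(), as rtsx_pci_read_ppbuf() below does.
 *
 *	rtsx_pci_init_cmd(pcr);
 *	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CLK_LOW_FREQ, CLK_LOW_FREQ);
 *	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
 *	err = rtsx_pci_send_cmd(pcr, 100);	// 100 ms timeout
 */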
403
404static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr,
405 dma_addr_t addr, unsigned int len, int end)
406{
407 u64 *ptr = (u64 *)(pcr->host_sg_tbl_ptr) + pcr->sgi;
408 u64 val;
409 u8 option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;
410
411 pcr_dbg(pcr, "DMA addr: 0x%x, Len: 0x%x\n", (unsigned int)addr, len);
412
413 if (end)
414 option |= RTSX_SG_END;
415
416 if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5228)) {
417 if (len > 0xFFFF)
418 val = ((u64)addr << 32) | (((u64)len & 0xFFFF) << 16)
419 | (((u64)len >> 16) << 6) | option;
420 else
421 val = ((u64)addr << 32) | ((u64)len << 16) | option;
422 } else {
423 val = ((u64)addr << 32) | ((u64)len << 12) | option;
424 }
425 put_unaligned_le64(val, ptr);
426 pcr->sgi++;
427}
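
/*
 * Worked example of the descriptor encoding above (illustrative): on chips
 * other than RTS5261/RTS5228, an entry for addr = 0x12345000 and
 * len = 0x200 with the end flag set becomes
 *
 *	val = (0x12345000ULL << 32) | (0x200 << 12)
 *	      | RTSX_SG_VALID | RTSX_SG_TRANS_DATA | RTSX_SG_END;
 *
 * i.e. the DMA address in the upper 32 bits, the length starting at
 * bit 12, and the option flags in the low bits.
 */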
428
429int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
430 int num_sg, bool read, int timeout)
431{
432 int err = 0, count;
433
434 pcr_dbg(pcr, "--> %s: num_sg = %d\n", __func__, num_sg);
435 count = rtsx_pci_dma_map_sg(pcr, sglist, num_sg, read);
436 if (count < 1)
437 return -EINVAL;
438 pcr_dbg(pcr, "DMA mapping count: %d\n", count);
439
440 err = rtsx_pci_dma_transfer(pcr, sglist, count, read, timeout);
441
442 rtsx_pci_dma_unmap_sg(pcr, sglist, num_sg, read);
443
444 return err;
445}
446EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data);
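
/*
 * Illustrative sketch, not part of the driver flow: a single-buffer read
 * from the card as a child driver might issue it; buf and len are
 * hypothetical.
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, len);
 *	err = rtsx_pci_transfer_data(pcr, &sg, 1, true, 10000);  // read, 10 s
 */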
447
448int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
449 int num_sg, bool read)
450{
451 enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
452
453 if (pcr->remove_pci)
454 return -EINVAL;
455
456 if ((sglist == NULL) || (num_sg <= 0))
457 return -EINVAL;
458
459 return dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dir);
460}
461EXPORT_SYMBOL_GPL(rtsx_pci_dma_map_sg);
462
463void rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
464 int num_sg, bool read)
465{
466 enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
467
468 dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dir);
469}
470EXPORT_SYMBOL_GPL(rtsx_pci_dma_unmap_sg);
471
472int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist,
473 int count, bool read, int timeout)
474{
475 struct completion trans_done;
476 struct scatterlist *sg;
477 dma_addr_t addr;
478 long timeleft;
479 unsigned long flags;
480 unsigned int len;
481 int i, err = 0;
482 u32 val;
483 u8 dir = read ? DEVICE_TO_HOST : HOST_TO_DEVICE;
484
485 if (pcr->remove_pci)
486 return -ENODEV;
487
488 if ((sglist == NULL) || (count < 1))
489 return -EINVAL;
490
491 val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE;
492 pcr->sgi = 0;
493 for_each_sg(sglist, sg, count, i) {
494 addr = sg_dma_address(sg);
495 len = sg_dma_len(sg);
496 rtsx_pci_add_sg_tbl(pcr, addr, len, i == count - 1);
497 }
498
499 spin_lock_irqsave(&pcr->lock, flags);
500
501 pcr->done = &trans_done;
502 pcr->trans_result = TRANS_NOT_READY;
503 init_completion(&trans_done);
504 rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr);
505 rtsx_pci_writel(pcr, RTSX_HDBCTLR, val);
506
507 spin_unlock_irqrestore(&pcr->lock, flags);
508
509 timeleft = wait_for_completion_interruptible_timeout(
510 &trans_done, msecs_to_jiffies(timeout));
511 if (timeleft <= 0) {
512 pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
513 err = -ETIMEDOUT;
514 goto out;
515 }
516
517 spin_lock_irqsave(&pcr->lock, flags);
518 if (pcr->trans_result == TRANS_RESULT_FAIL) {
519 err = -EILSEQ;
520 if (pcr->dma_error_count < RTS_MAX_TIMES_FREQ_REDUCTION)
521 pcr->dma_error_count++;
522 }
523
524 else if (pcr->trans_result == TRANS_NO_DEVICE)
525 err = -ENODEV;
526 spin_unlock_irqrestore(&pcr->lock, flags);
527
528out:
529 spin_lock_irqsave(&pcr->lock, flags);
530 pcr->done = NULL;
531 spin_unlock_irqrestore(&pcr->lock, flags);
532
533 if ((err < 0) && (err != -ENODEV))
534 rtsx_pci_stop_cmd(pcr);
535
536 if (pcr->finish_me)
537 complete(pcr->finish_me);
538
539 return err;
540}
541EXPORT_SYMBOL_GPL(rtsx_pci_dma_transfer);
542
543int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
544{
545 int err;
546 int i, j;
547 u16 reg;
548 u8 *ptr;
549
550 if (buf_len > 512)
551 buf_len = 512;
552
553 ptr = buf;
554 reg = PPBUF_BASE2;
555 for (i = 0; i < buf_len / 256; i++) {
556 rtsx_pci_init_cmd(pcr);
557
558 for (j = 0; j < 256; j++)
559 rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
560
561 err = rtsx_pci_send_cmd(pcr, 250);
562 if (err < 0)
563 return err;
564
565 memcpy(ptr, rtsx_pci_get_cmd_data(pcr), 256);
566 ptr += 256;
567 }
568
569 if (buf_len % 256) {
570 rtsx_pci_init_cmd(pcr);
571
572 for (j = 0; j < buf_len % 256; j++)
573 rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
574
575 err = rtsx_pci_send_cmd(pcr, 250);
576 if (err < 0)
577 return err;
578 }
579
580 memcpy(ptr, rtsx_pci_get_cmd_data(pcr), buf_len % 256);
581
582 return 0;
583}
584EXPORT_SYMBOL_GPL(rtsx_pci_read_ppbuf);
585
586int rtsx_pci_write_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
587{
588 int err;
589 int i, j;
590 u16 reg;
591 u8 *ptr;
592
593 if (buf_len > 512)
594 buf_len = 512;
595
596 ptr = buf;
597 reg = PPBUF_BASE2;
598 for (i = 0; i < buf_len / 256; i++) {
599 rtsx_pci_init_cmd(pcr);
600
601 for (j = 0; j < 256; j++) {
602 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
603 reg++, 0xFF, *ptr);
604 ptr++;
605 }
606
607 err = rtsx_pci_send_cmd(pcr, 250);
608 if (err < 0)
609 return err;
610 }
611
612 if (buf_len % 256) {
613 rtsx_pci_init_cmd(pcr);
614
615 for (j = 0; j < buf_len % 256; j++) {
616 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
617 reg++, 0xFF, *ptr);
618 ptr++;
619 }
620
621 err = rtsx_pci_send_cmd(pcr, 250);
622 if (err < 0)
623 return err;
624 }
625
626 return 0;
627}
628EXPORT_SYMBOL_GPL(rtsx_pci_write_ppbuf);
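
/*
 * Illustrative sketch, not part of the driver flow: the ping-pong buffer
 * is a 512-byte window starting at PPBUF_BASE2, accessed one register per
 * command in batches of up to 256. A hypothetical round trip, with out[]
 * filled by the caller:
 *
 *	u8 out[512], in[512];
 *
 *	err = rtsx_pci_write_ppbuf(pcr, out, sizeof(out));
 *	if (!err)
 *		err = rtsx_pci_read_ppbuf(pcr, in, sizeof(in));
 */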
629
630static int rtsx_pci_set_pull_ctl(struct rtsx_pcr *pcr, const u32 *tbl)
631{
632 rtsx_pci_init_cmd(pcr);
633
634 while (*tbl & 0xFFFF0000) {
635 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
636 (u16)(*tbl >> 16), 0xFF, (u8)(*tbl));
637 tbl++;
638 }
639
640 return rtsx_pci_send_cmd(pcr, 100);
641}
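
/*
 * Table format (derived from the loop above): each u32 entry packs a
 * 16-bit register address in bits 31:16 and an 8-bit value in bits 7:0;
 * an entry with a zero upper half terminates the table. The chip-specific
 * files typically build such tables with the RTSX_REG_PAIR() helper; a
 * minimal hand-written equivalent (values purely illustrative):
 *
 *	static const u32 example_pull_ctl_tbl[] = {
 *		(CARD_PULL_CTL1 << 16) | 0x66,
 *		(CARD_PULL_CTL2 << 16) | 0x55,
 *		0,
 *	};
 */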
642
643int rtsx_pci_card_pull_ctl_enable(struct rtsx_pcr *pcr, int card)
644{
645 const u32 *tbl;
646
647 if (card == RTSX_SD_CARD)
648 tbl = pcr->sd_pull_ctl_enable_tbl;
649 else if (card == RTSX_MS_CARD)
650 tbl = pcr->ms_pull_ctl_enable_tbl;
651 else
652 return -EINVAL;
653
654 return rtsx_pci_set_pull_ctl(pcr, tbl);
655}
656EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_enable);
657
658int rtsx_pci_card_pull_ctl_disable(struct rtsx_pcr *pcr, int card)
659{
660 const u32 *tbl;
661
662 if (card == RTSX_SD_CARD)
663 tbl = pcr->sd_pull_ctl_disable_tbl;
664 else if (card == RTSX_MS_CARD)
665 tbl = pcr->ms_pull_ctl_disable_tbl;
666 else
667 return -EINVAL;
668
669 return rtsx_pci_set_pull_ctl(pcr, tbl);
670}
671EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_disable);
672
673static void rtsx_pci_enable_bus_int(struct rtsx_pcr *pcr)
674{
675 struct rtsx_hw_param *hw_param = &pcr->hw_param;
676
677 pcr->bier = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN | SD_INT_EN
678 | hw_param->interrupt_en;
679
680 if (pcr->num_slots > 1)
681 pcr->bier |= MS_INT_EN;
682
683 /* Enable Bus Interrupt */
684 rtsx_pci_writel(pcr, RTSX_BIER, pcr->bier);
685
686 pcr_dbg(pcr, "RTSX_BIER: 0x%08x\n", pcr->bier);
687}
688
689static inline u8 double_ssc_depth(u8 depth)
690{
691 return ((depth > 1) ? (depth - 1) : depth);
692}
693
694static u8 revise_ssc_depth(u8 ssc_depth, u8 div)
695{
696 if (div > CLK_DIV_1) {
697 if (ssc_depth > (div - 1))
698 ssc_depth -= (div - 1);
699 else
700 ssc_depth = SSC_DEPTH_4M;
701 }
702
703 return ssc_depth;
704}
705
706int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
707 u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
708{
709 int err, clk;
710 u8 n, clk_divider, mcu_cnt, div;
711 static const u8 depth[] = {
712 [RTSX_SSC_DEPTH_4M] = SSC_DEPTH_4M,
713 [RTSX_SSC_DEPTH_2M] = SSC_DEPTH_2M,
714 [RTSX_SSC_DEPTH_1M] = SSC_DEPTH_1M,
715 [RTSX_SSC_DEPTH_500K] = SSC_DEPTH_500K,
716 [RTSX_SSC_DEPTH_250K] = SSC_DEPTH_250K,
717 };
718
719 if (PCI_PID(pcr) == PID_5261)
720 return rts5261_pci_switch_clock(pcr, card_clock,
721 ssc_depth, initial_mode, double_clk, vpclk);
722 if (PCI_PID(pcr) == PID_5228)
723 return rts5228_pci_switch_clock(pcr, card_clock,
724 ssc_depth, initial_mode, double_clk, vpclk);
725
726 if (initial_mode) {
727		/* Use a clock of roughly 250 kHz in the initial stage */
728 clk_divider = SD_CLK_DIVIDE_128;
729 card_clock = 30000000;
730 } else {
731 clk_divider = SD_CLK_DIVIDE_0;
732 }
733 err = rtsx_pci_write_register(pcr, SD_CFG1,
734 SD_CLK_DIVIDE_MASK, clk_divider);
735 if (err < 0)
736 return err;
737
738 /* Reduce card clock by 20MHz each time a DMA transfer error occurs */
739 if (card_clock == UHS_SDR104_MAX_DTR &&
740 pcr->dma_error_count &&
741 PCI_PID(pcr) == RTS5227_DEVICE_ID)
742 card_clock = UHS_SDR104_MAX_DTR -
743 (pcr->dma_error_count * 20000000);
744
745 card_clock /= 1000000;
746 pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock);
747
748 clk = card_clock;
749 if (!initial_mode && double_clk)
750 clk = card_clock * 2;
751 pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n",
752 clk, pcr->cur_clock);
753
754 if (clk == pcr->cur_clock)
755 return 0;
756
757 if (pcr->ops->conv_clk_and_div_n)
758 n = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
759 else
760 n = (u8)(clk - 2);
761 if ((clk <= 2) || (n > MAX_DIV_N_PCR))
762 return -EINVAL;
763
764 mcu_cnt = (u8)(125/clk + 3);
765 if (mcu_cnt > 15)
766 mcu_cnt = 15;
767
768 /* Make sure that the SSC clock div_n is not less than MIN_DIV_N_PCR */
769 div = CLK_DIV_1;
770 while ((n < MIN_DIV_N_PCR) && (div < CLK_DIV_8)) {
771 if (pcr->ops->conv_clk_and_div_n) {
772 int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
773 DIV_N_TO_CLK) * 2;
774 n = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk,
775 CLK_TO_DIV_N);
776 } else {
777 n = (n + 2) * 2 - 2;
778 }
779 div++;
780 }
781 pcr_dbg(pcr, "n = %d, div = %d\n", n, div);
782
783 ssc_depth = depth[ssc_depth];
784 if (double_clk)
785 ssc_depth = double_ssc_depth(ssc_depth);
786
787 ssc_depth = revise_ssc_depth(ssc_depth, div);
788 pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth);
789
790 rtsx_pci_init_cmd(pcr);
791 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
792 CLK_LOW_FREQ, CLK_LOW_FREQ);
793 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV,
794 0xFF, (div << 4) | mcu_cnt);
795 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
796 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2,
797 SSC_DEPTH_MASK, ssc_depth);
798 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
799 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
800 if (vpclk) {
801 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
802 PHASE_NOT_RESET, 0);
803 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
804 PHASE_NOT_RESET, PHASE_NOT_RESET);
805 }
806
807 err = rtsx_pci_send_cmd(pcr, 2000);
808 if (err < 0)
809 return err;
810
811	/* Wait for the SSC clock to stabilize */
812 udelay(SSC_CLOCK_STABLE_WAIT);
813 err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
814 if (err < 0)
815 return err;
816
817 pcr->cur_clock = clk;
818 return 0;
819}
820EXPORT_SYMBOL_GPL(rtsx_pci_switch_clock);
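
/*
 * Worked example of the divider math above (illustrative): a request for a
 * 50 MHz card clock with double_clk set, outside initial mode, gives
 * clk = 100, so with the default conversion n = clk - 2 = 98 and
 * mcu_cnt = 125/100 + 3 = 4. If n fell below MIN_DIV_N_PCR, the loop would
 * keep doubling the effective clock and stepping div from CLK_DIV_1 upward
 * until n is back in range, and revise_ssc_depth() then trims the SSC
 * depth to match the divider.
 */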
821
822int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card)
823{
824 if (pcr->ops->card_power_on)
825 return pcr->ops->card_power_on(pcr, card);
826
827 return 0;
828}
829EXPORT_SYMBOL_GPL(rtsx_pci_card_power_on);
830
831int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card)
832{
833 if (pcr->ops->card_power_off)
834 return pcr->ops->card_power_off(pcr, card);
835
836 return 0;
837}
838EXPORT_SYMBOL_GPL(rtsx_pci_card_power_off);
839
840int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card)
841{
842 static const unsigned int cd_mask[] = {
843 [RTSX_SD_CARD] = SD_EXIST,
844 [RTSX_MS_CARD] = MS_EXIST
845 };
846
847 if (!(pcr->flags & PCR_MS_PMOS)) {
848		/* With a single PMOS, accessing the card is not permitted
849		 * unless the card currently present is the designated one.
850		 */
851 if (pcr->card_exist & (~cd_mask[card]))
852 return -EIO;
853 }
854
855 return 0;
856}
857EXPORT_SYMBOL_GPL(rtsx_pci_card_exclusive_check);
858
859int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
860{
861 if (pcr->ops->switch_output_voltage)
862 return pcr->ops->switch_output_voltage(pcr, voltage);
863
864 return 0;
865}
866EXPORT_SYMBOL_GPL(rtsx_pci_switch_output_voltage);
867
868unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr)
869{
870 unsigned int val;
871
872 val = rtsx_pci_readl(pcr, RTSX_BIPR);
873 if (pcr->ops->cd_deglitch)
874 val = pcr->ops->cd_deglitch(pcr);
875
876 return val;
877}
878EXPORT_SYMBOL_GPL(rtsx_pci_card_exist);
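
/*
 * Illustrative sketch, not part of the driver flow: callers test the
 * returned bitmap against the card-present bits, e.g.
 *
 *	if (rtsx_pci_card_exist(pcr) & SD_EXIST)
 *		handle_sd_card();	// hypothetical helper
 */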
879
880void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr)
881{
882 struct completion finish;
883
884 pcr->finish_me = &finish;
885 init_completion(&finish);
886
887 if (pcr->done)
888 complete(pcr->done);
889
890 if (!pcr->remove_pci)
891 rtsx_pci_stop_cmd(pcr);
892
893 wait_for_completion_interruptible_timeout(&finish,
894 msecs_to_jiffies(2));
895 pcr->finish_me = NULL;
896}
897EXPORT_SYMBOL_GPL(rtsx_pci_complete_unfinished_transfer);
898
899static void rtsx_pci_card_detect(struct work_struct *work)
900{
901 struct delayed_work *dwork;
902 struct rtsx_pcr *pcr;
903 unsigned long flags;
904 unsigned int card_detect = 0, card_inserted, card_removed;
905 u32 irq_status;
906
907 dwork = to_delayed_work(work);
908 pcr = container_of(dwork, struct rtsx_pcr, carddet_work);
909
910 pcr_dbg(pcr, "--> %s\n", __func__);
911
912 mutex_lock(&pcr->pcr_mutex);
913 spin_lock_irqsave(&pcr->lock, flags);
914
915 irq_status = rtsx_pci_readl(pcr, RTSX_BIPR);
916 pcr_dbg(pcr, "irq_status: 0x%08x\n", irq_status);
917
918 irq_status &= CARD_EXIST;
919 card_inserted = pcr->card_inserted & irq_status;
920 card_removed = pcr->card_removed;
921 pcr->card_inserted = 0;
922 pcr->card_removed = 0;
923
924 spin_unlock_irqrestore(&pcr->lock, flags);
925
926 if (card_inserted || card_removed) {
927 pcr_dbg(pcr, "card_inserted: 0x%x, card_removed: 0x%x\n",
928 card_inserted, card_removed);
929
930 if (pcr->ops->cd_deglitch)
931 card_inserted = pcr->ops->cd_deglitch(pcr);
932
933 card_detect = card_inserted | card_removed;
934
935 pcr->card_exist |= card_inserted;
936 pcr->card_exist &= ~card_removed;
937 }
938
939 mutex_unlock(&pcr->pcr_mutex);
940
941 if ((card_detect & SD_EXIST) && pcr->slots[RTSX_SD_CARD].card_event)
942 pcr->slots[RTSX_SD_CARD].card_event(
943 pcr->slots[RTSX_SD_CARD].p_dev);
944 if ((card_detect & MS_EXIST) && pcr->slots[RTSX_MS_CARD].card_event)
945 pcr->slots[RTSX_MS_CARD].card_event(
946 pcr->slots[RTSX_MS_CARD].p_dev);
947}
948
949static void rtsx_pci_process_ocp(struct rtsx_pcr *pcr)
950{
951 if (pcr->ops->process_ocp) {
952 pcr->ops->process_ocp(pcr);
953 } else {
954 if (!pcr->option.ocp_en)
955 return;
956 rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
957 if (pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
958 rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
959 rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
960 rtsx_pci_clear_ocpstat(pcr);
961 pcr->ocp_stat = 0;
962 }
963 }
964}
965
966static int rtsx_pci_process_ocp_interrupt(struct rtsx_pcr *pcr)
967{
968 if (pcr->option.ocp_en)
969 rtsx_pci_process_ocp(pcr);
970
971 return 0;
972}
973
974static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
975{
976 struct rtsx_pcr *pcr = dev_id;
977 u32 int_reg;
978
979 if (!pcr)
980 return IRQ_NONE;
981
982 spin_lock(&pcr->lock);
983
984 int_reg = rtsx_pci_readl(pcr, RTSX_BIPR);
985 /* Clear interrupt flag */
986 rtsx_pci_writel(pcr, RTSX_BIPR, int_reg);
987 if ((int_reg & pcr->bier) == 0) {
988 spin_unlock(&pcr->lock);
989 return IRQ_NONE;
990 }
991 if (int_reg == 0xFFFFFFFF) {
992 spin_unlock(&pcr->lock);
993 return IRQ_HANDLED;
994 }
995
996 int_reg &= (pcr->bier | 0x7FFFFF);
997
998 if (int_reg & SD_OC_INT)
999 rtsx_pci_process_ocp_interrupt(pcr);
1000
1001 if (int_reg & SD_INT) {
1002 if (int_reg & SD_EXIST) {
1003 pcr->card_inserted |= SD_EXIST;
1004 } else {
1005 pcr->card_removed |= SD_EXIST;
1006 pcr->card_inserted &= ~SD_EXIST;
1007 if (PCI_PID(pcr) == PID_5261) {
1008 rtsx_pci_write_register(pcr, RTS5261_FW_STATUS,
1009 RTS5261_EXPRESS_LINK_FAIL_MASK, 0);
1010 pcr->extra_caps |= EXTRA_CAPS_SD_EXPRESS;
1011 }
1012 }
1013 pcr->dma_error_count = 0;
1014 }
1015
1016 if (int_reg & MS_INT) {
1017 if (int_reg & MS_EXIST) {
1018 pcr->card_inserted |= MS_EXIST;
1019 } else {
1020 pcr->card_removed |= MS_EXIST;
1021 pcr->card_inserted &= ~MS_EXIST;
1022 }
1023 }
1024
1025 if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) {
1026 if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) {
1027 pcr->trans_result = TRANS_RESULT_FAIL;
1028 if (pcr->done)
1029 complete(pcr->done);
1030 } else if (int_reg & TRANS_OK_INT) {
1031 pcr->trans_result = TRANS_RESULT_OK;
1032 if (pcr->done)
1033 complete(pcr->done);
1034 }
1035 }
1036
1037 if ((pcr->card_inserted || pcr->card_removed) && !(int_reg & SD_OC_INT))
1038 schedule_delayed_work(&pcr->carddet_work,
1039 msecs_to_jiffies(200));
1040
1041 spin_unlock(&pcr->lock);
1042 return IRQ_HANDLED;
1043}
1044
1045static int rtsx_pci_acquire_irq(struct rtsx_pcr *pcr)
1046{
1047 pcr_dbg(pcr, "%s: pcr->msi_en = %d, pci->irq = %d\n",
1048 __func__, pcr->msi_en, pcr->pci->irq);
1049
1050 if (request_irq(pcr->pci->irq, rtsx_pci_isr,
1051 pcr->msi_en ? 0 : IRQF_SHARED,
1052 DRV_NAME_RTSX_PCI, pcr)) {
1053 dev_err(&(pcr->pci->dev),
1054 "rtsx_sdmmc: unable to grab IRQ %d, disabling device\n",
1055 pcr->pci->irq);
1056 return -1;
1057 }
1058
1059 pcr->irq = pcr->pci->irq;
1060 pci_intx(pcr->pci, !pcr->msi_en);
1061
1062 return 0;
1063}
1064
1065static void rtsx_enable_aspm(struct rtsx_pcr *pcr)
1066{
1067 if (pcr->ops->set_aspm)
1068 pcr->ops->set_aspm(pcr, true);
1069 else
1070 rtsx_comm_set_aspm(pcr, true);
1071}
1072
1073static void rtsx_comm_pm_power_saving(struct rtsx_pcr *pcr)
1074{
1075 struct rtsx_cr_option *option = &pcr->option;
1076
1077 if (option->ltr_enabled) {
1078 u32 latency = option->ltr_l1off_latency;
1079
1080 if (rtsx_check_dev_flag(pcr, L1_SNOOZE_TEST_EN))
1081 mdelay(option->l1_snooze_delay);
1082
1083 rtsx_set_ltr_latency(pcr, latency);
1084 }
1085
1086 if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
1087 rtsx_set_l1off_sub_cfg_d0(pcr, 0);
1088
1089 rtsx_enable_aspm(pcr);
1090}
1091
1092static void rtsx_pm_power_saving(struct rtsx_pcr *pcr)
1093{
1094 rtsx_comm_pm_power_saving(pcr);
1095}
1096
1097static void rtsx_pci_rtd3_work(struct work_struct *work)
1098{
1099 struct delayed_work *dwork = to_delayed_work(work);
1100 struct rtsx_pcr *pcr = container_of(dwork, struct rtsx_pcr, rtd3_work);
1101
1102 pcr_dbg(pcr, "--> %s\n", __func__);
1103 if (!pcr->is_runtime_suspended)
1104 pm_runtime_put(&(pcr->pci->dev));
1105}
1106
1107static void rtsx_pci_idle_work(struct work_struct *work)
1108{
1109 struct delayed_work *dwork = to_delayed_work(work);
1110 struct rtsx_pcr *pcr = container_of(dwork, struct rtsx_pcr, idle_work);
1111
1112 pcr_dbg(pcr, "--> %s\n", __func__);
1113
1114 mutex_lock(&pcr->pcr_mutex);
1115
1116 pcr->state = PDEV_STAT_IDLE;
1117
1118 if (pcr->ops->disable_auto_blink)
1119 pcr->ops->disable_auto_blink(pcr);
1120 if (pcr->ops->turn_off_led)
1121 pcr->ops->turn_off_led(pcr);
1122
1123 rtsx_pm_power_saving(pcr);
1124
1125 mutex_unlock(&pcr->pcr_mutex);
1126
1127 if (pcr->rtd3_en)
1128 mod_delayed_work(system_wq, &pcr->rtd3_work, msecs_to_jiffies(10000));
1129}
1130
1131static void rtsx_base_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
1132{
1133 /* Set relink_time to 0 */
1134 rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0);
1135 rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0);
1136 rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3,
1137 RELINK_TIME_MASK, 0);
1138
1139 rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
1140 D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
1141
1142 rtsx_pci_write_register(pcr, FPDCTL, ALL_POWER_DOWN, ALL_POWER_DOWN);
1143}
1144
1145static void __maybe_unused rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
1146{
1147 if (pcr->ops->turn_off_led)
1148 pcr->ops->turn_off_led(pcr);
1149
1150 rtsx_pci_writel(pcr, RTSX_BIER, 0);
1151 pcr->bier = 0;
1152
1153 rtsx_pci_write_register(pcr, PETXCFG, 0x08, 0x08);
1154 rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, pm_state);
1155
1156 if (pcr->ops->force_power_down)
1157 pcr->ops->force_power_down(pcr, pm_state);
1158 else
1159 rtsx_base_force_power_down(pcr, pm_state);
1160}
1161
1162void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr)
1163{
1164 u8 val = SD_OCP_INT_EN | SD_DETECT_EN;
1165
1166 if (pcr->ops->enable_ocp) {
1167 pcr->ops->enable_ocp(pcr);
1168 } else {
1169 rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
1170 rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
1171 }
1173}
1174
1175void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr)
1176{
1177 u8 mask = SD_OCP_INT_EN | SD_DETECT_EN;
1178
1179 if (pcr->ops->disable_ocp) {
1180 pcr->ops->disable_ocp(pcr);
1181 } else {
1182 rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
1183 rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
1184 OC_POWER_DOWN);
1185 }
1186}
1187
1188void rtsx_pci_init_ocp(struct rtsx_pcr *pcr)
1189{
1190 if (pcr->ops->init_ocp) {
1191 pcr->ops->init_ocp(pcr);
1192 } else {
1193 struct rtsx_cr_option *option = &(pcr->option);
1194
1195 if (option->ocp_en) {
1196 u8 val = option->sd_800mA_ocp_thd;
1197
1198 rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
1199 rtsx_pci_write_register(pcr, REG_OCPPARA1,
1200 SD_OCP_TIME_MASK, SD_OCP_TIME_800);
1201 rtsx_pci_write_register(pcr, REG_OCPPARA2,
1202 SD_OCP_THD_MASK, val);
1203 rtsx_pci_write_register(pcr, REG_OCPGLITCH,
1204 SD_OCP_GLITCH_MASK, pcr->hw_param.ocp_glitch);
1205 rtsx_pci_enable_ocp(pcr);
1206 }
1207 }
1208}
1209
1210int rtsx_pci_get_ocpstat(struct rtsx_pcr *pcr, u8 *val)
1211{
1212 if (pcr->ops->get_ocpstat)
1213 return pcr->ops->get_ocpstat(pcr, val);
1214 else
1215 return rtsx_pci_read_register(pcr, REG_OCPSTAT, val);
1216}
1217
1218void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr)
1219{
1220 if (pcr->ops->clear_ocpstat) {
1221 pcr->ops->clear_ocpstat(pcr);
1222 } else {
1223 u8 mask = SD_OCP_INT_CLR | SD_OC_CLR;
1224 u8 val = SD_OCP_INT_CLR | SD_OC_CLR;
1225
1226 rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
1227 udelay(100);
1228 rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
1229 }
1230}
1231
1232void rtsx_pci_enable_oobs_polling(struct rtsx_pcr *pcr)
1233{
1234 u16 val;
1235
1236 if ((PCI_PID(pcr) != PID_525A) && (PCI_PID(pcr) != PID_5260)) {
1237 rtsx_pci_read_phy_register(pcr, 0x01, &val);
1238 val |= 1<<9;
1239 rtsx_pci_write_phy_register(pcr, 0x01, val);
1240 }
1241 rtsx_pci_write_register(pcr, REG_CFG_OOBS_OFF_TIMER, 0xFF, 0x32);
1242 rtsx_pci_write_register(pcr, REG_CFG_OOBS_ON_TIMER, 0xFF, 0x05);
1243 rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x83);
1244 rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0xDE);
1246}
1247
1248void rtsx_pci_disable_oobs_polling(struct rtsx_pcr *pcr)
1249{
1250 u16 val;
1251
1252 if ((PCI_PID(pcr) != PID_525A) && (PCI_PID(pcr) != PID_5260)) {
1253 rtsx_pci_read_phy_register(pcr, 0x01, &val);
1254 val &= ~(1<<9);
1255 rtsx_pci_write_phy_register(pcr, 0x01, val);
1256 }
1257 rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x03);
1258 rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0x00);
1260}
1261
1262int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr)
1263{
1264 rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
1265 MS_CLK_EN | SD40_CLK_EN, 0);
1266 rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
1267 rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
1268
1269 msleep(50);
1270
1271 rtsx_pci_card_pull_ctl_disable(pcr, RTSX_SD_CARD);
1272
1273 return 0;
1274}
1275
1276int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr)
1277{
1278 rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
1279 MS_CLK_EN | SD40_CLK_EN, 0);
1280
1281 rtsx_pci_card_pull_ctl_disable(pcr, RTSX_MS_CARD);
1282
1283 rtsx_pci_write_register(pcr, CARD_OE, MS_OUTPUT_EN, 0);
1284 rtsx_pci_card_power_off(pcr, RTSX_MS_CARD);
1285
1286 return 0;
1287}
1288
1289static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
1290{
1291 struct pci_dev *pdev = pcr->pci;
1292 int err;
1293
1294 if (PCI_PID(pcr) == PID_5228)
1295 rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG1, RTS5228_LDO1_SR_TIME_MASK,
1296 RTS5228_LDO1_SR_0_5);
1297
1298 rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
1299
1300 rtsx_pci_enable_bus_int(pcr);
1301
1302 /* Power on SSC */
1303 if (PCI_PID(pcr) == PID_5261) {
1304		/* Gate the real MCU clock */
1305 err = rtsx_pci_write_register(pcr, RTS5261_FW_CFG1,
1306 RTS5261_MCU_CLOCK_GATING, 0);
1307 err = rtsx_pci_write_register(pcr, RTS5261_REG_FPDCTL,
1308 SSC_POWER_DOWN, 0);
1309 } else {
1310 err = rtsx_pci_write_register(pcr, FPDCTL, SSC_POWER_DOWN, 0);
1311 }
1312 if (err < 0)
1313 return err;
1314
1315	/* Wait for SSC power to stabilize */
1316 udelay(200);
1317
1318 rtsx_disable_aspm(pcr);
1319 if (pcr->ops->optimize_phy) {
1320 err = pcr->ops->optimize_phy(pcr);
1321 if (err < 0)
1322 return err;
1323 }
1324
1325 rtsx_pci_init_cmd(pcr);
1326
1327 /* Set mcu_cnt to 7 to ensure data can be sampled properly */
1328 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV, 0x07, 0x07);
1329
1330 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, HOST_SLEEP_STATE, 0x03, 0x00);
1331 /* Disable card clock */
1332 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, 0x1E, 0);
1333 /* Reset delink mode */
1334 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x0A, 0);
1335 /* Card driving select */
1336 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DRIVE_SEL,
1337 0xFF, pcr->card_drive_sel);
1338 /* Enable SSC Clock */
1339 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1,
1340 0xFF, SSC_8X_EN | SSC_SEL_4M);
1341 if (PCI_PID(pcr) == PID_5261)
1342 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
1343 RTS5261_SSC_DEPTH_2M);
1344 else if (PCI_PID(pcr) == PID_5228)
1345 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
1346 RTS5228_SSC_DEPTH_2M);
1347 else
1348 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);
1349
1350 /* Disable cd_pwr_save */
1351 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x16, 0x10);
1352 /* Clear Link Ready Interrupt */
1353 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
1354 LINK_RDY_INT, LINK_RDY_INT);
1355	/* Widen the PERST# glitch estimation window to reduce
1356	 * the chance of spurious card interrupts
1357	 */
1358 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PERST_GLITCH_WIDTH, 0xFF, 0x80);
1359 /* Update RC oscillator to 400k
1360 * bit[0] F_HIGH: for RC oscillator, Rst_value is 1'b1
1361 * 1: 2M 0: 400k
1362 */
1363 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RCCTL, 0x01, 0x00);
1364 /* Set interrupt write clear
1365 * bit 1: U_elbi_if_rd_clr_en
1366 * 1: Enable ELBI interrupt[31:22] & [7:0] flag read clear
1367 * 0: ELBI interrupt flag[31:22] & [7:0] only can be write clear
1368 */
1369 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, NFTS_TX_CTRL, 0x02, 0);
1370
1371 err = rtsx_pci_send_cmd(pcr, 100);
1372 if (err < 0)
1373 return err;
1374
1375 switch (PCI_PID(pcr)) {
1376 case PID_5250:
1377 case PID_524A:
1378 case PID_525A:
1379 case PID_5260:
1380 case PID_5261:
1381 case PID_5228:
1382 rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
1383 break;
1384 default:
1385 break;
1386 }
1387
1388	/* Initialize over-current protection (OCP) */
1389 rtsx_pci_init_ocp(pcr);
1390
1391 /* Enable clk_request_n to enable clock power management */
1392 pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
1393 0, PCI_EXP_LNKCTL_CLKREQ_EN);
1394 /* Enter L1 when host tx idle */
1395 pci_write_config_byte(pdev, 0x70F, 0x5B);
1396
1397 if (pcr->ops->extra_init_hw) {
1398 err = pcr->ops->extra_init_hw(pcr);
1399 if (err < 0)
1400 return err;
1401 }
1402
1403 if (pcr->aspm_mode == ASPM_MODE_REG)
1404 rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30);
1405
1406	/* No card-detect interrupt is generated when the driver is probed
1407	 * with a card already inserted, so initialize pcr->card_exist here.
1408	 */
1409 if (pcr->ops->cd_deglitch)
1410 pcr->card_exist = pcr->ops->cd_deglitch(pcr);
1411 else
1412 pcr->card_exist = rtsx_pci_readl(pcr, RTSX_BIPR) & CARD_EXIST;
1413
1414 return 0;
1415}
1416
1417static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
1418{
1419 int err;
1420 u16 cfg_val;
1421 u8 val;
1422
1423 spin_lock_init(&pcr->lock);
1424 mutex_init(&pcr->pcr_mutex);
1425
1426 switch (PCI_PID(pcr)) {
1427 default:
1428 case 0x5209:
1429 rts5209_init_params(pcr);
1430 break;
1431
1432 case 0x5229:
1433 rts5229_init_params(pcr);
1434 break;
1435
1436 case 0x5289:
1437 rtl8411_init_params(pcr);
1438 break;
1439
1440 case 0x5227:
1441 rts5227_init_params(pcr);
1442 break;
1443
1444 case 0x522A:
1445 rts522a_init_params(pcr);
1446 break;
1447
1448 case 0x5249:
1449 rts5249_init_params(pcr);
1450 break;
1451
1452 case 0x524A:
1453 rts524a_init_params(pcr);
1454 break;
1455
1456 case 0x525A:
1457 rts525a_init_params(pcr);
1458 break;
1459
1460 case 0x5287:
1461 rtl8411b_init_params(pcr);
1462 break;
1463
1464 case 0x5286:
1465 rtl8402_init_params(pcr);
1466 break;
1467
1468 case 0x5260:
1469 rts5260_init_params(pcr);
1470 break;
1471
1472 case 0x5261:
1473 rts5261_init_params(pcr);
1474 break;
1475
1476 case 0x5228:
1477 rts5228_init_params(pcr);
1478 break;
1479 }
1480
1481 pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
1482 PCI_PID(pcr), pcr->ic_version);
1483
1484 pcr->slots = kcalloc(pcr->num_slots, sizeof(struct rtsx_slot),
1485 GFP_KERNEL);
1486 if (!pcr->slots)
1487 return -ENOMEM;
1488
1489 if (pcr->aspm_mode == ASPM_MODE_CFG) {
1490 pcie_capability_read_word(pcr->pci, PCI_EXP_LNKCTL, &cfg_val);
1491 if (cfg_val & PCI_EXP_LNKCTL_ASPM_L1)
1492 pcr->aspm_enabled = true;
1493 else
1494 pcr->aspm_enabled = false;
1495
1496 } else if (pcr->aspm_mode == ASPM_MODE_REG) {
1497 rtsx_pci_read_register(pcr, ASPM_FORCE_CTL, &val);
1498 if (val & FORCE_ASPM_CTL0 && val & FORCE_ASPM_CTL1)
1499 pcr->aspm_enabled = false;
1500 else
1501 pcr->aspm_enabled = true;
1502 }
1503
1504 if (pcr->ops->fetch_vendor_settings)
1505 pcr->ops->fetch_vendor_settings(pcr);
1506
1507 pcr_dbg(pcr, "pcr->aspm_en = 0x%x\n", pcr->aspm_en);
1508 pcr_dbg(pcr, "pcr->sd30_drive_sel_1v8 = 0x%x\n",
1509 pcr->sd30_drive_sel_1v8);
1510 pcr_dbg(pcr, "pcr->sd30_drive_sel_3v3 = 0x%x\n",
1511 pcr->sd30_drive_sel_3v3);
1512 pcr_dbg(pcr, "pcr->card_drive_sel = 0x%x\n",
1513 pcr->card_drive_sel);
1514 pcr_dbg(pcr, "pcr->flags = 0x%x\n", pcr->flags);
1515
1516 pcr->state = PDEV_STAT_IDLE;
1517 err = rtsx_pci_init_hw(pcr);
1518 if (err < 0) {
1519 kfree(pcr->slots);
1520 return err;
1521 }
1522
1523 return 0;
1524}
1525
1526static int rtsx_pci_probe(struct pci_dev *pcidev,
1527 const struct pci_device_id *id)
1528{
1529 struct rtsx_pcr *pcr;
1530 struct pcr_handle *handle;
1531 u32 base, len;
1532 int ret, i, bar = 0;
1533
1534 dev_dbg(&(pcidev->dev),
1535 ": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
1536 pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
1537 (int)pcidev->revision);
1538
1539 ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
1540 if (ret < 0)
1541 return ret;
1542
1543 ret = pci_enable_device(pcidev);
1544 if (ret)
1545 return ret;
1546
1547 ret = pci_request_regions(pcidev, DRV_NAME_RTSX_PCI);
1548 if (ret)
1549 goto disable;
1550
1551 pcr = kzalloc(sizeof(*pcr), GFP_KERNEL);
1552 if (!pcr) {
1553 ret = -ENOMEM;
1554 goto release_pci;
1555 }
1556
1557 handle = kzalloc(sizeof(*handle), GFP_KERNEL);
1558 if (!handle) {
1559 ret = -ENOMEM;
1560 goto free_pcr;
1561 }
1562 handle->pcr = pcr;
1563
1564 idr_preload(GFP_KERNEL);
1565 spin_lock(&rtsx_pci_lock);
1566 ret = idr_alloc(&rtsx_pci_idr, pcr, 0, 0, GFP_NOWAIT);
1567 if (ret >= 0)
1568 pcr->id = ret;
1569 spin_unlock(&rtsx_pci_lock);
1570 idr_preload_end();
1571 if (ret < 0)
1572 goto free_handle;
1573
1574 pcr->pci = pcidev;
1575 dev_set_drvdata(&pcidev->dev, handle);
1576
1577 if (CHK_PCI_PID(pcr, 0x525A))
1578 bar = 1;
1579 len = pci_resource_len(pcidev, bar);
1580 base = pci_resource_start(pcidev, bar);
1581 pcr->remap_addr = ioremap(base, len);
1582 if (!pcr->remap_addr) {
1583 ret = -ENOMEM;
1584 goto free_handle;
1585 }
1586
1587 pcr->rtsx_resv_buf = dma_alloc_coherent(&(pcidev->dev),
1588 RTSX_RESV_BUF_LEN, &(pcr->rtsx_resv_buf_addr),
1589 GFP_KERNEL);
1590 if (pcr->rtsx_resv_buf == NULL) {
1591 ret = -ENXIO;
1592 goto unmap;
1593 }
1594 pcr->host_cmds_ptr = pcr->rtsx_resv_buf;
1595 pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
1596 pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
1597 pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;
1598 pcr->card_inserted = 0;
1599 pcr->card_removed = 0;
1600 INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
1601 INIT_DELAYED_WORK(&pcr->idle_work, rtsx_pci_idle_work);
1602
1603 pcr->msi_en = msi_en;
1604 if (pcr->msi_en) {
1605 ret = pci_enable_msi(pcidev);
1606 if (ret)
1607 pcr->msi_en = false;
1608 }
1609
1610 ret = rtsx_pci_acquire_irq(pcr);
1611 if (ret < 0)
1612 goto disable_msi;
1613
1614 pci_set_master(pcidev);
1615 synchronize_irq(pcr->irq);
1616
1617 ret = rtsx_pci_init_chip(pcr);
1618 if (ret < 0)
1619 goto disable_irq;
1620
1621 for (i = 0; i < ARRAY_SIZE(rtsx_pcr_cells); i++) {
1622 rtsx_pcr_cells[i].platform_data = handle;
1623 rtsx_pcr_cells[i].pdata_size = sizeof(*handle);
1624 }
1625
1626 if (pcr->rtd3_en) {
1627 INIT_DELAYED_WORK(&pcr->rtd3_work, rtsx_pci_rtd3_work);
1628 pm_runtime_allow(&pcidev->dev);
1629 pm_runtime_enable(&pcidev->dev);
1630 pcr->is_runtime_suspended = false;
1631 }
1632
1634 ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells,
1635 ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL);
1636 if (ret < 0)
1637 goto free_slots;
1638
1639 schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));
1640
1641 return 0;
1642
1643free_slots:
1644 kfree(pcr->slots);
1645disable_irq:
1646 free_irq(pcr->irq, (void *)pcr);
1647disable_msi:
1648 if (pcr->msi_en)
1649 pci_disable_msi(pcr->pci);
1650 dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
1651 pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
1652unmap:
1653 iounmap(pcr->remap_addr);
1654free_handle:
1655 kfree(handle);
1656free_pcr:
1657 kfree(pcr);
1658release_pci:
1659 pci_release_regions(pcidev);
1660disable:
1661 pci_disable_device(pcidev);
1662
1663 return ret;
1664}
1665
1666static void rtsx_pci_remove(struct pci_dev *pcidev)
1667{
1668 struct pcr_handle *handle = pci_get_drvdata(pcidev);
1669 struct rtsx_pcr *pcr = handle->pcr;
1670
1671 if (pcr->rtd3_en)
1672 pm_runtime_get_noresume(&pcr->pci->dev);
1673
1674 pcr->remove_pci = true;
1675
1676 /* Disable interrupts at the pcr level */
1677 spin_lock_irq(&pcr->lock);
1678 rtsx_pci_writel(pcr, RTSX_BIER, 0);
1679 pcr->bier = 0;
1680 spin_unlock_irq(&pcr->lock);
1681
1682 cancel_delayed_work_sync(&pcr->carddet_work);
1683 cancel_delayed_work_sync(&pcr->idle_work);
1684 if (pcr->rtd3_en)
1685 cancel_delayed_work_sync(&pcr->rtd3_work);
1686
1687 mfd_remove_devices(&pcidev->dev);
1688
1689 dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
1690 pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
1691 free_irq(pcr->irq, (void *)pcr);
1692 if (pcr->msi_en)
1693 pci_disable_msi(pcr->pci);
1694 iounmap(pcr->remap_addr);
1695
1696 pci_release_regions(pcidev);
1697 pci_disable_device(pcidev);
1698
1699 spin_lock(&rtsx_pci_lock);
1700 idr_remove(&rtsx_pci_idr, pcr->id);
1701 spin_unlock(&rtsx_pci_lock);
1702
1703 if (pcr->rtd3_en) {
1704 pm_runtime_disable(&pcr->pci->dev);
1705 pm_runtime_put_noidle(&pcr->pci->dev);
1706 }
1707
1708 kfree(pcr->slots);
1709 kfree(pcr);
1710 kfree(handle);
1711
1712 dev_dbg(&(pcidev->dev),
1713 ": Realtek PCI-E Card Reader at %s [%04x:%04x] has been removed\n",
1714 pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device);
1715}
1716
1717static int __maybe_unused rtsx_pci_suspend(struct device *dev_d)
1718{
1719 struct pci_dev *pcidev = to_pci_dev(dev_d);
1720 struct pcr_handle *handle;
1721 struct rtsx_pcr *pcr;
1722
1723 dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1724
1725 handle = pci_get_drvdata(pcidev);
1726 pcr = handle->pcr;
1727
1728 cancel_delayed_work(&pcr->carddet_work);
1729 cancel_delayed_work(&pcr->idle_work);
1730
1731 mutex_lock(&pcr->pcr_mutex);
1732
1733 rtsx_pci_power_off(pcr, HOST_ENTER_S3);
1734
1735 device_wakeup_disable(dev_d);
1736
1737 mutex_unlock(&pcr->pcr_mutex);
1738 return 0;
1739}
1740
1741static int __maybe_unused rtsx_pci_resume(struct device *dev_d)
1742{
1743 struct pci_dev *pcidev = to_pci_dev(dev_d);
1744 struct pcr_handle *handle;
1745 struct rtsx_pcr *pcr;
1746 int ret = 0;
1747
1748 dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1749
1750 handle = pci_get_drvdata(pcidev);
1751 pcr = handle->pcr;
1752
1753 mutex_lock(&pcr->pcr_mutex);
1754
1755 ret = rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
1756 if (ret)
1757 goto out;
1758
1759 ret = rtsx_pci_init_hw(pcr);
1760 if (ret)
1761 goto out;
1762
1763 schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));
1764
1765out:
1766 mutex_unlock(&pcr->pcr_mutex);
1767 return ret;
1768}
1769
1770#ifdef CONFIG_PM
1771
1772static void rtsx_pci_shutdown(struct pci_dev *pcidev)
1773{
1774 struct pcr_handle *handle;
1775 struct rtsx_pcr *pcr;
1776
1777 dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1778
1779 handle = pci_get_drvdata(pcidev);
1780 pcr = handle->pcr;
1781 rtsx_pci_power_off(pcr, HOST_ENTER_S1);
1782
1783 pci_disable_device(pcidev);
1784 free_irq(pcr->irq, (void *)pcr);
1785 if (pcr->msi_en)
1786 pci_disable_msi(pcr->pci);
1787}
1788
1789static int rtsx_pci_runtime_suspend(struct device *device)
1790{
1791 struct pci_dev *pcidev = to_pci_dev(device);
1792 struct pcr_handle *handle;
1793 struct rtsx_pcr *pcr;
1794
1795 handle = pci_get_drvdata(pcidev);
1796 pcr = handle->pcr;
1797 dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1798
1799 cancel_delayed_work(&pcr->carddet_work);
1800 cancel_delayed_work(&pcr->rtd3_work);
1801 cancel_delayed_work(&pcr->idle_work);
1802
1803 mutex_lock(&pcr->pcr_mutex);
1804 rtsx_pci_power_off(pcr, HOST_ENTER_S3);
1805
1806 free_irq(pcr->irq, (void *)pcr);
1807
1808 mutex_unlock(&pcr->pcr_mutex);
1809
1810 pcr->is_runtime_suspended = true;
1811
1812 return 0;
1813}
1814
1815static int rtsx_pci_runtime_resume(struct device *device)
1816{
1817 struct pci_dev *pcidev = to_pci_dev(device);
1818 struct pcr_handle *handle;
1819 struct rtsx_pcr *pcr;
1820
1821 handle = pci_get_drvdata(pcidev);
1822 pcr = handle->pcr;
1823 dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1824
1825 mutex_lock(&pcr->pcr_mutex);
1826
1827 rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
1828 rtsx_pci_acquire_irq(pcr);
1829 synchronize_irq(pcr->irq);
1830
1831 if (pcr->ops->fetch_vendor_settings)
1832 pcr->ops->fetch_vendor_settings(pcr);
1833
1834 rtsx_pci_init_hw(pcr);
1835
1836 if (pcr->slots[RTSX_SD_CARD].p_dev != NULL) {
1837 pcr->slots[RTSX_SD_CARD].card_event(
1838 pcr->slots[RTSX_SD_CARD].p_dev);
1839 }
1840
1841 schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));
1842
1843 mutex_unlock(&pcr->pcr_mutex);
1844 return 0;
1845}
1846
1847#else /* CONFIG_PM */
1848
1849#define rtsx_pci_shutdown NULL
1850#define rtsx_pci_runtime_suspend NULL
1851 #define rtsx_pci_runtime_resume NULL
1852
1853#endif /* CONFIG_PM */
1854
1855static const struct dev_pm_ops rtsx_pci_pm_ops = {
1856 SET_SYSTEM_SLEEP_PM_OPS(rtsx_pci_suspend, rtsx_pci_resume)
1857 SET_RUNTIME_PM_OPS(rtsx_pci_runtime_suspend, rtsx_pci_runtime_resume, NULL)
1858};
1859
1860static struct pci_driver rtsx_pci_driver = {
1861 .name = DRV_NAME_RTSX_PCI,
1862 .id_table = rtsx_pci_ids,
1863 .probe = rtsx_pci_probe,
1864 .remove = rtsx_pci_remove,
1865 .driver.pm = &rtsx_pci_pm_ops,
1866 .shutdown = rtsx_pci_shutdown,
1867};
1868module_pci_driver(rtsx_pci_driver);
1869
1870MODULE_LICENSE("GPL");
1871MODULE_AUTHOR("Wei WANG <wei_wang@realsil.com.cn>");
1872MODULE_DESCRIPTION("Realtek PCI-E Card Reader Driver");