Linux v6.8
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/* Driver for Realtek PCI-Express card reader
   3 *
   4 * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
   5 *
   6 * Author:
   7 *   Wei WANG <wei_wang@realsil.com.cn>
   8 */
   9
  10#include <linux/pci.h>
  11#include <linux/module.h>
  12#include <linux/slab.h>
  13#include <linux/dma-mapping.h>
  14#include <linux/highmem.h>
  15#include <linux/interrupt.h>
  16#include <linux/delay.h>
  17#include <linux/idr.h>
  18#include <linux/platform_device.h>
  19#include <linux/mfd/core.h>
  20#include <linux/rtsx_pci.h>
  21#include <linux/mmc/card.h>
  22#include <asm/unaligned.h>
  23#include <linux/pm.h>
  24#include <linux/pm_runtime.h>
  25
  26#include "rtsx_pcr.h"
  27#include "rts5261.h"
  28#include "rts5228.h"
  29#include "rts5264.h"
  30
  31static bool msi_en = true;
  32module_param(msi_en, bool, S_IRUGO | S_IWUSR);
  33MODULE_PARM_DESC(msi_en, "Enable MSI");
  34
  35static DEFINE_IDR(rtsx_pci_idr);
  36static DEFINE_SPINLOCK(rtsx_pci_lock);
  37
  38static struct mfd_cell rtsx_pcr_cells[] = {
  39	[RTSX_SD_CARD] = {
  40		.name = DRV_NAME_RTSX_PCI_SDMMC,
  41	},
  42};
  43
  44static const struct pci_device_id rtsx_pci_ids[] = {
  45	{ PCI_DEVICE(0x10EC, 0x5209), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  46	{ PCI_DEVICE(0x10EC, 0x5229), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  47	{ PCI_DEVICE(0x10EC, 0x5289), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  48	{ PCI_DEVICE(0x10EC, 0x5227), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  49	{ PCI_DEVICE(0x10EC, 0x522A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  50	{ PCI_DEVICE(0x10EC, 0x5249), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  51	{ PCI_DEVICE(0x10EC, 0x5287), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  52	{ PCI_DEVICE(0x10EC, 0x5286), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  53	{ PCI_DEVICE(0x10EC, 0x524A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  54	{ PCI_DEVICE(0x10EC, 0x525A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  55	{ PCI_DEVICE(0x10EC, 0x5260), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  56	{ PCI_DEVICE(0x10EC, 0x5261), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  57	{ PCI_DEVICE(0x10EC, 0x5228), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  58	{ PCI_DEVICE(0x10EC, 0x5264), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  59	{ 0, }
  60};
  61
  62MODULE_DEVICE_TABLE(pci, rtsx_pci_ids);
  63
  64static int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
  65{
  66	rtsx_pci_write_register(pcr, MSGTXDATA0,
  67				MASK_8_BIT_DEF, (u8) (latency & 0xFF));
  68	rtsx_pci_write_register(pcr, MSGTXDATA1,
  69				MASK_8_BIT_DEF, (u8)((latency >> 8) & 0xFF));
  70	rtsx_pci_write_register(pcr, MSGTXDATA2,
  71				MASK_8_BIT_DEF, (u8)((latency >> 16) & 0xFF));
  72	rtsx_pci_write_register(pcr, MSGTXDATA3,
  73				MASK_8_BIT_DEF, (u8)((latency >> 24) & 0xFF));
  74	rtsx_pci_write_register(pcr, LTR_CTL, LTR_TX_EN_MASK |
  75		LTR_LATENCY_MODE_MASK, LTR_TX_EN_1 | LTR_LATENCY_MODE_SW);
  76
  77	return 0;
  78}
  79
  80int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
  81{
  82	return rtsx_comm_set_ltr_latency(pcr, latency);
  83}
  84
  85static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable)
  86{
  87	if (pcr->aspm_enabled == enable)
  88		return;
  89
  90	if (pcr->aspm_mode == ASPM_MODE_CFG) {
  91		pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
  92						PCI_EXP_LNKCTL_ASPMC,
  93						enable ? pcr->aspm_en : 0);
  94	} else if (pcr->aspm_mode == ASPM_MODE_REG) {
  95		if (pcr->aspm_en & 0x02)
  96			rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
  97				FORCE_ASPM_CTL1, enable ? 0 : FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
  98		else
  99			rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
 100				FORCE_ASPM_CTL1, FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
 101	}
 102
 103	if (!enable && (pcr->aspm_en & 0x02))
 104		mdelay(10);
 105
 106	pcr->aspm_enabled = enable;
 107}
 108
 109static void rtsx_disable_aspm(struct rtsx_pcr *pcr)
 110{
 111	if (pcr->ops->set_aspm)
 112		pcr->ops->set_aspm(pcr, false);
 113	else
 114		rtsx_comm_set_aspm(pcr, false);
 115}
 116
 117int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val)
 118{
 119	rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, val);
 120
 121	return 0;
 122}
 123
 124static void rtsx_set_l1off_sub_cfg_d0(struct rtsx_pcr *pcr, int active)
 125{
 126	if (pcr->ops->set_l1off_cfg_sub_d0)
 127		pcr->ops->set_l1off_cfg_sub_d0(pcr, active);
 128}
 129
 130static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr)
 131{
 132	struct rtsx_cr_option *option = &pcr->option;
 133
 134	rtsx_disable_aspm(pcr);
 135
 136	/* Fixes DMA transfer timeout issue after disabling ASPM on RTS5260 */
 137	msleep(1);
 138
 139	if (option->ltr_enabled)
 140		rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
 141
 142	if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
 143		rtsx_set_l1off_sub_cfg_d0(pcr, 1);
 144}
 145
 146static void rtsx_pm_full_on(struct rtsx_pcr *pcr)
 147{
 148	rtsx_comm_pm_full_on(pcr);
 149}
 150
 151void rtsx_pci_start_run(struct rtsx_pcr *pcr)
 152{
 153	/* If pci device removed, don't queue idle work any more */
 154	if (pcr->remove_pci)
 155		return;
 156
 157	if (pcr->state != PDEV_STAT_RUN) {
 158		pcr->state = PDEV_STAT_RUN;
 159		if (pcr->ops->enable_auto_blink)
 160			pcr->ops->enable_auto_blink(pcr);
 161		rtsx_pm_full_on(pcr);
 162	}
 163}
 164EXPORT_SYMBOL_GPL(rtsx_pci_start_run);
 165
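/*
 * Internal register access goes through the HAIMR mailbox: a 32-bit
 * command word (start bit, 14-bit register address, mask and data
 * bytes) is written to RTSX_HAIMR, which is then polled up to
 * MAX_RW_REG_CNT times until HAIMR_TRANS_END clears.  On writes, the
 * data byte read back is compared with the value requested.
 */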
 166int rtsx_pci_write_register(struct rtsx_pcr *pcr, u16 addr, u8 mask, u8 data)
 167{
 168	int i;
 169	u32 val = HAIMR_WRITE_START;
 170
 171	val |= (u32)(addr & 0x3FFF) << 16;
 172	val |= (u32)mask << 8;
 173	val |= (u32)data;
 174
 175	rtsx_pci_writel(pcr, RTSX_HAIMR, val);
 176
 177	for (i = 0; i < MAX_RW_REG_CNT; i++) {
 178		val = rtsx_pci_readl(pcr, RTSX_HAIMR);
 179		if ((val & HAIMR_TRANS_END) == 0) {
 180			if (data != (u8)val)
 181				return -EIO;
 182			return 0;
 183		}
 184	}
 185
 186	return -ETIMEDOUT;
 187}
 188EXPORT_SYMBOL_GPL(rtsx_pci_write_register);
 189
 190int rtsx_pci_read_register(struct rtsx_pcr *pcr, u16 addr, u8 *data)
 191{
 192	u32 val = HAIMR_READ_START;
 193	int i;
 194
 195	val |= (u32)(addr & 0x3FFF) << 16;
 196	rtsx_pci_writel(pcr, RTSX_HAIMR, val);
 197
 198	for (i = 0; i < MAX_RW_REG_CNT; i++) {
 199		val = rtsx_pci_readl(pcr, RTSX_HAIMR);
 200		if ((val & HAIMR_TRANS_END) == 0)
 201			break;
 202	}
 203
 204	if (i >= MAX_RW_REG_CNT)
 205		return -ETIMEDOUT;
 206
 207	if (data)
 208		*data = (u8)(val & 0xFF);
 209
 210	return 0;
 211}
 212EXPORT_SYMBOL_GPL(rtsx_pci_read_register);
 213
 214int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
 215{
 216	int err, i, finished = 0;
 217	u8 tmp;
 218
 219	rtsx_pci_write_register(pcr, PHYDATA0, 0xFF, (u8)val);
 220	rtsx_pci_write_register(pcr, PHYDATA1, 0xFF, (u8)(val >> 8));
 221	rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr);
 222	rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x81);
 223
 224	for (i = 0; i < 100000; i++) {
 225		err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
 226		if (err < 0)
 227			return err;
 228
 229		if (!(tmp & 0x80)) {
 230			finished = 1;
 231			break;
 232		}
 233	}
 234
 235	if (!finished)
 236		return -ETIMEDOUT;
 237
 238	return 0;
 239}
 240
 241int rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
 242{
 243	if (pcr->ops->write_phy)
 244		return pcr->ops->write_phy(pcr, addr, val);
 245
 246	return __rtsx_pci_write_phy_register(pcr, addr, val);
 247}
 248EXPORT_SYMBOL_GPL(rtsx_pci_write_phy_register);
 249
 250int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
 251{
 252	int err, i, finished = 0;
 253	u16 data;
 254	u8 tmp, val1, val2;
 255
 256	rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr);
 257	rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x80);
 258
 259	for (i = 0; i < 100000; i++) {
 260		err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
 261		if (err < 0)
 262			return err;
 263
 264		if (!(tmp & 0x80)) {
 265			finished = 1;
 266			break;
 267		}
 268	}
 269
 270	if (!finished)
 271		return -ETIMEDOUT;
 272
 273	rtsx_pci_read_register(pcr, PHYDATA0, &val1);
 274	rtsx_pci_read_register(pcr, PHYDATA1, &val2);
 275	data = val1 | (val2 << 8);
 276
 277	if (val)
 278		*val = data;
 279
 280	return 0;
 281}
 282
 283int rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
 284{
 285	if (pcr->ops->read_phy)
 286		return pcr->ops->read_phy(pcr, addr, val);
 287
 288	return __rtsx_pci_read_phy_register(pcr, addr, val);
 289}
 290EXPORT_SYMBOL_GPL(rtsx_pci_read_phy_register);
 291
 292void rtsx_pci_stop_cmd(struct rtsx_pcr *pcr)
 293{
 294	if (pcr->ops->stop_cmd)
 295		return pcr->ops->stop_cmd(pcr);
 296
 297	rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
 298	rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
 299
 300	rtsx_pci_write_register(pcr, DMACTL, 0x80, 0x80);
 301	rtsx_pci_write_register(pcr, RBCTL, 0x80, 0x80);
 302}
 303EXPORT_SYMBOL_GPL(rtsx_pci_stop_cmd);
 304
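/*
 * Queue one register access in the host command buffer.  Each entry is
 * a little-endian 32-bit word: bits 31:30 command type, bits 29:16
 * register address, bits 15:8 mask, bits 7:0 data.  pcr->ci counts the
 * queued words and later determines the command buffer length handed
 * to the controller.
 */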
 305void rtsx_pci_add_cmd(struct rtsx_pcr *pcr,
 306		u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
 307{
 308	unsigned long flags;
 309	u32 val = 0;
 310	u32 *ptr = (u32 *)(pcr->host_cmds_ptr);
 311
 312	val |= (u32)(cmd_type & 0x03) << 30;
 313	val |= (u32)(reg_addr & 0x3FFF) << 16;
 314	val |= (u32)mask << 8;
 315	val |= (u32)data;
 316
 317	spin_lock_irqsave(&pcr->lock, flags);
 318	ptr += pcr->ci;
 319	if (pcr->ci < (HOST_CMDS_BUF_LEN / 4)) {
 320		put_unaligned_le32(val, ptr);
 321		ptr++;
 322		pcr->ci++;
 323	}
 324	spin_unlock_irqrestore(&pcr->lock, flags);
 325}
 326EXPORT_SYMBOL_GPL(rtsx_pci_add_cmd);
 327
 328void rtsx_pci_send_cmd_no_wait(struct rtsx_pcr *pcr)
 329{
 330	u32 val = 1 << 31;
 331
 332	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
 333
 334	val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
 335	/* Hardware Auto Response */
 336	val |= 0x40000000;
 337	rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
 338}
 339EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd_no_wait);
 340
 341int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout)
 342{
 343	struct completion trans_done;
 344	u32 val = 1 << 31;
 345	long timeleft;
 346	unsigned long flags;
 347	int err = 0;
 348
 349	spin_lock_irqsave(&pcr->lock, flags);
 350
 351	/* set up data structures for the wakeup system */
 352	pcr->done = &trans_done;
 353	pcr->trans_result = TRANS_NOT_READY;
 354	init_completion(&trans_done);
 355
 356	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
 357
 358	val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
 359	/* Hardware Auto Response */
 360	val |= 0x40000000;
 361	rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
 362
 363	spin_unlock_irqrestore(&pcr->lock, flags);
 364
 365	/* Wait for TRANS_OK_INT */
 366	timeleft = wait_for_completion_interruptible_timeout(
 367			&trans_done, msecs_to_jiffies(timeout));
 368	if (timeleft <= 0) {
 369		pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
 370		err = -ETIMEDOUT;
 371		goto finish_send_cmd;
 372	}
 373
 374	spin_lock_irqsave(&pcr->lock, flags);
 375	if (pcr->trans_result == TRANS_RESULT_FAIL)
 376		err = -EINVAL;
 377	else if (pcr->trans_result == TRANS_RESULT_OK)
 378		err = 0;
 379	else if (pcr->trans_result == TRANS_NO_DEVICE)
 380		err = -ENODEV;
 381	spin_unlock_irqrestore(&pcr->lock, flags);
 382
 383finish_send_cmd:
 384	spin_lock_irqsave(&pcr->lock, flags);
 385	pcr->done = NULL;
 386	spin_unlock_irqrestore(&pcr->lock, flags);
 387
 388	if ((err < 0) && (err != -ENODEV))
 389		rtsx_pci_stop_cmd(pcr);
 390
 391	if (pcr->finish_me)
 392		complete(pcr->finish_me);
 393
 394	return err;
 395}
 396EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd);
 397
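/*
 * Append one 64-bit entry to the host scatter/gather table: the DMA
 * address occupies the upper 32 bits, the transfer length and option
 * flags (RTSX_SG_VALID, RTSX_SG_TRANS_DATA, and RTSX_SG_END on the
 * final entry) the lower 32 bits.  RTS5261 and RTS5228 use a different
 * length-field layout than the older controllers.
 */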
 398static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr,
 399		dma_addr_t addr, unsigned int len, int end)
 400{
 401	u64 *ptr = (u64 *)(pcr->host_sg_tbl_ptr) + pcr->sgi;
 402	u64 val;
 403	u8 option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;
 404
 405	pcr_dbg(pcr, "DMA addr: 0x%x, Len: 0x%x\n", (unsigned int)addr, len);
 406
 407	if (end)
 408		option |= RTSX_SG_END;
 409
 410	if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5228)) {
 411		if (len > 0xFFFF)
 412			val = ((u64)addr << 32) | (((u64)len & 0xFFFF) << 16)
 413				| (((u64)len >> 16) << 6) | option;
 414		else
 415			val = ((u64)addr << 32) | ((u64)len << 16) | option;
 416	} else {
 417		val = ((u64)addr << 32) | ((u64)len << 12) | option;
 418	}
 419	put_unaligned_le64(val, ptr);
 420	pcr->sgi++;
 421}
 422
 423int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
 424		int num_sg, bool read, int timeout)
 425{
 426	int err = 0, count;
 427
 428	pcr_dbg(pcr, "--> %s: num_sg = %d\n", __func__, num_sg);
 429	count = rtsx_pci_dma_map_sg(pcr, sglist, num_sg, read);
 430	if (count < 1)
 431		return -EINVAL;
 432	pcr_dbg(pcr, "DMA mapping count: %d\n", count);
 433
 434	err = rtsx_pci_dma_transfer(pcr, sglist, count, read, timeout);
 435
 436	rtsx_pci_dma_unmap_sg(pcr, sglist, num_sg, read);
 437
 438	return err;
 439}
 440EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data);
 441
 442int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
 443		int num_sg, bool read)
 444{
 445	enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
 446
 447	if (pcr->remove_pci)
 448		return -EINVAL;
 449
 450	if ((sglist == NULL) || (num_sg <= 0))
 451		return -EINVAL;
 452
 453	return dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dir);
 454}
 455EXPORT_SYMBOL_GPL(rtsx_pci_dma_map_sg);
 456
 457void rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
 458		int num_sg, bool read)
 459{
 460	enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
 461
 462	dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dir);
 463}
 464EXPORT_SYMBOL_GPL(rtsx_pci_dma_unmap_sg);
 465
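/*
 * Run one ADMA transfer: build the scatter/gather table from the
 * mapped sglist, program RTSX_HDBAR/RTSX_HDBCTLR, and wait (with a
 * timeout) for the interrupt handler to report the result.  DMA
 * failures bump dma_error_count, which rtsx_pci_switch_clock() uses to
 * lower the SDR104 clock on the RTS5227.
 */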
 466int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist,
 467		int count, bool read, int timeout)
 468{
 469	struct completion trans_done;
 470	struct scatterlist *sg;
 471	dma_addr_t addr;
 472	long timeleft;
 473	unsigned long flags;
 474	unsigned int len;
 475	int i, err = 0;
 476	u32 val;
 477	u8 dir = read ? DEVICE_TO_HOST : HOST_TO_DEVICE;
 478
 479	if (pcr->remove_pci)
 480		return -ENODEV;
 481
 482	if ((sglist == NULL) || (count < 1))
 483		return -EINVAL;
 484
 485	val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE;
 486	pcr->sgi = 0;
 487	for_each_sg(sglist, sg, count, i) {
 488		addr = sg_dma_address(sg);
 489		len = sg_dma_len(sg);
 490		rtsx_pci_add_sg_tbl(pcr, addr, len, i == count - 1);
 491	}
 492
 493	spin_lock_irqsave(&pcr->lock, flags);
 494
 495	pcr->done = &trans_done;
 496	pcr->trans_result = TRANS_NOT_READY;
 497	init_completion(&trans_done);
 498	rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr);
 499	rtsx_pci_writel(pcr, RTSX_HDBCTLR, val);
 500
 501	spin_unlock_irqrestore(&pcr->lock, flags);
 502
 503	timeleft = wait_for_completion_interruptible_timeout(
 504			&trans_done, msecs_to_jiffies(timeout));
 505	if (timeleft <= 0) {
 506		pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
 507		err = -ETIMEDOUT;
 508		goto out;
 509	}
 510
 511	spin_lock_irqsave(&pcr->lock, flags);
 512	if (pcr->trans_result == TRANS_RESULT_FAIL) {
 513		err = -EILSEQ;
 514		if (pcr->dma_error_count < RTS_MAX_TIMES_FREQ_REDUCTION)
 515			pcr->dma_error_count++;
 516	}
 517
 518	else if (pcr->trans_result == TRANS_NO_DEVICE)
 519		err = -ENODEV;
 520	spin_unlock_irqrestore(&pcr->lock, flags);
 521
 522out:
 523	spin_lock_irqsave(&pcr->lock, flags);
 524	pcr->done = NULL;
 525	spin_unlock_irqrestore(&pcr->lock, flags);
 526
 527	if ((err < 0) && (err != -ENODEV))
 528		rtsx_pci_stop_cmd(pcr);
 529
 530	if (pcr->finish_me)
 531		complete(pcr->finish_me);
 532
 533	return err;
 534}
 535EXPORT_SYMBOL_GPL(rtsx_pci_dma_transfer);
 536
 537int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
 538{
 539	int err;
 540	int i, j;
 541	u16 reg;
 542	u8 *ptr;
 543
 544	if (buf_len > 512)
 545		buf_len = 512;
 546
 547	ptr = buf;
 548	reg = PPBUF_BASE2;
 549	for (i = 0; i < buf_len / 256; i++) {
 550		rtsx_pci_init_cmd(pcr);
 551
 552		for (j = 0; j < 256; j++)
 553			rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
 554
 555		err = rtsx_pci_send_cmd(pcr, 250);
 556		if (err < 0)
 557			return err;
 558
 559		memcpy(ptr, rtsx_pci_get_cmd_data(pcr), 256);
 560		ptr += 256;
 561	}
 562
 563	if (buf_len % 256) {
 564		rtsx_pci_init_cmd(pcr);
 565
 566		for (j = 0; j < buf_len % 256; j++)
 567			rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
 568
 569		err = rtsx_pci_send_cmd(pcr, 250);
 570		if (err < 0)
 571			return err;
 572	}
 573
 574	memcpy(ptr, rtsx_pci_get_cmd_data(pcr), buf_len % 256);
 575
 576	return 0;
 577}
 578EXPORT_SYMBOL_GPL(rtsx_pci_read_ppbuf);
 579
 580int rtsx_pci_write_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
 581{
 582	int err;
 583	int i, j;
 584	u16 reg;
 585	u8 *ptr;
 586
 587	if (buf_len > 512)
 588		buf_len = 512;
 589
 590	ptr = buf;
 591	reg = PPBUF_BASE2;
 592	for (i = 0; i < buf_len / 256; i++) {
 593		rtsx_pci_init_cmd(pcr);
 594
 595		for (j = 0; j < 256; j++) {
 596			rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
 597					reg++, 0xFF, *ptr);
 598			ptr++;
 599		}
 600
 601		err = rtsx_pci_send_cmd(pcr, 250);
 602		if (err < 0)
 603			return err;
 604	}
 605
 606	if (buf_len % 256) {
 607		rtsx_pci_init_cmd(pcr);
 608
 609		for (j = 0; j < buf_len % 256; j++) {
 610			rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
 611					reg++, 0xFF, *ptr);
 612			ptr++;
 613		}
 614
 615		err = rtsx_pci_send_cmd(pcr, 250);
 616		if (err < 0)
 617			return err;
 618	}
 619
 620	return 0;
 621}
 622EXPORT_SYMBOL_GPL(rtsx_pci_write_ppbuf);
 623
 624static int rtsx_pci_set_pull_ctl(struct rtsx_pcr *pcr, const u32 *tbl)
 625{
 626	rtsx_pci_init_cmd(pcr);
 627
 628	while (*tbl & 0xFFFF0000) {
 629		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
 630				(u16)(*tbl >> 16), 0xFF, (u8)(*tbl));
 631		tbl++;
 632	}
 633
 634	return rtsx_pci_send_cmd(pcr, 100);
 635}
 636
 637int rtsx_pci_card_pull_ctl_enable(struct rtsx_pcr *pcr, int card)
 638{
 639	const u32 *tbl;
 640
 641	if (card == RTSX_SD_CARD)
 642		tbl = pcr->sd_pull_ctl_enable_tbl;
 643	else if (card == RTSX_MS_CARD)
 644		tbl = pcr->ms_pull_ctl_enable_tbl;
 645	else
 646		return -EINVAL;
 647
 648	return rtsx_pci_set_pull_ctl(pcr, tbl);
 649}
 650EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_enable);
 651
 652int rtsx_pci_card_pull_ctl_disable(struct rtsx_pcr *pcr, int card)
 653{
 654	const u32 *tbl;
 655
 656	if (card == RTSX_SD_CARD)
 657		tbl = pcr->sd_pull_ctl_disable_tbl;
 658	else if (card == RTSX_MS_CARD)
 659		tbl = pcr->ms_pull_ctl_disable_tbl;
 660	else
 661		return -EINVAL;
 662
 663	return rtsx_pci_set_pull_ctl(pcr, tbl);
 664}
 665EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_disable);
 666
 667static void rtsx_pci_enable_bus_int(struct rtsx_pcr *pcr)
 668{
 669	struct rtsx_hw_param *hw_param = &pcr->hw_param;
 670
 671	pcr->bier = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN | SD_INT_EN
 672		| hw_param->interrupt_en;
 673
 674	if (pcr->num_slots > 1)
 675		pcr->bier |= MS_INT_EN;
 676
 677	/* Enable Bus Interrupt */
 678	rtsx_pci_writel(pcr, RTSX_BIER, pcr->bier);
 679
 680	pcr_dbg(pcr, "RTSX_BIER: 0x%08x\n", pcr->bier);
 681}
 682
 683static inline u8 double_ssc_depth(u8 depth)
 684{
 685	return ((depth > 1) ? (depth - 1) : depth);
 686}
 687
 688static u8 revise_ssc_depth(u8 ssc_depth, u8 div)
 689{
 690	if (div > CLK_DIV_1) {
 691		if (ssc_depth > (div - 1))
 692			ssc_depth -= (div - 1);
 693		else
 694			ssc_depth = SSC_DEPTH_4M;
 695	}
 696
 697	return ssc_depth;
 698}
 699
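/*
 * Switch the SSC-generated card clock.  The divider value N is derived
 * from the requested clock (N = clk - 2, or via the chip's
 * conv_clk_and_div_n hook); while N is below MIN_DIV_N_PCR the clock
 * is doubled and the CLK_DIV post-divider raised instead.  The SSC
 * registers are reprogrammed with CLK_LOW_FREQ asserted, which is
 * released once the new clock has had time to stabilize.
 */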
 700int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
 701		u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
 702{
 703	int err, clk;
 704	u8 n, clk_divider, mcu_cnt, div;
 705	static const u8 depth[] = {
 706		[RTSX_SSC_DEPTH_4M] = SSC_DEPTH_4M,
 707		[RTSX_SSC_DEPTH_2M] = SSC_DEPTH_2M,
 708		[RTSX_SSC_DEPTH_1M] = SSC_DEPTH_1M,
 709		[RTSX_SSC_DEPTH_500K] = SSC_DEPTH_500K,
 710		[RTSX_SSC_DEPTH_250K] = SSC_DEPTH_250K,
 711	};
 712
 713	if (PCI_PID(pcr) == PID_5261)
 714		return rts5261_pci_switch_clock(pcr, card_clock,
 715				ssc_depth, initial_mode, double_clk, vpclk);
 716	if (PCI_PID(pcr) == PID_5228)
 717		return rts5228_pci_switch_clock(pcr, card_clock,
 718				ssc_depth, initial_mode, double_clk, vpclk);
 719	if (PCI_PID(pcr) == PID_5264)
 720		return rts5264_pci_switch_clock(pcr, card_clock,
 721				ssc_depth, initial_mode, double_clk, vpclk);
 722
 723	if (initial_mode) {
  724		/* Use around 250 kHz in the initial stage */
 725		clk_divider = SD_CLK_DIVIDE_128;
 726		card_clock = 30000000;
 727	} else {
 728		clk_divider = SD_CLK_DIVIDE_0;
 729	}
 730	err = rtsx_pci_write_register(pcr, SD_CFG1,
 731			SD_CLK_DIVIDE_MASK, clk_divider);
 732	if (err < 0)
 733		return err;
 734
 735	/* Reduce card clock by 20MHz each time a DMA transfer error occurs */
 736	if (card_clock == UHS_SDR104_MAX_DTR &&
 737	    pcr->dma_error_count &&
 738	    PCI_PID(pcr) == RTS5227_DEVICE_ID)
 739		card_clock = UHS_SDR104_MAX_DTR -
 740			(pcr->dma_error_count * 20000000);
 741
 742	card_clock /= 1000000;
 743	pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock);
 744
 745	clk = card_clock;
 746	if (!initial_mode && double_clk)
 747		clk = card_clock * 2;
 748	pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n",
 749		clk, pcr->cur_clock);
 750
 751	if (clk == pcr->cur_clock)
 752		return 0;
 753
 754	if (pcr->ops->conv_clk_and_div_n)
 755		n = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
 756	else
 757		n = (u8)(clk - 2);
 758	if ((clk <= 2) || (n > MAX_DIV_N_PCR))
 759		return -EINVAL;
 760
 761	mcu_cnt = (u8)(125/clk + 3);
 762	if (mcu_cnt > 15)
 763		mcu_cnt = 15;
 764
 765	/* Make sure that the SSC clock div_n is not less than MIN_DIV_N_PCR */
 766	div = CLK_DIV_1;
 767	while ((n < MIN_DIV_N_PCR) && (div < CLK_DIV_8)) {
 768		if (pcr->ops->conv_clk_and_div_n) {
 769			int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
 770					DIV_N_TO_CLK) * 2;
 771			n = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk,
 772					CLK_TO_DIV_N);
 773		} else {
 774			n = (n + 2) * 2 - 2;
 775		}
 776		div++;
 777	}
 778	pcr_dbg(pcr, "n = %d, div = %d\n", n, div);
 779
 780	ssc_depth = depth[ssc_depth];
 781	if (double_clk)
 782		ssc_depth = double_ssc_depth(ssc_depth);
 783
 784	ssc_depth = revise_ssc_depth(ssc_depth, div);
 785	pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth);
 786
 787	rtsx_pci_init_cmd(pcr);
 788	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
 789			CLK_LOW_FREQ, CLK_LOW_FREQ);
 790	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV,
 791			0xFF, (div << 4) | mcu_cnt);
 792	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
 793	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2,
 794			SSC_DEPTH_MASK, ssc_depth);
 795	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
 796	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
 797	if (vpclk) {
 798		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
 799				PHASE_NOT_RESET, 0);
 800		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
 801				PHASE_NOT_RESET, PHASE_NOT_RESET);
 802	}
 803
 804	err = rtsx_pci_send_cmd(pcr, 2000);
 805	if (err < 0)
 806		return err;
 807
 808	/* Wait SSC clock stable */
 809	udelay(SSC_CLOCK_STABLE_WAIT);
 810	err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
 811	if (err < 0)
 812		return err;
 813
 814	pcr->cur_clock = clk;
 815	return 0;
 816}
 817EXPORT_SYMBOL_GPL(rtsx_pci_switch_clock);
 818
 819int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card)
 820{
 821	if (pcr->ops->card_power_on)
 822		return pcr->ops->card_power_on(pcr, card);
 823
 824	return 0;
 825}
 826EXPORT_SYMBOL_GPL(rtsx_pci_card_power_on);
 827
 828int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card)
 829{
 830	if (pcr->ops->card_power_off)
 831		return pcr->ops->card_power_off(pcr, card);
 832
 833	return 0;
 834}
 835EXPORT_SYMBOL_GPL(rtsx_pci_card_power_off);
 836
 837int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card)
 838{
 839	static const unsigned int cd_mask[] = {
 840		[RTSX_SD_CARD] = SD_EXIST,
 841		[RTSX_MS_CARD] = MS_EXIST
 842	};
 843
 844	if (!(pcr->flags & PCR_MS_PMOS)) {
 845		/* When using single PMOS, accessing card is not permitted
 846		 * if the existing card is not the designated one.
 847		 */
 848		if (pcr->card_exist & (~cd_mask[card]))
 849			return -EIO;
 850	}
 851
 852	return 0;
 853}
 854EXPORT_SYMBOL_GPL(rtsx_pci_card_exclusive_check);
 855
 856int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
 857{
 858	if (pcr->ops->switch_output_voltage)
 859		return pcr->ops->switch_output_voltage(pcr, voltage);
 860
 861	return 0;
 862}
 863EXPORT_SYMBOL_GPL(rtsx_pci_switch_output_voltage);
 864
 865unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr)
 866{
 867	unsigned int val;
 868
 869	val = rtsx_pci_readl(pcr, RTSX_BIPR);
 870	if (pcr->ops->cd_deglitch)
 871		val = pcr->ops->cd_deglitch(pcr);
 872
 873	return val;
 874}
 875EXPORT_SYMBOL_GPL(rtsx_pci_card_exist);
 876
 877void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr)
 878{
 879	struct completion finish;
 880
 881	pcr->finish_me = &finish;
 882	init_completion(&finish);
 883
 884	if (pcr->done)
 885		complete(pcr->done);
 886
 887	if (!pcr->remove_pci)
 888		rtsx_pci_stop_cmd(pcr);
 889
 890	wait_for_completion_interruptible_timeout(&finish,
 891			msecs_to_jiffies(2));
 892	pcr->finish_me = NULL;
 893}
 894EXPORT_SYMBOL_GPL(rtsx_pci_complete_unfinished_transfer);
 895
 896static void rtsx_pci_card_detect(struct work_struct *work)
 897{
 898	struct delayed_work *dwork;
 899	struct rtsx_pcr *pcr;
 900	unsigned long flags;
 901	unsigned int card_detect = 0, card_inserted, card_removed;
 902	u32 irq_status;
 903
 904	dwork = to_delayed_work(work);
 905	pcr = container_of(dwork, struct rtsx_pcr, carddet_work);
 906
 907	pcr_dbg(pcr, "--> %s\n", __func__);
 908
 909	mutex_lock(&pcr->pcr_mutex);
 910	spin_lock_irqsave(&pcr->lock, flags);
 911
 912	irq_status = rtsx_pci_readl(pcr, RTSX_BIPR);
 913	pcr_dbg(pcr, "irq_status: 0x%08x\n", irq_status);
 914
 915	irq_status &= CARD_EXIST;
 916	card_inserted = pcr->card_inserted & irq_status;
 917	card_removed = pcr->card_removed;
 918	pcr->card_inserted = 0;
 919	pcr->card_removed = 0;
 920
 921	spin_unlock_irqrestore(&pcr->lock, flags);
 922
 923	if (card_inserted || card_removed) {
 924		pcr_dbg(pcr, "card_inserted: 0x%x, card_removed: 0x%x\n",
 925			card_inserted, card_removed);
 926
 927		if (pcr->ops->cd_deglitch)
 928			card_inserted = pcr->ops->cd_deglitch(pcr);
 929
 930		card_detect = card_inserted | card_removed;
 931
 932		pcr->card_exist |= card_inserted;
 933		pcr->card_exist &= ~card_removed;
 934	}
 935
 936	mutex_unlock(&pcr->pcr_mutex);
 937
 938	if ((card_detect & SD_EXIST) && pcr->slots[RTSX_SD_CARD].card_event)
 939		pcr->slots[RTSX_SD_CARD].card_event(
 940				pcr->slots[RTSX_SD_CARD].p_dev);
 941	if ((card_detect & MS_EXIST) && pcr->slots[RTSX_MS_CARD].card_event)
 942		pcr->slots[RTSX_MS_CARD].card_event(
 943				pcr->slots[RTSX_MS_CARD].p_dev);
 944}
 945
 946static void rtsx_pci_process_ocp(struct rtsx_pcr *pcr)
 947{
 948	if (pcr->ops->process_ocp) {
 949		pcr->ops->process_ocp(pcr);
 950	} else {
 951		if (!pcr->option.ocp_en)
 952			return;
 953		rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
 954		if (pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
 955			rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
 956			rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
 957			rtsx_pci_clear_ocpstat(pcr);
 958			pcr->ocp_stat = 0;
 959		}
 960	}
 961}
 962
 963static int rtsx_pci_process_ocp_interrupt(struct rtsx_pcr *pcr)
 964{
 965	if (pcr->option.ocp_en)
 966		rtsx_pci_process_ocp(pcr);
 967
 968	return 0;
 969}
 970
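/*
 * Interrupt handler: read RTSX_BIPR and write the value back to clear
 * the pending bits, record SD/MS card insertion and removal events,
 * complete any waiting command or DMA transfer, and schedule the card
 * detect worker 200 ms later to handle the card change.
 */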
 971static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
 972{
 973	struct rtsx_pcr *pcr = dev_id;
 974	u32 int_reg;
 975
 976	if (!pcr)
 977		return IRQ_NONE;
 978
 979	spin_lock(&pcr->lock);
 980
 981	int_reg = rtsx_pci_readl(pcr, RTSX_BIPR);
 982	/* Clear interrupt flag */
 983	rtsx_pci_writel(pcr, RTSX_BIPR, int_reg);
 984	if ((int_reg & pcr->bier) == 0) {
 985		spin_unlock(&pcr->lock);
 986		return IRQ_NONE;
 987	}
 988	if (int_reg == 0xFFFFFFFF) {
 989		spin_unlock(&pcr->lock);
 990		return IRQ_HANDLED;
 991	}
 992
 993	int_reg &= (pcr->bier | 0x7FFFFF);
 994
 995	if ((int_reg & SD_OC_INT) ||
 996			((int_reg & SD_OVP_INT) && (PCI_PID(pcr) == PID_5264)))
 997		rtsx_pci_process_ocp_interrupt(pcr);
 998
 999	if (int_reg & SD_INT) {
1000		if (int_reg & SD_EXIST) {
1001			pcr->card_inserted |= SD_EXIST;
1002		} else {
1003			pcr->card_removed |= SD_EXIST;
1004			pcr->card_inserted &= ~SD_EXIST;
1005			if (PCI_PID(pcr) == PID_5261) {
1006				rtsx_pci_write_register(pcr, RTS5261_FW_STATUS,
1007					RTS5261_EXPRESS_LINK_FAIL_MASK, 0);
1008				pcr->extra_caps |= EXTRA_CAPS_SD_EXPRESS;
1009			}
1010		}
1011		pcr->dma_error_count = 0;
1012	}
1013
1014	if (int_reg & MS_INT) {
1015		if (int_reg & MS_EXIST) {
1016			pcr->card_inserted |= MS_EXIST;
1017		} else {
1018			pcr->card_removed |= MS_EXIST;
1019			pcr->card_inserted &= ~MS_EXIST;
1020		}
1021	}
1022
1023	if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) {
1024		if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) {
1025			pcr->trans_result = TRANS_RESULT_FAIL;
1026			if (pcr->done)
1027				complete(pcr->done);
1028		} else if (int_reg & TRANS_OK_INT) {
1029			pcr->trans_result = TRANS_RESULT_OK;
1030			if (pcr->done)
1031				complete(pcr->done);
1032		}
1033	}
1034
1035	if ((pcr->card_inserted || pcr->card_removed) && !(int_reg & SD_OC_INT))
1036		schedule_delayed_work(&pcr->carddet_work,
1037				msecs_to_jiffies(200));
1038
1039	spin_unlock(&pcr->lock);
1040	return IRQ_HANDLED;
1041}
1042
1043static int rtsx_pci_acquire_irq(struct rtsx_pcr *pcr)
1044{
1045	pcr_dbg(pcr, "%s: pcr->msi_en = %d, pci->irq = %d\n",
1046			__func__, pcr->msi_en, pcr->pci->irq);
1047
1048	if (request_irq(pcr->pci->irq, rtsx_pci_isr,
1049			pcr->msi_en ? 0 : IRQF_SHARED,
1050			DRV_NAME_RTSX_PCI, pcr)) {
1051		dev_err(&(pcr->pci->dev),
1052			"rtsx_sdmmc: unable to grab IRQ %d, disabling device\n",
1053			pcr->pci->irq);
1054		return -1;
1055	}
1056
1057	pcr->irq = pcr->pci->irq;
1058	pci_intx(pcr->pci, !pcr->msi_en);
1059
1060	return 0;
1061}
1062
1063static void rtsx_base_force_power_down(struct rtsx_pcr *pcr)
1064{
1065	/* Set relink_time to 0 */
1066	rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0);
1067	rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0);
1068	rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3,
1069			RELINK_TIME_MASK, 0);
1070
1071	rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
1072			D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
1073
1074	rtsx_pci_write_register(pcr, FPDCTL, ALL_POWER_DOWN, ALL_POWER_DOWN);
1075}
1076
1077static void __maybe_unused rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state, bool runtime)
1078{
1079	if (pcr->ops->turn_off_led)
1080		pcr->ops->turn_off_led(pcr);
1081
1082	rtsx_pci_writel(pcr, RTSX_BIER, 0);
1083	pcr->bier = 0;
1084
1085	rtsx_pci_write_register(pcr, PETXCFG, 0x08, 0x08);
1086	rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, pm_state);
1087
1088	if (pcr->ops->force_power_down)
1089		pcr->ops->force_power_down(pcr, pm_state, runtime);
1090	else
1091		rtsx_base_force_power_down(pcr);
1092}
1093
1094void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr)
1095{
1096	u8 val = SD_OCP_INT_EN | SD_DETECT_EN;
1097
1098	if (pcr->ops->enable_ocp) {
1099		pcr->ops->enable_ocp(pcr);
1100	} else {
1101		rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
1102		rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
1103	}
1104
1105}
1106
1107void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr)
1108{
1109	u8 mask = SD_OCP_INT_EN | SD_DETECT_EN;
1110
1111	if (pcr->ops->disable_ocp) {
1112		pcr->ops->disable_ocp(pcr);
1113	} else {
1114		rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
1115		rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
1116				OC_POWER_DOWN);
1117	}
1118}
1119
1120void rtsx_pci_init_ocp(struct rtsx_pcr *pcr)
1121{
1122	if (pcr->ops->init_ocp) {
1123		pcr->ops->init_ocp(pcr);
1124	} else {
1125		struct rtsx_cr_option *option = &(pcr->option);
1126
1127		if (option->ocp_en) {
1128			u8 val = option->sd_800mA_ocp_thd;
1129
1130			rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
1131			rtsx_pci_write_register(pcr, REG_OCPPARA1,
1132				SD_OCP_TIME_MASK, SD_OCP_TIME_800);
1133			rtsx_pci_write_register(pcr, REG_OCPPARA2,
1134				SD_OCP_THD_MASK, val);
1135			rtsx_pci_write_register(pcr, REG_OCPGLITCH,
1136				SD_OCP_GLITCH_MASK, pcr->hw_param.ocp_glitch);
1137			rtsx_pci_enable_ocp(pcr);
1138		}
1139	}
1140}
1141
1142int rtsx_pci_get_ocpstat(struct rtsx_pcr *pcr, u8 *val)
1143{
1144	if (pcr->ops->get_ocpstat)
1145		return pcr->ops->get_ocpstat(pcr, val);
1146	else
1147		return rtsx_pci_read_register(pcr, REG_OCPSTAT, val);
1148}
1149
1150void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr)
1151{
1152	if (pcr->ops->clear_ocpstat) {
1153		pcr->ops->clear_ocpstat(pcr);
1154	} else {
1155		u8 mask = SD_OCP_INT_CLR | SD_OC_CLR;
1156		u8 val = SD_OCP_INT_CLR | SD_OC_CLR;
1157
1158		rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
1159		udelay(100);
1160		rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
1161	}
1162}
1163
1164void rtsx_pci_enable_oobs_polling(struct rtsx_pcr *pcr)
1165{
1166	u16 val;
1167
1168	if ((PCI_PID(pcr) != PID_525A) &&
1169		(PCI_PID(pcr) != PID_5260) &&
1170		(PCI_PID(pcr) != PID_5264)) {
1171		rtsx_pci_read_phy_register(pcr, 0x01, &val);
1172		val |= 1<<9;
1173		rtsx_pci_write_phy_register(pcr, 0x01, val);
1174	}
1175	rtsx_pci_write_register(pcr, REG_CFG_OOBS_OFF_TIMER, 0xFF, 0x32);
1176	rtsx_pci_write_register(pcr, REG_CFG_OOBS_ON_TIMER, 0xFF, 0x05);
1177	rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x83);
1178	rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0xDE);
1179
1180}
1181
1182void rtsx_pci_disable_oobs_polling(struct rtsx_pcr *pcr)
1183{
1184	u16 val;
1185
1186	if ((PCI_PID(pcr) != PID_525A) &&
1187		(PCI_PID(pcr) != PID_5260) &&
1188		(PCI_PID(pcr) != PID_5264)) {
1189		rtsx_pci_read_phy_register(pcr, 0x01, &val);
1190		val &= ~(1<<9);
1191		rtsx_pci_write_phy_register(pcr, 0x01, val);
1192	}
1193	rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x03);
1194	rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0x00);
1195
1196}
1197
1198int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr)
1199{
1200	rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
1201		MS_CLK_EN | SD40_CLK_EN, 0);
1202	rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
1203	rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
1204
1205	msleep(50);
1206
1207	rtsx_pci_card_pull_ctl_disable(pcr, RTSX_SD_CARD);
1208
1209	return 0;
1210}
1211
1212int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr)
1213{
1214	rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
1215		MS_CLK_EN | SD40_CLK_EN, 0);
1216
1217	rtsx_pci_card_pull_ctl_disable(pcr, RTSX_MS_CARD);
1218
1219	rtsx_pci_write_register(pcr, CARD_OE, MS_OUTPUT_EN, 0);
1220	rtsx_pci_card_power_off(pcr, RTSX_MS_CARD);
1221
1222	return 0;
1223}
1224
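/*
 * Controller initialization, run at probe and again on (runtime)
 * resume: program the command buffer address, enable bus interrupts,
 * power up the SSC clock, run the per-chip optimize_phy and
 * extra_init_hw hooks, and seed pcr->card_exist from the current
 * card-present status.
 */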
1225static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
1226{
1227	struct pci_dev *pdev = pcr->pci;
1228	int err;
1229
1230	if (PCI_PID(pcr) == PID_5228)
1231		rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG1, RTS5228_LDO1_SR_TIME_MASK,
1232				RTS5228_LDO1_SR_0_5);
1233
1234	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
1235
1236	rtsx_pci_enable_bus_int(pcr);
1237
1238	/* Power on SSC */
1239	if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5264)) {
1240		/* Gating real mcu clock */
1241		err = rtsx_pci_write_register(pcr, RTS5261_FW_CFG1,
1242			RTS5261_MCU_CLOCK_GATING, 0);
1243		err = rtsx_pci_write_register(pcr, RTS5261_REG_FPDCTL,
1244			SSC_POWER_DOWN, 0);
1245	} else {
1246		err = rtsx_pci_write_register(pcr, FPDCTL, SSC_POWER_DOWN, 0);
1247	}
1248	if (err < 0)
1249		return err;
1250
1251	/* Wait SSC power stable */
1252	udelay(200);
1253
1254	rtsx_disable_aspm(pcr);
1255	if (pcr->ops->optimize_phy) {
1256		err = pcr->ops->optimize_phy(pcr);
1257		if (err < 0)
1258			return err;
1259	}
1260
1261	rtsx_pci_init_cmd(pcr);
1262
1263	/* Set mcu_cnt to 7 to ensure data can be sampled properly */
1264	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV, 0x07, 0x07);
1265
1266	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, HOST_SLEEP_STATE, 0x03, 0x00);
1267	/* Disable card clock */
1268	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, 0x1E, 0);
1269	/* Reset delink mode */
1270	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x0A, 0);
1271	/* Card driving select */
1272	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DRIVE_SEL,
1273			0xFF, pcr->card_drive_sel);
1274	/* Enable SSC Clock */
1275	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1,
1276			0xFF, SSC_8X_EN | SSC_SEL_4M);
1277	if (PCI_PID(pcr) == PID_5261)
1278		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
1279			RTS5261_SSC_DEPTH_2M);
1280	else if (PCI_PID(pcr) == PID_5228)
1281		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
1282			RTS5228_SSC_DEPTH_2M);
1283	else if (is_version(pcr, 0x5264, IC_VER_A))
1284		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
1285	else if (PCI_PID(pcr) == PID_5264)
1286		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
1287			RTS5264_SSC_DEPTH_2M);
1288	else
1289		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);
1290
1291	/* Disable cd_pwr_save */
1292	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x16, 0x10);
1293	/* Clear Link Ready Interrupt */
1294	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
1295			LINK_RDY_INT, LINK_RDY_INT);
1296	/* Enlarge the estimation window of PERST# glitch
1297	 * to reduce the chance of invalid card interrupt
1298	 */
1299	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PERST_GLITCH_WIDTH, 0xFF, 0x80);
1300	/* Update RC oscillator to 400k
1301	 * bit[0] F_HIGH: for RC oscillator, Rst_value is 1'b1
1302	 *                1: 2M  0: 400k
1303	 */
1304	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RCCTL, 0x01, 0x00);
1305	/* Set interrupt write clear
1306	 * bit 1: U_elbi_if_rd_clr_en
1307	 *	1: Enable ELBI interrupt[31:22] & [7:0] flag read clear
1308	 *	0: ELBI interrupt flag[31:22] & [7:0] only can be write clear
1309	 */
1310	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, NFTS_TX_CTRL, 0x02, 0);
1311
1312	err = rtsx_pci_send_cmd(pcr, 100);
1313	if (err < 0)
1314		return err;
1315
1316	switch (PCI_PID(pcr)) {
1317	case PID_5250:
1318	case PID_524A:
1319	case PID_525A:
1320	case PID_5260:
1321	case PID_5261:
1322	case PID_5228:
1323	case PID_5264:
1324		rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
1325		break;
1326	default:
1327		break;
1328	}
1329
 1330	/* Initialize over-current protection (OCP) */
1331	rtsx_pci_init_ocp(pcr);
1332
1333	/* Enable clk_request_n to enable clock power management */
1334	pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
1335					0, PCI_EXP_LNKCTL_CLKREQ_EN);
1336	/* Enter L1 when host tx idle */
1337	pci_write_config_byte(pdev, 0x70F, 0x5B);
1338
1339	if (pcr->ops->extra_init_hw) {
1340		err = pcr->ops->extra_init_hw(pcr);
1341		if (err < 0)
1342			return err;
1343	}
1344
1345	if (pcr->aspm_mode == ASPM_MODE_REG)
1346		rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30);
1347
1348	/* No CD interrupt if probing driver with card inserted.
1349	 * So we need to initialize pcr->card_exist here.
1350	 */
1351	if (pcr->ops->cd_deglitch)
1352		pcr->card_exist = pcr->ops->cd_deglitch(pcr);
1353	else
1354		pcr->card_exist = rtsx_pci_readl(pcr, RTSX_BIPR) & CARD_EXIST;
1355
1356	return 0;
1357}
1358
1359static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
1360{
1361	struct rtsx_cr_option *option = &(pcr->option);
1362	int err, l1ss;
1363	u32 lval;
1364	u16 cfg_val;
1365	u8 val;
1366
1367	spin_lock_init(&pcr->lock);
1368	mutex_init(&pcr->pcr_mutex);
1369
1370	switch (PCI_PID(pcr)) {
1371	default:
1372	case 0x5209:
1373		rts5209_init_params(pcr);
1374		break;
1375
1376	case 0x5229:
1377		rts5229_init_params(pcr);
1378		break;
1379
1380	case 0x5289:
1381		rtl8411_init_params(pcr);
1382		break;
1383
1384	case 0x5227:
1385		rts5227_init_params(pcr);
1386		break;
1387
1388	case 0x522A:
1389		rts522a_init_params(pcr);
1390		break;
1391
1392	case 0x5249:
1393		rts5249_init_params(pcr);
1394		break;
1395
1396	case 0x524A:
1397		rts524a_init_params(pcr);
1398		break;
1399
1400	case 0x525A:
1401		rts525a_init_params(pcr);
1402		break;
1403
1404	case 0x5287:
1405		rtl8411b_init_params(pcr);
1406		break;
1407
1408	case 0x5286:
1409		rtl8402_init_params(pcr);
1410		break;
1411
1412	case 0x5260:
1413		rts5260_init_params(pcr);
1414		break;
1415
1416	case 0x5261:
1417		rts5261_init_params(pcr);
1418		break;
1419
1420	case 0x5228:
1421		rts5228_init_params(pcr);
1422		break;
1423
1424	case 0x5264:
1425		rts5264_init_params(pcr);
1426		break;
1427	}
1428
1429	pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
1430			PCI_PID(pcr), pcr->ic_version);
1431
1432	pcr->slots = kcalloc(pcr->num_slots, sizeof(struct rtsx_slot),
1433			GFP_KERNEL);
1434	if (!pcr->slots)
1435		return -ENOMEM;
1436
1437	if (pcr->aspm_mode == ASPM_MODE_CFG) {
1438		pcie_capability_read_word(pcr->pci, PCI_EXP_LNKCTL, &cfg_val);
1439		if (cfg_val & PCI_EXP_LNKCTL_ASPM_L1)
1440			pcr->aspm_enabled = true;
1441		else
1442			pcr->aspm_enabled = false;
1443
1444	} else if (pcr->aspm_mode == ASPM_MODE_REG) {
1445		rtsx_pci_read_register(pcr, ASPM_FORCE_CTL, &val);
1446		if (val & FORCE_ASPM_CTL0 && val & FORCE_ASPM_CTL1)
1447			pcr->aspm_enabled = false;
1448		else
1449			pcr->aspm_enabled = true;
1450	}
1451
1452	l1ss = pci_find_ext_capability(pcr->pci, PCI_EXT_CAP_ID_L1SS);
1453	if (l1ss) {
1454		pci_read_config_dword(pcr->pci, l1ss + PCI_L1SS_CTL1, &lval);
1455
1456		if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
1457			rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
1458		else
1459			rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN);
1460
1461		if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
1462			rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
1463		else
1464			rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN);
1465
1466		if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
1467			rtsx_set_dev_flag(pcr, PM_L1_1_EN);
1468		else
1469			rtsx_clear_dev_flag(pcr, PM_L1_1_EN);
1470
1471		if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
1472			rtsx_set_dev_flag(pcr, PM_L1_2_EN);
1473		else
1474			rtsx_clear_dev_flag(pcr, PM_L1_2_EN);
1475
1476		pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &cfg_val);
1477		if (cfg_val & PCI_EXP_DEVCTL2_LTR_EN) {
1478			option->ltr_enabled = true;
1479			option->ltr_active = true;
1480		} else {
1481			option->ltr_enabled = false;
1482		}
1483
1484		if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
1485				| PM_L1_1_EN | PM_L1_2_EN))
1486			option->force_clkreq_0 = false;
1487		else
1488			option->force_clkreq_0 = true;
1489	} else {
1490		option->ltr_enabled = false;
1491		option->force_clkreq_0 = true;
1492	}
1493
1494	if (pcr->ops->fetch_vendor_settings)
1495		pcr->ops->fetch_vendor_settings(pcr);
1496
1497	pcr_dbg(pcr, "pcr->aspm_en = 0x%x\n", pcr->aspm_en);
1498	pcr_dbg(pcr, "pcr->sd30_drive_sel_1v8 = 0x%x\n",
1499			pcr->sd30_drive_sel_1v8);
1500	pcr_dbg(pcr, "pcr->sd30_drive_sel_3v3 = 0x%x\n",
1501			pcr->sd30_drive_sel_3v3);
1502	pcr_dbg(pcr, "pcr->card_drive_sel = 0x%x\n",
1503			pcr->card_drive_sel);
1504	pcr_dbg(pcr, "pcr->flags = 0x%x\n", pcr->flags);
1505
1506	pcr->state = PDEV_STAT_IDLE;
1507	err = rtsx_pci_init_hw(pcr);
1508	if (err < 0) {
1509		kfree(pcr->slots);
1510		return err;
1511	}
1512
1513	return 0;
1514}
1515
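/*
 * Probe: enable the PCI device, map the register BAR (BAR 1 on the
 * RTS525A and RTS5264, BAR 0 otherwise), allocate the coherent
 * command/scatter-gather buffer, request the (MSI) interrupt,
 * initialize the chip, and register the SD/MMC MFD cell before
 * enabling runtime PM.
 */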
1516static int rtsx_pci_probe(struct pci_dev *pcidev,
1517			  const struct pci_device_id *id)
1518{
1519	struct rtsx_pcr *pcr;
1520	struct pcr_handle *handle;
1521	u32 base, len;
1522	int ret, i, bar = 0;
1523
1524	dev_dbg(&(pcidev->dev),
1525		": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
1526		pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
1527		(int)pcidev->revision);
1528
1529	ret = dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32));
1530	if (ret < 0)
1531		return ret;
1532
1533	ret = pci_enable_device(pcidev);
1534	if (ret)
1535		return ret;
1536
1537	ret = pci_request_regions(pcidev, DRV_NAME_RTSX_PCI);
1538	if (ret)
1539		goto disable;
1540
1541	pcr = kzalloc(sizeof(*pcr), GFP_KERNEL);
1542	if (!pcr) {
1543		ret = -ENOMEM;
1544		goto release_pci;
1545	}
1546
1547	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
1548	if (!handle) {
1549		ret = -ENOMEM;
1550		goto free_pcr;
1551	}
1552	handle->pcr = pcr;
1553
1554	idr_preload(GFP_KERNEL);
1555	spin_lock(&rtsx_pci_lock);
1556	ret = idr_alloc(&rtsx_pci_idr, pcr, 0, 0, GFP_NOWAIT);
1557	if (ret >= 0)
1558		pcr->id = ret;
1559	spin_unlock(&rtsx_pci_lock);
1560	idr_preload_end();
1561	if (ret < 0)
1562		goto free_handle;
1563
1564	pcr->pci = pcidev;
1565	dev_set_drvdata(&pcidev->dev, handle);
1566
1567	if ((CHK_PCI_PID(pcr, 0x525A)) || (CHK_PCI_PID(pcr, 0x5264)))
1568		bar = 1;
1569	len = pci_resource_len(pcidev, bar);
1570	base = pci_resource_start(pcidev, bar);
1571	pcr->remap_addr = ioremap(base, len);
1572	if (!pcr->remap_addr) {
1573		ret = -ENOMEM;
1574		goto free_idr;
1575	}
1576
1577	pcr->rtsx_resv_buf = dma_alloc_coherent(&(pcidev->dev),
1578			RTSX_RESV_BUF_LEN, &(pcr->rtsx_resv_buf_addr),
1579			GFP_KERNEL);
1580	if (pcr->rtsx_resv_buf == NULL) {
1581		ret = -ENXIO;
1582		goto unmap;
1583	}
1584	pcr->host_cmds_ptr = pcr->rtsx_resv_buf;
1585	pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
1586	pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
1587	pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;
1588	pcr->card_inserted = 0;
1589	pcr->card_removed = 0;
1590	INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
1591
1592	pcr->msi_en = msi_en;
1593	if (pcr->msi_en) {
1594		ret = pci_enable_msi(pcidev);
1595		if (ret)
1596			pcr->msi_en = false;
1597	}
1598
1599	ret = rtsx_pci_acquire_irq(pcr);
1600	if (ret < 0)
1601		goto disable_msi;
1602
1603	pci_set_master(pcidev);
1604	synchronize_irq(pcr->irq);
1605
1606	ret = rtsx_pci_init_chip(pcr);
1607	if (ret < 0)
1608		goto disable_irq;
1609
1610	for (i = 0; i < ARRAY_SIZE(rtsx_pcr_cells); i++) {
1611		rtsx_pcr_cells[i].platform_data = handle;
1612		rtsx_pcr_cells[i].pdata_size = sizeof(*handle);
1613	}
1614
1615
1616	ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells,
1617			ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL);
1618	if (ret < 0)
1619		goto free_slots;
1620
1621	pm_runtime_allow(&pcidev->dev);
1622	pm_runtime_put(&pcidev->dev);
1623
1624	return 0;
1625
1626free_slots:
1627	kfree(pcr->slots);
1628disable_irq:
1629	free_irq(pcr->irq, (void *)pcr);
1630disable_msi:
1631	if (pcr->msi_en)
1632		pci_disable_msi(pcr->pci);
1633	dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
1634			pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
1635unmap:
1636	iounmap(pcr->remap_addr);
1637free_idr:
1638	spin_lock(&rtsx_pci_lock);
1639	idr_remove(&rtsx_pci_idr, pcr->id);
1640	spin_unlock(&rtsx_pci_lock);
1641free_handle:
1642	kfree(handle);
1643free_pcr:
1644	kfree(pcr);
1645release_pci:
1646	pci_release_regions(pcidev);
1647disable:
1648	pci_disable_device(pcidev);
1649
1650	return ret;
1651}
1652
1653static void rtsx_pci_remove(struct pci_dev *pcidev)
1654{
1655	struct pcr_handle *handle = pci_get_drvdata(pcidev);
1656	struct rtsx_pcr *pcr = handle->pcr;
1657
1658	pcr->remove_pci = true;
1659
1660	pm_runtime_get_sync(&pcidev->dev);
1661	pm_runtime_forbid(&pcidev->dev);
1662
1663	/* Disable interrupts at the pcr level */
1664	spin_lock_irq(&pcr->lock);
1665	rtsx_pci_writel(pcr, RTSX_BIER, 0);
1666	pcr->bier = 0;
1667	spin_unlock_irq(&pcr->lock);
1668
1669	cancel_delayed_work_sync(&pcr->carddet_work);
1670
1671	mfd_remove_devices(&pcidev->dev);
1672
1673	dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
1674			pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
1675	free_irq(pcr->irq, (void *)pcr);
1676	if (pcr->msi_en)
1677		pci_disable_msi(pcr->pci);
1678	iounmap(pcr->remap_addr);
1679
1680	pci_release_regions(pcidev);
1681	pci_disable_device(pcidev);
1682
1683	spin_lock(&rtsx_pci_lock);
1684	idr_remove(&rtsx_pci_idr, pcr->id);
1685	spin_unlock(&rtsx_pci_lock);
1686
1687	kfree(pcr->slots);
1688	kfree(pcr);
1689	kfree(handle);
1690
1691	dev_dbg(&(pcidev->dev),
1692		": Realtek PCI-E Card Reader at %s [%04x:%04x] has been removed\n",
1693		pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device);
1694}
1695
1696static int __maybe_unused rtsx_pci_suspend(struct device *dev_d)
1697{
1698	struct pci_dev *pcidev = to_pci_dev(dev_d);
1699	struct pcr_handle *handle = pci_get_drvdata(pcidev);
1700	struct rtsx_pcr *pcr = handle->pcr;
1701
1702	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1703
1704	cancel_delayed_work_sync(&pcr->carddet_work);
1705
1706	mutex_lock(&pcr->pcr_mutex);
1707
1708	rtsx_pci_power_off(pcr, HOST_ENTER_S3, false);
1709
1710	mutex_unlock(&pcr->pcr_mutex);
1711	return 0;
1712}
1713
1714static int __maybe_unused rtsx_pci_resume(struct device *dev_d)
1715{
1716	struct pci_dev *pcidev = to_pci_dev(dev_d);
1717	struct pcr_handle *handle = pci_get_drvdata(pcidev);
1718	struct rtsx_pcr *pcr = handle->pcr;
1719	int ret = 0;
1720
1721	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1722
1723	mutex_lock(&pcr->pcr_mutex);
1724
1725	ret = rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
1726	if (ret)
1727		goto out;
1728
1729	ret = rtsx_pci_init_hw(pcr);
1730	if (ret)
1731		goto out;
1732
1733out:
1734	mutex_unlock(&pcr->pcr_mutex);
1735	return ret;
1736}
1737
1738#ifdef CONFIG_PM
1739
1740static void rtsx_enable_aspm(struct rtsx_pcr *pcr)
1741{
1742	if (pcr->ops->set_aspm)
1743		pcr->ops->set_aspm(pcr, true);
1744	else
1745		rtsx_comm_set_aspm(pcr, true);
1746}
1747
1748static void rtsx_comm_pm_power_saving(struct rtsx_pcr *pcr)
1749{
1750	struct rtsx_cr_option *option = &pcr->option;
1751
1752	if (option->ltr_enabled) {
1753		u32 latency = option->ltr_l1off_latency;
1754
1755		if (rtsx_check_dev_flag(pcr, L1_SNOOZE_TEST_EN))
1756			mdelay(option->l1_snooze_delay);
1757
1758		rtsx_set_ltr_latency(pcr, latency);
1759	}
1760
1761	if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
1762		rtsx_set_l1off_sub_cfg_d0(pcr, 0);
1763
1764	rtsx_enable_aspm(pcr);
1765}
1766
1767static void rtsx_pm_power_saving(struct rtsx_pcr *pcr)
1768{
1769	rtsx_comm_pm_power_saving(pcr);
1770}
1771
1772static void rtsx_pci_shutdown(struct pci_dev *pcidev)
1773{
1774	struct pcr_handle *handle = pci_get_drvdata(pcidev);
1775	struct rtsx_pcr *pcr = handle->pcr;
1776
1777	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1778
1779	rtsx_pci_power_off(pcr, HOST_ENTER_S1, false);
1780
1781	pci_disable_device(pcidev);
1782	free_irq(pcr->irq, (void *)pcr);
1783	if (pcr->msi_en)
1784		pci_disable_msi(pcr->pci);
1785}
1786
1787static int rtsx_pci_runtime_idle(struct device *device)
1788{
1789	struct pci_dev *pcidev = to_pci_dev(device);
1790	struct pcr_handle *handle = pci_get_drvdata(pcidev);
1791	struct rtsx_pcr *pcr = handle->pcr;
1792
1793	dev_dbg(device, "--> %s\n", __func__);
1794
1795	mutex_lock(&pcr->pcr_mutex);
1796
1797	pcr->state = PDEV_STAT_IDLE;
1798
1799	if (pcr->ops->disable_auto_blink)
1800		pcr->ops->disable_auto_blink(pcr);
1801	if (pcr->ops->turn_off_led)
1802		pcr->ops->turn_off_led(pcr);
1803
1804	rtsx_pm_power_saving(pcr);
1805
1806	mutex_unlock(&pcr->pcr_mutex);
1807
1808	if (pcr->rtd3_en)
1809		pm_schedule_suspend(device, 10000);
1810
1811	return -EBUSY;
1812}
1813
1814static int rtsx_pci_runtime_suspend(struct device *device)
1815{
1816	struct pci_dev *pcidev = to_pci_dev(device);
1817	struct pcr_handle *handle = pci_get_drvdata(pcidev);
1818	struct rtsx_pcr *pcr = handle->pcr;
1819
1820	dev_dbg(device, "--> %s\n", __func__);
1821
1822	cancel_delayed_work_sync(&pcr->carddet_work);
1823
1824	mutex_lock(&pcr->pcr_mutex);
1825	rtsx_pci_power_off(pcr, HOST_ENTER_S3, true);
1826
1827	mutex_unlock(&pcr->pcr_mutex);
1828
1829	return 0;
1830}
1831
1832static int rtsx_pci_runtime_resume(struct device *device)
1833{
1834	struct pci_dev *pcidev = to_pci_dev(device);
1835	struct pcr_handle *handle = pci_get_drvdata(pcidev);
1836	struct rtsx_pcr *pcr = handle->pcr;
1837
1838	dev_dbg(device, "--> %s\n", __func__);
1839
1840	mutex_lock(&pcr->pcr_mutex);
1841
1842	rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
1843
1844	rtsx_pci_init_hw(pcr);
1845
1846	if (pcr->slots[RTSX_SD_CARD].p_dev != NULL) {
1847		pcr->slots[RTSX_SD_CARD].card_event(
1848				pcr->slots[RTSX_SD_CARD].p_dev);
1849	}
1850
1851	mutex_unlock(&pcr->pcr_mutex);
1852	return 0;
1853}
1854
1855#else /* CONFIG_PM */
1856
1857#define rtsx_pci_shutdown NULL
1858#define rtsx_pci_runtime_suspend NULL
 1859#define rtsx_pci_runtime_resume NULL
1860
1861#endif /* CONFIG_PM */
1862
1863static const struct dev_pm_ops rtsx_pci_pm_ops = {
1864	SET_SYSTEM_SLEEP_PM_OPS(rtsx_pci_suspend, rtsx_pci_resume)
1865	SET_RUNTIME_PM_OPS(rtsx_pci_runtime_suspend, rtsx_pci_runtime_resume, rtsx_pci_runtime_idle)
1866};
1867
1868static struct pci_driver rtsx_pci_driver = {
1869	.name = DRV_NAME_RTSX_PCI,
1870	.id_table = rtsx_pci_ids,
1871	.probe = rtsx_pci_probe,
1872	.remove = rtsx_pci_remove,
1873	.driver.pm = &rtsx_pci_pm_ops,
1874	.shutdown = rtsx_pci_shutdown,
1875};
1876module_pci_driver(rtsx_pci_driver);
1877
1878MODULE_LICENSE("GPL");
1879MODULE_AUTHOR("Wei WANG <wei_wang@realsil.com.cn>");
1880MODULE_DESCRIPTION("Realtek PCI-E Card Reader Driver");
v6.2
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/* Driver for Realtek PCI-Express card reader
   3 *
   4 * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
   5 *
   6 * Author:
   7 *   Wei WANG <wei_wang@realsil.com.cn>
   8 */
   9
  10#include <linux/pci.h>
  11#include <linux/module.h>
  12#include <linux/slab.h>
  13#include <linux/dma-mapping.h>
  14#include <linux/highmem.h>
  15#include <linux/interrupt.h>
  16#include <linux/delay.h>
  17#include <linux/idr.h>
  18#include <linux/platform_device.h>
  19#include <linux/mfd/core.h>
  20#include <linux/rtsx_pci.h>
  21#include <linux/mmc/card.h>
  22#include <asm/unaligned.h>
  23#include <linux/pm.h>
  24#include <linux/pm_runtime.h>
  25
  26#include "rtsx_pcr.h"
  27#include "rts5261.h"
  28#include "rts5228.h"
 
  29
  30static bool msi_en = true;
  31module_param(msi_en, bool, S_IRUGO | S_IWUSR);
  32MODULE_PARM_DESC(msi_en, "Enable MSI");
  33
  34static DEFINE_IDR(rtsx_pci_idr);
  35static DEFINE_SPINLOCK(rtsx_pci_lock);
  36
  37static struct mfd_cell rtsx_pcr_cells[] = {
  38	[RTSX_SD_CARD] = {
  39		.name = DRV_NAME_RTSX_PCI_SDMMC,
  40	},
  41};
  42
  43static const struct pci_device_id rtsx_pci_ids[] = {
  44	{ PCI_DEVICE(0x10EC, 0x5209), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  45	{ PCI_DEVICE(0x10EC, 0x5229), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  46	{ PCI_DEVICE(0x10EC, 0x5289), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  47	{ PCI_DEVICE(0x10EC, 0x5227), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  48	{ PCI_DEVICE(0x10EC, 0x522A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  49	{ PCI_DEVICE(0x10EC, 0x5249), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  50	{ PCI_DEVICE(0x10EC, 0x5287), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  51	{ PCI_DEVICE(0x10EC, 0x5286), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  52	{ PCI_DEVICE(0x10EC, 0x524A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  53	{ PCI_DEVICE(0x10EC, 0x525A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  54	{ PCI_DEVICE(0x10EC, 0x5260), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  55	{ PCI_DEVICE(0x10EC, 0x5261), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  56	{ PCI_DEVICE(0x10EC, 0x5228), PCI_CLASS_OTHERS << 16, 0xFF0000 },
 
  57	{ 0, }
  58};
  59
  60MODULE_DEVICE_TABLE(pci, rtsx_pci_ids);
  61
  62static int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
  63{
  64	rtsx_pci_write_register(pcr, MSGTXDATA0,
  65				MASK_8_BIT_DEF, (u8) (latency & 0xFF));
  66	rtsx_pci_write_register(pcr, MSGTXDATA1,
  67				MASK_8_BIT_DEF, (u8)((latency >> 8) & 0xFF));
  68	rtsx_pci_write_register(pcr, MSGTXDATA2,
  69				MASK_8_BIT_DEF, (u8)((latency >> 16) & 0xFF));
  70	rtsx_pci_write_register(pcr, MSGTXDATA3,
  71				MASK_8_BIT_DEF, (u8)((latency >> 24) & 0xFF));
  72	rtsx_pci_write_register(pcr, LTR_CTL, LTR_TX_EN_MASK |
  73		LTR_LATENCY_MODE_MASK, LTR_TX_EN_1 | LTR_LATENCY_MODE_SW);
  74
  75	return 0;
  76}
  77
  78int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
  79{
  80	return rtsx_comm_set_ltr_latency(pcr, latency);
  81}
  82
  83static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable)
  84{
  85	if (pcr->aspm_enabled == enable)
  86		return;
  87
  88	if (pcr->aspm_mode == ASPM_MODE_CFG) {
  89		pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
  90						PCI_EXP_LNKCTL_ASPMC,
  91						enable ? pcr->aspm_en : 0);
  92	} else if (pcr->aspm_mode == ASPM_MODE_REG) {
  93		if (pcr->aspm_en & 0x02)
  94			rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
  95				FORCE_ASPM_CTL1, enable ? 0 : FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
  96		else
  97			rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
  98				FORCE_ASPM_CTL1, FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
  99	}
 100
 101	if (!enable && (pcr->aspm_en & 0x02))
 102		mdelay(10);
 103
 104	pcr->aspm_enabled = enable;
 105}
 106
 107static void rtsx_disable_aspm(struct rtsx_pcr *pcr)
 108{
 109	if (pcr->ops->set_aspm)
 110		pcr->ops->set_aspm(pcr, false);
 111	else
 112		rtsx_comm_set_aspm(pcr, false);
 113}
 114
 115int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val)
 116{
 117	rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, val);
 118
 119	return 0;
 120}
 121
 122static void rtsx_set_l1off_sub_cfg_d0(struct rtsx_pcr *pcr, int active)
 123{
 124	if (pcr->ops->set_l1off_cfg_sub_d0)
 125		pcr->ops->set_l1off_cfg_sub_d0(pcr, active);
 126}
 127
 128static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr)
 129{
 130	struct rtsx_cr_option *option = &pcr->option;
 131
 132	rtsx_disable_aspm(pcr);
 133
 134	/* Fixes DMA transfer timeout issue after disabling ASPM on RTS5260 */
 135	msleep(1);
 136
 137	if (option->ltr_enabled)
 138		rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
 139
 140	if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
 141		rtsx_set_l1off_sub_cfg_d0(pcr, 1);
 142}
 143
 144static void rtsx_pm_full_on(struct rtsx_pcr *pcr)
 145{
 146	rtsx_comm_pm_full_on(pcr);
 147}
 148
 149void rtsx_pci_start_run(struct rtsx_pcr *pcr)
 150{
  151	/* If the PCI device has been removed, don't queue idle work any more */
 152	if (pcr->remove_pci)
 153		return;
 154
 155	if (pcr->state != PDEV_STAT_RUN) {
 156		pcr->state = PDEV_STAT_RUN;
 157		if (pcr->ops->enable_auto_blink)
 158			pcr->ops->enable_auto_blink(pcr);
 159		rtsx_pm_full_on(pcr);
 160	}
 161}
 162EXPORT_SYMBOL_GPL(rtsx_pci_start_run);
 163
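/*
 * Internal register access goes through the HAIMR window: the request word
 * carries a read/write start flag in its top bits, the 14-bit register
 * address in bits 29:16, the write mask in bits 15:8 and the data in bits
 * 7:0.  The access is polled until HAIMR_TRANS_END clears, for at most
 * MAX_RW_REG_CNT reads, otherwise it times out.
 */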
 164int rtsx_pci_write_register(struct rtsx_pcr *pcr, u16 addr, u8 mask, u8 data)
 165{
 166	int i;
 167	u32 val = HAIMR_WRITE_START;
 168
 169	val |= (u32)(addr & 0x3FFF) << 16;
 170	val |= (u32)mask << 8;
 171	val |= (u32)data;
 172
 173	rtsx_pci_writel(pcr, RTSX_HAIMR, val);
 174
 175	for (i = 0; i < MAX_RW_REG_CNT; i++) {
 176		val = rtsx_pci_readl(pcr, RTSX_HAIMR);
 177		if ((val & HAIMR_TRANS_END) == 0) {
 178			if (data != (u8)val)
 179				return -EIO;
 180			return 0;
 181		}
 182	}
 183
 184	return -ETIMEDOUT;
 185}
 186EXPORT_SYMBOL_GPL(rtsx_pci_write_register);
 187
 188int rtsx_pci_read_register(struct rtsx_pcr *pcr, u16 addr, u8 *data)
 189{
 190	u32 val = HAIMR_READ_START;
 191	int i;
 192
 193	val |= (u32)(addr & 0x3FFF) << 16;
 194	rtsx_pci_writel(pcr, RTSX_HAIMR, val);
 195
 196	for (i = 0; i < MAX_RW_REG_CNT; i++) {
 197		val = rtsx_pci_readl(pcr, RTSX_HAIMR);
 198		if ((val & HAIMR_TRANS_END) == 0)
 199			break;
 200	}
 201
 202	if (i >= MAX_RW_REG_CNT)
 203		return -ETIMEDOUT;
 204
 205	if (data)
 206		*data = (u8)(val & 0xFF);
 207
 208	return 0;
 209}
 210EXPORT_SYMBOL_GPL(rtsx_pci_read_register);
 211
 212int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
 213{
 214	int err, i, finished = 0;
 215	u8 tmp;
 216
 217	rtsx_pci_write_register(pcr, PHYDATA0, 0xFF, (u8)val);
 218	rtsx_pci_write_register(pcr, PHYDATA1, 0xFF, (u8)(val >> 8));
 219	rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr);
 220	rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x81);
 221
 222	for (i = 0; i < 100000; i++) {
 223		err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
 224		if (err < 0)
 225			return err;
 226
 227		if (!(tmp & 0x80)) {
 228			finished = 1;
 229			break;
 230		}
 231	}
 232
 233	if (!finished)
 234		return -ETIMEDOUT;
 235
 236	return 0;
 237}
 238
 239int rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
 240{
 241	if (pcr->ops->write_phy)
 242		return pcr->ops->write_phy(pcr, addr, val);
 243
 244	return __rtsx_pci_write_phy_register(pcr, addr, val);
 245}
 246EXPORT_SYMBOL_GPL(rtsx_pci_write_phy_register);
 247
 248int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
 249{
 250	int err, i, finished = 0;
 251	u16 data;
 252	u8 tmp, val1, val2;
 253
 254	rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr);
 255	rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x80);
 256
 257	for (i = 0; i < 100000; i++) {
 258		err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
 259		if (err < 0)
 260			return err;
 261
 262		if (!(tmp & 0x80)) {
 263			finished = 1;
 264			break;
 265		}
 266	}
 267
 268	if (!finished)
 269		return -ETIMEDOUT;
 270
 271	rtsx_pci_read_register(pcr, PHYDATA0, &val1);
 272	rtsx_pci_read_register(pcr, PHYDATA1, &val2);
 273	data = val1 | (val2 << 8);
 274
 275	if (val)
 276		*val = data;
 277
 278	return 0;
 279}
 280
 281int rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
 282{
 283	if (pcr->ops->read_phy)
 284		return pcr->ops->read_phy(pcr, addr, val);
 285
 286	return __rtsx_pci_read_phy_register(pcr, addr, val);
 287}
 288EXPORT_SYMBOL_GPL(rtsx_pci_read_phy_register);
 289
 290void rtsx_pci_stop_cmd(struct rtsx_pcr *pcr)
 291{
 292	if (pcr->ops->stop_cmd)
 293		return pcr->ops->stop_cmd(pcr);
 294
 295	rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
 296	rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
 297
 298	rtsx_pci_write_register(pcr, DMACTL, 0x80, 0x80);
 299	rtsx_pci_write_register(pcr, RBCTL, 0x80, 0x80);
 300}
 301EXPORT_SYMBOL_GPL(rtsx_pci_stop_cmd);
 302
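/*
 * Host commands are queued as 32-bit little-endian words in the host_cmds
 * buffer: bits 31:30 hold the command type, bits 29:16 the register
 * address, bits 15:8 the mask and bits 7:0 the data.  The queue index
 * pcr->ci is capped at HOST_CMDS_BUF_LEN / 4 entries.
 */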
 303void rtsx_pci_add_cmd(struct rtsx_pcr *pcr,
 304		u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
 305{
 306	unsigned long flags;
 307	u32 val = 0;
 308	u32 *ptr = (u32 *)(pcr->host_cmds_ptr);
 309
 310	val |= (u32)(cmd_type & 0x03) << 30;
 311	val |= (u32)(reg_addr & 0x3FFF) << 16;
 312	val |= (u32)mask << 8;
 313	val |= (u32)data;
 314
 315	spin_lock_irqsave(&pcr->lock, flags);
 316	ptr += pcr->ci;
 317	if (pcr->ci < (HOST_CMDS_BUF_LEN / 4)) {
 318		put_unaligned_le32(val, ptr);
 319		ptr++;
 320		pcr->ci++;
 321	}
 322	spin_unlock_irqrestore(&pcr->lock, flags);
 323}
 324EXPORT_SYMBOL_GPL(rtsx_pci_add_cmd);
 325
 326void rtsx_pci_send_cmd_no_wait(struct rtsx_pcr *pcr)
 327{
 328	u32 val = 1 << 31;
 329
 330	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
 331
 332	val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
 333	/* Hardware Auto Response */
 334	val |= 0x40000000;
 335	rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
 336}
 337EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd_no_wait);
 338
 339int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout)
 340{
 341	struct completion trans_done;
 342	u32 val = 1 << 31;
 343	long timeleft;
 344	unsigned long flags;
 345	int err = 0;
 346
 347	spin_lock_irqsave(&pcr->lock, flags);
 348
 349	/* set up data structures for the wakeup system */
 350	pcr->done = &trans_done;
 351	pcr->trans_result = TRANS_NOT_READY;
 352	init_completion(&trans_done);
 353
 354	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
 355
 356	val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
 357	/* Hardware Auto Response */
 358	val |= 0x40000000;
 359	rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
 360
 361	spin_unlock_irqrestore(&pcr->lock, flags);
 362
 363	/* Wait for TRANS_OK_INT */
 364	timeleft = wait_for_completion_interruptible_timeout(
 365			&trans_done, msecs_to_jiffies(timeout));
 366	if (timeleft <= 0) {
 367		pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
 368		err = -ETIMEDOUT;
 369		goto finish_send_cmd;
 370	}
 371
 372	spin_lock_irqsave(&pcr->lock, flags);
 373	if (pcr->trans_result == TRANS_RESULT_FAIL)
 374		err = -EINVAL;
 375	else if (pcr->trans_result == TRANS_RESULT_OK)
 376		err = 0;
 377	else if (pcr->trans_result == TRANS_NO_DEVICE)
 378		err = -ENODEV;
 379	spin_unlock_irqrestore(&pcr->lock, flags);
 380
 381finish_send_cmd:
 382	spin_lock_irqsave(&pcr->lock, flags);
 383	pcr->done = NULL;
 384	spin_unlock_irqrestore(&pcr->lock, flags);
 385
 386	if ((err < 0) && (err != -ENODEV))
 387		rtsx_pci_stop_cmd(pcr);
 388
 389	if (pcr->finish_me)
 390		complete(pcr->finish_me);
 391
 392	return err;
 393}
 394EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd);
 395
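/*
 * Each scatter/gather table entry is a 64-bit little-endian descriptor: the
 * DMA address sits in the upper 32 bits, followed by the transfer length
 * and the option flags (RTSX_SG_VALID, RTSX_SG_TRANS_DATA and, for the last
 * entry, RTSX_SG_END).  RTS5261/RTS5228 keep the length in a 16-bit field
 * with its overflow bits stored separately; other chips shift the length up
 * by 12 bits.
 */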
 396static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr,
 397		dma_addr_t addr, unsigned int len, int end)
 398{
 399	u64 *ptr = (u64 *)(pcr->host_sg_tbl_ptr) + pcr->sgi;
 400	u64 val;
 401	u8 option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;
 402
 403	pcr_dbg(pcr, "DMA addr: 0x%x, Len: 0x%x\n", (unsigned int)addr, len);
 404
 405	if (end)
 406		option |= RTSX_SG_END;
 407
 408	if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5228)) {
 409		if (len > 0xFFFF)
 410			val = ((u64)addr << 32) | (((u64)len & 0xFFFF) << 16)
 411				| (((u64)len >> 16) << 6) | option;
 412		else
 413			val = ((u64)addr << 32) | ((u64)len << 16) | option;
 414	} else {
 415		val = ((u64)addr << 32) | ((u64)len << 12) | option;
 416	}
 417	put_unaligned_le64(val, ptr);
 418	pcr->sgi++;
 419}
 420
 421int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
 422		int num_sg, bool read, int timeout)
 423{
 424	int err = 0, count;
 425
 426	pcr_dbg(pcr, "--> %s: num_sg = %d\n", __func__, num_sg);
 427	count = rtsx_pci_dma_map_sg(pcr, sglist, num_sg, read);
 428	if (count < 1)
 429		return -EINVAL;
 430	pcr_dbg(pcr, "DMA mapping count: %d\n", count);
 431
 432	err = rtsx_pci_dma_transfer(pcr, sglist, count, read, timeout);
 433
 434	rtsx_pci_dma_unmap_sg(pcr, sglist, num_sg, read);
 435
 436	return err;
 437}
 438EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data);
 439
 440int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
 441		int num_sg, bool read)
 442{
 443	enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
 444
 445	if (pcr->remove_pci)
 446		return -EINVAL;
 447
 448	if ((sglist == NULL) || (num_sg <= 0))
 449		return -EINVAL;
 450
 451	return dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dir);
 452}
 453EXPORT_SYMBOL_GPL(rtsx_pci_dma_map_sg);
 454
 455void rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
 456		int num_sg, bool read)
 457{
 458	enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
 459
 460	dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dir);
 461}
 462EXPORT_SYMBOL_GPL(rtsx_pci_dma_unmap_sg);
 463
 464int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist,
 465		int count, bool read, int timeout)
 466{
 467	struct completion trans_done;
 468	struct scatterlist *sg;
 469	dma_addr_t addr;
 470	long timeleft;
 471	unsigned long flags;
 472	unsigned int len;
 473	int i, err = 0;
 474	u32 val;
 475	u8 dir = read ? DEVICE_TO_HOST : HOST_TO_DEVICE;
 476
 477	if (pcr->remove_pci)
 478		return -ENODEV;
 479
 480	if ((sglist == NULL) || (count < 1))
 481		return -EINVAL;
 482
 483	val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE;
 484	pcr->sgi = 0;
 485	for_each_sg(sglist, sg, count, i) {
 486		addr = sg_dma_address(sg);
 487		len = sg_dma_len(sg);
 488		rtsx_pci_add_sg_tbl(pcr, addr, len, i == count - 1);
 489	}
 490
 491	spin_lock_irqsave(&pcr->lock, flags);
 492
 493	pcr->done = &trans_done;
 494	pcr->trans_result = TRANS_NOT_READY;
 495	init_completion(&trans_done);
 496	rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr);
 497	rtsx_pci_writel(pcr, RTSX_HDBCTLR, val);
 498
 499	spin_unlock_irqrestore(&pcr->lock, flags);
 500
 501	timeleft = wait_for_completion_interruptible_timeout(
 502			&trans_done, msecs_to_jiffies(timeout));
 503	if (timeleft <= 0) {
 504		pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
 505		err = -ETIMEDOUT;
 506		goto out;
 507	}
 508
 509	spin_lock_irqsave(&pcr->lock, flags);
 510	if (pcr->trans_result == TRANS_RESULT_FAIL) {
 511		err = -EILSEQ;
 512		if (pcr->dma_error_count < RTS_MAX_TIMES_FREQ_REDUCTION)
 513			pcr->dma_error_count++;
 514	}
 515
 516	else if (pcr->trans_result == TRANS_NO_DEVICE)
 517		err = -ENODEV;
 518	spin_unlock_irqrestore(&pcr->lock, flags);
 519
 520out:
 521	spin_lock_irqsave(&pcr->lock, flags);
 522	pcr->done = NULL;
 523	spin_unlock_irqrestore(&pcr->lock, flags);
 524
 525	if ((err < 0) && (err != -ENODEV))
 526		rtsx_pci_stop_cmd(pcr);
 527
 528	if (pcr->finish_me)
 529		complete(pcr->finish_me);
 530
 531	return err;
 532}
 533EXPORT_SYMBOL_GPL(rtsx_pci_dma_transfer);
 534
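/*
 * The internal ping-pong buffer (at most 512 bytes here) is read and
 * written through the register window starting at PPBUF_BASE2, batched as
 * up to 256 register commands per rtsx_pci_send_cmd() call.
 */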
 535int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
 536{
 537	int err;
 538	int i, j;
 539	u16 reg;
 540	u8 *ptr;
 541
 542	if (buf_len > 512)
 543		buf_len = 512;
 544
 545	ptr = buf;
 546	reg = PPBUF_BASE2;
 547	for (i = 0; i < buf_len / 256; i++) {
 548		rtsx_pci_init_cmd(pcr);
 549
 550		for (j = 0; j < 256; j++)
 551			rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
 552
 553		err = rtsx_pci_send_cmd(pcr, 250);
 554		if (err < 0)
 555			return err;
 556
 557		memcpy(ptr, rtsx_pci_get_cmd_data(pcr), 256);
 558		ptr += 256;
 559	}
 560
 561	if (buf_len % 256) {
 562		rtsx_pci_init_cmd(pcr);
 563
 564		for (j = 0; j < buf_len % 256; j++)
 565			rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
 566
 567		err = rtsx_pci_send_cmd(pcr, 250);
 568		if (err < 0)
 569			return err;
 570	}
 571
 572	memcpy(ptr, rtsx_pci_get_cmd_data(pcr), buf_len % 256);
 573
 574	return 0;
 575}
 576EXPORT_SYMBOL_GPL(rtsx_pci_read_ppbuf);
 577
 578int rtsx_pci_write_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
 579{
 580	int err;
 581	int i, j;
 582	u16 reg;
 583	u8 *ptr;
 584
 585	if (buf_len > 512)
 586		buf_len = 512;
 587
 588	ptr = buf;
 589	reg = PPBUF_BASE2;
 590	for (i = 0; i < buf_len / 256; i++) {
 591		rtsx_pci_init_cmd(pcr);
 592
 593		for (j = 0; j < 256; j++) {
 594			rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
 595					reg++, 0xFF, *ptr);
 596			ptr++;
 597		}
 598
 599		err = rtsx_pci_send_cmd(pcr, 250);
 600		if (err < 0)
 601			return err;
 602	}
 603
 604	if (buf_len % 256) {
 605		rtsx_pci_init_cmd(pcr);
 606
 607		for (j = 0; j < buf_len % 256; j++) {
 608			rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
 609					reg++, 0xFF, *ptr);
 610			ptr++;
 611		}
 612
 613		err = rtsx_pci_send_cmd(pcr, 250);
 614		if (err < 0)
 615			return err;
 616	}
 617
 618	return 0;
 619}
 620EXPORT_SYMBOL_GPL(rtsx_pci_write_ppbuf);
 621
 622static int rtsx_pci_set_pull_ctl(struct rtsx_pcr *pcr, const u32 *tbl)
 623{
 624	rtsx_pci_init_cmd(pcr);
 625
 626	while (*tbl & 0xFFFF0000) {
 627		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
 628				(u16)(*tbl >> 16), 0xFF, (u8)(*tbl));
 629		tbl++;
 630	}
 631
 632	return rtsx_pci_send_cmd(pcr, 100);
 633}
 634
 635int rtsx_pci_card_pull_ctl_enable(struct rtsx_pcr *pcr, int card)
 636{
 637	const u32 *tbl;
 638
 639	if (card == RTSX_SD_CARD)
 640		tbl = pcr->sd_pull_ctl_enable_tbl;
 641	else if (card == RTSX_MS_CARD)
 642		tbl = pcr->ms_pull_ctl_enable_tbl;
 643	else
 644		return -EINVAL;
 645
 646	return rtsx_pci_set_pull_ctl(pcr, tbl);
 647}
 648EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_enable);
 649
 650int rtsx_pci_card_pull_ctl_disable(struct rtsx_pcr *pcr, int card)
 651{
 652	const u32 *tbl;
 653
 654	if (card == RTSX_SD_CARD)
 655		tbl = pcr->sd_pull_ctl_disable_tbl;
 656	else if (card == RTSX_MS_CARD)
 657		tbl = pcr->ms_pull_ctl_disable_tbl;
 658	else
 659		return -EINVAL;
 660
 661	return rtsx_pci_set_pull_ctl(pcr, tbl);
 662}
 663EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_disable);
 664
 665static void rtsx_pci_enable_bus_int(struct rtsx_pcr *pcr)
 666{
 667	struct rtsx_hw_param *hw_param = &pcr->hw_param;
 668
 669	pcr->bier = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN | SD_INT_EN
 670		| hw_param->interrupt_en;
 671
 672	if (pcr->num_slots > 1)
 673		pcr->bier |= MS_INT_EN;
 674
 675	/* Enable Bus Interrupt */
 676	rtsx_pci_writel(pcr, RTSX_BIER, pcr->bier);
 677
 678	pcr_dbg(pcr, "RTSX_BIER: 0x%08x\n", pcr->bier);
 679}
 680
 681static inline u8 double_ssc_depth(u8 depth)
 682{
 683	return ((depth > 1) ? (depth - 1) : depth);
 684}
 685
 686static u8 revise_ssc_depth(u8 ssc_depth, u8 div)
 687{
 688	if (div > CLK_DIV_1) {
 689		if (ssc_depth > (div - 1))
 690			ssc_depth -= (div - 1);
 691		else
 692			ssc_depth = SSC_DEPTH_4M;
 693	}
 694
 695	return ssc_depth;
 696}
 697
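/*
 * With the default clk/div_n conversion a divider value n corresponds to an
 * internal SSC clock of roughly (n + 2) MHz.  Whenever n would drop below
 * MIN_DIV_N_PCR, the loop below doubles the internal clock
 * (n = (n + 2) * 2 - 2) and raises the output clock divider by one step
 * instead, so the resulting card clock stays the same.
 */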
 698int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
 699		u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
 700{
 701	int err, clk;
 702	u8 n, clk_divider, mcu_cnt, div;
 703	static const u8 depth[] = {
 704		[RTSX_SSC_DEPTH_4M] = SSC_DEPTH_4M,
 705		[RTSX_SSC_DEPTH_2M] = SSC_DEPTH_2M,
 706		[RTSX_SSC_DEPTH_1M] = SSC_DEPTH_1M,
 707		[RTSX_SSC_DEPTH_500K] = SSC_DEPTH_500K,
 708		[RTSX_SSC_DEPTH_250K] = SSC_DEPTH_250K,
 709	};
 710
 711	if (PCI_PID(pcr) == PID_5261)
 712		return rts5261_pci_switch_clock(pcr, card_clock,
 713				ssc_depth, initial_mode, double_clk, vpclk);
 714	if (PCI_PID(pcr) == PID_5228)
 715		return rts5228_pci_switch_clock(pcr, card_clock,
 716				ssc_depth, initial_mode, double_clk, vpclk);
 717
 718	if (initial_mode) {
  719		/* Use a clock of around 250 kHz in the initial stage */
 720		clk_divider = SD_CLK_DIVIDE_128;
 721		card_clock = 30000000;
 722	} else {
 723		clk_divider = SD_CLK_DIVIDE_0;
 724	}
 725	err = rtsx_pci_write_register(pcr, SD_CFG1,
 726			SD_CLK_DIVIDE_MASK, clk_divider);
 727	if (err < 0)
 728		return err;
 729
 730	/* Reduce card clock by 20MHz each time a DMA transfer error occurs */
 731	if (card_clock == UHS_SDR104_MAX_DTR &&
 732	    pcr->dma_error_count &&
 733	    PCI_PID(pcr) == RTS5227_DEVICE_ID)
 734		card_clock = UHS_SDR104_MAX_DTR -
 735			(pcr->dma_error_count * 20000000);
 736
 737	card_clock /= 1000000;
 738	pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock);
 739
 740	clk = card_clock;
 741	if (!initial_mode && double_clk)
 742		clk = card_clock * 2;
 743	pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n",
 744		clk, pcr->cur_clock);
 745
 746	if (clk == pcr->cur_clock)
 747		return 0;
 748
 749	if (pcr->ops->conv_clk_and_div_n)
 750		n = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
 751	else
 752		n = (u8)(clk - 2);
 753	if ((clk <= 2) || (n > MAX_DIV_N_PCR))
 754		return -EINVAL;
 755
 756	mcu_cnt = (u8)(125/clk + 3);
 757	if (mcu_cnt > 15)
 758		mcu_cnt = 15;
 759
 760	/* Make sure that the SSC clock div_n is not less than MIN_DIV_N_PCR */
 761	div = CLK_DIV_1;
 762	while ((n < MIN_DIV_N_PCR) && (div < CLK_DIV_8)) {
 763		if (pcr->ops->conv_clk_and_div_n) {
 764			int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
 765					DIV_N_TO_CLK) * 2;
 766			n = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk,
 767					CLK_TO_DIV_N);
 768		} else {
 769			n = (n + 2) * 2 - 2;
 770		}
 771		div++;
 772	}
 773	pcr_dbg(pcr, "n = %d, div = %d\n", n, div);
 774
 775	ssc_depth = depth[ssc_depth];
 776	if (double_clk)
 777		ssc_depth = double_ssc_depth(ssc_depth);
 778
 779	ssc_depth = revise_ssc_depth(ssc_depth, div);
 780	pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth);
 781
 782	rtsx_pci_init_cmd(pcr);
 783	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
 784			CLK_LOW_FREQ, CLK_LOW_FREQ);
 785	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV,
 786			0xFF, (div << 4) | mcu_cnt);
 787	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
 788	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2,
 789			SSC_DEPTH_MASK, ssc_depth);
 790	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
 791	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
 792	if (vpclk) {
 793		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
 794				PHASE_NOT_RESET, 0);
 795		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
 796				PHASE_NOT_RESET, PHASE_NOT_RESET);
 797	}
 798
 799	err = rtsx_pci_send_cmd(pcr, 2000);
 800	if (err < 0)
 801		return err;
 802
 803	/* Wait SSC clock stable */
 804	udelay(SSC_CLOCK_STABLE_WAIT);
 805	err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
 806	if (err < 0)
 807		return err;
 808
 809	pcr->cur_clock = clk;
 810	return 0;
 811}
 812EXPORT_SYMBOL_GPL(rtsx_pci_switch_clock);
 813
 814int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card)
 815{
 816	if (pcr->ops->card_power_on)
 817		return pcr->ops->card_power_on(pcr, card);
 818
 819	return 0;
 820}
 821EXPORT_SYMBOL_GPL(rtsx_pci_card_power_on);
 822
 823int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card)
 824{
 825	if (pcr->ops->card_power_off)
 826		return pcr->ops->card_power_off(pcr, card);
 827
 828	return 0;
 829}
 830EXPORT_SYMBOL_GPL(rtsx_pci_card_power_off);
 831
 832int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card)
 833{
 834	static const unsigned int cd_mask[] = {
 835		[RTSX_SD_CARD] = SD_EXIST,
 836		[RTSX_MS_CARD] = MS_EXIST
 837	};
 838
 839	if (!(pcr->flags & PCR_MS_PMOS)) {
  840		/* With a single shared PMOS power switch, the card may not be
  841		 * accessed unless the card currently present is the designated one.
  842		 */
 843		if (pcr->card_exist & (~cd_mask[card]))
 844			return -EIO;
 845	}
 846
 847	return 0;
 848}
 849EXPORT_SYMBOL_GPL(rtsx_pci_card_exclusive_check);
 850
 851int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
 852{
 853	if (pcr->ops->switch_output_voltage)
 854		return pcr->ops->switch_output_voltage(pcr, voltage);
 855
 856	return 0;
 857}
 858EXPORT_SYMBOL_GPL(rtsx_pci_switch_output_voltage);
 859
 860unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr)
 861{
 862	unsigned int val;
 863
 864	val = rtsx_pci_readl(pcr, RTSX_BIPR);
 865	if (pcr->ops->cd_deglitch)
 866		val = pcr->ops->cd_deglitch(pcr);
 867
 868	return val;
 869}
 870EXPORT_SYMBOL_GPL(rtsx_pci_card_exist);
 871
 872void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr)
 873{
 874	struct completion finish;
 875
 876	pcr->finish_me = &finish;
 877	init_completion(&finish);
 878
 879	if (pcr->done)
 880		complete(pcr->done);
 881
 882	if (!pcr->remove_pci)
 883		rtsx_pci_stop_cmd(pcr);
 884
 885	wait_for_completion_interruptible_timeout(&finish,
 886			msecs_to_jiffies(2));
 887	pcr->finish_me = NULL;
 888}
 889EXPORT_SYMBOL_GPL(rtsx_pci_complete_unfinished_transfer);
 890
 891static void rtsx_pci_card_detect(struct work_struct *work)
 892{
 893	struct delayed_work *dwork;
 894	struct rtsx_pcr *pcr;
 895	unsigned long flags;
 896	unsigned int card_detect = 0, card_inserted, card_removed;
 897	u32 irq_status;
 898
 899	dwork = to_delayed_work(work);
 900	pcr = container_of(dwork, struct rtsx_pcr, carddet_work);
 901
 902	pcr_dbg(pcr, "--> %s\n", __func__);
 903
 904	mutex_lock(&pcr->pcr_mutex);
 905	spin_lock_irqsave(&pcr->lock, flags);
 906
 907	irq_status = rtsx_pci_readl(pcr, RTSX_BIPR);
 908	pcr_dbg(pcr, "irq_status: 0x%08x\n", irq_status);
 909
 910	irq_status &= CARD_EXIST;
 911	card_inserted = pcr->card_inserted & irq_status;
 912	card_removed = pcr->card_removed;
 913	pcr->card_inserted = 0;
 914	pcr->card_removed = 0;
 915
 916	spin_unlock_irqrestore(&pcr->lock, flags);
 917
 918	if (card_inserted || card_removed) {
 919		pcr_dbg(pcr, "card_inserted: 0x%x, card_removed: 0x%x\n",
 920			card_inserted, card_removed);
 921
 922		if (pcr->ops->cd_deglitch)
 923			card_inserted = pcr->ops->cd_deglitch(pcr);
 924
 925		card_detect = card_inserted | card_removed;
 926
 927		pcr->card_exist |= card_inserted;
 928		pcr->card_exist &= ~card_removed;
 929	}
 930
 931	mutex_unlock(&pcr->pcr_mutex);
 932
 933	if ((card_detect & SD_EXIST) && pcr->slots[RTSX_SD_CARD].card_event)
 934		pcr->slots[RTSX_SD_CARD].card_event(
 935				pcr->slots[RTSX_SD_CARD].p_dev);
 936	if ((card_detect & MS_EXIST) && pcr->slots[RTSX_MS_CARD].card_event)
 937		pcr->slots[RTSX_MS_CARD].card_event(
 938				pcr->slots[RTSX_MS_CARD].p_dev);
 939}
 940
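/*
 * Over-current protection: if an OCP event is flagged for the SD slot
 * (SD_OC_NOW or SD_OC_EVER), card power and the SD output drivers are
 * switched off and the OCP status is cleared again.
 */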
 941static void rtsx_pci_process_ocp(struct rtsx_pcr *pcr)
 942{
 943	if (pcr->ops->process_ocp) {
 944		pcr->ops->process_ocp(pcr);
 945	} else {
 946		if (!pcr->option.ocp_en)
 947			return;
 948		rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
 949		if (pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
 950			rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
 951			rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
 952			rtsx_pci_clear_ocpstat(pcr);
 953			pcr->ocp_stat = 0;
 954		}
 955	}
 956}
 957
 958static int rtsx_pci_process_ocp_interrupt(struct rtsx_pcr *pcr)
 959{
 960	if (pcr->option.ocp_en)
 961		rtsx_pci_process_ocp(pcr);
 962
 963	return 0;
 964}
 965
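/*
 * Interrupt handler: pending bits are read from RTSX_BIPR and written back
 * to clear them.  Card insertion/removal bits are accumulated under
 * pcr->lock and handled later by carddet_work (scheduled with a ~200 ms
 * debounce); transfer completion or failure wakes the waiter through
 * pcr->done.
 */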
 966static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
 967{
 968	struct rtsx_pcr *pcr = dev_id;
 969	u32 int_reg;
 970
 971	if (!pcr)
 972		return IRQ_NONE;
 973
 974	spin_lock(&pcr->lock);
 975
 976	int_reg = rtsx_pci_readl(pcr, RTSX_BIPR);
 977	/* Clear interrupt flag */
 978	rtsx_pci_writel(pcr, RTSX_BIPR, int_reg);
 979	if ((int_reg & pcr->bier) == 0) {
 980		spin_unlock(&pcr->lock);
 981		return IRQ_NONE;
 982	}
 983	if (int_reg == 0xFFFFFFFF) {
 984		spin_unlock(&pcr->lock);
 985		return IRQ_HANDLED;
 986	}
 987
 988	int_reg &= (pcr->bier | 0x7FFFFF);
 989
 990	if (int_reg & SD_OC_INT)
 991		rtsx_pci_process_ocp_interrupt(pcr);
 992
 993	if (int_reg & SD_INT) {
 994		if (int_reg & SD_EXIST) {
 995			pcr->card_inserted |= SD_EXIST;
 996		} else {
 997			pcr->card_removed |= SD_EXIST;
 998			pcr->card_inserted &= ~SD_EXIST;
 999			if (PCI_PID(pcr) == PID_5261) {
1000				rtsx_pci_write_register(pcr, RTS5261_FW_STATUS,
1001					RTS5261_EXPRESS_LINK_FAIL_MASK, 0);
1002				pcr->extra_caps |= EXTRA_CAPS_SD_EXPRESS;
1003			}
1004		}
1005		pcr->dma_error_count = 0;
1006	}
1007
1008	if (int_reg & MS_INT) {
1009		if (int_reg & MS_EXIST) {
1010			pcr->card_inserted |= MS_EXIST;
1011		} else {
1012			pcr->card_removed |= MS_EXIST;
1013			pcr->card_inserted &= ~MS_EXIST;
1014		}
1015	}
1016
1017	if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) {
1018		if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) {
1019			pcr->trans_result = TRANS_RESULT_FAIL;
1020			if (pcr->done)
1021				complete(pcr->done);
1022		} else if (int_reg & TRANS_OK_INT) {
1023			pcr->trans_result = TRANS_RESULT_OK;
1024			if (pcr->done)
1025				complete(pcr->done);
1026		}
1027	}
1028
1029	if ((pcr->card_inserted || pcr->card_removed) && !(int_reg & SD_OC_INT))
1030		schedule_delayed_work(&pcr->carddet_work,
1031				msecs_to_jiffies(200));
1032
1033	spin_unlock(&pcr->lock);
1034	return IRQ_HANDLED;
1035}
1036
1037static int rtsx_pci_acquire_irq(struct rtsx_pcr *pcr)
1038{
1039	pcr_dbg(pcr, "%s: pcr->msi_en = %d, pci->irq = %d\n",
1040			__func__, pcr->msi_en, pcr->pci->irq);
1041
1042	if (request_irq(pcr->pci->irq, rtsx_pci_isr,
1043			pcr->msi_en ? 0 : IRQF_SHARED,
1044			DRV_NAME_RTSX_PCI, pcr)) {
1045		dev_err(&(pcr->pci->dev),
1046			"rtsx_sdmmc: unable to grab IRQ %d, disabling device\n",
1047			pcr->pci->irq);
1048		return -1;
1049	}
1050
1051	pcr->irq = pcr->pci->irq;
1052	pci_intx(pcr->pci, !pcr->msi_en);
1053
1054	return 0;
1055}
1056
1057static void rtsx_base_force_power_down(struct rtsx_pcr *pcr)
1058{
1059	/* Set relink_time to 0 */
1060	rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0);
1061	rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0);
1062	rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3,
1063			RELINK_TIME_MASK, 0);
1064
1065	rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
1066			D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
1067
1068	rtsx_pci_write_register(pcr, FPDCTL, ALL_POWER_DOWN, ALL_POWER_DOWN);
1069}
1070
1071static void __maybe_unused rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state, bool runtime)
1072{
1073	if (pcr->ops->turn_off_led)
1074		pcr->ops->turn_off_led(pcr);
1075
1076	rtsx_pci_writel(pcr, RTSX_BIER, 0);
1077	pcr->bier = 0;
1078
1079	rtsx_pci_write_register(pcr, PETXCFG, 0x08, 0x08);
1080	rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, pm_state);
1081
1082	if (pcr->ops->force_power_down)
1083		pcr->ops->force_power_down(pcr, pm_state, runtime);
1084	else
1085		rtsx_base_force_power_down(pcr);
1086}
1087
1088void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr)
1089{
1090	u8 val = SD_OCP_INT_EN | SD_DETECT_EN;
1091
1092	if (pcr->ops->enable_ocp) {
1093		pcr->ops->enable_ocp(pcr);
1094	} else {
1095		rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
1096		rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
1097	}
1098
1099}
1100
1101void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr)
1102{
1103	u8 mask = SD_OCP_INT_EN | SD_DETECT_EN;
1104
1105	if (pcr->ops->disable_ocp) {
1106		pcr->ops->disable_ocp(pcr);
1107	} else {
1108		rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
1109		rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
1110				OC_POWER_DOWN);
1111	}
1112}
1113
1114void rtsx_pci_init_ocp(struct rtsx_pcr *pcr)
1115{
1116	if (pcr->ops->init_ocp) {
1117		pcr->ops->init_ocp(pcr);
1118	} else {
1119		struct rtsx_cr_option *option = &(pcr->option);
1120
1121		if (option->ocp_en) {
1122			u8 val = option->sd_800mA_ocp_thd;
1123
1124			rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
1125			rtsx_pci_write_register(pcr, REG_OCPPARA1,
1126				SD_OCP_TIME_MASK, SD_OCP_TIME_800);
1127			rtsx_pci_write_register(pcr, REG_OCPPARA2,
1128				SD_OCP_THD_MASK, val);
1129			rtsx_pci_write_register(pcr, REG_OCPGLITCH,
1130				SD_OCP_GLITCH_MASK, pcr->hw_param.ocp_glitch);
1131			rtsx_pci_enable_ocp(pcr);
1132		}
1133	}
1134}
1135
1136int rtsx_pci_get_ocpstat(struct rtsx_pcr *pcr, u8 *val)
1137{
1138	if (pcr->ops->get_ocpstat)
1139		return pcr->ops->get_ocpstat(pcr, val);
1140	else
1141		return rtsx_pci_read_register(pcr, REG_OCPSTAT, val);
1142}
1143
1144void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr)
1145{
1146	if (pcr->ops->clear_ocpstat) {
1147		pcr->ops->clear_ocpstat(pcr);
1148	} else {
1149		u8 mask = SD_OCP_INT_CLR | SD_OC_CLR;
1150		u8 val = SD_OCP_INT_CLR | SD_OC_CLR;
1151
1152		rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
1153		udelay(100);
1154		rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
1155	}
1156}
1157
1158void rtsx_pci_enable_oobs_polling(struct rtsx_pcr *pcr)
1159{
1160	u16 val;
1161
1162	if ((PCI_PID(pcr) != PID_525A) && (PCI_PID(pcr) != PID_5260)) {
1163		rtsx_pci_read_phy_register(pcr, 0x01, &val);
1164		val |= 1<<9;
1165		rtsx_pci_write_phy_register(pcr, 0x01, val);
1166	}
1167	rtsx_pci_write_register(pcr, REG_CFG_OOBS_OFF_TIMER, 0xFF, 0x32);
1168	rtsx_pci_write_register(pcr, REG_CFG_OOBS_ON_TIMER, 0xFF, 0x05);
1169	rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x83);
1170	rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0xDE);
1171
1172}
1173
1174void rtsx_pci_disable_oobs_polling(struct rtsx_pcr *pcr)
1175{
1176	u16 val;
1177
1178	if ((PCI_PID(pcr) != PID_525A) && (PCI_PID(pcr) != PID_5260)) {
1179		rtsx_pci_read_phy_register(pcr, 0x01, &val);
1180		val &= ~(1<<9);
1181		rtsx_pci_write_phy_register(pcr, 0x01, val);
1182	}
1183	rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x03);
1184	rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0x00);
1185
1186}
1187
1188int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr)
1189{
1190	rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
1191		MS_CLK_EN | SD40_CLK_EN, 0);
1192	rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
1193	rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
1194
1195	msleep(50);
1196
1197	rtsx_pci_card_pull_ctl_disable(pcr, RTSX_SD_CARD);
1198
1199	return 0;
1200}
1201
1202int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr)
1203{
1204	rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
1205		MS_CLK_EN | SD40_CLK_EN, 0);
1206
1207	rtsx_pci_card_pull_ctl_disable(pcr, RTSX_MS_CARD);
1208
1209	rtsx_pci_write_register(pcr, CARD_OE, MS_OUTPUT_EN, 0);
1210	rtsx_pci_card_power_off(pcr, RTSX_MS_CARD);
1211
1212	return 0;
1213}
1214
1215static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
1216{
1217	struct pci_dev *pdev = pcr->pci;
1218	int err;
1219
1220	if (PCI_PID(pcr) == PID_5228)
1221		rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG1, RTS5228_LDO1_SR_TIME_MASK,
1222				RTS5228_LDO1_SR_0_5);
1223
1224	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
1225
1226	rtsx_pci_enable_bus_int(pcr);
1227
1228	/* Power on SSC */
1229	if (PCI_PID(pcr) == PID_5261) {
1230		/* Gating real mcu clock */
1231		err = rtsx_pci_write_register(pcr, RTS5261_FW_CFG1,
1232			RTS5261_MCU_CLOCK_GATING, 0);
1233		err = rtsx_pci_write_register(pcr, RTS5261_REG_FPDCTL,
1234			SSC_POWER_DOWN, 0);
1235	} else {
1236		err = rtsx_pci_write_register(pcr, FPDCTL, SSC_POWER_DOWN, 0);
1237	}
1238	if (err < 0)
1239		return err;
1240
1241	/* Wait SSC power stable */
1242	udelay(200);
1243
1244	rtsx_disable_aspm(pcr);
1245	if (pcr->ops->optimize_phy) {
1246		err = pcr->ops->optimize_phy(pcr);
1247		if (err < 0)
1248			return err;
1249	}
1250
1251	rtsx_pci_init_cmd(pcr);
1252
1253	/* Set mcu_cnt to 7 to ensure data can be sampled properly */
1254	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV, 0x07, 0x07);
1255
1256	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, HOST_SLEEP_STATE, 0x03, 0x00);
1257	/* Disable card clock */
1258	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, 0x1E, 0);
1259	/* Reset delink mode */
1260	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x0A, 0);
1261	/* Card driving select */
1262	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DRIVE_SEL,
1263			0xFF, pcr->card_drive_sel);
1264	/* Enable SSC Clock */
1265	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1,
1266			0xFF, SSC_8X_EN | SSC_SEL_4M);
1267	if (PCI_PID(pcr) == PID_5261)
1268		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
1269			RTS5261_SSC_DEPTH_2M);
1270	else if (PCI_PID(pcr) == PID_5228)
1271		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
1272			RTS5228_SSC_DEPTH_2M);
1273	else
1274		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);
1275
1276	/* Disable cd_pwr_save */
1277	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x16, 0x10);
1278	/* Clear Link Ready Interrupt */
1279	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
1280			LINK_RDY_INT, LINK_RDY_INT);
1281	/* Enlarge the estimation window of PERST# glitch
1282	 * to reduce the chance of invalid card interrupt
1283	 */
1284	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PERST_GLITCH_WIDTH, 0xFF, 0x80);
1285	/* Update RC oscillator to 400k
1286	 * bit[0] F_HIGH: for RC oscillator, Rst_value is 1'b1
1287	 *                1: 2M  0: 400k
1288	 */
1289	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RCCTL, 0x01, 0x00);
1290	/* Set interrupt write clear
1291	 * bit 1: U_elbi_if_rd_clr_en
1292	 *	1: Enable ELBI interrupt[31:22] & [7:0] flag read clear
1293	 *	0: ELBI interrupt flag[31:22] & [7:0] only can be write clear
1294	 */
1295	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, NFTS_TX_CTRL, 0x02, 0);
1296
1297	err = rtsx_pci_send_cmd(pcr, 100);
1298	if (err < 0)
1299		return err;
1300
1301	switch (PCI_PID(pcr)) {
1302	case PID_5250:
1303	case PID_524A:
1304	case PID_525A:
1305	case PID_5260:
1306	case PID_5261:
1307	case PID_5228:
1308		rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
1309		break;
1310	default:
1311		break;
1312	}
1313
 1314	/* Initialize over-current protection (OCP) */
1315	rtsx_pci_init_ocp(pcr);
1316
1317	/* Enable clk_request_n to enable clock power management */
1318	pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
1319					0, PCI_EXP_LNKCTL_CLKREQ_EN);
1320	/* Enter L1 when host tx idle */
1321	pci_write_config_byte(pdev, 0x70F, 0x5B);
1322
1323	if (pcr->ops->extra_init_hw) {
1324		err = pcr->ops->extra_init_hw(pcr);
1325		if (err < 0)
1326			return err;
1327	}
1328
1329	if (pcr->aspm_mode == ASPM_MODE_REG)
1330		rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30);
1331
 1332	/* No card-detect interrupt is generated when the driver is probed with
 1333	 * a card already inserted, so pcr->card_exist must be initialized here.
 1334	 */
1335	if (pcr->ops->cd_deglitch)
1336		pcr->card_exist = pcr->ops->cd_deglitch(pcr);
1337	else
1338		pcr->card_exist = rtsx_pci_readl(pcr, RTSX_BIPR) & CARD_EXIST;
1339
1340	return 0;
1341}
1342
1343static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
1344{
1345	int err;
1346	u16 cfg_val;
1347	u8 val;
1348
1349	spin_lock_init(&pcr->lock);
1350	mutex_init(&pcr->pcr_mutex);
1351
1352	switch (PCI_PID(pcr)) {
1353	default:
1354	case 0x5209:
1355		rts5209_init_params(pcr);
1356		break;
1357
1358	case 0x5229:
1359		rts5229_init_params(pcr);
1360		break;
1361
1362	case 0x5289:
1363		rtl8411_init_params(pcr);
1364		break;
1365
1366	case 0x5227:
1367		rts5227_init_params(pcr);
1368		break;
1369
1370	case 0x522A:
1371		rts522a_init_params(pcr);
1372		break;
1373
1374	case 0x5249:
1375		rts5249_init_params(pcr);
1376		break;
1377
1378	case 0x524A:
1379		rts524a_init_params(pcr);
1380		break;
1381
1382	case 0x525A:
1383		rts525a_init_params(pcr);
1384		break;
1385
1386	case 0x5287:
1387		rtl8411b_init_params(pcr);
1388		break;
1389
1390	case 0x5286:
1391		rtl8402_init_params(pcr);
1392		break;
1393
1394	case 0x5260:
1395		rts5260_init_params(pcr);
1396		break;
1397
1398	case 0x5261:
1399		rts5261_init_params(pcr);
1400		break;
1401
1402	case 0x5228:
1403		rts5228_init_params(pcr);
1404		break;
1405	}
1406
1407	pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
1408			PCI_PID(pcr), pcr->ic_version);
1409
1410	pcr->slots = kcalloc(pcr->num_slots, sizeof(struct rtsx_slot),
1411			GFP_KERNEL);
1412	if (!pcr->slots)
1413		return -ENOMEM;
1414
1415	if (pcr->aspm_mode == ASPM_MODE_CFG) {
1416		pcie_capability_read_word(pcr->pci, PCI_EXP_LNKCTL, &cfg_val);
1417		if (cfg_val & PCI_EXP_LNKCTL_ASPM_L1)
1418			pcr->aspm_enabled = true;
1419		else
1420			pcr->aspm_enabled = false;
1421
1422	} else if (pcr->aspm_mode == ASPM_MODE_REG) {
1423		rtsx_pci_read_register(pcr, ASPM_FORCE_CTL, &val);
1424		if (val & FORCE_ASPM_CTL0 && val & FORCE_ASPM_CTL1)
1425			pcr->aspm_enabled = false;
1426		else
1427			pcr->aspm_enabled = true;
1428	}
1429
1430	if (pcr->ops->fetch_vendor_settings)
1431		pcr->ops->fetch_vendor_settings(pcr);
1432
1433	pcr_dbg(pcr, "pcr->aspm_en = 0x%x\n", pcr->aspm_en);
1434	pcr_dbg(pcr, "pcr->sd30_drive_sel_1v8 = 0x%x\n",
1435			pcr->sd30_drive_sel_1v8);
1436	pcr_dbg(pcr, "pcr->sd30_drive_sel_3v3 = 0x%x\n",
1437			pcr->sd30_drive_sel_3v3);
1438	pcr_dbg(pcr, "pcr->card_drive_sel = 0x%x\n",
1439			pcr->card_drive_sel);
1440	pcr_dbg(pcr, "pcr->flags = 0x%x\n", pcr->flags);
1441
1442	pcr->state = PDEV_STAT_IDLE;
1443	err = rtsx_pci_init_hw(pcr);
1444	if (err < 0) {
1445		kfree(pcr->slots);
1446		return err;
1447	}
1448
1449	return 0;
1450}
1451
1452static int rtsx_pci_probe(struct pci_dev *pcidev,
1453			  const struct pci_device_id *id)
1454{
1455	struct rtsx_pcr *pcr;
1456	struct pcr_handle *handle;
1457	u32 base, len;
1458	int ret, i, bar = 0;
1459
1460	dev_dbg(&(pcidev->dev),
1461		": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
1462		pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
1463		(int)pcidev->revision);
1464
1465	ret = dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32));
1466	if (ret < 0)
1467		return ret;
1468
1469	ret = pci_enable_device(pcidev);
1470	if (ret)
1471		return ret;
1472
1473	ret = pci_request_regions(pcidev, DRV_NAME_RTSX_PCI);
1474	if (ret)
1475		goto disable;
1476
1477	pcr = kzalloc(sizeof(*pcr), GFP_KERNEL);
1478	if (!pcr) {
1479		ret = -ENOMEM;
1480		goto release_pci;
1481	}
1482
1483	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
1484	if (!handle) {
1485		ret = -ENOMEM;
1486		goto free_pcr;
1487	}
1488	handle->pcr = pcr;
1489
1490	idr_preload(GFP_KERNEL);
1491	spin_lock(&rtsx_pci_lock);
1492	ret = idr_alloc(&rtsx_pci_idr, pcr, 0, 0, GFP_NOWAIT);
1493	if (ret >= 0)
1494		pcr->id = ret;
1495	spin_unlock(&rtsx_pci_lock);
1496	idr_preload_end();
1497	if (ret < 0)
1498		goto free_handle;
1499
1500	pcr->pci = pcidev;
1501	dev_set_drvdata(&pcidev->dev, handle);
1502
1503	if (CHK_PCI_PID(pcr, 0x525A))
1504		bar = 1;
1505	len = pci_resource_len(pcidev, bar);
1506	base = pci_resource_start(pcidev, bar);
1507	pcr->remap_addr = ioremap(base, len);
1508	if (!pcr->remap_addr) {
1509		ret = -ENOMEM;
1510		goto free_idr;
1511	}
1512
1513	pcr->rtsx_resv_buf = dma_alloc_coherent(&(pcidev->dev),
1514			RTSX_RESV_BUF_LEN, &(pcr->rtsx_resv_buf_addr),
1515			GFP_KERNEL);
1516	if (pcr->rtsx_resv_buf == NULL) {
1517		ret = -ENXIO;
1518		goto unmap;
1519	}
1520	pcr->host_cmds_ptr = pcr->rtsx_resv_buf;
1521	pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
1522	pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
1523	pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;
1524	pcr->card_inserted = 0;
1525	pcr->card_removed = 0;
1526	INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
1527
1528	pcr->msi_en = msi_en;
1529	if (pcr->msi_en) {
1530		ret = pci_enable_msi(pcidev);
1531		if (ret)
1532			pcr->msi_en = false;
1533	}
1534
1535	ret = rtsx_pci_acquire_irq(pcr);
1536	if (ret < 0)
1537		goto disable_msi;
1538
1539	pci_set_master(pcidev);
1540	synchronize_irq(pcr->irq);
1541
1542	ret = rtsx_pci_init_chip(pcr);
1543	if (ret < 0)
1544		goto disable_irq;
1545
1546	for (i = 0; i < ARRAY_SIZE(rtsx_pcr_cells); i++) {
1547		rtsx_pcr_cells[i].platform_data = handle;
1548		rtsx_pcr_cells[i].pdata_size = sizeof(*handle);
1549	}
1550
1551
1552	ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells,
1553			ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL);
1554	if (ret < 0)
1555		goto free_slots;
1556
1557	pm_runtime_allow(&pcidev->dev);
1558	pm_runtime_put(&pcidev->dev);
1559
1560	return 0;
1561
1562free_slots:
1563	kfree(pcr->slots);
1564disable_irq:
1565	free_irq(pcr->irq, (void *)pcr);
1566disable_msi:
1567	if (pcr->msi_en)
1568		pci_disable_msi(pcr->pci);
1569	dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
1570			pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
1571unmap:
1572	iounmap(pcr->remap_addr);
1573free_idr:
1574	spin_lock(&rtsx_pci_lock);
1575	idr_remove(&rtsx_pci_idr, pcr->id);
1576	spin_unlock(&rtsx_pci_lock);
1577free_handle:
1578	kfree(handle);
1579free_pcr:
1580	kfree(pcr);
1581release_pci:
1582	pci_release_regions(pcidev);
1583disable:
1584	pci_disable_device(pcidev);
1585
1586	return ret;
1587}
1588
1589static void rtsx_pci_remove(struct pci_dev *pcidev)
1590{
1591	struct pcr_handle *handle = pci_get_drvdata(pcidev);
1592	struct rtsx_pcr *pcr = handle->pcr;
1593
1594	pcr->remove_pci = true;
1595
1596	pm_runtime_get_sync(&pcidev->dev);
1597	pm_runtime_forbid(&pcidev->dev);
1598
1599	/* Disable interrupts at the pcr level */
1600	spin_lock_irq(&pcr->lock);
1601	rtsx_pci_writel(pcr, RTSX_BIER, 0);
1602	pcr->bier = 0;
1603	spin_unlock_irq(&pcr->lock);
1604
1605	cancel_delayed_work_sync(&pcr->carddet_work);
1606
1607	mfd_remove_devices(&pcidev->dev);
1608
1609	dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
1610			pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
1611	free_irq(pcr->irq, (void *)pcr);
1612	if (pcr->msi_en)
1613		pci_disable_msi(pcr->pci);
1614	iounmap(pcr->remap_addr);
1615
1616	pci_release_regions(pcidev);
1617	pci_disable_device(pcidev);
1618
1619	spin_lock(&rtsx_pci_lock);
1620	idr_remove(&rtsx_pci_idr, pcr->id);
1621	spin_unlock(&rtsx_pci_lock);
1622
1623	kfree(pcr->slots);
1624	kfree(pcr);
1625	kfree(handle);
1626
1627	dev_dbg(&(pcidev->dev),
1628		": Realtek PCI-E Card Reader at %s [%04x:%04x] has been removed\n",
1629		pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device);
1630}
1631
1632static int __maybe_unused rtsx_pci_suspend(struct device *dev_d)
1633{
1634	struct pci_dev *pcidev = to_pci_dev(dev_d);
1635	struct pcr_handle *handle = pci_get_drvdata(pcidev);
1636	struct rtsx_pcr *pcr = handle->pcr;
1637
1638	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1639
1640	cancel_delayed_work_sync(&pcr->carddet_work);
1641
1642	mutex_lock(&pcr->pcr_mutex);
1643
1644	rtsx_pci_power_off(pcr, HOST_ENTER_S3, false);
1645
1646	mutex_unlock(&pcr->pcr_mutex);
1647	return 0;
1648}
1649
1650static int __maybe_unused rtsx_pci_resume(struct device *dev_d)
1651{
1652	struct pci_dev *pcidev = to_pci_dev(dev_d);
1653	struct pcr_handle *handle = pci_get_drvdata(pcidev);
1654	struct rtsx_pcr *pcr = handle->pcr;
1655	int ret = 0;
1656
1657	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1658
1659	mutex_lock(&pcr->pcr_mutex);
1660
1661	ret = rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
1662	if (ret)
1663		goto out;
1664
1665	ret = rtsx_pci_init_hw(pcr);
1666	if (ret)
1667		goto out;
1668
1669out:
1670	mutex_unlock(&pcr->pcr_mutex);
1671	return ret;
1672}
1673
1674#ifdef CONFIG_PM
1675
1676static void rtsx_enable_aspm(struct rtsx_pcr *pcr)
1677{
1678	if (pcr->ops->set_aspm)
1679		pcr->ops->set_aspm(pcr, true);
1680	else
1681		rtsx_comm_set_aspm(pcr, true);
1682}
1683
1684static void rtsx_comm_pm_power_saving(struct rtsx_pcr *pcr)
1685{
1686	struct rtsx_cr_option *option = &pcr->option;
1687
1688	if (option->ltr_enabled) {
1689		u32 latency = option->ltr_l1off_latency;
1690
1691		if (rtsx_check_dev_flag(pcr, L1_SNOOZE_TEST_EN))
1692			mdelay(option->l1_snooze_delay);
1693
1694		rtsx_set_ltr_latency(pcr, latency);
1695	}
1696
1697	if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
1698		rtsx_set_l1off_sub_cfg_d0(pcr, 0);
1699
1700	rtsx_enable_aspm(pcr);
1701}
1702
1703static void rtsx_pm_power_saving(struct rtsx_pcr *pcr)
1704{
1705	rtsx_comm_pm_power_saving(pcr);
1706}
1707
1708static void rtsx_pci_shutdown(struct pci_dev *pcidev)
1709{
1710	struct pcr_handle *handle = pci_get_drvdata(pcidev);
1711	struct rtsx_pcr *pcr = handle->pcr;
1712
1713	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1714
1715	rtsx_pci_power_off(pcr, HOST_ENTER_S1, false);
1716
1717	pci_disable_device(pcidev);
1718	free_irq(pcr->irq, (void *)pcr);
1719	if (pcr->msi_en)
1720		pci_disable_msi(pcr->pci);
1721}
1722
1723static int rtsx_pci_runtime_idle(struct device *device)
1724{
1725	struct pci_dev *pcidev = to_pci_dev(device);
1726	struct pcr_handle *handle = pci_get_drvdata(pcidev);
1727	struct rtsx_pcr *pcr = handle->pcr;
1728
1729	dev_dbg(device, "--> %s\n", __func__);
1730
1731	mutex_lock(&pcr->pcr_mutex);
1732
1733	pcr->state = PDEV_STAT_IDLE;
1734
1735	if (pcr->ops->disable_auto_blink)
1736		pcr->ops->disable_auto_blink(pcr);
1737	if (pcr->ops->turn_off_led)
1738		pcr->ops->turn_off_led(pcr);
1739
1740	rtsx_pm_power_saving(pcr);
1741
1742	mutex_unlock(&pcr->pcr_mutex);
1743
1744	if (pcr->rtd3_en)
1745		pm_schedule_suspend(device, 10000);
1746
1747	return -EBUSY;
1748}
1749
1750static int rtsx_pci_runtime_suspend(struct device *device)
1751{
1752	struct pci_dev *pcidev = to_pci_dev(device);
1753	struct pcr_handle *handle = pci_get_drvdata(pcidev);
1754	struct rtsx_pcr *pcr = handle->pcr;
1755
1756	dev_dbg(device, "--> %s\n", __func__);
1757
1758	cancel_delayed_work_sync(&pcr->carddet_work);
1759
1760	mutex_lock(&pcr->pcr_mutex);
1761	rtsx_pci_power_off(pcr, HOST_ENTER_S3, true);
1762
1763	mutex_unlock(&pcr->pcr_mutex);
1764
1765	return 0;
1766}
1767
1768static int rtsx_pci_runtime_resume(struct device *device)
1769{
1770	struct pci_dev *pcidev = to_pci_dev(device);
1771	struct pcr_handle *handle = pci_get_drvdata(pcidev);
1772	struct rtsx_pcr *pcr = handle->pcr;
1773
1774	dev_dbg(device, "--> %s\n", __func__);
1775
1776	mutex_lock(&pcr->pcr_mutex);
1777
1778	rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
1779
1780	rtsx_pci_init_hw(pcr);
1781
1782	if (pcr->slots[RTSX_SD_CARD].p_dev != NULL) {
1783		pcr->slots[RTSX_SD_CARD].card_event(
1784				pcr->slots[RTSX_SD_CARD].p_dev);
1785	}
1786
1787	mutex_unlock(&pcr->pcr_mutex);
1788	return 0;
1789}
1790
1791#else /* CONFIG_PM */
1792
1793#define rtsx_pci_shutdown NULL
1794#define rtsx_pci_runtime_suspend NULL
 1795#define rtsx_pci_runtime_resume NULL
1796
1797#endif /* CONFIG_PM */
1798
1799static const struct dev_pm_ops rtsx_pci_pm_ops = {
1800	SET_SYSTEM_SLEEP_PM_OPS(rtsx_pci_suspend, rtsx_pci_resume)
1801	SET_RUNTIME_PM_OPS(rtsx_pci_runtime_suspend, rtsx_pci_runtime_resume, rtsx_pci_runtime_idle)
1802};
1803
1804static struct pci_driver rtsx_pci_driver = {
1805	.name = DRV_NAME_RTSX_PCI,
1806	.id_table = rtsx_pci_ids,
1807	.probe = rtsx_pci_probe,
1808	.remove = rtsx_pci_remove,
1809	.driver.pm = &rtsx_pci_pm_ops,
1810	.shutdown = rtsx_pci_shutdown,
1811};
1812module_pci_driver(rtsx_pci_driver);
1813
1814MODULE_LICENSE("GPL");
1815MODULE_AUTHOR("Wei WANG <wei_wang@realsil.com.cn>");
1816MODULE_DESCRIPTION("Realtek PCI-E Card Reader Driver");