   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/* Driver for Realtek PCI-Express card reader
   3 *
   4 * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
   5 *
   6 * Author:
   7 *   Wei WANG <wei_wang@realsil.com.cn>
   8 */
   9
  10#include <linux/pci.h>
  11#include <linux/module.h>
  12#include <linux/slab.h>
  13#include <linux/dma-mapping.h>
  14#include <linux/highmem.h>
  15#include <linux/interrupt.h>
  16#include <linux/delay.h>
  17#include <linux/idr.h>
  18#include <linux/platform_device.h>
  19#include <linux/mfd/core.h>
  20#include <linux/rtsx_pci.h>
  21#include <linux/mmc/card.h>
  22#include <asm/unaligned.h>
  23#include <linux/pm.h>
  24#include <linux/pm_runtime.h>
  25
  26#include "rtsx_pcr.h"
  27#include "rts5261.h"
  28#include "rts5228.h"
  29
  30static bool msi_en = true;
  31module_param(msi_en, bool, S_IRUGO | S_IWUSR);
  32MODULE_PARM_DESC(msi_en, "Enable MSI");
  33
  34static DEFINE_IDR(rtsx_pci_idr);
  35static DEFINE_SPINLOCK(rtsx_pci_lock);
  36
  37static struct mfd_cell rtsx_pcr_cells[] = {
  38	[RTSX_SD_CARD] = {
  39		.name = DRV_NAME_RTSX_PCI_SDMMC,
  40	},
  41};
  42
  43static const struct pci_device_id rtsx_pci_ids[] = {
  44	{ PCI_DEVICE(0x10EC, 0x5209), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  45	{ PCI_DEVICE(0x10EC, 0x5229), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  46	{ PCI_DEVICE(0x10EC, 0x5289), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  47	{ PCI_DEVICE(0x10EC, 0x5227), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  48	{ PCI_DEVICE(0x10EC, 0x522A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  49	{ PCI_DEVICE(0x10EC, 0x5249), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  50	{ PCI_DEVICE(0x10EC, 0x5287), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  51	{ PCI_DEVICE(0x10EC, 0x5286), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  52	{ PCI_DEVICE(0x10EC, 0x524A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  53	{ PCI_DEVICE(0x10EC, 0x525A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  54	{ PCI_DEVICE(0x10EC, 0x5260), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  55	{ PCI_DEVICE(0x10EC, 0x5261), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  56	{ PCI_DEVICE(0x10EC, 0x5228), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  57	{ 0, }
  58};
  59
  60MODULE_DEVICE_TABLE(pci, rtsx_pci_ids);
  61
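/*
 * Program the LTR (Latency Tolerance Reporting) value: the 32-bit latency is
 * written one byte at a time into MSGTXDATA0..3, then LTR transmission is
 * enabled in software latency mode through LTR_CTL.
 */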
  62static int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
  63{
  64	rtsx_pci_write_register(pcr, MSGTXDATA0,
  65				MASK_8_BIT_DEF, (u8) (latency & 0xFF));
  66	rtsx_pci_write_register(pcr, MSGTXDATA1,
  67				MASK_8_BIT_DEF, (u8)((latency >> 8) & 0xFF));
  68	rtsx_pci_write_register(pcr, MSGTXDATA2,
  69				MASK_8_BIT_DEF, (u8)((latency >> 16) & 0xFF));
  70	rtsx_pci_write_register(pcr, MSGTXDATA3,
  71				MASK_8_BIT_DEF, (u8)((latency >> 24) & 0xFF));
  72	rtsx_pci_write_register(pcr, LTR_CTL, LTR_TX_EN_MASK |
  73		LTR_LATENCY_MODE_MASK, LTR_TX_EN_1 | LTR_LATENCY_MODE_SW);
  74
  75	return 0;
  76}
  77
  78int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
  79{
  80	return rtsx_comm_set_ltr_latency(pcr, latency);
  81}
  82
  83static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable)
  84{
  85	if (pcr->aspm_enabled == enable)
  86		return;
  87
  88	if (pcr->aspm_mode == ASPM_MODE_CFG) {
  89		pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
  90						PCI_EXP_LNKCTL_ASPMC,
  91						enable ? pcr->aspm_en : 0);
  92	} else if (pcr->aspm_mode == ASPM_MODE_REG) {
  93		if (pcr->aspm_en & 0x02)
  94			rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
  95				FORCE_ASPM_CTL1, enable ? 0 : FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
  96		else
  97			rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
  98				FORCE_ASPM_CTL1, FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
  99	}
 100
 101	if (!enable && (pcr->aspm_en & 0x02))
 102		mdelay(10);
 103
 104	pcr->aspm_enabled = enable;
 105}
 106
 107static void rtsx_disable_aspm(struct rtsx_pcr *pcr)
 108{
 109	if (pcr->ops->set_aspm)
 110		pcr->ops->set_aspm(pcr, false);
 111	else
 112		rtsx_comm_set_aspm(pcr, false);
 113}
 114
 115int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val)
 116{
 117	rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, val);
 118
 119	return 0;
 120}
 121
 122static void rtsx_set_l1off_sub_cfg_d0(struct rtsx_pcr *pcr, int active)
 123{
 124	if (pcr->ops->set_l1off_cfg_sub_d0)
 125		pcr->ops->set_l1off_cfg_sub_d0(pcr, active);
 126}
 127
 128static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr)
 129{
 130	struct rtsx_cr_option *option = &pcr->option;
 131
 132	rtsx_disable_aspm(pcr);
 133
 134	/* Fixes DMA transfer timeout issue after disabling ASPM on RTS5260 */
 135	msleep(1);
 136
 137	if (option->ltr_enabled)
 138		rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
 139
 140	if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
 141		rtsx_set_l1off_sub_cfg_d0(pcr, 1);
 142}
 143
 144static void rtsx_pm_full_on(struct rtsx_pcr *pcr)
 145{
 146	rtsx_comm_pm_full_on(pcr);
 147}
 148
 149void rtsx_pci_start_run(struct rtsx_pcr *pcr)
 150{
 151	/* If pci device removed, don't queue idle work any more */
 152	if (pcr->remove_pci)
 153		return;
 154
 155	if (pcr->state != PDEV_STAT_RUN) {
 156		pcr->state = PDEV_STAT_RUN;
 157		if (pcr->ops->enable_auto_blink)
 158			pcr->ops->enable_auto_blink(pcr);
 159		rtsx_pm_full_on(pcr);
 160	}
 161}
 162EXPORT_SYMBOL_GPL(rtsx_pci_start_run);
 163
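/*
 * Internal registers are accessed indirectly through the HAIMR mailbox: the
 * address, mask and data are packed into a single 32-bit write, then HAIMR is
 * polled until the TRANS_END bit clears and the data byte read back is
 * checked against what was written.
 */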
 164int rtsx_pci_write_register(struct rtsx_pcr *pcr, u16 addr, u8 mask, u8 data)
 165{
 166	int i;
 167	u32 val = HAIMR_WRITE_START;
 168
 169	val |= (u32)(addr & 0x3FFF) << 16;
 170	val |= (u32)mask << 8;
 171	val |= (u32)data;
 172
 173	rtsx_pci_writel(pcr, RTSX_HAIMR, val);
 174
 175	for (i = 0; i < MAX_RW_REG_CNT; i++) {
 176		val = rtsx_pci_readl(pcr, RTSX_HAIMR);
 177		if ((val & HAIMR_TRANS_END) == 0) {
 178			if (data != (u8)val)
 179				return -EIO;
 180			return 0;
 181		}
 182	}
 183
 184	return -ETIMEDOUT;
 185}
 186EXPORT_SYMBOL_GPL(rtsx_pci_write_register);
 187
 188int rtsx_pci_read_register(struct rtsx_pcr *pcr, u16 addr, u8 *data)
 189{
 190	u32 val = HAIMR_READ_START;
 191	int i;
 192
 193	val |= (u32)(addr & 0x3FFF) << 16;
 194	rtsx_pci_writel(pcr, RTSX_HAIMR, val);
 195
 196	for (i = 0; i < MAX_RW_REG_CNT; i++) {
 197		val = rtsx_pci_readl(pcr, RTSX_HAIMR);
 198		if ((val & HAIMR_TRANS_END) == 0)
 199			break;
 200	}
 201
 202	if (i >= MAX_RW_REG_CNT)
 203		return -ETIMEDOUT;
 204
 205	if (data)
 206		*data = (u8)(val & 0xFF);
 207
 208	return 0;
 209}
 210EXPORT_SYMBOL_GPL(rtsx_pci_read_register);
 211
 212int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
 213{
 214	int err, i, finished = 0;
 215	u8 tmp;
 216
 217	rtsx_pci_write_register(pcr, PHYDATA0, 0xFF, (u8)val);
 218	rtsx_pci_write_register(pcr, PHYDATA1, 0xFF, (u8)(val >> 8));
 219	rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr);
 220	rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x81);
 221
 222	for (i = 0; i < 100000; i++) {
 223		err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
 224		if (err < 0)
 225			return err;
 226
 227		if (!(tmp & 0x80)) {
 228			finished = 1;
 229			break;
 230		}
 231	}
 232
 233	if (!finished)
 234		return -ETIMEDOUT;
 235
 236	return 0;
 237}
 238
 239int rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
 240{
 241	if (pcr->ops->write_phy)
 242		return pcr->ops->write_phy(pcr, addr, val);
 243
 244	return __rtsx_pci_write_phy_register(pcr, addr, val);
 245}
 246EXPORT_SYMBOL_GPL(rtsx_pci_write_phy_register);
 247
 248int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
 249{
 250	int err, i, finished = 0;
 251	u16 data;
 252	u8 tmp, val1, val2;
 253
 254	rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr);
 255	rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x80);
 256
 257	for (i = 0; i < 100000; i++) {
 258		err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
 259		if (err < 0)
 260			return err;
 261
 262		if (!(tmp & 0x80)) {
 263			finished = 1;
 264			break;
 265		}
 266	}
 267
 268	if (!finished)
 269		return -ETIMEDOUT;
 270
 271	rtsx_pci_read_register(pcr, PHYDATA0, &val1);
 272	rtsx_pci_read_register(pcr, PHYDATA1, &val2);
 273	data = val1 | (val2 << 8);
 274
 275	if (val)
 276		*val = data;
 277
 278	return 0;
 279}
 280
 281int rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
 282{
 283	if (pcr->ops->read_phy)
 284		return pcr->ops->read_phy(pcr, addr, val);
 285
 286	return __rtsx_pci_read_phy_register(pcr, addr, val);
 287}
 288EXPORT_SYMBOL_GPL(rtsx_pci_read_phy_register);
 289
 290void rtsx_pci_stop_cmd(struct rtsx_pcr *pcr)
 291{
 292	if (pcr->ops->stop_cmd)
 293		return pcr->ops->stop_cmd(pcr);
 294
 295	rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
 296	rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
 297
 298	rtsx_pci_write_register(pcr, DMACTL, 0x80, 0x80);
 299	rtsx_pci_write_register(pcr, RBCTL, 0x80, 0x80);
 300}
 301EXPORT_SYMBOL_GPL(rtsx_pci_stop_cmd);
 302
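/*
 * Queue one register access in the host command buffer. Each entry is a
 * little-endian 32-bit word: cmd_type in bits 31:30, register address in bits
 * 29:16, mask in bits 15:8 and data in bits 7:0. Entries accumulate at
 * pcr->ci until rtsx_pci_send_cmd() starts the whole batch.
 */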
 303void rtsx_pci_add_cmd(struct rtsx_pcr *pcr,
 304		u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
 305{
 306	unsigned long flags;
 307	u32 val = 0;
 308	u32 *ptr = (u32 *)(pcr->host_cmds_ptr);
 309
 310	val |= (u32)(cmd_type & 0x03) << 30;
 311	val |= (u32)(reg_addr & 0x3FFF) << 16;
 312	val |= (u32)mask << 8;
 313	val |= (u32)data;
 314
 315	spin_lock_irqsave(&pcr->lock, flags);
 316	ptr += pcr->ci;
 317	if (pcr->ci < (HOST_CMDS_BUF_LEN / 4)) {
 318		put_unaligned_le32(val, ptr);
 319		ptr++;
 320		pcr->ci++;
 321	}
 322	spin_unlock_irqrestore(&pcr->lock, flags);
 323}
 324EXPORT_SYMBOL_GPL(rtsx_pci_add_cmd);
 325
 326void rtsx_pci_send_cmd_no_wait(struct rtsx_pcr *pcr)
 327{
 328	u32 val = 1 << 31;
 329
 330	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
 331
 332	val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
 333	/* Hardware Auto Response */
 334	val |= 0x40000000;
 335	rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
 336}
 337EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd_no_wait);
 338
 339int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout)
 340{
 341	struct completion trans_done;
 342	u32 val = 1 << 31;
 343	long timeleft;
 344	unsigned long flags;
 345	int err = 0;
 346
 347	spin_lock_irqsave(&pcr->lock, flags);
 348
 349	/* set up data structures for the wakeup system */
 350	pcr->done = &trans_done;
 351	pcr->trans_result = TRANS_NOT_READY;
 352	init_completion(&trans_done);
 353
 354	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
 355
 356	val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
 357	/* Hardware Auto Response */
 358	val |= 0x40000000;
 359	rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
 360
 361	spin_unlock_irqrestore(&pcr->lock, flags);
 362
 363	/* Wait for TRANS_OK_INT */
 364	timeleft = wait_for_completion_interruptible_timeout(
 365			&trans_done, msecs_to_jiffies(timeout));
 366	if (timeleft <= 0) {
 367		pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
 368		err = -ETIMEDOUT;
 369		goto finish_send_cmd;
 370	}
 371
 372	spin_lock_irqsave(&pcr->lock, flags);
 373	if (pcr->trans_result == TRANS_RESULT_FAIL)
 374		err = -EINVAL;
 375	else if (pcr->trans_result == TRANS_RESULT_OK)
 376		err = 0;
 377	else if (pcr->trans_result == TRANS_NO_DEVICE)
 378		err = -ENODEV;
 379	spin_unlock_irqrestore(&pcr->lock, flags);
 380
 381finish_send_cmd:
 382	spin_lock_irqsave(&pcr->lock, flags);
 383	pcr->done = NULL;
 384	spin_unlock_irqrestore(&pcr->lock, flags);
 385
 386	if ((err < 0) && (err != -ENODEV))
 387		rtsx_pci_stop_cmd(pcr);
 388
 389	if (pcr->finish_me)
 390		complete(pcr->finish_me);
 391
 392	return err;
 393}
 394EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd);
 395
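/*
 * Append one 64-bit descriptor to the ADMA scatter-gather table: the DMA
 * address occupies the upper 32 bits, the length and option flags the lower
 * half (RTS5261/RTS5228 use a different length encoding), and the final
 * entry is tagged with RTSX_SG_END.
 */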
 396static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr,
 397		dma_addr_t addr, unsigned int len, int end)
 398{
 399	u64 *ptr = (u64 *)(pcr->host_sg_tbl_ptr) + pcr->sgi;
 400	u64 val;
 401	u8 option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;
 402
 403	pcr_dbg(pcr, "DMA addr: 0x%x, Len: 0x%x\n", (unsigned int)addr, len);
 404
 405	if (end)
 406		option |= RTSX_SG_END;
 407
 408	if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5228)) {
 409		if (len > 0xFFFF)
 410			val = ((u64)addr << 32) | (((u64)len & 0xFFFF) << 16)
 411				| (((u64)len >> 16) << 6) | option;
 412		else
 413			val = ((u64)addr << 32) | ((u64)len << 16) | option;
 414	} else {
 415		val = ((u64)addr << 32) | ((u64)len << 12) | option;
 416	}
 417	put_unaligned_le64(val, ptr);
 418	pcr->sgi++;
 419}
 420
 421int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
 422		int num_sg, bool read, int timeout)
 423{
 424	int err = 0, count;
 425
 426	pcr_dbg(pcr, "--> %s: num_sg = %d\n", __func__, num_sg);
 427	count = rtsx_pci_dma_map_sg(pcr, sglist, num_sg, read);
 428	if (count < 1)
 429		return -EINVAL;
 430	pcr_dbg(pcr, "DMA mapping count: %d\n", count);
 431
 432	err = rtsx_pci_dma_transfer(pcr, sglist, count, read, timeout);
 433
 434	rtsx_pci_dma_unmap_sg(pcr, sglist, num_sg, read);
 435
 436	return err;
 437}
 438EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data);
 439
 440int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
 441		int num_sg, bool read)
 442{
 443	enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
 444
 445	if (pcr->remove_pci)
 446		return -EINVAL;
 447
 448	if ((sglist == NULL) || (num_sg <= 0))
 449		return -EINVAL;
 450
 451	return dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dir);
 452}
 453EXPORT_SYMBOL_GPL(rtsx_pci_dma_map_sg);
 454
 455void rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
 456		int num_sg, bool read)
 457{
 458	enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
 459
 460	dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dir);
 461}
 462EXPORT_SYMBOL_GPL(rtsx_pci_dma_unmap_sg);
 463
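/*
 * Run one ADMA transfer: build the scatter-gather table, program HDBAR and
 * HDBCTLR, then sleep until the ISR reports completion or the timeout
 * expires. A failed transfer bumps dma_error_count, which is later used to
 * throttle the SDR104 card clock on RTS5227.
 */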
 464int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist,
 465		int count, bool read, int timeout)
 466{
 467	struct completion trans_done;
 468	struct scatterlist *sg;
 469	dma_addr_t addr;
 470	long timeleft;
 471	unsigned long flags;
 472	unsigned int len;
 473	int i, err = 0;
 474	u32 val;
 475	u8 dir = read ? DEVICE_TO_HOST : HOST_TO_DEVICE;
 476
 477	if (pcr->remove_pci)
 478		return -ENODEV;
 479
 480	if ((sglist == NULL) || (count < 1))
 481		return -EINVAL;
 482
 483	val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE;
 484	pcr->sgi = 0;
 485	for_each_sg(sglist, sg, count, i) {
 486		addr = sg_dma_address(sg);
 487		len = sg_dma_len(sg);
 488		rtsx_pci_add_sg_tbl(pcr, addr, len, i == count - 1);
 489	}
 490
 491	spin_lock_irqsave(&pcr->lock, flags);
 492
 493	pcr->done = &trans_done;
 494	pcr->trans_result = TRANS_NOT_READY;
 495	init_completion(&trans_done);
 496	rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr);
 497	rtsx_pci_writel(pcr, RTSX_HDBCTLR, val);
 498
 499	spin_unlock_irqrestore(&pcr->lock, flags);
 500
 501	timeleft = wait_for_completion_interruptible_timeout(
 502			&trans_done, msecs_to_jiffies(timeout));
 503	if (timeleft <= 0) {
 504		pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
 505		err = -ETIMEDOUT;
 506		goto out;
 507	}
 508
 509	spin_lock_irqsave(&pcr->lock, flags);
 510	if (pcr->trans_result == TRANS_RESULT_FAIL) {
 511		err = -EILSEQ;
 512		if (pcr->dma_error_count < RTS_MAX_TIMES_FREQ_REDUCTION)
 513			pcr->dma_error_count++;
 514	}
 515
 516	else if (pcr->trans_result == TRANS_NO_DEVICE)
 517		err = -ENODEV;
 518	spin_unlock_irqrestore(&pcr->lock, flags);
 519
 520out:
 521	spin_lock_irqsave(&pcr->lock, flags);
 522	pcr->done = NULL;
 523	spin_unlock_irqrestore(&pcr->lock, flags);
 524
 525	if ((err < 0) && (err != -ENODEV))
 526		rtsx_pci_stop_cmd(pcr);
 527
 528	if (pcr->finish_me)
 529		complete(pcr->finish_me);
 530
 531	return err;
 532}
 533EXPORT_SYMBOL_GPL(rtsx_pci_dma_transfer);
 534
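/*
 * Read up to 512 bytes from the chip's ping-pong buffer by batching
 * READ_REG_CMD accesses, 256 registers per command run.
 */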
 535int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
 536{
 537	int err;
 538	int i, j;
 539	u16 reg;
 540	u8 *ptr;
 541
 542	if (buf_len > 512)
 543		buf_len = 512;
 544
 545	ptr = buf;
 546	reg = PPBUF_BASE2;
 547	for (i = 0; i < buf_len / 256; i++) {
 548		rtsx_pci_init_cmd(pcr);
 549
 550		for (j = 0; j < 256; j++)
 551			rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
 552
 553		err = rtsx_pci_send_cmd(pcr, 250);
 554		if (err < 0)
 555			return err;
 556
 557		memcpy(ptr, rtsx_pci_get_cmd_data(pcr), 256);
 558		ptr += 256;
 559	}
 560
 561	if (buf_len % 256) {
 562		rtsx_pci_init_cmd(pcr);
 563
 564		for (j = 0; j < buf_len % 256; j++)
 565			rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
 566
 567		err = rtsx_pci_send_cmd(pcr, 250);
 568		if (err < 0)
 569			return err;
 570	}
 571
 572	memcpy(ptr, rtsx_pci_get_cmd_data(pcr), buf_len % 256);
 573
 574	return 0;
 575}
 576EXPORT_SYMBOL_GPL(rtsx_pci_read_ppbuf);
 577
 578int rtsx_pci_write_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
 579{
 580	int err;
 581	int i, j;
 582	u16 reg;
 583	u8 *ptr;
 584
 585	if (buf_len > 512)
 586		buf_len = 512;
 587
 588	ptr = buf;
 589	reg = PPBUF_BASE2;
 590	for (i = 0; i < buf_len / 256; i++) {
 591		rtsx_pci_init_cmd(pcr);
 592
 593		for (j = 0; j < 256; j++) {
 594			rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
 595					reg++, 0xFF, *ptr);
 596			ptr++;
 597		}
 598
 599		err = rtsx_pci_send_cmd(pcr, 250);
 600		if (err < 0)
 601			return err;
 602	}
 603
 604	if (buf_len % 256) {
 605		rtsx_pci_init_cmd(pcr);
 606
 607		for (j = 0; j < buf_len % 256; j++) {
 608			rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
 609					reg++, 0xFF, *ptr);
 610			ptr++;
 611		}
 612
 613		err = rtsx_pci_send_cmd(pcr, 250);
 614		if (err < 0)
 615			return err;
 616	}
 617
 618	return 0;
 619}
 620EXPORT_SYMBOL_GPL(rtsx_pci_write_ppbuf);
 621
 622static int rtsx_pci_set_pull_ctl(struct rtsx_pcr *pcr, const u32 *tbl)
 623{
 624	rtsx_pci_init_cmd(pcr);
 625
 626	while (*tbl & 0xFFFF0000) {
 627		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
 628				(u16)(*tbl >> 16), 0xFF, (u8)(*tbl));
 629		tbl++;
 630	}
 631
 632	return rtsx_pci_send_cmd(pcr, 100);
 633}
 634
 635int rtsx_pci_card_pull_ctl_enable(struct rtsx_pcr *pcr, int card)
 636{
 637	const u32 *tbl;
 638
 639	if (card == RTSX_SD_CARD)
 640		tbl = pcr->sd_pull_ctl_enable_tbl;
 641	else if (card == RTSX_MS_CARD)
 642		tbl = pcr->ms_pull_ctl_enable_tbl;
 643	else
 644		return -EINVAL;
 645
 646	return rtsx_pci_set_pull_ctl(pcr, tbl);
 647}
 648EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_enable);
 649
 650int rtsx_pci_card_pull_ctl_disable(struct rtsx_pcr *pcr, int card)
 651{
 652	const u32 *tbl;
 653
 654	if (card == RTSX_SD_CARD)
 655		tbl = pcr->sd_pull_ctl_disable_tbl;
 656	else if (card == RTSX_MS_CARD)
 657		tbl = pcr->ms_pull_ctl_disable_tbl;
 658	else
 659		return -EINVAL;
 660
 661	return rtsx_pci_set_pull_ctl(pcr, tbl);
 662}
 663EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_disable);
 664
 665static void rtsx_pci_enable_bus_int(struct rtsx_pcr *pcr)
 666{
 667	struct rtsx_hw_param *hw_param = &pcr->hw_param;
 668
 669	pcr->bier = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN | SD_INT_EN
 670		| hw_param->interrupt_en;
 671
 672	if (pcr->num_slots > 1)
 673		pcr->bier |= MS_INT_EN;
 674
 675	/* Enable Bus Interrupt */
 676	rtsx_pci_writel(pcr, RTSX_BIER, pcr->bier);
 677
 678	pcr_dbg(pcr, "RTSX_BIER: 0x%08x\n", pcr->bier);
 679}
 680
 681static inline u8 double_ssc_depth(u8 depth)
 682{
 683	return ((depth > 1) ? (depth - 1) : depth);
 684}
 685
 686static u8 revise_ssc_depth(u8 ssc_depth, u8 div)
 687{
 688	if (div > CLK_DIV_1) {
 689		if (ssc_depth > (div - 1))
 690			ssc_depth -= (div - 1);
 691		else
 692			ssc_depth = SSC_DEPTH_4M;
 693	}
 694
 695	return ssc_depth;
 696}
 697
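/*
 * Switch the internal SSC clock to a new card clock. The divider value n is
 * derived from the target frequency (n = clk - 2 unless the chip supplies
 * its own conversion), doubled together with the post-divider while it stays
 * below MIN_DIV_N_PCR, and the SSC depth is adjusted for the chosen divider.
 */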
 698int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
 699		u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
 700{
 701	int err, clk;
 702	u8 n, clk_divider, mcu_cnt, div;
 703	static const u8 depth[] = {
 704		[RTSX_SSC_DEPTH_4M] = SSC_DEPTH_4M,
 705		[RTSX_SSC_DEPTH_2M] = SSC_DEPTH_2M,
 706		[RTSX_SSC_DEPTH_1M] = SSC_DEPTH_1M,
 707		[RTSX_SSC_DEPTH_500K] = SSC_DEPTH_500K,
 708		[RTSX_SSC_DEPTH_250K] = SSC_DEPTH_250K,
 709	};
 710
 711	if (PCI_PID(pcr) == PID_5261)
 712		return rts5261_pci_switch_clock(pcr, card_clock,
 713				ssc_depth, initial_mode, double_clk, vpclk);
 714	if (PCI_PID(pcr) == PID_5228)
 715		return rts5228_pci_switch_clock(pcr, card_clock,
 716				ssc_depth, initial_mode, double_clk, vpclk);
 717
 718	if (initial_mode) {
  719		/* Use a clock of roughly 250 kHz in the initial stage */
 720		clk_divider = SD_CLK_DIVIDE_128;
 721		card_clock = 30000000;
 722	} else {
 723		clk_divider = SD_CLK_DIVIDE_0;
 724	}
 725	err = rtsx_pci_write_register(pcr, SD_CFG1,
 726			SD_CLK_DIVIDE_MASK, clk_divider);
 727	if (err < 0)
 728		return err;
 729
 730	/* Reduce card clock by 20MHz each time a DMA transfer error occurs */
 731	if (card_clock == UHS_SDR104_MAX_DTR &&
 732	    pcr->dma_error_count &&
 733	    PCI_PID(pcr) == RTS5227_DEVICE_ID)
 734		card_clock = UHS_SDR104_MAX_DTR -
 735			(pcr->dma_error_count * 20000000);
 736
 737	card_clock /= 1000000;
 738	pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock);
 739
 740	clk = card_clock;
 741	if (!initial_mode && double_clk)
 742		clk = card_clock * 2;
 743	pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n",
 744		clk, pcr->cur_clock);
 745
 746	if (clk == pcr->cur_clock)
 747		return 0;
 748
 749	if (pcr->ops->conv_clk_and_div_n)
 750		n = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
 751	else
 752		n = (u8)(clk - 2);
 753	if ((clk <= 2) || (n > MAX_DIV_N_PCR))
 754		return -EINVAL;
 755
 756	mcu_cnt = (u8)(125/clk + 3);
 757	if (mcu_cnt > 15)
 758		mcu_cnt = 15;
 759
 760	/* Make sure that the SSC clock div_n is not less than MIN_DIV_N_PCR */
 761	div = CLK_DIV_1;
 762	while ((n < MIN_DIV_N_PCR) && (div < CLK_DIV_8)) {
 763		if (pcr->ops->conv_clk_and_div_n) {
 764			int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
 765					DIV_N_TO_CLK) * 2;
 766			n = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk,
 767					CLK_TO_DIV_N);
 768		} else {
 769			n = (n + 2) * 2 - 2;
 770		}
 771		div++;
 772	}
 773	pcr_dbg(pcr, "n = %d, div = %d\n", n, div);
 774
 775	ssc_depth = depth[ssc_depth];
 776	if (double_clk)
 777		ssc_depth = double_ssc_depth(ssc_depth);
 778
 779	ssc_depth = revise_ssc_depth(ssc_depth, div);
 780	pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth);
 781
 782	rtsx_pci_init_cmd(pcr);
 783	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
 784			CLK_LOW_FREQ, CLK_LOW_FREQ);
 785	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV,
 786			0xFF, (div << 4) | mcu_cnt);
 787	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
 788	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2,
 789			SSC_DEPTH_MASK, ssc_depth);
 790	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
 791	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
 792	if (vpclk) {
 793		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
 794				PHASE_NOT_RESET, 0);
 795		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
 796				PHASE_NOT_RESET, PHASE_NOT_RESET);
 797	}
 798
 799	err = rtsx_pci_send_cmd(pcr, 2000);
 800	if (err < 0)
 801		return err;
 802
  803	/* Wait for the SSC clock to stabilize */
 804	udelay(SSC_CLOCK_STABLE_WAIT);
 805	err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
 806	if (err < 0)
 807		return err;
 808
 809	pcr->cur_clock = clk;
 810	return 0;
 811}
 812EXPORT_SYMBOL_GPL(rtsx_pci_switch_clock);
 813
 814int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card)
 815{
 816	if (pcr->ops->card_power_on)
 817		return pcr->ops->card_power_on(pcr, card);
 818
 819	return 0;
 820}
 821EXPORT_SYMBOL_GPL(rtsx_pci_card_power_on);
 822
 823int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card)
 824{
 825	if (pcr->ops->card_power_off)
 826		return pcr->ops->card_power_off(pcr, card);
 827
 828	return 0;
 829}
 830EXPORT_SYMBOL_GPL(rtsx_pci_card_power_off);
 831
 832int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card)
 833{
 834	static const unsigned int cd_mask[] = {
 835		[RTSX_SD_CARD] = SD_EXIST,
 836		[RTSX_MS_CARD] = MS_EXIST
 837	};
 838
 839	if (!(pcr->flags & PCR_MS_PMOS)) {
  840		/* When using a single PMOS, card access is not permitted
  841		 * if the card currently present is not the designated one.
  842		 */
 843		if (pcr->card_exist & (~cd_mask[card]))
 844			return -EIO;
 845	}
 846
 847	return 0;
 848}
 849EXPORT_SYMBOL_GPL(rtsx_pci_card_exclusive_check);
 850
 851int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
 852{
 853	if (pcr->ops->switch_output_voltage)
 854		return pcr->ops->switch_output_voltage(pcr, voltage);
 855
 856	return 0;
 857}
 858EXPORT_SYMBOL_GPL(rtsx_pci_switch_output_voltage);
 859
 860unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr)
 861{
 862	unsigned int val;
 863
 864	val = rtsx_pci_readl(pcr, RTSX_BIPR);
 865	if (pcr->ops->cd_deglitch)
 866		val = pcr->ops->cd_deglitch(pcr);
 867
 868	return val;
 869}
 870EXPORT_SYMBOL_GPL(rtsx_pci_card_exist);
 871
 872void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr)
 873{
 874	struct completion finish;
 875
 876	pcr->finish_me = &finish;
 877	init_completion(&finish);
 878
 879	if (pcr->done)
 880		complete(pcr->done);
 881
 882	if (!pcr->remove_pci)
 883		rtsx_pci_stop_cmd(pcr);
 884
 885	wait_for_completion_interruptible_timeout(&finish,
 886			msecs_to_jiffies(2));
 887	pcr->finish_me = NULL;
 888}
 889EXPORT_SYMBOL_GPL(rtsx_pci_complete_unfinished_transfer);
 890
 891static void rtsx_pci_card_detect(struct work_struct *work)
 892{
 893	struct delayed_work *dwork;
 894	struct rtsx_pcr *pcr;
 895	unsigned long flags;
 896	unsigned int card_detect = 0, card_inserted, card_removed;
 897	u32 irq_status;
 898
 899	dwork = to_delayed_work(work);
 900	pcr = container_of(dwork, struct rtsx_pcr, carddet_work);
 901
 902	pcr_dbg(pcr, "--> %s\n", __func__);
 903
 904	mutex_lock(&pcr->pcr_mutex);
 905	spin_lock_irqsave(&pcr->lock, flags);
 906
 907	irq_status = rtsx_pci_readl(pcr, RTSX_BIPR);
 908	pcr_dbg(pcr, "irq_status: 0x%08x\n", irq_status);
 909
 910	irq_status &= CARD_EXIST;
 911	card_inserted = pcr->card_inserted & irq_status;
 912	card_removed = pcr->card_removed;
 913	pcr->card_inserted = 0;
 914	pcr->card_removed = 0;
 915
 916	spin_unlock_irqrestore(&pcr->lock, flags);
 917
 918	if (card_inserted || card_removed) {
 919		pcr_dbg(pcr, "card_inserted: 0x%x, card_removed: 0x%x\n",
 920			card_inserted, card_removed);
 921
 922		if (pcr->ops->cd_deglitch)
 923			card_inserted = pcr->ops->cd_deglitch(pcr);
 924
 925		card_detect = card_inserted | card_removed;
 926
 927		pcr->card_exist |= card_inserted;
 928		pcr->card_exist &= ~card_removed;
 929	}
 930
 931	mutex_unlock(&pcr->pcr_mutex);
 932
 933	if ((card_detect & SD_EXIST) && pcr->slots[RTSX_SD_CARD].card_event)
 934		pcr->slots[RTSX_SD_CARD].card_event(
 935				pcr->slots[RTSX_SD_CARD].p_dev);
 936	if ((card_detect & MS_EXIST) && pcr->slots[RTSX_MS_CARD].card_event)
 937		pcr->slots[RTSX_MS_CARD].card_event(
 938				pcr->slots[RTSX_MS_CARD].p_dev);
 939}
 940
 941static void rtsx_pci_process_ocp(struct rtsx_pcr *pcr)
 942{
 943	if (pcr->ops->process_ocp) {
 944		pcr->ops->process_ocp(pcr);
 945	} else {
 946		if (!pcr->option.ocp_en)
 947			return;
 948		rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
 949		if (pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
 950			rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
 951			rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
 952			rtsx_pci_clear_ocpstat(pcr);
 953			pcr->ocp_stat = 0;
 954		}
 955	}
 956}
 957
 958static int rtsx_pci_process_ocp_interrupt(struct rtsx_pcr *pcr)
 959{
 960	if (pcr->option.ocp_en)
 961		rtsx_pci_process_ocp(pcr);
 962
 963	return 0;
 964}
 965
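/*
 * Interrupt handler: read BIPR, write the value back to acknowledge it, and
 * bail out if none of the enabled (bier) bits are set. Card insert/remove
 * events are latched and handed to the delayed card-detect work; transfer
 * completion or failure wakes the waiter through pcr->done.
 */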
 966static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
 967{
 968	struct rtsx_pcr *pcr = dev_id;
 969	u32 int_reg;
 970
 971	if (!pcr)
 972		return IRQ_NONE;
 973
 974	spin_lock(&pcr->lock);
 975
 976	int_reg = rtsx_pci_readl(pcr, RTSX_BIPR);
 977	/* Clear interrupt flag */
 978	rtsx_pci_writel(pcr, RTSX_BIPR, int_reg);
 979	if ((int_reg & pcr->bier) == 0) {
 980		spin_unlock(&pcr->lock);
 981		return IRQ_NONE;
 982	}
 983	if (int_reg == 0xFFFFFFFF) {
 984		spin_unlock(&pcr->lock);
 985		return IRQ_HANDLED;
 986	}
 987
 988	int_reg &= (pcr->bier | 0x7FFFFF);
 989
 990	if (int_reg & SD_OC_INT)
 991		rtsx_pci_process_ocp_interrupt(pcr);
 992
 993	if (int_reg & SD_INT) {
 994		if (int_reg & SD_EXIST) {
 995			pcr->card_inserted |= SD_EXIST;
 996		} else {
 997			pcr->card_removed |= SD_EXIST;
 998			pcr->card_inserted &= ~SD_EXIST;
 999			if (PCI_PID(pcr) == PID_5261) {
1000				rtsx_pci_write_register(pcr, RTS5261_FW_STATUS,
1001					RTS5261_EXPRESS_LINK_FAIL_MASK, 0);
1002				pcr->extra_caps |= EXTRA_CAPS_SD_EXPRESS;
1003			}
1004		}
1005		pcr->dma_error_count = 0;
1006	}
1007
1008	if (int_reg & MS_INT) {
1009		if (int_reg & MS_EXIST) {
1010			pcr->card_inserted |= MS_EXIST;
1011		} else {
1012			pcr->card_removed |= MS_EXIST;
1013			pcr->card_inserted &= ~MS_EXIST;
1014		}
1015	}
1016
1017	if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) {
1018		if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) {
1019			pcr->trans_result = TRANS_RESULT_FAIL;
1020			if (pcr->done)
1021				complete(pcr->done);
1022		} else if (int_reg & TRANS_OK_INT) {
1023			pcr->trans_result = TRANS_RESULT_OK;
1024			if (pcr->done)
1025				complete(pcr->done);
1026		}
1027	}
1028
1029	if ((pcr->card_inserted || pcr->card_removed) && !(int_reg & SD_OC_INT))
1030		schedule_delayed_work(&pcr->carddet_work,
1031				msecs_to_jiffies(200));
1032
1033	spin_unlock(&pcr->lock);
1034	return IRQ_HANDLED;
1035}
1036
1037static int rtsx_pci_acquire_irq(struct rtsx_pcr *pcr)
1038{
1039	pcr_dbg(pcr, "%s: pcr->msi_en = %d, pci->irq = %d\n",
1040			__func__, pcr->msi_en, pcr->pci->irq);
1041
1042	if (request_irq(pcr->pci->irq, rtsx_pci_isr,
1043			pcr->msi_en ? 0 : IRQF_SHARED,
1044			DRV_NAME_RTSX_PCI, pcr)) {
1045		dev_err(&(pcr->pci->dev),
1046			"rtsx_sdmmc: unable to grab IRQ %d, disabling device\n",
1047			pcr->pci->irq);
1048		return -1;
1049	}
1050
1051	pcr->irq = pcr->pci->irq;
1052	pci_intx(pcr->pci, !pcr->msi_en);
1053
1054	return 0;
1055}
1056
1057static void rtsx_base_force_power_down(struct rtsx_pcr *pcr)
1058{
1059	/* Set relink_time to 0 */
1060	rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0);
1061	rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0);
1062	rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3,
1063			RELINK_TIME_MASK, 0);
1064
1065	rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
1066			D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
1067
1068	rtsx_pci_write_register(pcr, FPDCTL, ALL_POWER_DOWN, ALL_POWER_DOWN);
1069}
1070
1071static void __maybe_unused rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state, bool runtime)
1072{
1073	if (pcr->ops->turn_off_led)
1074		pcr->ops->turn_off_led(pcr);
1075
1076	rtsx_pci_writel(pcr, RTSX_BIER, 0);
1077	pcr->bier = 0;
1078
1079	rtsx_pci_write_register(pcr, PETXCFG, 0x08, 0x08);
1080	rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, pm_state);
1081
1082	if (pcr->ops->force_power_down)
1083		pcr->ops->force_power_down(pcr, pm_state, runtime);
1084	else
1085		rtsx_base_force_power_down(pcr);
1086}
1087
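/*
 * Over-current protection (OCP): when the chip has no enable_ocp hook, the
 * generic path clears OC_POWER_DOWN in FPDCTL and enables the SD OCP
 * interrupt and detection bits in REG_OCPCTL.
 */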
1088void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr)
1089{
1090	u8 val = SD_OCP_INT_EN | SD_DETECT_EN;
1091
1092	if (pcr->ops->enable_ocp) {
1093		pcr->ops->enable_ocp(pcr);
1094	} else {
1095		rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
1096		rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
1097	}
1098
1099}
1100
1101void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr)
1102{
1103	u8 mask = SD_OCP_INT_EN | SD_DETECT_EN;
1104
1105	if (pcr->ops->disable_ocp) {
1106		pcr->ops->disable_ocp(pcr);
1107	} else {
1108		rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
1109		rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
1110				OC_POWER_DOWN);
1111	}
1112}
1113
1114void rtsx_pci_init_ocp(struct rtsx_pcr *pcr)
1115{
1116	if (pcr->ops->init_ocp) {
1117		pcr->ops->init_ocp(pcr);
1118	} else {
1119		struct rtsx_cr_option *option = &(pcr->option);
1120
1121		if (option->ocp_en) {
1122			u8 val = option->sd_800mA_ocp_thd;
1123
1124			rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
1125			rtsx_pci_write_register(pcr, REG_OCPPARA1,
1126				SD_OCP_TIME_MASK, SD_OCP_TIME_800);
1127			rtsx_pci_write_register(pcr, REG_OCPPARA2,
1128				SD_OCP_THD_MASK, val);
1129			rtsx_pci_write_register(pcr, REG_OCPGLITCH,
1130				SD_OCP_GLITCH_MASK, pcr->hw_param.ocp_glitch);
1131			rtsx_pci_enable_ocp(pcr);
1132		}
1133	}
1134}
1135
1136int rtsx_pci_get_ocpstat(struct rtsx_pcr *pcr, u8 *val)
1137{
1138	if (pcr->ops->get_ocpstat)
1139		return pcr->ops->get_ocpstat(pcr, val);
1140	else
1141		return rtsx_pci_read_register(pcr, REG_OCPSTAT, val);
1142}
1143
1144void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr)
1145{
1146	if (pcr->ops->clear_ocpstat) {
1147		pcr->ops->clear_ocpstat(pcr);
1148	} else {
1149		u8 mask = SD_OCP_INT_CLR | SD_OC_CLR;
1150		u8 val = SD_OCP_INT_CLR | SD_OC_CLR;
1151
1152		rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
1153		udelay(100);
1154		rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
1155	}
1156}
1157
1158void rtsx_pci_enable_oobs_polling(struct rtsx_pcr *pcr)
1159{
1160	u16 val;
1161
1162	if ((PCI_PID(pcr) != PID_525A) && (PCI_PID(pcr) != PID_5260)) {
1163		rtsx_pci_read_phy_register(pcr, 0x01, &val);
1164		val |= 1<<9;
1165		rtsx_pci_write_phy_register(pcr, 0x01, val);
1166	}
1167	rtsx_pci_write_register(pcr, REG_CFG_OOBS_OFF_TIMER, 0xFF, 0x32);
1168	rtsx_pci_write_register(pcr, REG_CFG_OOBS_ON_TIMER, 0xFF, 0x05);
1169	rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x83);
1170	rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0xDE);
1171
1172}
1173
1174void rtsx_pci_disable_oobs_polling(struct rtsx_pcr *pcr)
1175{
1176	u16 val;
1177
1178	if ((PCI_PID(pcr) != PID_525A) && (PCI_PID(pcr) != PID_5260)) {
1179		rtsx_pci_read_phy_register(pcr, 0x01, &val);
1180		val &= ~(1<<9);
1181		rtsx_pci_write_phy_register(pcr, 0x01, val);
1182	}
1183	rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x03);
1184	rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0x00);
1185
1186}
1187
1188int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr)
1189{
1190	rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
1191		MS_CLK_EN | SD40_CLK_EN, 0);
1192	rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
1193	rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
1194
1195	msleep(50);
1196
1197	rtsx_pci_card_pull_ctl_disable(pcr, RTSX_SD_CARD);
1198
1199	return 0;
1200}
1201
1202int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr)
1203{
1204	rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
1205		MS_CLK_EN | SD40_CLK_EN, 0);
1206
1207	rtsx_pci_card_pull_ctl_disable(pcr, RTSX_MS_CARD);
1208
1209	rtsx_pci_write_register(pcr, CARD_OE, MS_OUTPUT_EN, 0);
1210	rtsx_pci_card_power_off(pcr, RTSX_MS_CARD);
1211
1212	return 0;
1213}
1214
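/*
 * Bring the controller to a known state: power on the SSC block, run a
 * batched command sequence that sets the clock divider, resets delink mode,
 * selects card drive strength and SSC parameters, applies chip-specific
 * extra_init_hw fixes, and finally samples the card-present bits because no
 * CD interrupt fires for a card that was already inserted at probe time.
 */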
1215static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
1216{
1217	struct pci_dev *pdev = pcr->pci;
1218	int err;
1219
1220	if (PCI_PID(pcr) == PID_5228)
1221		rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG1, RTS5228_LDO1_SR_TIME_MASK,
1222				RTS5228_LDO1_SR_0_5);
1223
1224	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
1225
1226	rtsx_pci_enable_bus_int(pcr);
1227
1228	/* Power on SSC */
1229	if (PCI_PID(pcr) == PID_5261) {
1230		/* Gating real mcu clock */
1231		err = rtsx_pci_write_register(pcr, RTS5261_FW_CFG1,
1232			RTS5261_MCU_CLOCK_GATING, 0);
1233		err = rtsx_pci_write_register(pcr, RTS5261_REG_FPDCTL,
1234			SSC_POWER_DOWN, 0);
1235	} else {
1236		err = rtsx_pci_write_register(pcr, FPDCTL, SSC_POWER_DOWN, 0);
1237	}
1238	if (err < 0)
1239		return err;
1240
 1241	/* Wait for SSC power to stabilize */
1242	udelay(200);
1243
1244	rtsx_disable_aspm(pcr);
1245	if (pcr->ops->optimize_phy) {
1246		err = pcr->ops->optimize_phy(pcr);
1247		if (err < 0)
1248			return err;
1249	}
1250
1251	rtsx_pci_init_cmd(pcr);
1252
1253	/* Set mcu_cnt to 7 to ensure data can be sampled properly */
1254	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV, 0x07, 0x07);
1255
1256	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, HOST_SLEEP_STATE, 0x03, 0x00);
1257	/* Disable card clock */
1258	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, 0x1E, 0);
1259	/* Reset delink mode */
1260	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x0A, 0);
1261	/* Card driving select */
1262	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DRIVE_SEL,
1263			0xFF, pcr->card_drive_sel);
1264	/* Enable SSC Clock */
1265	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1,
1266			0xFF, SSC_8X_EN | SSC_SEL_4M);
1267	if (PCI_PID(pcr) == PID_5261)
1268		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
1269			RTS5261_SSC_DEPTH_2M);
1270	else if (PCI_PID(pcr) == PID_5228)
1271		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
1272			RTS5228_SSC_DEPTH_2M);
1273	else
1274		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);
1275
1276	/* Disable cd_pwr_save */
1277	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x16, 0x10);
1278	/* Clear Link Ready Interrupt */
1279	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
1280			LINK_RDY_INT, LINK_RDY_INT);
1281	/* Enlarge the estimation window of PERST# glitch
1282	 * to reduce the chance of invalid card interrupt
1283	 */
1284	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PERST_GLITCH_WIDTH, 0xFF, 0x80);
1285	/* Update RC oscillator to 400k
1286	 * bit[0] F_HIGH: for RC oscillator, Rst_value is 1'b1
1287	 *                1: 2M  0: 400k
1288	 */
1289	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RCCTL, 0x01, 0x00);
1290	/* Set interrupt write clear
1291	 * bit 1: U_elbi_if_rd_clr_en
1292	 *	1: Enable ELBI interrupt[31:22] & [7:0] flag read clear
1293	 *	0: ELBI interrupt flag[31:22] & [7:0] only can be write clear
1294	 */
1295	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, NFTS_TX_CTRL, 0x02, 0);
1296
1297	err = rtsx_pci_send_cmd(pcr, 100);
1298	if (err < 0)
1299		return err;
1300
1301	switch (PCI_PID(pcr)) {
1302	case PID_5250:
1303	case PID_524A:
1304	case PID_525A:
1305	case PID_5260:
1306	case PID_5261:
1307	case PID_5228:
1308		rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
1309		break;
1310	default:
1311		break;
1312	}
1313
 1314	/* Init OCP */
1315	rtsx_pci_init_ocp(pcr);
1316
1317	/* Enable clk_request_n to enable clock power management */
1318	pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
1319					0, PCI_EXP_LNKCTL_CLKREQ_EN);
1320	/* Enter L1 when host tx idle */
1321	pci_write_config_byte(pdev, 0x70F, 0x5B);
1322
1323	if (pcr->ops->extra_init_hw) {
1324		err = pcr->ops->extra_init_hw(pcr);
1325		if (err < 0)
1326			return err;
1327	}
1328
1329	if (pcr->aspm_mode == ASPM_MODE_REG)
1330		rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30);
1331
 1332	/* No CD interrupt is generated if the driver probes with a card
 1333	 * already inserted, so pcr->card_exist must be initialized here.
 1334	 */
1335	if (pcr->ops->cd_deglitch)
1336		pcr->card_exist = pcr->ops->cd_deglitch(pcr);
1337	else
1338		pcr->card_exist = rtsx_pci_readl(pcr, RTSX_BIPR) & CARD_EXIST;
1339
1340	return 0;
1341}
1342
1343static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
1344{
1345	int err;
1346	u16 cfg_val;
1347	u8 val;
1348
1349	spin_lock_init(&pcr->lock);
1350	mutex_init(&pcr->pcr_mutex);
1351
1352	switch (PCI_PID(pcr)) {
1353	default:
1354	case 0x5209:
1355		rts5209_init_params(pcr);
1356		break;
1357
1358	case 0x5229:
1359		rts5229_init_params(pcr);
1360		break;
1361
1362	case 0x5289:
1363		rtl8411_init_params(pcr);
1364		break;
1365
1366	case 0x5227:
1367		rts5227_init_params(pcr);
1368		break;
1369
1370	case 0x522A:
1371		rts522a_init_params(pcr);
1372		break;
1373
1374	case 0x5249:
1375		rts5249_init_params(pcr);
1376		break;
1377
1378	case 0x524A:
1379		rts524a_init_params(pcr);
1380		break;
1381
1382	case 0x525A:
1383		rts525a_init_params(pcr);
1384		break;
1385
1386	case 0x5287:
1387		rtl8411b_init_params(pcr);
1388		break;
1389
1390	case 0x5286:
1391		rtl8402_init_params(pcr);
1392		break;
1393
1394	case 0x5260:
1395		rts5260_init_params(pcr);
1396		break;
1397
1398	case 0x5261:
1399		rts5261_init_params(pcr);
1400		break;
1401
1402	case 0x5228:
1403		rts5228_init_params(pcr);
1404		break;
1405	}
1406
1407	pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
1408			PCI_PID(pcr), pcr->ic_version);
1409
1410	pcr->slots = kcalloc(pcr->num_slots, sizeof(struct rtsx_slot),
1411			GFP_KERNEL);
1412	if (!pcr->slots)
1413		return -ENOMEM;
1414
1415	if (pcr->aspm_mode == ASPM_MODE_CFG) {
1416		pcie_capability_read_word(pcr->pci, PCI_EXP_LNKCTL, &cfg_val);
1417		if (cfg_val & PCI_EXP_LNKCTL_ASPM_L1)
1418			pcr->aspm_enabled = true;
1419		else
1420			pcr->aspm_enabled = false;
1421
1422	} else if (pcr->aspm_mode == ASPM_MODE_REG) {
1423		rtsx_pci_read_register(pcr, ASPM_FORCE_CTL, &val);
1424		if (val & FORCE_ASPM_CTL0 && val & FORCE_ASPM_CTL1)
1425			pcr->aspm_enabled = false;
1426		else
1427			pcr->aspm_enabled = true;
1428	}
1429
1430	if (pcr->ops->fetch_vendor_settings)
1431		pcr->ops->fetch_vendor_settings(pcr);
1432
1433	pcr_dbg(pcr, "pcr->aspm_en = 0x%x\n", pcr->aspm_en);
1434	pcr_dbg(pcr, "pcr->sd30_drive_sel_1v8 = 0x%x\n",
1435			pcr->sd30_drive_sel_1v8);
1436	pcr_dbg(pcr, "pcr->sd30_drive_sel_3v3 = 0x%x\n",
1437			pcr->sd30_drive_sel_3v3);
1438	pcr_dbg(pcr, "pcr->card_drive_sel = 0x%x\n",
1439			pcr->card_drive_sel);
1440	pcr_dbg(pcr, "pcr->flags = 0x%x\n", pcr->flags);
1441
1442	pcr->state = PDEV_STAT_IDLE;
1443	err = rtsx_pci_init_hw(pcr);
1444	if (err < 0) {
1445		kfree(pcr->slots);
1446		return err;
1447	}
1448
1449	return 0;
1450}
1451
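/*
 * Probe sequence: enable the PCI device, map the register BAR (BAR 1 on
 * RTS525A), allocate the coherent command/scatter-gather buffer, request the
 * MSI or shared IRQ, initialize the chip and register the SD/MMC cell as an
 * MFD child device.
 */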
1452static int rtsx_pci_probe(struct pci_dev *pcidev,
1453			  const struct pci_device_id *id)
1454{
1455	struct rtsx_pcr *pcr;
1456	struct pcr_handle *handle;
1457	u32 base, len;
1458	int ret, i, bar = 0;
1459
1460	dev_dbg(&(pcidev->dev),
1461		": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
1462		pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
1463		(int)pcidev->revision);
1464
1465	ret = dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32));
1466	if (ret < 0)
1467		return ret;
1468
1469	ret = pci_enable_device(pcidev);
1470	if (ret)
1471		return ret;
1472
1473	ret = pci_request_regions(pcidev, DRV_NAME_RTSX_PCI);
1474	if (ret)
1475		goto disable;
1476
1477	pcr = kzalloc(sizeof(*pcr), GFP_KERNEL);
1478	if (!pcr) {
1479		ret = -ENOMEM;
1480		goto release_pci;
1481	}
1482
1483	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
1484	if (!handle) {
1485		ret = -ENOMEM;
1486		goto free_pcr;
1487	}
1488	handle->pcr = pcr;
1489
1490	idr_preload(GFP_KERNEL);
1491	spin_lock(&rtsx_pci_lock);
1492	ret = idr_alloc(&rtsx_pci_idr, pcr, 0, 0, GFP_NOWAIT);
1493	if (ret >= 0)
1494		pcr->id = ret;
1495	spin_unlock(&rtsx_pci_lock);
1496	idr_preload_end();
1497	if (ret < 0)
1498		goto free_handle;
1499
1500	pcr->pci = pcidev;
1501	dev_set_drvdata(&pcidev->dev, handle);
1502
1503	if (CHK_PCI_PID(pcr, 0x525A))
1504		bar = 1;
1505	len = pci_resource_len(pcidev, bar);
1506	base = pci_resource_start(pcidev, bar);
1507	pcr->remap_addr = ioremap(base, len);
1508	if (!pcr->remap_addr) {
1509		ret = -ENOMEM;
1510		goto free_idr;
1511	}
1512
1513	pcr->rtsx_resv_buf = dma_alloc_coherent(&(pcidev->dev),
1514			RTSX_RESV_BUF_LEN, &(pcr->rtsx_resv_buf_addr),
1515			GFP_KERNEL);
1516	if (pcr->rtsx_resv_buf == NULL) {
1517		ret = -ENXIO;
1518		goto unmap;
1519	}
1520	pcr->host_cmds_ptr = pcr->rtsx_resv_buf;
1521	pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
1522	pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
1523	pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;
1524	pcr->card_inserted = 0;
1525	pcr->card_removed = 0;
1526	INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
1527
1528	pcr->msi_en = msi_en;
1529	if (pcr->msi_en) {
1530		ret = pci_enable_msi(pcidev);
1531		if (ret)
1532			pcr->msi_en = false;
1533	}
1534
1535	ret = rtsx_pci_acquire_irq(pcr);
1536	if (ret < 0)
1537		goto disable_msi;
1538
1539	pci_set_master(pcidev);
1540	synchronize_irq(pcr->irq);
1541
1542	ret = rtsx_pci_init_chip(pcr);
1543	if (ret < 0)
1544		goto disable_irq;
1545
1546	for (i = 0; i < ARRAY_SIZE(rtsx_pcr_cells); i++) {
1547		rtsx_pcr_cells[i].platform_data = handle;
1548		rtsx_pcr_cells[i].pdata_size = sizeof(*handle);
1549	}
1550
1551
1552	ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells,
1553			ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL);
1554	if (ret < 0)
1555		goto free_slots;
1556
1557	pm_runtime_allow(&pcidev->dev);
1558	pm_runtime_put(&pcidev->dev);
1559
1560	return 0;
1561
1562free_slots:
1563	kfree(pcr->slots);
1564disable_irq:
1565	free_irq(pcr->irq, (void *)pcr);
1566disable_msi:
1567	if (pcr->msi_en)
1568		pci_disable_msi(pcr->pci);
1569	dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
1570			pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
1571unmap:
1572	iounmap(pcr->remap_addr);
1573free_idr:
1574	spin_lock(&rtsx_pci_lock);
1575	idr_remove(&rtsx_pci_idr, pcr->id);
1576	spin_unlock(&rtsx_pci_lock);
1577free_handle:
1578	kfree(handle);
1579free_pcr:
1580	kfree(pcr);
1581release_pci:
1582	pci_release_regions(pcidev);
1583disable:
1584	pci_disable_device(pcidev);
1585
1586	return ret;
1587}
1588
1589static void rtsx_pci_remove(struct pci_dev *pcidev)
1590{
1591	struct pcr_handle *handle = pci_get_drvdata(pcidev);
1592	struct rtsx_pcr *pcr = handle->pcr;
1593
1594	pcr->remove_pci = true;
1595
1596	pm_runtime_get_sync(&pcidev->dev);
1597	pm_runtime_forbid(&pcidev->dev);
1598
1599	/* Disable interrupts at the pcr level */
1600	spin_lock_irq(&pcr->lock);
1601	rtsx_pci_writel(pcr, RTSX_BIER, 0);
1602	pcr->bier = 0;
1603	spin_unlock_irq(&pcr->lock);
1604
1605	cancel_delayed_work_sync(&pcr->carddet_work);
1606
1607	mfd_remove_devices(&pcidev->dev);
1608
1609	dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
1610			pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
1611	free_irq(pcr->irq, (void *)pcr);
1612	if (pcr->msi_en)
1613		pci_disable_msi(pcr->pci);
1614	iounmap(pcr->remap_addr);
1615
1616	pci_release_regions(pcidev);
1617	pci_disable_device(pcidev);
1618
1619	spin_lock(&rtsx_pci_lock);
1620	idr_remove(&rtsx_pci_idr, pcr->id);
1621	spin_unlock(&rtsx_pci_lock);
1622
1623	kfree(pcr->slots);
1624	kfree(pcr);
1625	kfree(handle);
1626
1627	dev_dbg(&(pcidev->dev),
1628		": Realtek PCI-E Card Reader at %s [%04x:%04x] has been removed\n",
1629		pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device);
1630}
1631
1632static int __maybe_unused rtsx_pci_suspend(struct device *dev_d)
1633{
1634	struct pci_dev *pcidev = to_pci_dev(dev_d);
1635	struct pcr_handle *handle = pci_get_drvdata(pcidev);
1636	struct rtsx_pcr *pcr = handle->pcr;
1637
1638	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1639
1640	cancel_delayed_work_sync(&pcr->carddet_work);
1641
1642	mutex_lock(&pcr->pcr_mutex);
1643
1644	rtsx_pci_power_off(pcr, HOST_ENTER_S3, false);
1645
1646	mutex_unlock(&pcr->pcr_mutex);
1647	return 0;
1648}
1649
1650static int __maybe_unused rtsx_pci_resume(struct device *dev_d)
1651{
1652	struct pci_dev *pcidev = to_pci_dev(dev_d);
1653	struct pcr_handle *handle = pci_get_drvdata(pcidev);
1654	struct rtsx_pcr *pcr = handle->pcr;
1655	int ret = 0;
1656
1657	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1658
1659	mutex_lock(&pcr->pcr_mutex);
1660
1661	ret = rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
1662	if (ret)
1663		goto out;
1664
1665	ret = rtsx_pci_init_hw(pcr);
1666	if (ret)
1667		goto out;
1668
1669out:
1670	mutex_unlock(&pcr->pcr_mutex);
1671	return ret;
1672}
1673
1674#ifdef CONFIG_PM
1675
1676static void rtsx_enable_aspm(struct rtsx_pcr *pcr)
1677{
1678	if (pcr->ops->set_aspm)
1679		pcr->ops->set_aspm(pcr, true);
1680	else
1681		rtsx_comm_set_aspm(pcr, true);
1682}
1683
1684static void rtsx_comm_pm_power_saving(struct rtsx_pcr *pcr)
1685{
1686	struct rtsx_cr_option *option = &pcr->option;
1687
1688	if (option->ltr_enabled) {
1689		u32 latency = option->ltr_l1off_latency;
1690
1691		if (rtsx_check_dev_flag(pcr, L1_SNOOZE_TEST_EN))
1692			mdelay(option->l1_snooze_delay);
1693
1694		rtsx_set_ltr_latency(pcr, latency);
1695	}
1696
1697	if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
1698		rtsx_set_l1off_sub_cfg_d0(pcr, 0);
1699
1700	rtsx_enable_aspm(pcr);
1701}
1702
1703static void rtsx_pm_power_saving(struct rtsx_pcr *pcr)
1704{
1705	rtsx_comm_pm_power_saving(pcr);
1706}
1707
1708static void rtsx_pci_shutdown(struct pci_dev *pcidev)
1709{
1710	struct pcr_handle *handle = pci_get_drvdata(pcidev);
1711	struct rtsx_pcr *pcr = handle->pcr;
1712
1713	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1714
1715	rtsx_pci_power_off(pcr, HOST_ENTER_S1, false);
1716
1717	pci_disable_device(pcidev);
1718	free_irq(pcr->irq, (void *)pcr);
1719	if (pcr->msi_en)
1720		pci_disable_msi(pcr->pci);
1721}
1722
1723static int rtsx_pci_runtime_idle(struct device *device)
1724{
1725	struct pci_dev *pcidev = to_pci_dev(device);
1726	struct pcr_handle *handle = pci_get_drvdata(pcidev);
1727	struct rtsx_pcr *pcr = handle->pcr;
1728
1729	dev_dbg(device, "--> %s\n", __func__);
1730
1731	mutex_lock(&pcr->pcr_mutex);
1732
1733	pcr->state = PDEV_STAT_IDLE;
1734
1735	if (pcr->ops->disable_auto_blink)
1736		pcr->ops->disable_auto_blink(pcr);
1737	if (pcr->ops->turn_off_led)
1738		pcr->ops->turn_off_led(pcr);
1739
1740	rtsx_pm_power_saving(pcr);
1741
1742	mutex_unlock(&pcr->pcr_mutex);
1743
1744	if (pcr->rtd3_en)
1745		pm_schedule_suspend(device, 10000);
1746
1747	return -EBUSY;
1748}
1749
1750static int rtsx_pci_runtime_suspend(struct device *device)
1751{
1752	struct pci_dev *pcidev = to_pci_dev(device);
1753	struct pcr_handle *handle = pci_get_drvdata(pcidev);
1754	struct rtsx_pcr *pcr = handle->pcr;
1755
1756	dev_dbg(device, "--> %s\n", __func__);
1757
1758	cancel_delayed_work_sync(&pcr->carddet_work);
1759
1760	mutex_lock(&pcr->pcr_mutex);
1761	rtsx_pci_power_off(pcr, HOST_ENTER_S3, true);
1762
1763	mutex_unlock(&pcr->pcr_mutex);
1764
1765	return 0;
1766}
1767
1768static int rtsx_pci_runtime_resume(struct device *device)
1769{
1770	struct pci_dev *pcidev = to_pci_dev(device);
1771	struct pcr_handle *handle = pci_get_drvdata(pcidev);
1772	struct rtsx_pcr *pcr = handle->pcr;
1773
1774	dev_dbg(device, "--> %s\n", __func__);
1775
1776	mutex_lock(&pcr->pcr_mutex);
1777
1778	rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
1779
1780	rtsx_pci_init_hw(pcr);
1781
1782	if (pcr->slots[RTSX_SD_CARD].p_dev != NULL) {
1783		pcr->slots[RTSX_SD_CARD].card_event(
1784				pcr->slots[RTSX_SD_CARD].p_dev);
1785	}
1786
1787	mutex_unlock(&pcr->pcr_mutex);
1788	return 0;
1789}
1790
1791#else /* CONFIG_PM */
1792
1793#define rtsx_pci_shutdown NULL
1794#define rtsx_pci_runtime_suspend NULL
 1795#define rtsx_pci_runtime_resume NULL
1796
1797#endif /* CONFIG_PM */
1798
1799static const struct dev_pm_ops rtsx_pci_pm_ops = {
1800	SET_SYSTEM_SLEEP_PM_OPS(rtsx_pci_suspend, rtsx_pci_resume)
1801	SET_RUNTIME_PM_OPS(rtsx_pci_runtime_suspend, rtsx_pci_runtime_resume, rtsx_pci_runtime_idle)
1802};
1803
1804static struct pci_driver rtsx_pci_driver = {
1805	.name = DRV_NAME_RTSX_PCI,
1806	.id_table = rtsx_pci_ids,
1807	.probe = rtsx_pci_probe,
1808	.remove = rtsx_pci_remove,
1809	.driver.pm = &rtsx_pci_pm_ops,
1810	.shutdown = rtsx_pci_shutdown,
1811};
1812module_pci_driver(rtsx_pci_driver);
1813
1814MODULE_LICENSE("GPL");
1815MODULE_AUTHOR("Wei WANG <wei_wang@realsil.com.cn>");
1816MODULE_DESCRIPTION("Realtek PCI-E Card Reader Driver");
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/* Driver for Realtek PCI-Express card reader
   3 *
   4 * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
   5 *
   6 * Author:
   7 *   Wei WANG <wei_wang@realsil.com.cn>
   8 */
   9
  10#include <linux/pci.h>
  11#include <linux/module.h>
  12#include <linux/slab.h>
  13#include <linux/dma-mapping.h>
  14#include <linux/highmem.h>
  15#include <linux/interrupt.h>
  16#include <linux/delay.h>
  17#include <linux/idr.h>
  18#include <linux/platform_device.h>
  19#include <linux/mfd/core.h>
  20#include <linux/rtsx_pci.h>
  21#include <linux/mmc/card.h>
  22#include <asm/unaligned.h>
  23#include <linux/pm.h>
  24#include <linux/pm_runtime.h>
  25
  26#include "rtsx_pcr.h"
  27#include "rts5261.h"
  28#include "rts5228.h"
  29
  30static bool msi_en = true;
  31module_param(msi_en, bool, S_IRUGO | S_IWUSR);
  32MODULE_PARM_DESC(msi_en, "Enable MSI");
  33
  34static DEFINE_IDR(rtsx_pci_idr);
  35static DEFINE_SPINLOCK(rtsx_pci_lock);
  36
  37static struct mfd_cell rtsx_pcr_cells[] = {
  38	[RTSX_SD_CARD] = {
  39		.name = DRV_NAME_RTSX_PCI_SDMMC,
  40	},
  41};
  42
  43static const struct pci_device_id rtsx_pci_ids[] = {
  44	{ PCI_DEVICE(0x10EC, 0x5209), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  45	{ PCI_DEVICE(0x10EC, 0x5229), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  46	{ PCI_DEVICE(0x10EC, 0x5289), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  47	{ PCI_DEVICE(0x10EC, 0x5227), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  48	{ PCI_DEVICE(0x10EC, 0x522A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  49	{ PCI_DEVICE(0x10EC, 0x5249), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  50	{ PCI_DEVICE(0x10EC, 0x5287), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  51	{ PCI_DEVICE(0x10EC, 0x5286), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  52	{ PCI_DEVICE(0x10EC, 0x524A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  53	{ PCI_DEVICE(0x10EC, 0x525A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  54	{ PCI_DEVICE(0x10EC, 0x5260), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  55	{ PCI_DEVICE(0x10EC, 0x5261), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  56	{ PCI_DEVICE(0x10EC, 0x5228), PCI_CLASS_OTHERS << 16, 0xFF0000 },
  57	{ 0, }
  58};
  59
  60MODULE_DEVICE_TABLE(pci, rtsx_pci_ids);
  61
  62static int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
  63{
  64	rtsx_pci_write_register(pcr, MSGTXDATA0,
  65				MASK_8_BIT_DEF, (u8) (latency & 0xFF));
  66	rtsx_pci_write_register(pcr, MSGTXDATA1,
  67				MASK_8_BIT_DEF, (u8)((latency >> 8) & 0xFF));
  68	rtsx_pci_write_register(pcr, MSGTXDATA2,
  69				MASK_8_BIT_DEF, (u8)((latency >> 16) & 0xFF));
  70	rtsx_pci_write_register(pcr, MSGTXDATA3,
  71				MASK_8_BIT_DEF, (u8)((latency >> 24) & 0xFF));
  72	rtsx_pci_write_register(pcr, LTR_CTL, LTR_TX_EN_MASK |
  73		LTR_LATENCY_MODE_MASK, LTR_TX_EN_1 | LTR_LATENCY_MODE_SW);
  74
  75	return 0;
  76}
  77
  78int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
  79{
  80	return rtsx_comm_set_ltr_latency(pcr, latency);
  81}
  82
  83static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable)
  84{
  85	if (pcr->aspm_enabled == enable)
  86		return;
  87
  88	if (pcr->aspm_mode == ASPM_MODE_CFG) {
  89		pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
  90						PCI_EXP_LNKCTL_ASPMC,
  91						enable ? pcr->aspm_en : 0);
  92	} else if (pcr->aspm_mode == ASPM_MODE_REG) {
  93		if (pcr->aspm_en & 0x02)
  94			rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
  95				FORCE_ASPM_CTL1, enable ? 0 : FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
  96		else
  97			rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
  98				FORCE_ASPM_CTL1, FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
  99	}
 100
 101	if (!enable && (pcr->aspm_en & 0x02))
 102		mdelay(10);
 103
 104	pcr->aspm_enabled = enable;
 105}
 106
 107static void rtsx_disable_aspm(struct rtsx_pcr *pcr)
 108{
 109	if (pcr->ops->set_aspm)
 110		pcr->ops->set_aspm(pcr, false);
 111	else
 112		rtsx_comm_set_aspm(pcr, false);
 113}
 114
 115int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val)
 116{
 117	rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, val);
 118
 119	return 0;
 120}
 121
 122static void rtsx_set_l1off_sub_cfg_d0(struct rtsx_pcr *pcr, int active)
 123{
 124	if (pcr->ops->set_l1off_cfg_sub_d0)
 125		pcr->ops->set_l1off_cfg_sub_d0(pcr, active);
 126}
 127
 128static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr)
 129{
 130	struct rtsx_cr_option *option = &pcr->option;
 131
 132	rtsx_disable_aspm(pcr);
 133
 134	/* Fixes DMA transfer timout issue after disabling ASPM on RTS5260 */
 135	msleep(1);
 136
 137	if (option->ltr_enabled)
 138		rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
 139
 140	if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
 141		rtsx_set_l1off_sub_cfg_d0(pcr, 1);
 142}
 143
 144static void rtsx_pm_full_on(struct rtsx_pcr *pcr)
 145{
 146	rtsx_comm_pm_full_on(pcr);
 147}
 148
 149void rtsx_pci_start_run(struct rtsx_pcr *pcr)
 150{
 151	/* If pci device removed, don't queue idle work any more */
 152	if (pcr->remove_pci)
 153		return;
 154
 155	if (pcr->rtd3_en)
 156		if (pcr->is_runtime_suspended) {
 157			pm_runtime_get(&(pcr->pci->dev));
 158			pcr->is_runtime_suspended = false;
 159		}
 160
 161	if (pcr->state != PDEV_STAT_RUN) {
 162		pcr->state = PDEV_STAT_RUN;
 163		if (pcr->ops->enable_auto_blink)
 164			pcr->ops->enable_auto_blink(pcr);
 165		rtsx_pm_full_on(pcr);
 166	}
 167
 168	mod_delayed_work(system_wq, &pcr->idle_work, msecs_to_jiffies(200));
 169}
 170EXPORT_SYMBOL_GPL(rtsx_pci_start_run);
 171
 172int rtsx_pci_write_register(struct rtsx_pcr *pcr, u16 addr, u8 mask, u8 data)
 173{
 174	int i;
 175	u32 val = HAIMR_WRITE_START;
 176
 177	val |= (u32)(addr & 0x3FFF) << 16;
 178	val |= (u32)mask << 8;
 179	val |= (u32)data;
 180
 181	rtsx_pci_writel(pcr, RTSX_HAIMR, val);
 182
 183	for (i = 0; i < MAX_RW_REG_CNT; i++) {
 184		val = rtsx_pci_readl(pcr, RTSX_HAIMR);
 185		if ((val & HAIMR_TRANS_END) == 0) {
 186			if (data != (u8)val)
 187				return -EIO;
 188			return 0;
 189		}
 190	}
 191
 192	return -ETIMEDOUT;
 193}
 194EXPORT_SYMBOL_GPL(rtsx_pci_write_register);
 195
 196int rtsx_pci_read_register(struct rtsx_pcr *pcr, u16 addr, u8 *data)
 197{
 198	u32 val = HAIMR_READ_START;
 199	int i;
 200
 201	val |= (u32)(addr & 0x3FFF) << 16;
 202	rtsx_pci_writel(pcr, RTSX_HAIMR, val);
 203
 204	for (i = 0; i < MAX_RW_REG_CNT; i++) {
 205		val = rtsx_pci_readl(pcr, RTSX_HAIMR);
 206		if ((val & HAIMR_TRANS_END) == 0)
 207			break;
 208	}
 209
 210	if (i >= MAX_RW_REG_CNT)
 211		return -ETIMEDOUT;
 212
 213	if (data)
 214		*data = (u8)(val & 0xFF);
 215
 216	return 0;
 217}
 218EXPORT_SYMBOL_GPL(rtsx_pci_read_register);
 219
 220int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
 221{
 222	int err, i, finished = 0;
 223	u8 tmp;
 224
 225	rtsx_pci_write_register(pcr, PHYDATA0, 0xFF, (u8)val);
 226	rtsx_pci_write_register(pcr, PHYDATA1, 0xFF, (u8)(val >> 8));
 227	rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr);
 228	rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x81);
 229
 230	for (i = 0; i < 100000; i++) {
 231		err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
 232		if (err < 0)
 233			return err;
 234
 235		if (!(tmp & 0x80)) {
 236			finished = 1;
 237			break;
 238		}
 239	}
 240
 241	if (!finished)
 242		return -ETIMEDOUT;
 243
 244	return 0;
 245}
 246
 247int rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
 248{
 249	if (pcr->ops->write_phy)
 250		return pcr->ops->write_phy(pcr, addr, val);
 251
 252	return __rtsx_pci_write_phy_register(pcr, addr, val);
 253}
 254EXPORT_SYMBOL_GPL(rtsx_pci_write_phy_register);
 255
 256int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
 257{
 258	int err, i, finished = 0;
 259	u16 data;
 260	u8 tmp, val1, val2;
 261
 262	rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr);
 263	rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x80);
 264
 265	for (i = 0; i < 100000; i++) {
 266		err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
 267		if (err < 0)
 268			return err;
 269
 270		if (!(tmp & 0x80)) {
 271			finished = 1;
 272			break;
 273		}
 274	}
 275
 276	if (!finished)
 277		return -ETIMEDOUT;
 278
 279	rtsx_pci_read_register(pcr, PHYDATA0, &val1);
 280	rtsx_pci_read_register(pcr, PHYDATA1, &val2);
 281	data = val1 | (val2 << 8);
 282
 283	if (val)
 284		*val = data;
 285
 286	return 0;
 287}
 288
 289int rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
 290{
 291	if (pcr->ops->read_phy)
 292		return pcr->ops->read_phy(pcr, addr, val);
 293
 294	return __rtsx_pci_read_phy_register(pcr, addr, val);
 295}
 296EXPORT_SYMBOL_GPL(rtsx_pci_read_phy_register);
 297
 298void rtsx_pci_stop_cmd(struct rtsx_pcr *pcr)
 299{
 300	if (pcr->ops->stop_cmd)
 301		return pcr->ops->stop_cmd(pcr);
 302
 303	rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
 304	rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
 305
 306	rtsx_pci_write_register(pcr, DMACTL, 0x80, 0x80);
 307	rtsx_pci_write_register(pcr, RBCTL, 0x80, 0x80);
 308}
 309EXPORT_SYMBOL_GPL(rtsx_pci_stop_cmd);
 310
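/*
 * Batched register access: callers queue register reads/writes into the
 * host command buffer and let the controller execute them in one go.  An
 * illustrative sketch of the pattern, mirroring rtsx_pci_read_ppbuf()
 * further below:
 *
 *	rtsx_pci_init_cmd(pcr);
 *	rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg, 0, 0);
 *	err = rtsx_pci_send_cmd(pcr, 250);
 */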
 311void rtsx_pci_add_cmd(struct rtsx_pcr *pcr,
 312		u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
 313{
 314	unsigned long flags;
 315	u32 val = 0;
 316	u32 *ptr = (u32 *)(pcr->host_cmds_ptr);
 317
 318	val |= (u32)(cmd_type & 0x03) << 30;
 319	val |= (u32)(reg_addr & 0x3FFF) << 16;
 320	val |= (u32)mask << 8;
 321	val |= (u32)data;
 322
 323	spin_lock_irqsave(&pcr->lock, flags);
 324	ptr += pcr->ci;
 325	if (pcr->ci < (HOST_CMDS_BUF_LEN / 4)) {
 326		put_unaligned_le32(val, ptr);
 327		ptr++;
 328		pcr->ci++;
 329	}
 330	spin_unlock_irqrestore(&pcr->lock, flags);
 331}
 332EXPORT_SYMBOL_GPL(rtsx_pci_add_cmd);
 333
 334void rtsx_pci_send_cmd_no_wait(struct rtsx_pcr *pcr)
 335{
 336	u32 val = 1 << 31;
 337
 338	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
 339
 340	val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
 341	/* Hardware Auto Response */
 342	val |= 0x40000000;
 343	rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
 344}
 345EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd_no_wait);
 346
 347int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout)
 348{
 349	struct completion trans_done;
 350	u32 val = 1 << 31;
 351	long timeleft;
 352	unsigned long flags;
 353	int err = 0;
 354
 355	spin_lock_irqsave(&pcr->lock, flags);
 356
 357	/* set up data structures for the wakeup system */
 358	pcr->done = &trans_done;
 359	pcr->trans_result = TRANS_NOT_READY;
 360	init_completion(&trans_done);
 361
 362	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
 363
 364	val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
 365	/* Hardware Auto Response */
 366	val |= 0x40000000;
 367	rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
 368
 369	spin_unlock_irqrestore(&pcr->lock, flags);
 370
 371	/* Wait for TRANS_OK_INT */
 372	timeleft = wait_for_completion_interruptible_timeout(
 373			&trans_done, msecs_to_jiffies(timeout));
 374	if (timeleft <= 0) {
 375		pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
 376		err = -ETIMEDOUT;
 377		goto finish_send_cmd;
 378	}
 379
 380	spin_lock_irqsave(&pcr->lock, flags);
 381	if (pcr->trans_result == TRANS_RESULT_FAIL)
 382		err = -EINVAL;
 383	else if (pcr->trans_result == TRANS_RESULT_OK)
 384		err = 0;
 385	else if (pcr->trans_result == TRANS_NO_DEVICE)
 386		err = -ENODEV;
 387	spin_unlock_irqrestore(&pcr->lock, flags);
 388
 389finish_send_cmd:
 390	spin_lock_irqsave(&pcr->lock, flags);
 391	pcr->done = NULL;
 392	spin_unlock_irqrestore(&pcr->lock, flags);
 393
 394	if ((err < 0) && (err != -ENODEV))
 395		rtsx_pci_stop_cmd(pcr);
 396
 397	if (pcr->finish_me)
 398		complete(pcr->finish_me);
 399
 400	return err;
 401}
 402EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd);
 403
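/*
 * Append one 64-bit entry to the host scatter-gather table: the DMA address
 * occupies the upper 32 bits, the length and the option flags the lower 32.
 * RTS5261 and RTS5228 use a different length encoding than the older parts.
 */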
 404static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr,
 405		dma_addr_t addr, unsigned int len, int end)
 406{
 407	u64 *ptr = (u64 *)(pcr->host_sg_tbl_ptr) + pcr->sgi;
 408	u64 val;
 409	u8 option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;
 410
 411	pcr_dbg(pcr, "DMA addr: 0x%x, Len: 0x%x\n", (unsigned int)addr, len);
 412
 413	if (end)
 414		option |= RTSX_SG_END;
 415
 416	if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5228)) {
 417		if (len > 0xFFFF)
 418			val = ((u64)addr << 32) | (((u64)len & 0xFFFF) << 16)
 419				| (((u64)len >> 16) << 6) | option;
 420		else
 421			val = ((u64)addr << 32) | ((u64)len << 16) | option;
 422	} else {
 423		val = ((u64)addr << 32) | ((u64)len << 12) | option;
 424	}
 425	put_unaligned_le64(val, ptr);
 426	pcr->sgi++;
 427}
 428
 429int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
 430		int num_sg, bool read, int timeout)
 431{
 432	int err = 0, count;
 433
 434	pcr_dbg(pcr, "--> %s: num_sg = %d\n", __func__, num_sg);
 435	count = rtsx_pci_dma_map_sg(pcr, sglist, num_sg, read);
 436	if (count < 1)
 437		return -EINVAL;
 438	pcr_dbg(pcr, "DMA mapping count: %d\n", count);
 439
 440	err = rtsx_pci_dma_transfer(pcr, sglist, count, read, timeout);
 441
 442	rtsx_pci_dma_unmap_sg(pcr, sglist, num_sg, read);
 443
 444	return err;
 445}
 446EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data);
 447
 448int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
 449		int num_sg, bool read)
 450{
 451	enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
 452
 453	if (pcr->remove_pci)
 454		return -EINVAL;
 455
 456	if ((sglist == NULL) || (num_sg <= 0))
 457		return -EINVAL;
 458
 459	return dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dir);
 460}
 461EXPORT_SYMBOL_GPL(rtsx_pci_dma_map_sg);
 462
 463void rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
 464		int num_sg, bool read)
 465{
 466	enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
 467
 468	dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dir);
 469}
 470EXPORT_SYMBOL_GPL(rtsx_pci_dma_unmap_sg);
 471
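/*
 * Run an ADMA transfer over an already mapped scatterlist and wait for the
 * interrupt handler to complete it.  A failed transfer returns -EILSEQ and
 * bumps dma_error_count, which rtsx_pci_switch_clock() uses to throttle the
 * card clock on RTS5227.
 */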
 472int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist,
 473		int count, bool read, int timeout)
 474{
 475	struct completion trans_done;
 476	struct scatterlist *sg;
 477	dma_addr_t addr;
 478	long timeleft;
 479	unsigned long flags;
 480	unsigned int len;
 481	int i, err = 0;
 482	u32 val;
 483	u8 dir = read ? DEVICE_TO_HOST : HOST_TO_DEVICE;
 484
 485	if (pcr->remove_pci)
 486		return -ENODEV;
 487
 488	if ((sglist == NULL) || (count < 1))
 489		return -EINVAL;
 490
 491	val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE;
 492	pcr->sgi = 0;
 493	for_each_sg(sglist, sg, count, i) {
 494		addr = sg_dma_address(sg);
 495		len = sg_dma_len(sg);
 496		rtsx_pci_add_sg_tbl(pcr, addr, len, i == count - 1);
 497	}
 498
 499	spin_lock_irqsave(&pcr->lock, flags);
 500
 501	pcr->done = &trans_done;
 502	pcr->trans_result = TRANS_NOT_READY;
 503	init_completion(&trans_done);
 504	rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr);
 505	rtsx_pci_writel(pcr, RTSX_HDBCTLR, val);
 506
 507	spin_unlock_irqrestore(&pcr->lock, flags);
 508
 509	timeleft = wait_for_completion_interruptible_timeout(
 510			&trans_done, msecs_to_jiffies(timeout));
 511	if (timeleft <= 0) {
 512		pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
 513		err = -ETIMEDOUT;
 514		goto out;
 515	}
 516
 517	spin_lock_irqsave(&pcr->lock, flags);
 518	if (pcr->trans_result == TRANS_RESULT_FAIL) {
 519		err = -EILSEQ;
 520		if (pcr->dma_error_count < RTS_MAX_TIMES_FREQ_REDUCTION)
 521			pcr->dma_error_count++;
 522	}
 523
 524	else if (pcr->trans_result == TRANS_NO_DEVICE)
 525		err = -ENODEV;
 526	spin_unlock_irqrestore(&pcr->lock, flags);
 527
 528out:
 529	spin_lock_irqsave(&pcr->lock, flags);
 530	pcr->done = NULL;
 531	spin_unlock_irqrestore(&pcr->lock, flags);
 532
 533	if ((err < 0) && (err != -ENODEV))
 534		rtsx_pci_stop_cmd(pcr);
 535
 536	if (pcr->finish_me)
 537		complete(pcr->finish_me);
 538
 539	return err;
 540}
 541EXPORT_SYMBOL_GPL(rtsx_pci_dma_transfer);
 542
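/*
 * Read up to 512 bytes from the ping-pong buffer, batching 256 register
 * reads per command run starting at PPBUF_BASE2.
 */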
 543int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
 544{
 545	int err;
 546	int i, j;
 547	u16 reg;
 548	u8 *ptr;
 549
 550	if (buf_len > 512)
 551		buf_len = 512;
 552
 553	ptr = buf;
 554	reg = PPBUF_BASE2;
 555	for (i = 0; i < buf_len / 256; i++) {
 556		rtsx_pci_init_cmd(pcr);
 557
 558		for (j = 0; j < 256; j++)
 559			rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
 560
 561		err = rtsx_pci_send_cmd(pcr, 250);
 562		if (err < 0)
 563			return err;
 564
 565		memcpy(ptr, rtsx_pci_get_cmd_data(pcr), 256);
 566		ptr += 256;
 567	}
 568
 569	if (buf_len % 256) {
 570		rtsx_pci_init_cmd(pcr);
 571
 572		for (j = 0; j < buf_len % 256; j++)
 573			rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
 574
 575		err = rtsx_pci_send_cmd(pcr, 250);
 576		if (err < 0)
 577			return err;
 578	}
 579
 580	memcpy(ptr, rtsx_pci_get_cmd_data(pcr), buf_len % 256);
 581
 582	return 0;
 583}
 584EXPORT_SYMBOL_GPL(rtsx_pci_read_ppbuf);
 585
 586int rtsx_pci_write_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
 587{
 588	int err;
 589	int i, j;
 590	u16 reg;
 591	u8 *ptr;
 592
 593	if (buf_len > 512)
 594		buf_len = 512;
 595
 596	ptr = buf;
 597	reg = PPBUF_BASE2;
 598	for (i = 0; i < buf_len / 256; i++) {
 599		rtsx_pci_init_cmd(pcr);
 600
 601		for (j = 0; j < 256; j++) {
 602			rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
 603					reg++, 0xFF, *ptr);
 604			ptr++;
 605		}
 606
 607		err = rtsx_pci_send_cmd(pcr, 250);
 608		if (err < 0)
 609			return err;
 610	}
 611
 612	if (buf_len % 256) {
 613		rtsx_pci_init_cmd(pcr);
 614
 615		for (j = 0; j < buf_len % 256; j++) {
 616			rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
 617					reg++, 0xFF, *ptr);
 618			ptr++;
 619		}
 620
 621		err = rtsx_pci_send_cmd(pcr, 250);
 622		if (err < 0)
 623			return err;
 624	}
 625
 626	return 0;
 627}
 628EXPORT_SYMBOL_GPL(rtsx_pci_write_ppbuf);
 629
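/*
 * Program a pull-control table: each 32-bit entry packs a register address
 * in its upper 16 bits and the value in its low byte; an entry with a zero
 * upper half terminates the table.
 */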
 630static int rtsx_pci_set_pull_ctl(struct rtsx_pcr *pcr, const u32 *tbl)
 631{
 632	rtsx_pci_init_cmd(pcr);
 633
 634	while (*tbl & 0xFFFF0000) {
 635		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
 636				(u16)(*tbl >> 16), 0xFF, (u8)(*tbl));
 637		tbl++;
 638	}
 639
 640	return rtsx_pci_send_cmd(pcr, 100);
 641}
 642
 643int rtsx_pci_card_pull_ctl_enable(struct rtsx_pcr *pcr, int card)
 644{
 645	const u32 *tbl;
 646
 647	if (card == RTSX_SD_CARD)
 648		tbl = pcr->sd_pull_ctl_enable_tbl;
 649	else if (card == RTSX_MS_CARD)
 650		tbl = pcr->ms_pull_ctl_enable_tbl;
 651	else
 652		return -EINVAL;
 653
 654	return rtsx_pci_set_pull_ctl(pcr, tbl);
 655}
 656EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_enable);
 657
 658int rtsx_pci_card_pull_ctl_disable(struct rtsx_pcr *pcr, int card)
 659{
 660	const u32 *tbl;
 661
 662	if (card == RTSX_SD_CARD)
 663		tbl = pcr->sd_pull_ctl_disable_tbl;
 664	else if (card == RTSX_MS_CARD)
 665		tbl = pcr->ms_pull_ctl_disable_tbl;
 666	else
 667		return -EINVAL;
 668
 669	return rtsx_pci_set_pull_ctl(pcr, tbl);
 670}
 671EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_disable);
 672
 673static void rtsx_pci_enable_bus_int(struct rtsx_pcr *pcr)
 674{
 675	struct rtsx_hw_param *hw_param = &pcr->hw_param;
 676
 677	pcr->bier = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN | SD_INT_EN
 678		| hw_param->interrupt_en;
 679
 680	if (pcr->num_slots > 1)
 681		pcr->bier |= MS_INT_EN;
 682
 683	/* Enable Bus Interrupt */
 684	rtsx_pci_writel(pcr, RTSX_BIER, pcr->bier);
 685
 686	pcr_dbg(pcr, "RTSX_BIER: 0x%08x\n", pcr->bier);
 687}
 688
 689static inline u8 double_ssc_depth(u8 depth)
 690{
 691	return ((depth > 1) ? (depth - 1) : depth);
 692}
 693
 694static u8 revise_ssc_depth(u8 ssc_depth, u8 div)
 695{
 696	if (div > CLK_DIV_1) {
 697		if (ssc_depth > (div - 1))
 698			ssc_depth -= (div - 1);
 699		else
 700			ssc_depth = SSC_DEPTH_4M;
 701	}
 702
 703	return ssc_depth;
 704}
 705
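/*
 * Switch the card clock.  The SSC divider N defaults to (clk - 2); the
 * clock is doubled and the output divider stepped up until N reaches
 * MIN_DIV_N_PCR.  RTS5261 and RTS5228 are handled by their chip-specific
 * helpers instead.
 */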
 706int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
 707		u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
 708{
 709	int err, clk;
 710	u8 n, clk_divider, mcu_cnt, div;
 711	static const u8 depth[] = {
 712		[RTSX_SSC_DEPTH_4M] = SSC_DEPTH_4M,
 713		[RTSX_SSC_DEPTH_2M] = SSC_DEPTH_2M,
 714		[RTSX_SSC_DEPTH_1M] = SSC_DEPTH_1M,
 715		[RTSX_SSC_DEPTH_500K] = SSC_DEPTH_500K,
 716		[RTSX_SSC_DEPTH_250K] = SSC_DEPTH_250K,
 717	};
 718
 719	if (PCI_PID(pcr) == PID_5261)
 720		return rts5261_pci_switch_clock(pcr, card_clock,
 721				ssc_depth, initial_mode, double_clk, vpclk);
 722	if (PCI_PID(pcr) == PID_5228)
 723		return rts5228_pci_switch_clock(pcr, card_clock,
 724				ssc_depth, initial_mode, double_clk, vpclk);
 725
 726	if (initial_mode) {
  727		/* Use roughly 250 kHz (30 MHz / 128) in the initial stage */
 728		clk_divider = SD_CLK_DIVIDE_128;
 729		card_clock = 30000000;
 730	} else {
 731		clk_divider = SD_CLK_DIVIDE_0;
 732	}
 733	err = rtsx_pci_write_register(pcr, SD_CFG1,
 734			SD_CLK_DIVIDE_MASK, clk_divider);
 735	if (err < 0)
 736		return err;
 737
 738	/* Reduce card clock by 20MHz each time a DMA transfer error occurs */
 739	if (card_clock == UHS_SDR104_MAX_DTR &&
 740	    pcr->dma_error_count &&
 741	    PCI_PID(pcr) == RTS5227_DEVICE_ID)
 742		card_clock = UHS_SDR104_MAX_DTR -
 743			(pcr->dma_error_count * 20000000);
 744
 745	card_clock /= 1000000;
 746	pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock);
 747
 748	clk = card_clock;
 749	if (!initial_mode && double_clk)
 750		clk = card_clock * 2;
 751	pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n",
 752		clk, pcr->cur_clock);
 753
 754	if (clk == pcr->cur_clock)
 755		return 0;
 756
 757	if (pcr->ops->conv_clk_and_div_n)
 758		n = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
 759	else
 760		n = (u8)(clk - 2);
 761	if ((clk <= 2) || (n > MAX_DIV_N_PCR))
 762		return -EINVAL;
 763
 764	mcu_cnt = (u8)(125/clk + 3);
 765	if (mcu_cnt > 15)
 766		mcu_cnt = 15;
 767
 768	/* Make sure that the SSC clock div_n is not less than MIN_DIV_N_PCR */
 769	div = CLK_DIV_1;
 770	while ((n < MIN_DIV_N_PCR) && (div < CLK_DIV_8)) {
 771		if (pcr->ops->conv_clk_and_div_n) {
 772			int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
 773					DIV_N_TO_CLK) * 2;
 774			n = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk,
 775					CLK_TO_DIV_N);
 776		} else {
 777			n = (n + 2) * 2 - 2;
 778		}
 779		div++;
 780	}
 781	pcr_dbg(pcr, "n = %d, div = %d\n", n, div);
 782
 783	ssc_depth = depth[ssc_depth];
 784	if (double_clk)
 785		ssc_depth = double_ssc_depth(ssc_depth);
 786
 787	ssc_depth = revise_ssc_depth(ssc_depth, div);
 788	pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth);
 789
 790	rtsx_pci_init_cmd(pcr);
 791	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
 792			CLK_LOW_FREQ, CLK_LOW_FREQ);
 793	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV,
 794			0xFF, (div << 4) | mcu_cnt);
 795	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
 796	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2,
 797			SSC_DEPTH_MASK, ssc_depth);
 798	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
 799	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
 800	if (vpclk) {
 801		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
 802				PHASE_NOT_RESET, 0);
 803		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
 804				PHASE_NOT_RESET, PHASE_NOT_RESET);
 805	}
 806
 807	err = rtsx_pci_send_cmd(pcr, 2000);
 808	if (err < 0)
 809		return err;
 810
  811	/* Wait for the SSC clock to stabilize */
 812	udelay(SSC_CLOCK_STABLE_WAIT);
 813	err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
 814	if (err < 0)
 815		return err;
 816
 817	pcr->cur_clock = clk;
 818	return 0;
 819}
 820EXPORT_SYMBOL_GPL(rtsx_pci_switch_clock);
 821
 822int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card)
 823{
 824	if (pcr->ops->card_power_on)
 825		return pcr->ops->card_power_on(pcr, card);
 826
 827	return 0;
 828}
 829EXPORT_SYMBOL_GPL(rtsx_pci_card_power_on);
 830
 831int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card)
 832{
 833	if (pcr->ops->card_power_off)
 834		return pcr->ops->card_power_off(pcr, card);
 835
 836	return 0;
 837}
 838EXPORT_SYMBOL_GPL(rtsx_pci_card_power_off);
 839
 840int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card)
 841{
 842	static const unsigned int cd_mask[] = {
 843		[RTSX_SD_CARD] = SD_EXIST,
 844		[RTSX_MS_CARD] = MS_EXIST
 845	};
 846
 847	if (!(pcr->flags & PCR_MS_PMOS)) {
  848		/* When a single PMOS is used, accessing the card is not permitted
  849		 * if the card currently present is not the designated one.
 850		 */
 851		if (pcr->card_exist & (~cd_mask[card]))
 852			return -EIO;
 853	}
 854
 855	return 0;
 856}
 857EXPORT_SYMBOL_GPL(rtsx_pci_card_exclusive_check);
 858
 859int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
 860{
 861	if (pcr->ops->switch_output_voltage)
 862		return pcr->ops->switch_output_voltage(pcr, voltage);
 863
 864	return 0;
 865}
 866EXPORT_SYMBOL_GPL(rtsx_pci_switch_output_voltage);
 867
 868unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr)
 869{
 870	unsigned int val;
 871
 872	val = rtsx_pci_readl(pcr, RTSX_BIPR);
 873	if (pcr->ops->cd_deglitch)
 874		val = pcr->ops->cd_deglitch(pcr);
 875
 876	return val;
 877}
 878EXPORT_SYMBOL_GPL(rtsx_pci_card_exist);
 879
 880void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr)
 881{
 882	struct completion finish;
 883
 884	pcr->finish_me = &finish;
 885	init_completion(&finish);
 886
 887	if (pcr->done)
 888		complete(pcr->done);
 889
 890	if (!pcr->remove_pci)
 891		rtsx_pci_stop_cmd(pcr);
 892
 893	wait_for_completion_interruptible_timeout(&finish,
 894			msecs_to_jiffies(2));
 895	pcr->finish_me = NULL;
 896}
 897EXPORT_SYMBOL_GPL(rtsx_pci_complete_unfinished_transfer);
 898
 899static void rtsx_pci_card_detect(struct work_struct *work)
 900{
 901	struct delayed_work *dwork;
 902	struct rtsx_pcr *pcr;
 903	unsigned long flags;
 904	unsigned int card_detect = 0, card_inserted, card_removed;
 905	u32 irq_status;
 906
 907	dwork = to_delayed_work(work);
 908	pcr = container_of(dwork, struct rtsx_pcr, carddet_work);
 909
 910	pcr_dbg(pcr, "--> %s\n", __func__);
 911
 912	mutex_lock(&pcr->pcr_mutex);
 913	spin_lock_irqsave(&pcr->lock, flags);
 914
 915	irq_status = rtsx_pci_readl(pcr, RTSX_BIPR);
 916	pcr_dbg(pcr, "irq_status: 0x%08x\n", irq_status);
 917
 918	irq_status &= CARD_EXIST;
 919	card_inserted = pcr->card_inserted & irq_status;
 920	card_removed = pcr->card_removed;
 921	pcr->card_inserted = 0;
 922	pcr->card_removed = 0;
 923
 924	spin_unlock_irqrestore(&pcr->lock, flags);
 925
 926	if (card_inserted || card_removed) {
 927		pcr_dbg(pcr, "card_inserted: 0x%x, card_removed: 0x%x\n",
 928			card_inserted, card_removed);
 929
 930		if (pcr->ops->cd_deglitch)
 931			card_inserted = pcr->ops->cd_deglitch(pcr);
 932
 933		card_detect = card_inserted | card_removed;
 934
 935		pcr->card_exist |= card_inserted;
 936		pcr->card_exist &= ~card_removed;
 937	}
 938
 939	mutex_unlock(&pcr->pcr_mutex);
 940
 941	if ((card_detect & SD_EXIST) && pcr->slots[RTSX_SD_CARD].card_event)
 942		pcr->slots[RTSX_SD_CARD].card_event(
 943				pcr->slots[RTSX_SD_CARD].p_dev);
 944	if ((card_detect & MS_EXIST) && pcr->slots[RTSX_MS_CARD].card_event)
 945		pcr->slots[RTSX_MS_CARD].card_event(
 946				pcr->slots[RTSX_MS_CARD].p_dev);
 947}
 948
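/*
 * Delayed card-detect work: take a snapshot of the insert/remove bits
 * recorded by the interrupt handler, update pcr->card_exist and notify the
 * SD/MS slot drivers through their card_event callbacks.
 */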
 949static void rtsx_pci_process_ocp(struct rtsx_pcr *pcr)
 950{
 951	if (pcr->ops->process_ocp) {
 952		pcr->ops->process_ocp(pcr);
 953	} else {
 954		if (!pcr->option.ocp_en)
 955			return;
 956		rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
 957		if (pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
 958			rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
 959			rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
 960			rtsx_pci_clear_ocpstat(pcr);
 961			pcr->ocp_stat = 0;
 962		}
 963	}
 964}
 965
 966static int rtsx_pci_process_ocp_interrupt(struct rtsx_pcr *pcr)
 967{
 968	if (pcr->option.ocp_en)
 969		rtsx_pci_process_ocp(pcr);
 970
 971	return 0;
 972}
 973
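/*
 * Interrupt handler: latch and clear RTSX_BIPR, record SD/MS insert and
 * remove events for the card-detect work, and complete any transfer that
 * is waiting on pcr->done.
 */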
 974static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
 975{
 976	struct rtsx_pcr *pcr = dev_id;
 977	u32 int_reg;
 978
 979	if (!pcr)
 980		return IRQ_NONE;
 981
 982	spin_lock(&pcr->lock);
 983
 984	int_reg = rtsx_pci_readl(pcr, RTSX_BIPR);
 985	/* Clear interrupt flag */
 986	rtsx_pci_writel(pcr, RTSX_BIPR, int_reg);
 987	if ((int_reg & pcr->bier) == 0) {
 988		spin_unlock(&pcr->lock);
 989		return IRQ_NONE;
 990	}
 991	if (int_reg == 0xFFFFFFFF) {
 992		spin_unlock(&pcr->lock);
 993		return IRQ_HANDLED;
 994	}
 995
 996	int_reg &= (pcr->bier | 0x7FFFFF);
 997
 998	if (int_reg & SD_OC_INT)
 999		rtsx_pci_process_ocp_interrupt(pcr);
1000
1001	if (int_reg & SD_INT) {
1002		if (int_reg & SD_EXIST) {
1003			pcr->card_inserted |= SD_EXIST;
1004		} else {
1005			pcr->card_removed |= SD_EXIST;
1006			pcr->card_inserted &= ~SD_EXIST;
1007			if (PCI_PID(pcr) == PID_5261) {
1008				rtsx_pci_write_register(pcr, RTS5261_FW_STATUS,
1009					RTS5261_EXPRESS_LINK_FAIL_MASK, 0);
1010				pcr->extra_caps |= EXTRA_CAPS_SD_EXPRESS;
1011			}
1012		}
1013		pcr->dma_error_count = 0;
1014	}
1015
1016	if (int_reg & MS_INT) {
1017		if (int_reg & MS_EXIST) {
1018			pcr->card_inserted |= MS_EXIST;
1019		} else {
1020			pcr->card_removed |= MS_EXIST;
1021			pcr->card_inserted &= ~MS_EXIST;
1022		}
1023	}
1024
1025	if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) {
1026		if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) {
1027			pcr->trans_result = TRANS_RESULT_FAIL;
1028			if (pcr->done)
1029				complete(pcr->done);
1030		} else if (int_reg & TRANS_OK_INT) {
1031			pcr->trans_result = TRANS_RESULT_OK;
1032			if (pcr->done)
1033				complete(pcr->done);
1034		}
1035	}
1036
1037	if ((pcr->card_inserted || pcr->card_removed) && !(int_reg & SD_OC_INT))
1038		schedule_delayed_work(&pcr->carddet_work,
1039				msecs_to_jiffies(200));
1040
1041	spin_unlock(&pcr->lock);
1042	return IRQ_HANDLED;
1043}
1044
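/* Request the interrupt line: exclusive for MSI, shared for legacy INTx. */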
1045static int rtsx_pci_acquire_irq(struct rtsx_pcr *pcr)
1046{
1047	pcr_dbg(pcr, "%s: pcr->msi_en = %d, pci->irq = %d\n",
1048			__func__, pcr->msi_en, pcr->pci->irq);
1049
1050	if (request_irq(pcr->pci->irq, rtsx_pci_isr,
1051			pcr->msi_en ? 0 : IRQF_SHARED,
1052			DRV_NAME_RTSX_PCI, pcr)) {
1053		dev_err(&(pcr->pci->dev),
1054			"rtsx_sdmmc: unable to grab IRQ %d, disabling device\n",
1055			pcr->pci->irq);
1056		return -1;
1057	}
1058
1059	pcr->irq = pcr->pci->irq;
1060	pci_intx(pcr->pci, !pcr->msi_en);
1061
1062	return 0;
1063}
1064
1065static void rtsx_enable_aspm(struct rtsx_pcr *pcr)
1066{
1067	if (pcr->ops->set_aspm)
1068		pcr->ops->set_aspm(pcr, true);
1069	else
1070		rtsx_comm_set_aspm(pcr, true);
1071}
1072
1073static void rtsx_comm_pm_power_saving(struct rtsx_pcr *pcr)
1074{
1075	struct rtsx_cr_option *option = &pcr->option;
1076
1077	if (option->ltr_enabled) {
1078		u32 latency = option->ltr_l1off_latency;
1079
1080		if (rtsx_check_dev_flag(pcr, L1_SNOOZE_TEST_EN))
1081			mdelay(option->l1_snooze_delay);
1082
1083		rtsx_set_ltr_latency(pcr, latency);
1084	}
1085
1086	if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
1087		rtsx_set_l1off_sub_cfg_d0(pcr, 0);
1088
1089	rtsx_enable_aspm(pcr);
1090}
1091
1092static void rtsx_pm_power_saving(struct rtsx_pcr *pcr)
1093{
1094	rtsx_comm_pm_power_saving(pcr);
1095}
1096
1097static void rtsx_pci_rtd3_work(struct work_struct *work)
1098{
1099	struct delayed_work *dwork = to_delayed_work(work);
1100	struct rtsx_pcr *pcr = container_of(dwork, struct rtsx_pcr, rtd3_work);
1101
1102	pcr_dbg(pcr, "--> %s\n", __func__);
1103	if (!pcr->is_runtime_suspended)
1104		pm_runtime_put(&(pcr->pci->dev));
1105}
1106
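/*
 * Idle work: turn off the LED, drop into power-saving mode and, when RTD3
 * is enabled, schedule the runtime-suspend work.
 */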
1107static void rtsx_pci_idle_work(struct work_struct *work)
1108{
1109	struct delayed_work *dwork = to_delayed_work(work);
1110	struct rtsx_pcr *pcr = container_of(dwork, struct rtsx_pcr, idle_work);
1111
1112	pcr_dbg(pcr, "--> %s\n", __func__);
1113
1114	mutex_lock(&pcr->pcr_mutex);
1115
1116	pcr->state = PDEV_STAT_IDLE;
1117
1118	if (pcr->ops->disable_auto_blink)
1119		pcr->ops->disable_auto_blink(pcr);
1120	if (pcr->ops->turn_off_led)
1121		pcr->ops->turn_off_led(pcr);
1122
1123	rtsx_pm_power_saving(pcr);
1124
1125	mutex_unlock(&pcr->pcr_mutex);
1126
1127	if (pcr->rtd3_en)
1128		mod_delayed_work(system_wq, &pcr->rtd3_work, msecs_to_jiffies(10000));
1129}
1130
1131static void rtsx_base_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
1132{
1133	/* Set relink_time to 0 */
1134	rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0);
1135	rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0);
1136	rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3,
1137			RELINK_TIME_MASK, 0);
1138
1139	rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
1140			D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
1141
1142	rtsx_pci_write_register(pcr, FPDCTL, ALL_POWER_DOWN, ALL_POWER_DOWN);
1143}
1144
1145static void __maybe_unused rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
1146{
1147	if (pcr->ops->turn_off_led)
1148		pcr->ops->turn_off_led(pcr);
1149
1150	rtsx_pci_writel(pcr, RTSX_BIER, 0);
1151	pcr->bier = 0;
1152
1153	rtsx_pci_write_register(pcr, PETXCFG, 0x08, 0x08);
1154	rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, pm_state);
1155
1156	if (pcr->ops->force_power_down)
1157		pcr->ops->force_power_down(pcr, pm_state);
1158	else
1159		rtsx_base_force_power_down(pcr, pm_state);
1160}
1161
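/* Over-current protection (OCP) helpers; chips may override them via ops. */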
1162void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr)
1163{
1164	u8 val = SD_OCP_INT_EN | SD_DETECT_EN;
1165
1166	if (pcr->ops->enable_ocp) {
1167		pcr->ops->enable_ocp(pcr);
1168	} else {
1169		rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
1170		rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
1171	}
1172
1173}
1174
1175void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr)
1176{
1177	u8 mask = SD_OCP_INT_EN | SD_DETECT_EN;
1178
1179	if (pcr->ops->disable_ocp) {
1180		pcr->ops->disable_ocp(pcr);
1181	} else {
1182		rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
1183		rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
1184				OC_POWER_DOWN);
1185	}
1186}
1187
1188void rtsx_pci_init_ocp(struct rtsx_pcr *pcr)
1189{
1190	if (pcr->ops->init_ocp) {
1191		pcr->ops->init_ocp(pcr);
1192	} else {
1193		struct rtsx_cr_option *option = &(pcr->option);
1194
1195		if (option->ocp_en) {
1196			u8 val = option->sd_800mA_ocp_thd;
1197
1198			rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
1199			rtsx_pci_write_register(pcr, REG_OCPPARA1,
1200				SD_OCP_TIME_MASK, SD_OCP_TIME_800);
1201			rtsx_pci_write_register(pcr, REG_OCPPARA2,
1202				SD_OCP_THD_MASK, val);
1203			rtsx_pci_write_register(pcr, REG_OCPGLITCH,
1204				SD_OCP_GLITCH_MASK, pcr->hw_param.ocp_glitch);
1205			rtsx_pci_enable_ocp(pcr);
1206		}
1207	}
1208}
1209
1210int rtsx_pci_get_ocpstat(struct rtsx_pcr *pcr, u8 *val)
1211{
1212	if (pcr->ops->get_ocpstat)
1213		return pcr->ops->get_ocpstat(pcr, val);
1214	else
1215		return rtsx_pci_read_register(pcr, REG_OCPSTAT, val);
1216}
1217
1218void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr)
1219{
1220	if (pcr->ops->clear_ocpstat) {
1221		pcr->ops->clear_ocpstat(pcr);
1222	} else {
1223		u8 mask = SD_OCP_INT_CLR | SD_OC_CLR;
1224		u8 val = SD_OCP_INT_CLR | SD_OC_CLR;
1225
1226		rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
1227		udelay(100);
1228		rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
1229	}
1230}
1231
1232void rtsx_pci_enable_oobs_polling(struct rtsx_pcr *pcr)
1233{
1234	u16 val;
1235
1236	if ((PCI_PID(pcr) != PID_525A) && (PCI_PID(pcr) != PID_5260)) {
1237		rtsx_pci_read_phy_register(pcr, 0x01, &val);
1238		val |= 1<<9;
1239		rtsx_pci_write_phy_register(pcr, 0x01, val);
1240	}
1241	rtsx_pci_write_register(pcr, REG_CFG_OOBS_OFF_TIMER, 0xFF, 0x32);
1242	rtsx_pci_write_register(pcr, REG_CFG_OOBS_ON_TIMER, 0xFF, 0x05);
1243	rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x83);
1244	rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0xDE);
1245
1246}
1247
1248void rtsx_pci_disable_oobs_polling(struct rtsx_pcr *pcr)
1249{
1250	u16 val;
1251
1252	if ((PCI_PID(pcr) != PID_525A) && (PCI_PID(pcr) != PID_5260)) {
1253		rtsx_pci_read_phy_register(pcr, 0x01, &val);
1254		val &= ~(1<<9);
1255		rtsx_pci_write_phy_register(pcr, 0x01, val);
1256	}
1257	rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x03);
1258	rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0x00);
1259
1260}
1261
1262int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr)
1263{
1264	rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
1265		MS_CLK_EN | SD40_CLK_EN, 0);
1266	rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
1267	rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
1268
1269	msleep(50);
1270
1271	rtsx_pci_card_pull_ctl_disable(pcr, RTSX_SD_CARD);
1272
1273	return 0;
1274}
1275
1276int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr)
1277{
1278	rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
1279		MS_CLK_EN | SD40_CLK_EN, 0);
1280
1281	rtsx_pci_card_pull_ctl_disable(pcr, RTSX_MS_CARD);
1282
1283	rtsx_pci_write_register(pcr, CARD_OE, MS_OUTPUT_EN, 0);
1284	rtsx_pci_card_power_off(pcr, RTSX_MS_CARD);
1285
1286	return 0;
1287}
1288
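/*
 * Bring the controller into a known state: power up SSC, program the common
 * link and clock registers in one command batch, then run the chip-specific
 * extra_init_hw hook and read back the initial card-present state.
 */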
1289static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
1290{
1291	struct pci_dev *pdev = pcr->pci;
1292	int err;
1293
1294	if (PCI_PID(pcr) == PID_5228)
1295		rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG1, RTS5228_LDO1_SR_TIME_MASK,
1296				RTS5228_LDO1_SR_0_5);
1297
1298	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
1299
1300	rtsx_pci_enable_bus_int(pcr);
1301
1302	/* Power on SSC */
1303	if (PCI_PID(pcr) == PID_5261) {
 1304		/* Gate the real MCU clock */
1305		err = rtsx_pci_write_register(pcr, RTS5261_FW_CFG1,
1306			RTS5261_MCU_CLOCK_GATING, 0);
1307		err = rtsx_pci_write_register(pcr, RTS5261_REG_FPDCTL,
1308			SSC_POWER_DOWN, 0);
1309	} else {
1310		err = rtsx_pci_write_register(pcr, FPDCTL, SSC_POWER_DOWN, 0);
1311	}
1312	if (err < 0)
1313		return err;
1314
 1315	/* Wait for SSC power to stabilize */
1316	udelay(200);
1317
1318	rtsx_disable_aspm(pcr);
1319	if (pcr->ops->optimize_phy) {
1320		err = pcr->ops->optimize_phy(pcr);
1321		if (err < 0)
1322			return err;
1323	}
1324
1325	rtsx_pci_init_cmd(pcr);
1326
1327	/* Set mcu_cnt to 7 to ensure data can be sampled properly */
1328	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV, 0x07, 0x07);
1329
1330	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, HOST_SLEEP_STATE, 0x03, 0x00);
1331	/* Disable card clock */
1332	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, 0x1E, 0);
1333	/* Reset delink mode */
1334	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x0A, 0);
1335	/* Card driving select */
1336	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DRIVE_SEL,
1337			0xFF, pcr->card_drive_sel);
1338	/* Enable SSC Clock */
1339	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1,
1340			0xFF, SSC_8X_EN | SSC_SEL_4M);
1341	if (PCI_PID(pcr) == PID_5261)
1342		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
1343			RTS5261_SSC_DEPTH_2M);
1344	else if (PCI_PID(pcr) == PID_5228)
1345		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
1346			RTS5228_SSC_DEPTH_2M);
1347	else
1348		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);
1349
1350	/* Disable cd_pwr_save */
1351	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x16, 0x10);
1352	/* Clear Link Ready Interrupt */
1353	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
1354			LINK_RDY_INT, LINK_RDY_INT);
 1355	/* Enlarge the estimation window for PERST# glitches
 1356	 * to reduce the chance of spurious card interrupts
1357	 */
1358	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PERST_GLITCH_WIDTH, 0xFF, 0x80);
1359	/* Update RC oscillator to 400k
1360	 * bit[0] F_HIGH: for RC oscillator, Rst_value is 1'b1
1361	 *                1: 2M  0: 400k
1362	 */
1363	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RCCTL, 0x01, 0x00);
1364	/* Set interrupt write clear
1365	 * bit 1: U_elbi_if_rd_clr_en
1366	 *	1: Enable ELBI interrupt[31:22] & [7:0] flag read clear
1367	 *	0: ELBI interrupt flag[31:22] & [7:0] only can be write clear
1368	 */
1369	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, NFTS_TX_CTRL, 0x02, 0);
1370
1371	err = rtsx_pci_send_cmd(pcr, 100);
1372	if (err < 0)
1373		return err;
1374
1375	switch (PCI_PID(pcr)) {
1376	case PID_5250:
1377	case PID_524A:
1378	case PID_525A:
1379	case PID_5260:
1380	case PID_5261:
1381	case PID_5228:
1382		rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
1383		break;
1384	default:
1385		break;
1386	}
1387
 1388	/* Initialize over-current protection (OCP) */
1389	rtsx_pci_init_ocp(pcr);
1390
1391	/* Enable clk_request_n to enable clock power management */
1392	pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
1393					0, PCI_EXP_LNKCTL_CLKREQ_EN);
1394	/* Enter L1 when host tx idle */
1395	pci_write_config_byte(pdev, 0x70F, 0x5B);
1396
1397	if (pcr->ops->extra_init_hw) {
1398		err = pcr->ops->extra_init_hw(pcr);
1399		if (err < 0)
1400			return err;
1401	}
1402
1403	if (pcr->aspm_mode == ASPM_MODE_REG)
1404		rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30);
1405
 1406	/* No card-detect interrupt is generated when the driver is probed
 1407	 * with a card already inserted, so initialize pcr->card_exist here.
1408	 */
1409	if (pcr->ops->cd_deglitch)
1410		pcr->card_exist = pcr->ops->cd_deglitch(pcr);
1411	else
1412		pcr->card_exist = rtsx_pci_readl(pcr, RTSX_BIPR) & CARD_EXIST;
1413
1414	return 0;
1415}
1416
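/*
 * Per-chip initialization: select the parameter set for this PID, read back
 * the current ASPM state, apply vendor settings and run the common hardware
 * init.
 */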
1417static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
1418{
1419	int err;
1420	u16 cfg_val;
1421	u8 val;
1422
1423	spin_lock_init(&pcr->lock);
1424	mutex_init(&pcr->pcr_mutex);
1425
1426	switch (PCI_PID(pcr)) {
1427	default:
1428	case 0x5209:
1429		rts5209_init_params(pcr);
1430		break;
1431
1432	case 0x5229:
1433		rts5229_init_params(pcr);
1434		break;
1435
1436	case 0x5289:
1437		rtl8411_init_params(pcr);
1438		break;
1439
1440	case 0x5227:
1441		rts5227_init_params(pcr);
1442		break;
1443
1444	case 0x522A:
1445		rts522a_init_params(pcr);
1446		break;
1447
1448	case 0x5249:
1449		rts5249_init_params(pcr);
1450		break;
1451
1452	case 0x524A:
1453		rts524a_init_params(pcr);
1454		break;
1455
1456	case 0x525A:
1457		rts525a_init_params(pcr);
1458		break;
1459
1460	case 0x5287:
1461		rtl8411b_init_params(pcr);
1462		break;
1463
1464	case 0x5286:
1465		rtl8402_init_params(pcr);
1466		break;
1467
1468	case 0x5260:
1469		rts5260_init_params(pcr);
1470		break;
1471
1472	case 0x5261:
1473		rts5261_init_params(pcr);
1474		break;
1475
1476	case 0x5228:
1477		rts5228_init_params(pcr);
1478		break;
1479	}
1480
1481	pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
1482			PCI_PID(pcr), pcr->ic_version);
1483
1484	pcr->slots = kcalloc(pcr->num_slots, sizeof(struct rtsx_slot),
1485			GFP_KERNEL);
1486	if (!pcr->slots)
1487		return -ENOMEM;
1488
1489	if (pcr->aspm_mode == ASPM_MODE_CFG) {
1490		pcie_capability_read_word(pcr->pci, PCI_EXP_LNKCTL, &cfg_val);
1491		if (cfg_val & PCI_EXP_LNKCTL_ASPM_L1)
1492			pcr->aspm_enabled = true;
1493		else
1494			pcr->aspm_enabled = false;
1495
1496	} else if (pcr->aspm_mode == ASPM_MODE_REG) {
1497		rtsx_pci_read_register(pcr, ASPM_FORCE_CTL, &val);
1498		if (val & FORCE_ASPM_CTL0 && val & FORCE_ASPM_CTL1)
1499			pcr->aspm_enabled = false;
1500		else
1501			pcr->aspm_enabled = true;
1502	}
1503
1504	if (pcr->ops->fetch_vendor_settings)
1505		pcr->ops->fetch_vendor_settings(pcr);
1506
1507	pcr_dbg(pcr, "pcr->aspm_en = 0x%x\n", pcr->aspm_en);
1508	pcr_dbg(pcr, "pcr->sd30_drive_sel_1v8 = 0x%x\n",
1509			pcr->sd30_drive_sel_1v8);
1510	pcr_dbg(pcr, "pcr->sd30_drive_sel_3v3 = 0x%x\n",
1511			pcr->sd30_drive_sel_3v3);
1512	pcr_dbg(pcr, "pcr->card_drive_sel = 0x%x\n",
1513			pcr->card_drive_sel);
1514	pcr_dbg(pcr, "pcr->flags = 0x%x\n", pcr->flags);
1515
1516	pcr->state = PDEV_STAT_IDLE;
1517	err = rtsx_pci_init_hw(pcr);
1518	if (err < 0) {
1519		kfree(pcr->slots);
1520		return err;
1521	}
1522
1523	return 0;
1524}
1525
1526static int rtsx_pci_probe(struct pci_dev *pcidev,
1527			  const struct pci_device_id *id)
1528{
1529	struct rtsx_pcr *pcr;
1530	struct pcr_handle *handle;
1531	u32 base, len;
1532	int ret, i, bar = 0;
1533
1534	dev_dbg(&(pcidev->dev),
1535		": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
1536		pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
1537		(int)pcidev->revision);
1538
1539	ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
1540	if (ret < 0)
1541		return ret;
1542
1543	ret = pci_enable_device(pcidev);
1544	if (ret)
1545		return ret;
1546
1547	ret = pci_request_regions(pcidev, DRV_NAME_RTSX_PCI);
1548	if (ret)
1549		goto disable;
1550
1551	pcr = kzalloc(sizeof(*pcr), GFP_KERNEL);
1552	if (!pcr) {
1553		ret = -ENOMEM;
1554		goto release_pci;
1555	}
1556
1557	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
1558	if (!handle) {
1559		ret = -ENOMEM;
1560		goto free_pcr;
1561	}
1562	handle->pcr = pcr;
1563
1564	idr_preload(GFP_KERNEL);
1565	spin_lock(&rtsx_pci_lock);
1566	ret = idr_alloc(&rtsx_pci_idr, pcr, 0, 0, GFP_NOWAIT);
1567	if (ret >= 0)
1568		pcr->id = ret;
1569	spin_unlock(&rtsx_pci_lock);
1570	idr_preload_end();
1571	if (ret < 0)
1572		goto free_handle;
1573
1574	pcr->pci = pcidev;
1575	dev_set_drvdata(&pcidev->dev, handle);
1576
1577	if (CHK_PCI_PID(pcr, 0x525A))
1578		bar = 1;
1579	len = pci_resource_len(pcidev, bar);
1580	base = pci_resource_start(pcidev, bar);
1581	pcr->remap_addr = ioremap(base, len);
1582	if (!pcr->remap_addr) {
1583		ret = -ENOMEM;
1584		goto free_handle;
1585	}
1586
1587	pcr->rtsx_resv_buf = dma_alloc_coherent(&(pcidev->dev),
1588			RTSX_RESV_BUF_LEN, &(pcr->rtsx_resv_buf_addr),
1589			GFP_KERNEL);
1590	if (pcr->rtsx_resv_buf == NULL) {
1591		ret = -ENXIO;
1592		goto unmap;
1593	}
1594	pcr->host_cmds_ptr = pcr->rtsx_resv_buf;
1595	pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
1596	pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
1597	pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;
1598	pcr->card_inserted = 0;
1599	pcr->card_removed = 0;
1600	INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
1601	INIT_DELAYED_WORK(&pcr->idle_work, rtsx_pci_idle_work);
1602
1603	pcr->msi_en = msi_en;
1604	if (pcr->msi_en) {
1605		ret = pci_enable_msi(pcidev);
1606		if (ret)
1607			pcr->msi_en = false;
1608	}
1609
1610	ret = rtsx_pci_acquire_irq(pcr);
1611	if (ret < 0)
1612		goto disable_msi;
1613
1614	pci_set_master(pcidev);
1615	synchronize_irq(pcr->irq);
1616
1617	ret = rtsx_pci_init_chip(pcr);
1618	if (ret < 0)
1619		goto disable_irq;
1620
1621	for (i = 0; i < ARRAY_SIZE(rtsx_pcr_cells); i++) {
1622		rtsx_pcr_cells[i].platform_data = handle;
1623		rtsx_pcr_cells[i].pdata_size = sizeof(*handle);
1624	}
1625
1626	if (pcr->rtd3_en) {
1627		INIT_DELAYED_WORK(&pcr->rtd3_work, rtsx_pci_rtd3_work);
1628		pm_runtime_allow(&pcidev->dev);
1629		pm_runtime_enable(&pcidev->dev);
1630		pcr->is_runtime_suspended = false;
1631	}
1632
1633
1634	ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells,
1635			ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL);
1636	if (ret < 0)
1637		goto free_slots;
1638
 1639	schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));
1640
1641	return 0;
1642
1643free_slots:
1644	kfree(pcr->slots);
1645disable_irq:
1646	free_irq(pcr->irq, (void *)pcr);
1647disable_msi:
1648	if (pcr->msi_en)
1649		pci_disable_msi(pcr->pci);
1650	dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
1651			pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
1652unmap:
 1653	iounmap(pcr->remap_addr);
1654free_handle:
1655	kfree(handle);
1656free_pcr:
1657	kfree(pcr);
1658release_pci:
1659	pci_release_regions(pcidev);
1660disable:
1661	pci_disable_device(pcidev);
1662
1663	return ret;
1664}
1665
1666static void rtsx_pci_remove(struct pci_dev *pcidev)
1667{
1668	struct pcr_handle *handle = pci_get_drvdata(pcidev);
1669	struct rtsx_pcr *pcr = handle->pcr;
1670
1671	if (pcr->rtd3_en)
1672		pm_runtime_get_noresume(&pcr->pci->dev);
1673
 1674	pcr->remove_pci = true;
1675
1676	/* Disable interrupts at the pcr level */
1677	spin_lock_irq(&pcr->lock);
1678	rtsx_pci_writel(pcr, RTSX_BIER, 0);
1679	pcr->bier = 0;
1680	spin_unlock_irq(&pcr->lock);
1681
1682	cancel_delayed_work_sync(&pcr->carddet_work);
1683	cancel_delayed_work_sync(&pcr->idle_work);
1684	if (pcr->rtd3_en)
1685		cancel_delayed_work_sync(&pcr->rtd3_work);
1686
1687	mfd_remove_devices(&pcidev->dev);
1688
1689	dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
1690			pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
1691	free_irq(pcr->irq, (void *)pcr);
1692	if (pcr->msi_en)
1693		pci_disable_msi(pcr->pci);
1694	iounmap(pcr->remap_addr);
1695
1696	pci_release_regions(pcidev);
1697	pci_disable_device(pcidev);
1698
1699	spin_lock(&rtsx_pci_lock);
1700	idr_remove(&rtsx_pci_idr, pcr->id);
1701	spin_unlock(&rtsx_pci_lock);
1702
1703	if (pcr->rtd3_en) {
1704		pm_runtime_disable(&pcr->pci->dev);
1705		pm_runtime_put_noidle(&pcr->pci->dev);
1706	}
1707
1708	kfree(pcr->slots);
1709	kfree(pcr);
1710	kfree(handle);
1711
1712	dev_dbg(&(pcidev->dev),
1713		": Realtek PCI-E Card Reader at %s [%04x:%04x] has been removed\n",
1714		pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device);
1715}
1716
1717static int __maybe_unused rtsx_pci_suspend(struct device *dev_d)
1718{
1719	struct pci_dev *pcidev = to_pci_dev(dev_d);
1720	struct pcr_handle *handle;
1721	struct rtsx_pcr *pcr;
1722
1723	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1724
1725	handle = pci_get_drvdata(pcidev);
1726	pcr = handle->pcr;
1727
1728	cancel_delayed_work(&pcr->carddet_work);
1729	cancel_delayed_work(&pcr->idle_work);
1730
1731	mutex_lock(&pcr->pcr_mutex);
1732
1733	rtsx_pci_power_off(pcr, HOST_ENTER_S3);
1734
1735	device_wakeup_disable(dev_d);
1736
1737	mutex_unlock(&pcr->pcr_mutex);
1738	return 0;
1739}
1740
1741static int __maybe_unused rtsx_pci_resume(struct device *dev_d)
1742{
1743	struct pci_dev *pcidev = to_pci_dev(dev_d);
1744	struct pcr_handle *handle;
1745	struct rtsx_pcr *pcr;
1746	int ret = 0;
1747
1748	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1749
1750	handle = pci_get_drvdata(pcidev);
1751	pcr = handle->pcr;
1752
1753	mutex_lock(&pcr->pcr_mutex);
1754
1755	ret = rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
1756	if (ret)
1757		goto out;
1758
1759	ret = rtsx_pci_init_hw(pcr);
1760	if (ret)
1761		goto out;
1762
1763	schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));
1764
1765out:
1766	mutex_unlock(&pcr->pcr_mutex);
1767	return ret;
1768}
1769
1770#ifdef CONFIG_PM
 1771
1772static void rtsx_pci_shutdown(struct pci_dev *pcidev)
1773{
1774	struct pcr_handle *handle;
1775	struct rtsx_pcr *pcr;
1776
1777	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1778
1779	handle = pci_get_drvdata(pcidev);
1780	pcr = handle->pcr;
1781	rtsx_pci_power_off(pcr, HOST_ENTER_S1);
1782
1783	pci_disable_device(pcidev);
1784	free_irq(pcr->irq, (void *)pcr);
1785	if (pcr->msi_en)
1786		pci_disable_msi(pcr->pci);
1787}
 1788
1789static int rtsx_pci_runtime_suspend(struct device *device)
1790{
1791	struct pci_dev *pcidev = to_pci_dev(device);
1792	struct pcr_handle *handle;
1793	struct rtsx_pcr *pcr;
1794
1795	handle = pci_get_drvdata(pcidev);
1796	pcr = handle->pcr;
1797	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1798
1799	cancel_delayed_work(&pcr->carddet_work);
1800	cancel_delayed_work(&pcr->rtd3_work);
1801	cancel_delayed_work(&pcr->idle_work);
1802
1803	mutex_lock(&pcr->pcr_mutex);
1804	rtsx_pci_power_off(pcr, HOST_ENTER_S3);
1805
1806	free_irq(pcr->irq, (void *)pcr);
1807
1808	mutex_unlock(&pcr->pcr_mutex);
1809
1810	pcr->is_runtime_suspended = true;
1811
1812	return 0;
1813}
1814
1815static int rtsx_pci_runtime_resume(struct device *device)
1816{
1817	struct pci_dev *pcidev = to_pci_dev(device);
1818	struct pcr_handle *handle;
1819	struct rtsx_pcr *pcr;
1820
1821	handle = pci_get_drvdata(pcidev);
1822	pcr = handle->pcr;
1823	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1824
1825	mutex_lock(&pcr->pcr_mutex);
1826
1827	rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
1828	rtsx_pci_acquire_irq(pcr);
1829	synchronize_irq(pcr->irq);
1830
1831	if (pcr->ops->fetch_vendor_settings)
1832		pcr->ops->fetch_vendor_settings(pcr);
1833
1834	rtsx_pci_init_hw(pcr);
1835
1836	if (pcr->slots[RTSX_SD_CARD].p_dev != NULL) {
1837		pcr->slots[RTSX_SD_CARD].card_event(
1838				pcr->slots[RTSX_SD_CARD].p_dev);
1839	}
1840
1841	schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));
1842
1843	mutex_unlock(&pcr->pcr_mutex);
1844	return 0;
1845}
1846
1847#else /* CONFIG_PM */
1848
1849#define rtsx_pci_shutdown NULL
1850#define rtsx_pci_runtime_suspend NULL
 1851	#define rtsx_pci_runtime_resume NULL
1852
1853#endif /* CONFIG_PM */
1854
1855static const struct dev_pm_ops rtsx_pci_pm_ops = {
1856	SET_SYSTEM_SLEEP_PM_OPS(rtsx_pci_suspend, rtsx_pci_resume)
1857	SET_RUNTIME_PM_OPS(rtsx_pci_runtime_suspend, rtsx_pci_runtime_resume, NULL)
1858};
1859
1860static struct pci_driver rtsx_pci_driver = {
1861	.name = DRV_NAME_RTSX_PCI,
1862	.id_table = rtsx_pci_ids,
1863	.probe = rtsx_pci_probe,
1864	.remove = rtsx_pci_remove,
1865	.driver.pm = &rtsx_pci_pm_ops,
1866	.shutdown = rtsx_pci_shutdown,
1867};
1868module_pci_driver(rtsx_pci_driver);
1869
1870MODULE_LICENSE("GPL");
1871MODULE_AUTHOR("Wei WANG <wei_wang@realsil.com.cn>");
1872MODULE_DESCRIPTION("Realtek PCI-E Card Reader Driver");