   1/*
   2 * Block driver for media (i.e., flash cards)
   3 *
   4 * Copyright 2002 Hewlett-Packard Company
   5 * Copyright 2005-2008 Pierre Ossman
   6 *
   7 * Use consistent with the GNU GPL is permitted,
   8 * provided that this copyright notice is
   9 * preserved in its entirety in all copies and derived works.
  10 *
  11 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
  12 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
  13 * FITNESS FOR ANY PARTICULAR PURPOSE.
  14 *
  15 * Many thanks to Alessandro Rubini and Jonathan Corbet!
  16 *
  17 * Author:  Andrew Christian
  18 *          28 May 2002
  19 */
  20#include <linux/moduleparam.h>
  21#include <linux/module.h>
  22#include <linux/init.h>
  23
  24#include <linux/kernel.h>
  25#include <linux/fs.h>
  26#include <linux/slab.h>
  27#include <linux/errno.h>
  28#include <linux/hdreg.h>
  29#include <linux/kdev_t.h>
  30#include <linux/blkdev.h>
  31#include <linux/mutex.h>
  32#include <linux/scatterlist.h>
  33#include <linux/string_helpers.h>
  34#include <linux/delay.h>
  35#include <linux/capability.h>
  36#include <linux/compat.h>
  37#include <linux/pm_runtime.h>
  38
  39#include <linux/mmc/ioctl.h>
  40#include <linux/mmc/card.h>
  41#include <linux/mmc/host.h>
  42#include <linux/mmc/mmc.h>
  43#include <linux/mmc/sd.h>
  44
  45#include <asm/uaccess.h>
  46
  47#include "queue.h"
  48
  49MODULE_ALIAS("mmc:block");
  50#ifdef MODULE_PARAM_PREFIX
  51#undef MODULE_PARAM_PREFIX
  52#endif
  53#define MODULE_PARAM_PREFIX "mmcblk."
  54
  55#define INAND_CMD38_ARG_EXT_CSD  113
  56#define INAND_CMD38_ARG_ERASE    0x00
  57#define INAND_CMD38_ARG_TRIM     0x01
  58#define INAND_CMD38_ARG_SECERASE 0x80
  59#define INAND_CMD38_ARG_SECTRIM1 0x81
  60#define INAND_CMD38_ARG_SECTRIM2 0x88
  61#define MMC_BLK_TIMEOUT_MS  (10 * 60 * 1000)        /* 10 minute timeout */
  62#define MMC_SANITIZE_REQ_TIMEOUT 240000
  63#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
  64
  65#define mmc_req_rel_wr(req)	((req->cmd_flags & REQ_FUA) && \
  66				  (rq_data_dir(req) == WRITE))
  67#define PACKED_CMD_VER	0x01
  68#define PACKED_CMD_WR	0x02
  69
  70static DEFINE_MUTEX(block_mutex);
  71
  72/*
  73 * The defaults come from config options but can be overridden by module
  74 * or bootarg options.
  75 */
  76static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;
  77
  78/*
  79 * We've only got one major, so number of mmcblk devices is
  80 * limited to (1 << 20) / number of minors per device.  It is also
  81 * currently limited by the size of the static bitmaps below.
  82 */
  83static int max_devices;
  84
  85#define MAX_DEVICES 256
  86
  87/* TODO: Replace these with struct ida */
  88static DECLARE_BITMAP(dev_use, MAX_DEVICES);
  89
  90/*
  91 * There is one mmc_blk_data per slot.
  92 */
  93struct mmc_blk_data {
  94	spinlock_t	lock;
  95	struct gendisk	*disk;
  96	struct mmc_queue queue;
  97	struct list_head part;
  98
  99	unsigned int	flags;
 100#define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
 101#define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */
 102#define MMC_BLK_PACKED_CMD	(1 << 2)	/* MMC packed command support */
 103
 104	unsigned int	usage;
 105	unsigned int	read_only;
 106	unsigned int	part_type;
 107	unsigned int	reset_done;
 108#define MMC_BLK_READ		BIT(0)
 109#define MMC_BLK_WRITE		BIT(1)
 110#define MMC_BLK_DISCARD		BIT(2)
 111#define MMC_BLK_SECDISCARD	BIT(3)
 112
 113	/*
 114	 * Only set in the main mmc_blk_data associated
 115	 * with the mmc_card via dev_set_drvdata; it keeps
 116	 * track of the currently selected device partition.
 117	 */
 118	unsigned int	part_curr;
 119	struct device_attribute force_ro;
 120	struct device_attribute power_ro_lock;
 121	int	area_type;
 122};
 123
 124static DEFINE_MUTEX(open_lock);
 125
 126enum {
 127	MMC_PACKED_NR_IDX = -1,
 128	MMC_PACKED_NR_ZERO,
 129	MMC_PACKED_NR_SINGLE,
 130};
 131
 132module_param(perdev_minors, int, 0444);
 133MODULE_PARM_DESC(perdev_minors, "Number of minors to allocate per device");
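    /*
     * With MODULE_PARAM_PREFIX "mmcblk." and the 0444 permissions above,
     * perdev_minors can only be set at boot or module load time, for
     * example with "mmcblk.perdev_minors=16" on the kernel command line.
     */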
 134
 135static inline int mmc_blk_part_switch(struct mmc_card *card,
 136				      struct mmc_blk_data *md);
 137static int get_card_status(struct mmc_card *card, u32 *status, int retries);
 138
 139static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
 140{
 141	struct mmc_packed *packed = mqrq->packed;
 142
 143	BUG_ON(!packed);
 144
 145	mqrq->cmd_type = MMC_PACKED_NONE;
 146	packed->nr_entries = MMC_PACKED_NR_ZERO;
 147	packed->idx_failure = MMC_PACKED_NR_IDX;
 148	packed->retries = 0;
 149	packed->blocks = 0;
 150}
 151
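    /*
     * Grab a reference to the mmc_blk_data behind a gendisk.  Returns
     * NULL if the device is already going away (usage count is zero);
     * mmc_blk_put() drops the reference again.
     */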
 152static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
 153{
 154	struct mmc_blk_data *md;
 155
 156	mutex_lock(&open_lock);
 157	md = disk->private_data;
 158	if (md && md->usage == 0)
 159		md = NULL;
 160	if (md)
 161		md->usage++;
 162	mutex_unlock(&open_lock);
 163
 164	return md;
 165}
 166
 167static inline int mmc_get_devidx(struct gendisk *disk)
 168{
 169	int devidx = disk->first_minor / perdev_minors;
 170	return devidx;
 171}
 172
 173static void mmc_blk_put(struct mmc_blk_data *md)
 174{
 175	mutex_lock(&open_lock);
 176	md->usage--;
 177	if (md->usage == 0) {
 178		int devidx = mmc_get_devidx(md->disk);
 179		blk_cleanup_queue(md->queue.queue);
 180
 181		__clear_bit(devidx, dev_use);
 182
 183		put_disk(md->disk);
 184		kfree(md);
 185	}
 186	mutex_unlock(&open_lock);
 187}
 188
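    /*
     * power_ro_lock device attribute: the show callback reports 0
     * (unlocked), 1 (write-protected until the next power on) or 2
     * (permanently write-protected).  Writing 1 via the store callback
     * sets EXT_CSD_BOOT_WP_B_PWR_WP_EN and marks the boot partition
     * disks read-only.
     */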
 189static ssize_t power_ro_lock_show(struct device *dev,
 190		struct device_attribute *attr, char *buf)
 191{
 192	int ret;
 193	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
 194	struct mmc_card *card = md->queue.card;
 195	int locked = 0;
 196
 197	if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
 198		locked = 2;
 199	else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
 200		locked = 1;
 201
 202	ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
 203
 204	mmc_blk_put(md);
 205
 206	return ret;
 207}
 208
 209static ssize_t power_ro_lock_store(struct device *dev,
 210		struct device_attribute *attr, const char *buf, size_t count)
 211{
 212	int ret;
 213	struct mmc_blk_data *md, *part_md;
 214	struct mmc_card *card;
 215	unsigned long set;
 216
 217	if (kstrtoul(buf, 0, &set))
 218		return -EINVAL;
 219
 220	if (set != 1)
 221		return count;
 222
 223	md = mmc_blk_get(dev_to_disk(dev));
 224	card = md->queue.card;
 225
 226	mmc_get_card(card);
 227
 228	ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
 229				card->ext_csd.boot_ro_lock |
 230				EXT_CSD_BOOT_WP_B_PWR_WP_EN,
 231				card->ext_csd.part_time);
 232	if (ret)
 233		pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret);
 234	else
 235		card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;
 236
 237	mmc_put_card(card);
 238
 239	if (!ret) {
 240		pr_info("%s: Locking boot partition ro until next power on\n",
 241			md->disk->disk_name);
 242		set_disk_ro(md->disk, 1);
 243
 244		list_for_each_entry(part_md, &md->part, part)
 245			if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
 246				pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
 247				set_disk_ro(part_md->disk, 1);
 248			}
 249	}
 250
 251	mmc_blk_put(md);
 252	return count;
 253}
 254
 255static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
 256			     char *buf)
 257{
 258	int ret;
 259	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
 260
 261	ret = snprintf(buf, PAGE_SIZE, "%d\n",
 262		       get_disk_ro(dev_to_disk(dev)) ^
 263		       md->read_only);
 264	mmc_blk_put(md);
 265	return ret;
 266}
 267
 268static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
 269			      const char *buf, size_t count)
 270{
 271	int ret;
 272	char *end;
 273	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
 274	unsigned long set = simple_strtoul(buf, &end, 0);
 275	if (end == buf) {
 276		ret = -EINVAL;
 277		goto out;
 278	}
 279
 280	set_disk_ro(dev_to_disk(dev), set || md->read_only);
 281	ret = count;
 282out:
 283	mmc_blk_put(md);
 284	return ret;
 285}
 286
 287static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
 288{
 289	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
 290	int ret = -ENXIO;
 291
 292	mutex_lock(&block_mutex);
 293	if (md) {
 294		if (md->usage == 2)
 295			check_disk_change(bdev);
 296		ret = 0;
 297
 298		if ((mode & FMODE_WRITE) && md->read_only) {
 299			mmc_blk_put(md);
 300			ret = -EROFS;
 301		}
 302	}
 303	mutex_unlock(&block_mutex);
 304
 305	return ret;
 306}
 307
 308static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
 309{
 310	struct mmc_blk_data *md = disk->private_data;
 311
 312	mutex_lock(&block_mutex);
 313	mmc_blk_put(md);
 314	mutex_unlock(&block_mutex);
 315}
 316
 317static int
 318mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 319{
 320	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
 321	geo->heads = 4;
 322	geo->sectors = 16;
 323	return 0;
 324}
 325
 326struct mmc_blk_ioc_data {
 327	struct mmc_ioc_cmd ic;
 328	unsigned char *buf;
 329	u64 buf_bytes;
 330};
 331
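    /*
     * Copy an mmc_ioc_cmd and, if one is attached, its data buffer from
     * user space into a freshly allocated mmc_blk_ioc_data.  Transfers
     * larger than MMC_IOC_MAX_BYTES are rejected; an ERR_PTR is returned
     * on failure.
     */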
 332static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
 333	struct mmc_ioc_cmd __user *user)
 334{
 335	struct mmc_blk_ioc_data *idata;
 336	int err;
 337
 338	idata = kmalloc(sizeof(*idata), GFP_KERNEL);
 339	if (!idata) {
 340		err = -ENOMEM;
 341		goto out;
 342	}
 343
 344	if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
 345		err = -EFAULT;
 346		goto idata_err;
 347	}
 348
 349	idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
 350	if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
 351		err = -EOVERFLOW;
 352		goto idata_err;
 353	}
 354
 355	if (!idata->buf_bytes)
 356		return idata;
 357
 358	idata->buf = kmalloc(idata->buf_bytes, GFP_KERNEL);
 359	if (!idata->buf) {
 360		err = -ENOMEM;
 361		goto idata_err;
 362	}
 363
 364	if (copy_from_user(idata->buf, (void __user *)(unsigned long)
 365					idata->ic.data_ptr, idata->buf_bytes)) {
 366		err = -EFAULT;
 367		goto copy_err;
 368	}
 369
 370	return idata;
 371
 372copy_err:
 373	kfree(idata->buf);
 374idata_err:
 375	kfree(idata);
 376out:
 377	return ERR_PTR(err);
 378}
 379
 380static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
 381				      struct mmc_blk_ioc_data *idata)
 382{
 383	struct mmc_ioc_cmd *ic = &idata->ic;
 384
 385	if (copy_to_user(&(ic_ptr->response), ic->response,
 386			 sizeof(ic->response)))
 387		return -EFAULT;
 388
 389	if (!idata->ic.write_flag) {
 390		if (copy_to_user((void __user *)(unsigned long)ic->data_ptr,
 391				 idata->buf, idata->buf_bytes))
 392			return -EFAULT;
 393	}
 394
 395	return 0;
 396}
 397
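    /*
     * Poll CMD13 (SEND_STATUS) until the card reports no error bits and
     * has left the programming state, or until retries_max attempts have
     * been made.  Used to make sure an RPMB access has really completed.
     */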
 398static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
 399				       u32 retries_max)
 400{
 401	int err;
 402	u32 retry_count = 0;
 403
 404	if (!status || !retries_max)
 405		return -EINVAL;
 406
 407	do {
 408		err = get_card_status(card, status, 5);
 409		if (err)
 410			break;
 411
 412		if (!R1_STATUS(*status) &&
 413				(R1_CURRENT_STATE(*status) != R1_STATE_PRG))
 414			break; /* RPMB programming operation complete */
 415
 416		/*
 417		 * Reschedule to give the MMC device a chance to continue
 418		 * processing the previous command without being polled too
 419		 * frequently.
 420		 */
 421		usleep_range(1000, 5000);
 422	} while (++retry_count < retries_max);
 423
 424	if (retry_count == retries_max)
 425		err = -EPERM;
 426
 427	return err;
 428}
 429
 430static int ioctl_do_sanitize(struct mmc_card *card)
 431{
 432	int err;
 433
 434	if (!mmc_can_sanitize(card)) {
 435			pr_warn("%s: %s - SANITIZE is not supported\n",
 436				mmc_hostname(card->host), __func__);
 437			err = -EOPNOTSUPP;
 438			goto out;
 439	}
 440
 441	pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
 442		mmc_hostname(card->host), __func__);
 443
 444	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 445					EXT_CSD_SANITIZE_START, 1,
 446					MMC_SANITIZE_REQ_TIMEOUT);
 447
 448	if (err)
 449		pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n",
 450		       mmc_hostname(card->host), __func__, err);
 451
 452	pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host),
 453					     __func__);
 454out:
 455	return err;
 456}
 457
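    /*
     * Build and issue the single mmc_request described by the ioctl
     * data.  Callers are expected to have claimed the host with
     * mmc_get_card() before getting here.
     */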
 458static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
 459			       struct mmc_blk_ioc_data *idata)
 460{
 461	struct mmc_command cmd = {0};
 462	struct mmc_data data = {0};
 463	struct mmc_request mrq = {NULL};
 464	struct scatterlist sg;
 465	int err;
 466	int is_rpmb = false;
 467	u32 status = 0;
 468
 469	if (!card || !md || !idata)
 470		return -EINVAL;
 471
 472	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
 473		is_rpmb = true;
 474
 475	cmd.opcode = idata->ic.opcode;
 476	cmd.arg = idata->ic.arg;
 477	cmd.flags = idata->ic.flags;
 478
 479	if (idata->buf_bytes) {
 480		data.sg = &sg;
 481		data.sg_len = 1;
 482		data.blksz = idata->ic.blksz;
 483		data.blocks = idata->ic.blocks;
 484
 485		sg_init_one(data.sg, idata->buf, idata->buf_bytes);
 486
 487		if (idata->ic.write_flag)
 488			data.flags = MMC_DATA_WRITE;
 489		else
 490			data.flags = MMC_DATA_READ;
 491
 492		/* data.flags must already be set before doing this. */
 493		mmc_set_data_timeout(&data, card);
 494
 495		/* Allow overriding the timeout_ns for empirical tuning. */
 496		if (idata->ic.data_timeout_ns)
 497			data.timeout_ns = idata->ic.data_timeout_ns;
 498
 499		if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
 500			/*
 501			 * Pretend this is a data transfer and rely on the
 502			 * host driver to compute timeout.  When all host
 503			 * drivers support cmd.cmd_timeout for R1B, this
 504			 * can be changed to:
 505			 *
 506			 *     mrq.data = NULL;
 507			 *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
 508			 */
 509			data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
 510		}
 511
 512		mrq.data = &data;
 513	}
 514
 515	mrq.cmd = &cmd;
 516
 517	err = mmc_blk_part_switch(card, md);
 518	if (err)
 519		return err;
 520
 521	if (idata->ic.is_acmd) {
 522		err = mmc_app_cmd(card->host, card);
 523		if (err)
 524			return err;
 525	}
 526
 527	if (is_rpmb) {
 528		err = mmc_set_blockcount(card, data.blocks,
 529			idata->ic.write_flag & (1 << 31));
 530		if (err)
 531			return err;
 532	}
 533
 534	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
 535	    (cmd.opcode == MMC_SWITCH)) {
 536		err = ioctl_do_sanitize(card);
 537
 538		if (err)
 539			pr_err("%s: ioctl_do_sanitize() failed. err = %d",
 540			       __func__, err);
 541
 542		return err;
 543	}
 544
 545	mmc_wait_for_req(card->host, &mrq);
 546
 547	if (cmd.error) {
 548		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
 549						__func__, cmd.error);
 550		return cmd.error;
 551	}
 552	if (data.error) {
 553		dev_err(mmc_dev(card->host), "%s: data error %d\n",
 554						__func__, data.error);
 555		return data.error;
 556	}
 557
 558	/*
 559	 * According to the SD specs, some commands require a delay after
 560	 * issuing the command.
 561	 */
 562	if (idata->ic.postsleep_min_us)
 563		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
 564
 565	memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
 566
 567	if (is_rpmb) {
 568		/*
 569		 * Ensure RPMB command has completed by polling CMD13
 570		 * "Send Status".
 571		 */
 572		err = ioctl_rpmb_card_status_poll(card, &status, 5);
 573		if (err)
 574			dev_err(mmc_dev(card->host),
 575					"%s: Card Status=0x%08X, error %d\n",
 576					__func__, status, err);
 577	}
 578
 579	return err;
 580}
 581
 582static int mmc_blk_ioctl_cmd(struct block_device *bdev,
 583			     struct mmc_ioc_cmd __user *ic_ptr)
 584{
 585	struct mmc_blk_ioc_data *idata;
 586	struct mmc_blk_data *md;
 587	struct mmc_card *card;
 588	int err = 0, ioc_err = 0;
 589
 590	/*
 591	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
 592	 * whole block device, not on a partition.  This prevents overspray
 593	 * between sibling partitions.
 594	 */
 595	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
 596		return -EPERM;
 597
 598	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
 599	if (IS_ERR(idata))
 600		return PTR_ERR(idata);
 601
 602	md = mmc_blk_get(bdev->bd_disk);
 603	if (!md) {
 604		err = -EINVAL;
 605		goto cmd_err;
 606	}
 607
 608	card = md->queue.card;
 609	if (IS_ERR(card)) {
 610		err = PTR_ERR(card);
 611		goto cmd_done;
 612	}
 613
 614	mmc_get_card(card);
 615
 616	ioc_err = __mmc_blk_ioctl_cmd(card, md, idata);
 617
 618	mmc_put_card(card);
 619
 620	err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
 621
 622cmd_done:
 623	mmc_blk_put(md);
 624cmd_err:
 625	kfree(idata->buf);
 626	kfree(idata);
 627	return ioc_err ? ioc_err : err;
 628}
 629
 630static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
 631				   struct mmc_ioc_multi_cmd __user *user)
 632{
 633	struct mmc_blk_ioc_data **idata = NULL;
 634	struct mmc_ioc_cmd __user *cmds = user->cmds;
 635	struct mmc_card *card;
 636	struct mmc_blk_data *md;
 637	int i, err = 0, ioc_err = 0;
 638	__u64 num_of_cmds;
 639
 640	/*
 641	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
 642	 * whole block device, not on a partition.  This prevents overspray
 643	 * between sibling partitions.
 644	 */
 645	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
 646		return -EPERM;
 647
 648	if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
 649			   sizeof(num_of_cmds)))
 650		return -EFAULT;
 651
 652	if (num_of_cmds > MMC_IOC_MAX_CMDS)
 653		return -EINVAL;
 654
 655	idata = kcalloc(num_of_cmds, sizeof(*idata), GFP_KERNEL);
 656	if (!idata)
 657		return -ENOMEM;
 658
 659	for (i = 0; i < num_of_cmds; i++) {
 660		idata[i] = mmc_blk_ioctl_copy_from_user(&cmds[i]);
 661		if (IS_ERR(idata[i])) {
 662			err = PTR_ERR(idata[i]);
 663			num_of_cmds = i;
 664			goto cmd_err;
 665		}
 666	}
 667
 668	md = mmc_blk_get(bdev->bd_disk);
 669	if (!md) {
 670		err = -EINVAL;
 671		goto cmd_err;
 672	}
 673
 674	card = md->queue.card;
 675	if (IS_ERR(card)) {
 676		err = PTR_ERR(card);
 677		goto cmd_done;
 678	}
 679
 680	mmc_get_card(card);
 681
 682	for (i = 0; i < num_of_cmds && !ioc_err; i++)
 683		ioc_err = __mmc_blk_ioctl_cmd(card, md, idata[i]);
 684
 685	mmc_put_card(card);
 686
 687	/* copy to user if data and response */
 688	for (i = 0; i < num_of_cmds && !err; i++)
 689		err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);
 690
 691cmd_done:
 692	mmc_blk_put(md);
 693cmd_err:
 694	for (i = 0; i < num_of_cmds; i++) {
 695		kfree(idata[i]->buf);
 696		kfree(idata[i]);
 697	}
 698	kfree(idata);
 699	return ioc_err ? ioc_err : err;
 700}
 701
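    /*
     * A minimal user-space sketch (assuming the struct mmc_ioc_cmd
     * layout from <linux/mmc/ioctl.h>; the response/command type flag
     * macros mirror the kernel definitions and must be supplied by the
     * caller):
     *
     *     struct mmc_ioc_cmd ic = {
     *             .opcode     = 13,            // SEND_STATUS
     *             .arg        = rca << 16,     // rca is hypothetical here
     *             .flags      = MMC_RSP_R1 | MMC_CMD_AC,
     *             .write_flag = 0,
     *     };
     *     if (ioctl(fd, MMC_IOC_CMD, &ic) == 0)
     *             status = ic.response[0];
     *
     * fd must refer to the whole /dev/mmcblkN device and the caller
     * needs CAP_SYS_RAWIO, as enforced in mmc_blk_ioctl_cmd() above.
     */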
 702static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
 703	unsigned int cmd, unsigned long arg)
 704{
 705	switch (cmd) {
 706	case MMC_IOC_CMD:
 707		return mmc_blk_ioctl_cmd(bdev,
 708				(struct mmc_ioc_cmd __user *)arg);
 709	case MMC_IOC_MULTI_CMD:
 710		return mmc_blk_ioctl_multi_cmd(bdev,
 711				(struct mmc_ioc_multi_cmd __user *)arg);
 712	default:
 713		return -EINVAL;
 714	}
 715}
 716
 717#ifdef CONFIG_COMPAT
 718static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
 719	unsigned int cmd, unsigned long arg)
 720{
 721	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
 722}
 723#endif
 724
 725static const struct block_device_operations mmc_bdops = {
 726	.open			= mmc_blk_open,
 727	.release		= mmc_blk_release,
 728	.getgeo			= mmc_blk_getgeo,
 729	.owner			= THIS_MODULE,
 730	.ioctl			= mmc_blk_ioctl,
 731#ifdef CONFIG_COMPAT
 732	.compat_ioctl		= mmc_blk_compat_ioctl,
 733#endif
 734};
 735
 736static inline int mmc_blk_part_switch(struct mmc_card *card,
 737				      struct mmc_blk_data *md)
 738{
 739	int ret;
 740	struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
 741
 742	if (main_md->part_curr == md->part_type)
 743		return 0;
 744
 745	if (mmc_card_mmc(card)) {
 746		u8 part_config = card->ext_csd.part_config;
 747
 748		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
 749		part_config |= md->part_type;
 750
 751		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 752				 EXT_CSD_PART_CONFIG, part_config,
 753				 card->ext_csd.part_time);
 754		if (ret)
 755			return ret;
 756
 757		card->ext_csd.part_config = part_config;
 758	}
 759
 760	main_md->part_curr = md->part_type;
 761	return 0;
 762}
 763
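    /*
     * Ask an SD card how many blocks of the last write completed
     * successfully by issuing ACMD22 (SEND_NUM_WR_BLKS).  Returns the
     * block count, or (u32)-1 if the query itself fails.
     */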
 764static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
 765{
 766	int err;
 767	u32 result;
 768	__be32 *blocks;
 769
 770	struct mmc_request mrq = {NULL};
 771	struct mmc_command cmd = {0};
 772	struct mmc_data data = {0};
 773
 774	struct scatterlist sg;
 775
 776	cmd.opcode = MMC_APP_CMD;
 777	cmd.arg = card->rca << 16;
 778	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
 779
 780	err = mmc_wait_for_cmd(card->host, &cmd, 0);
 781	if (err)
 782		return (u32)-1;
 783	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
 784		return (u32)-1;
 785
 786	memset(&cmd, 0, sizeof(struct mmc_command));
 787
 788	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
 789	cmd.arg = 0;
 790	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
 791
 792	data.blksz = 4;
 793	data.blocks = 1;
 794	data.flags = MMC_DATA_READ;
 795	data.sg = &sg;
 796	data.sg_len = 1;
 797	mmc_set_data_timeout(&data, card);
 798
 799	mrq.cmd = &cmd;
 800	mrq.data = &data;
 801
 802	blocks = kmalloc(4, GFP_KERNEL);
 803	if (!blocks)
 804		return (u32)-1;
 805
 806	sg_init_one(&sg, blocks, 4);
 807
 808	mmc_wait_for_req(card->host, &mrq);
 809
 810	result = ntohl(*blocks);
 811	kfree(blocks);
 812
 813	if (cmd.error || data.error)
 814		result = (u32)-1;
 815
 816	return result;
 817}
 818
 819static int get_card_status(struct mmc_card *card, u32 *status, int retries)
 820{
 821	struct mmc_command cmd = {0};
 822	int err;
 823
 824	cmd.opcode = MMC_SEND_STATUS;
 825	if (!mmc_host_is_spi(card->host))
 826		cmd.arg = card->rca << 16;
 827	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
 828	err = mmc_wait_for_cmd(card->host, &cmd, retries);
 829	if (err == 0)
 830		*status = cmd.resp[0];
 831	return err;
 832}
 833
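    /*
     * Poll the card with CMD13 until it reports R1_READY_FOR_DATA and
     * has left the programming state, or until timeout_ms expires.  When
     * the host advertises MMC_CAP_WAIT_WHILE_BUSY and hw_busy_detect is
     * set, the hardware is trusted to have done the waiting already.
     */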
 834static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
 835		bool hw_busy_detect, struct request *req, int *gen_err)
 836{
 837	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
 838	int err = 0;
 839	u32 status;
 840
 841	do {
 842		err = get_card_status(card, &status, 5);
 843		if (err) {
 844			pr_err("%s: error %d requesting status\n",
 845			       req->rq_disk->disk_name, err);
 846			return err;
 847		}
 848
 849		if (status & R1_ERROR) {
 850			pr_err("%s: %s: error sending status cmd, status %#x\n",
 851				req->rq_disk->disk_name, __func__, status);
 852			*gen_err = 1;
 853		}
 854
 855		/* We may rely on the host hw to handle busy detection. */
 856		if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) &&
 857			hw_busy_detect)
 858			break;
 859
 860		/*
 861		 * Timeout if the device never becomes ready for data and never
 862		 * leaves the program state.
 863		 */
 864		if (time_after(jiffies, timeout)) {
 865			pr_err("%s: Card stuck in programming state! %s %s\n",
 866				mmc_hostname(card->host),
 867				req->rq_disk->disk_name, __func__);
 868			return -ETIMEDOUT;
 869		}
 870
 871		/*
 872		 * Some cards mishandle the status bits,
 873		 * so make sure to check both the busy
 874		 * indication and the card state.
 875		 */
 876	} while (!(status & R1_READY_FOR_DATA) ||
 877		 (R1_CURRENT_STATE(status) == R1_STATE_PRG));
 878
 879	return err;
 880}
 881
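    /*
     * Send CMD12 (STOP_TRANSMISSION), using an R1B response and hardware
     * busy detection where the host's max_busy_timeout allows it, then
     * wait for the card to become ready again after a write.
     */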
 882static int send_stop(struct mmc_card *card, unsigned int timeout_ms,
 883		struct request *req, int *gen_err, u32 *stop_status)
 884{
 885	struct mmc_host *host = card->host;
 886	struct mmc_command cmd = {0};
 887	int err;
 888	bool use_r1b_resp = rq_data_dir(req) == WRITE;
 889
 890	/*
 891	 * Normally we use R1B responses for WRITE, but in cases where the host
 892	 * has specified a max_busy_timeout we need to validate it. A failure
 893	 * means we need to prevent the host from doing hw busy detection, which
 894	 * is done by converting to an R1 response instead.
 895	 */
 896	if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
 897		use_r1b_resp = false;
 898
 899	cmd.opcode = MMC_STOP_TRANSMISSION;
 900	if (use_r1b_resp) {
 901		cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
 902		cmd.busy_timeout = timeout_ms;
 903	} else {
 904		cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
 905	}
 906
 907	err = mmc_wait_for_cmd(host, &cmd, 5);
 908	if (err)
 909		return err;
 910
 911	*stop_status = cmd.resp[0];
 912
 913	/* No need to check card status in case of READ. */
 914	if (rq_data_dir(req) == READ)
 915		return 0;
 916
 917	if (!mmc_host_is_spi(host) &&
 918		(*stop_status & R1_ERROR)) {
 919		pr_err("%s: %s: general error sending stop command, resp %#x\n",
 920			req->rq_disk->disk_name, __func__, *stop_status);
 921		*gen_err = 1;
 922	}
 923
 924	return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err);
 925}
 926
 927#define ERR_NOMEDIUM	3
 928#define ERR_RETRY	2
 929#define ERR_ABORT	1
 930#define ERR_CONTINUE	0
 931
 932static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
 933	bool status_valid, u32 status)
 934{
 935	switch (error) {
 936	case -EILSEQ:
 937		/* response crc error, retry the r/w cmd */
 938		pr_err("%s: %s sending %s command, card status %#x\n",
 939			req->rq_disk->disk_name, "response CRC error",
 940			name, status);
 941		return ERR_RETRY;
 942
 943	case -ETIMEDOUT:
 944		pr_err("%s: %s sending %s command, card status %#x\n",
 945			req->rq_disk->disk_name, "timed out", name, status);
 946
 947		/* If the status cmd initially failed, retry the r/w cmd */
 948		if (!status_valid)
 949			return ERR_RETRY;
 950
 951		/*
 952		 * If it was a r/w cmd crc error, or illegal command
 953		 * (eg, issued in wrong state) then retry - we should
 954		 * have corrected the state problem above.
 955		 */
 956		if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND))
 957			return ERR_RETRY;
 958
 959		/* Otherwise abort the command */
 960		return ERR_ABORT;
 961
 962	default:
 963		/* We don't understand the error code the driver gave us */
 964		pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
 965		       req->rq_disk->disk_name, error, status);
 966		return ERR_ABORT;
 967	}
 968}
 969
 970/*
 971 * Initial r/w and stop cmd error recovery.
 972 * We don't know whether the card received the r/w cmd or not, so try to
 973 * restore things back to a sane state.  Essentially, we do this as follows:
 974 * - Obtain card status.  If the first attempt to obtain card status fails,
 975 *   the status word will reflect the failed status cmd, not the failed
 976 *   r/w cmd.  If we fail to obtain card status, it suggests we can no
 977 *   longer communicate with the card.
 978 * - Check the card state.  If the card received the cmd but there was a
 979 *   transient problem with the response, it might still be in a data transfer
 980 *   mode.  Try to send it a stop command.  If this fails, we can't recover.
 981 * - If the r/w cmd failed due to a response CRC error, it was probably
 982 *   transient, so retry the cmd.
 983 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
 984 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
 985 *   illegal cmd, retry.
 986 * Otherwise we don't understand what happened, so abort.
 987 */
 988static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
 989	struct mmc_blk_request *brq, int *ecc_err, int *gen_err)
 990{
 991	bool prev_cmd_status_valid = true;
 992	u32 status, stop_status = 0;
 993	int err, retry;
 994
 995	if (mmc_card_removed(card))
 996		return ERR_NOMEDIUM;
 997
 998	/*
 999	 * Try to get card status which indicates both the card state
1000	 * and why there was no response.  If the first attempt fails,
1001	 * we can't be sure the returned status is for the r/w command.
1002	 */
1003	for (retry = 2; retry >= 0; retry--) {
1004		err = get_card_status(card, &status, 0);
1005		if (!err)
1006			break;
1007
1008		/* Re-tune if needed */
1009		mmc_retune_recheck(card->host);
1010
1011		prev_cmd_status_valid = false;
1012		pr_err("%s: error %d sending status command, %sing\n",
1013		       req->rq_disk->disk_name, err, retry ? "retry" : "abort");
1014	}
1015
1016	/* We couldn't get a response from the card.  Give up. */
1017	if (err) {
1018		/* Check if the card is removed */
1019		if (mmc_detect_card_removed(card->host))
1020			return ERR_NOMEDIUM;
1021		return ERR_ABORT;
1022	}
1023
1024	/* Flag ECC errors */
1025	if ((status & R1_CARD_ECC_FAILED) ||
1026	    (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
1027	    (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
1028		*ecc_err = 1;
1029
1030	/* Flag General errors */
1031	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
1032		if ((status & R1_ERROR) ||
1033			(brq->stop.resp[0] & R1_ERROR)) {
1034			pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
1035			       req->rq_disk->disk_name, __func__,
1036			       brq->stop.resp[0], status);
1037			*gen_err = 1;
1038		}
1039
1040	/*
1041	 * Check the current card state.  If it is in some data transfer
1042	 * mode, tell it to stop (and hopefully transition back to TRAN.)
1043	 */
1044	if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
1045	    R1_CURRENT_STATE(status) == R1_STATE_RCV) {
1046		err = send_stop(card,
1047			DIV_ROUND_UP(brq->data.timeout_ns, 1000000),
1048			req, gen_err, &stop_status);
1049		if (err) {
1050			pr_err("%s: error %d sending stop command\n",
1051			       req->rq_disk->disk_name, err);
1052			/*
1053			 * If the stop cmd also timed out, the card is probably
1054			 * not present, so abort. Other errors are bad news too.
1055			 */
1056			return ERR_ABORT;
1057		}
1058
1059		if (stop_status & R1_CARD_ECC_FAILED)
1060			*ecc_err = 1;
1061	}
1062
1063	/* Check for set block count errors */
1064	if (brq->sbc.error)
1065		return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
1066				prev_cmd_status_valid, status);
1067
1068	/* Check for r/w command errors */
1069	if (brq->cmd.error)
1070		return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
1071				prev_cmd_status_valid, status);
1072
1073	/* Data errors */
1074	if (!brq->stop.error)
1075		return ERR_CONTINUE;
1076
1077	/* Now for stop errors.  These aren't fatal to the transfer. */
1078	pr_info("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
1079	       req->rq_disk->disk_name, brq->stop.error,
1080	       brq->cmd.resp[0], status);
1081
1082	/*
1083	 * Substitute in our own stop status as this will give the error
1084	 * state which happened during the execution of the r/w command.
1085	 */
1086	if (stop_status) {
1087		brq->stop.resp[0] = stop_status;
1088		brq->stop.error = 0;
1089	}
1090	return ERR_CONTINUE;
1091}
1092
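    /*
     * Try a hardware reset of the card/host for the given request type.
     * Only one attempt is made per type until mmc_blk_reset_success()
     * clears the flag, and the previously selected partition is restored
     * afterwards.
     */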
1093static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
1094			 int type)
1095{
1096	int err;
1097
1098	if (md->reset_done & type)
1099		return -EEXIST;
1100
1101	md->reset_done |= type;
1102	err = mmc_hw_reset(host);
1103	/* Ensure we switch back to the correct partition */
1104	if (err != -EOPNOTSUPP) {
1105		struct mmc_blk_data *main_md =
1106			dev_get_drvdata(&host->card->dev);
1107		int part_err;
1108
1109		main_md->part_curr = main_md->part_type;
1110		part_err = mmc_blk_part_switch(host->card, md);
1111		if (part_err) {
1112			/*
1113			 * We have failed to get back into the correct
1114			 * partition, so we need to abort the whole request.
1115			 */
1116			return -ENODEV;
1117		}
1118	}
1119	return err;
1120}
1121
1122static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
1123{
1124	md->reset_done &= ~type;
1125}
1126
1127int mmc_access_rpmb(struct mmc_queue *mq)
1128{
1129	struct mmc_blk_data *md = mq->data;
1130	/*
1131	 * If this is an RPMB partition access, return true
1132	 */
1133	if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
1134		return true;
1135
1136	return false;
1137}
1138
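    /*
     * Handle a DISCARD request by mapping it to the best erase argument
     * the card supports (discard, trim or plain erase), applying the
     * iNAND CMD38 quirk first where needed.
     */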
1139static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
1140{
1141	struct mmc_blk_data *md = mq->data;
1142	struct mmc_card *card = md->queue.card;
1143	unsigned int from, nr, arg;
1144	int err = 0, type = MMC_BLK_DISCARD;
1145
1146	if (!mmc_can_erase(card)) {
1147		err = -EOPNOTSUPP;
1148		goto out;
1149	}
1150
1151	from = blk_rq_pos(req);
1152	nr = blk_rq_sectors(req);
1153
1154	if (mmc_can_discard(card))
1155		arg = MMC_DISCARD_ARG;
1156	else if (mmc_can_trim(card))
1157		arg = MMC_TRIM_ARG;
1158	else
1159		arg = MMC_ERASE_ARG;
1160retry:
1161	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1162		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1163				 INAND_CMD38_ARG_EXT_CSD,
1164				 arg == MMC_TRIM_ARG ?
1165				 INAND_CMD38_ARG_TRIM :
1166				 INAND_CMD38_ARG_ERASE,
1167				 0);
1168		if (err)
1169			goto out;
1170	}
1171	err = mmc_erase(card, from, nr, arg);
1172out:
1173	if (err == -EIO && !mmc_blk_reset(md, card->host, type))
1174		goto retry;
1175	if (!err)
1176		mmc_blk_reset_success(md, type);
1177	blk_end_request(req, err, blk_rq_bytes(req));
1178
1179	return err ? 0 : 1;
1180}
1181
1182static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
1183				       struct request *req)
1184{
1185	struct mmc_blk_data *md = mq->data;
1186	struct mmc_card *card = md->queue.card;
1187	unsigned int from, nr, arg;
1188	int err = 0, type = MMC_BLK_SECDISCARD;
1189
1190	if (!(mmc_can_secure_erase_trim(card))) {
1191		err = -EOPNOTSUPP;
1192		goto out;
1193	}
1194
1195	from = blk_rq_pos(req);
1196	nr = blk_rq_sectors(req);
1197
1198	if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
1199		arg = MMC_SECURE_TRIM1_ARG;
1200	else
1201		arg = MMC_SECURE_ERASE_ARG;
1202
1203retry:
1204	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1205		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1206				 INAND_CMD38_ARG_EXT_CSD,
1207				 arg == MMC_SECURE_TRIM1_ARG ?
1208				 INAND_CMD38_ARG_SECTRIM1 :
1209				 INAND_CMD38_ARG_SECERASE,
1210				 0);
1211		if (err)
1212			goto out_retry;
1213	}
1214
1215	err = mmc_erase(card, from, nr, arg);
1216	if (err == -EIO)
1217		goto out_retry;
1218	if (err)
1219		goto out;
1220
1221	if (arg == MMC_SECURE_TRIM1_ARG) {
1222		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1223			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1224					 INAND_CMD38_ARG_EXT_CSD,
1225					 INAND_CMD38_ARG_SECTRIM2,
1226					 0);
1227			if (err)
1228				goto out_retry;
1229		}
1230
1231		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
1232		if (err == -EIO)
1233			goto out_retry;
1234		if (err)
1235			goto out;
1236	}
1237
1238out_retry:
1239	if (err && !mmc_blk_reset(md, card->host, type))
1240		goto retry;
1241	if (!err)
1242		mmc_blk_reset_success(md, type);
1243out:
1244	blk_end_request(req, err, blk_rq_bytes(req));
1245
1246	return err ? 0 : 1;
1247}
1248
1249static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
1250{
1251	struct mmc_blk_data *md = mq->data;
1252	struct mmc_card *card = md->queue.card;
1253	int ret = 0;
1254
1255	ret = mmc_flush_cache(card);
1256	if (ret)
1257		ret = -EIO;
1258
1259	blk_end_request_all(req, ret);
1260
1261	return ret ? 0 : 1;
1262}
1263
1264/*
1265 * Reformat current write as a reliable write, supporting
1266 * both legacy and the enhanced reliable write MMC cards.
1267 * In each transfer we'll handle only as much as a single
1268 * reliable write can handle, so the request is finished
1269 * through partial completions.
1270 */
1271static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
1272				    struct mmc_card *card,
1273				    struct request *req)
1274{
1275	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
1276		/* Legacy mode imposes restrictions on transfers. */
1277		if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
1278			brq->data.blocks = 1;
1279
1280		if (brq->data.blocks > card->ext_csd.rel_sectors)
1281			brq->data.blocks = card->ext_csd.rel_sectors;
1282		else if (brq->data.blocks < card->ext_csd.rel_sectors)
1283			brq->data.blocks = 1;
1284	}
1285}
1286
1287#define CMD_ERRORS							\
1288	(R1_OUT_OF_RANGE |	/* Command argument out of range */	\
1289	 R1_ADDRESS_ERROR |	/* Misaligned address */		\
1290	 R1_BLOCK_LEN_ERROR |	/* Transferred block length incorrect */\
1291	 R1_WP_VIOLATION |	/* Tried to write to protected block */	\
1292	 R1_CC_ERROR |		/* Card controller error */		\
1293	 R1_ERROR)		/* General/unknown error */
1294
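    /*
     * err_check callback for asynchronous requests: classify the outcome
     * of a completed transfer into an mmc_blk_status (success, partial,
     * retry, abort, ...), running command recovery when needed.
     */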
1295static int mmc_blk_err_check(struct mmc_card *card,
1296			     struct mmc_async_req *areq)
1297{
1298	struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
1299						    mmc_active);
1300	struct mmc_blk_request *brq = &mq_mrq->brq;
1301	struct request *req = mq_mrq->req;
1302	int need_retune = card->host->need_retune;
1303	int ecc_err = 0, gen_err = 0;
1304
1305	/*
1306	 * sbc.error indicates a problem with the set block count
1307	 * command.  No data will have been transferred.
1308	 *
1309	 * cmd.error indicates a problem with the r/w command.  No
1310	 * data will have been transferred.
1311	 *
1312	 * stop.error indicates a problem with the stop command.  Data
1313	 * may have been transferred, or may still be transferring.
1314	 */
1315	if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
1316	    brq->data.error) {
1317		switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
1318		case ERR_RETRY:
1319			return MMC_BLK_RETRY;
1320		case ERR_ABORT:
1321			return MMC_BLK_ABORT;
1322		case ERR_NOMEDIUM:
1323			return MMC_BLK_NOMEDIUM;
1324		case ERR_CONTINUE:
1325			break;
1326		}
1327	}
1328
1329	/*
1330	 * Check for errors relating to the execution of the
1331	 * initial command - such as address errors.  No data
1332	 * has been transferred.
1333	 */
1334	if (brq->cmd.resp[0] & CMD_ERRORS) {
1335		pr_err("%s: r/w command failed, status = %#x\n",
1336		       req->rq_disk->disk_name, brq->cmd.resp[0]);
1337		return MMC_BLK_ABORT;
1338	}
1339
1340	/*
1341	 * Everything else is either success, or a data error of some
1342	 * kind.  If it was a write, we may have transitioned to
1343	 * program mode, which we have to wait for to complete.
1344	 */
1345	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
1346		int err;
1347
1348		/* Check stop command response */
1349		if (brq->stop.resp[0] & R1_ERROR) {
1350			pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
1351			       req->rq_disk->disk_name, __func__,
1352			       brq->stop.resp[0]);
1353			gen_err = 1;
1354		}
1355
1356		err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req,
1357					&gen_err);
1358		if (err)
1359			return MMC_BLK_CMD_ERR;
1360	}
1361
1362	/* if general error occurs, retry the write operation. */
1363	if (gen_err) {
1364		pr_warn("%s: retrying write for general error\n",
1365				req->rq_disk->disk_name);
1366		return MMC_BLK_RETRY;
1367	}
1368
1369	if (brq->data.error) {
1370		if (need_retune && !brq->retune_retry_done) {
1371			pr_debug("%s: retrying because a re-tune was needed\n",
1372				 req->rq_disk->disk_name);
1373			brq->retune_retry_done = 1;
1374			return MMC_BLK_RETRY;
1375		}
1376		pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
1377		       req->rq_disk->disk_name, brq->data.error,
1378		       (unsigned)blk_rq_pos(req),
1379		       (unsigned)blk_rq_sectors(req),
1380		       brq->cmd.resp[0], brq->stop.resp[0]);
1381
1382		if (rq_data_dir(req) == READ) {
1383			if (ecc_err)
1384				return MMC_BLK_ECC_ERR;
1385			return MMC_BLK_DATA_ERR;
1386		} else {
1387			return MMC_BLK_CMD_ERR;
1388		}
1389	}
1390
1391	if (!brq->data.bytes_xfered)
1392		return MMC_BLK_RETRY;
1393
1394	if (mmc_packed_cmd(mq_mrq->cmd_type)) {
1395		if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
1396			return MMC_BLK_PARTIAL;
1397		else
1398			return MMC_BLK_SUCCESS;
1399	}
1400
1401	if (blk_rq_bytes(req) != brq->data.bytes_xfered)
1402		return MMC_BLK_PARTIAL;
1403
1404	return MMC_BLK_SUCCESS;
1405}
1406
1407static int mmc_blk_packed_err_check(struct mmc_card *card,
1408				    struct mmc_async_req *areq)
1409{
1410	struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
1411			mmc_active);
1412	struct request *req = mq_rq->req;
1413	struct mmc_packed *packed = mq_rq->packed;
1414	int err, check, status;
1415	u8 *ext_csd;
1416
1417	BUG_ON(!packed);
1418
1419	packed->retries--;
1420	check = mmc_blk_err_check(card, areq);
1421	err = get_card_status(card, &status, 0);
1422	if (err) {
1423		pr_err("%s: error %d sending status command\n",
1424		       req->rq_disk->disk_name, err);
1425		return MMC_BLK_ABORT;
1426	}
1427
1428	if (status & R1_EXCEPTION_EVENT) {
1429		err = mmc_get_ext_csd(card, &ext_csd);
1430		if (err) {
1431			pr_err("%s: error %d sending ext_csd\n",
1432			       req->rq_disk->disk_name, err);
1433			return MMC_BLK_ABORT;
1434		}
1435
1436		if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
1437		     EXT_CSD_PACKED_FAILURE) &&
1438		    (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
1439		     EXT_CSD_PACKED_GENERIC_ERROR)) {
1440			if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
1441			    EXT_CSD_PACKED_INDEXED_ERROR) {
1442				packed->idx_failure =
1443				  ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
1444				check = MMC_BLK_PARTIAL;
1445			}
1446			pr_err("%s: packed cmd failed, nr %u, sectors %u, "
1447			       "failure index: %d\n",
1448			       req->rq_disk->disk_name, packed->nr_entries,
1449			       packed->blocks, packed->idx_failure);
1450		}
1451		kfree(ext_csd);
1452	}
1453
1454	return check;
1455}
1456
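    /*
     * Translate a block layer read/write request into an mmc_blk_request:
     * choose single vs. multi block commands, apply reliable write and
     * data tag rules, optionally set up CMD23, and map the scatterlist.
     */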
1457static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
1458			       struct mmc_card *card,
1459			       int disable_multi,
1460			       struct mmc_queue *mq)
1461{
1462	u32 readcmd, writecmd;
1463	struct mmc_blk_request *brq = &mqrq->brq;
1464	struct request *req = mqrq->req;
1465	struct mmc_blk_data *md = mq->data;
1466	bool do_data_tag;
1467
1468	/*
1469	 * Reliable writes are used to implement Forced Unit Access and
1470	 * are supported only on MMCs.
1471	 */
1472	bool do_rel_wr = (req->cmd_flags & REQ_FUA) &&
1473		(rq_data_dir(req) == WRITE) &&
1474		(md->flags & MMC_BLK_REL_WR);
1475
1476	memset(brq, 0, sizeof(struct mmc_blk_request));
1477	brq->mrq.cmd = &brq->cmd;
1478	brq->mrq.data = &brq->data;
1479
1480	brq->cmd.arg = blk_rq_pos(req);
1481	if (!mmc_card_blockaddr(card))
1482		brq->cmd.arg <<= 9;
1483	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
1484	brq->data.blksz = 512;
1485	brq->stop.opcode = MMC_STOP_TRANSMISSION;
1486	brq->stop.arg = 0;
1487	brq->data.blocks = blk_rq_sectors(req);
1488
1489	/*
1490	 * The block layer doesn't support all sector count
1491	 * restrictions, so we need to be prepared for too big
1492	 * requests.
1493	 */
1494	if (brq->data.blocks > card->host->max_blk_count)
1495		brq->data.blocks = card->host->max_blk_count;
1496
1497	if (brq->data.blocks > 1) {
1498		/*
1499		 * After a read error, we redo the request one sector
1500		 * at a time in order to accurately determine which
1501		 * sectors can be read successfully.
1502		 */
1503		if (disable_multi)
1504			brq->data.blocks = 1;
1505
1506		/*
1507		 * Some controllers have HW issues while operating
1508		 * in multiple I/O mode
1509		 */
1510		if (card->host->ops->multi_io_quirk)
1511			brq->data.blocks = card->host->ops->multi_io_quirk(card,
1512						(rq_data_dir(req) == READ) ?
1513						MMC_DATA_READ : MMC_DATA_WRITE,
1514						brq->data.blocks);
1515	}
1516
1517	if (brq->data.blocks > 1 || do_rel_wr) {
1518		/* SPI multiblock writes terminate using a special
1519		 * token, not a STOP_TRANSMISSION request.
1520		 */
1521		if (!mmc_host_is_spi(card->host) ||
1522		    rq_data_dir(req) == READ)
1523			brq->mrq.stop = &brq->stop;
1524		readcmd = MMC_READ_MULTIPLE_BLOCK;
1525		writecmd = MMC_WRITE_MULTIPLE_BLOCK;
1526	} else {
1527		brq->mrq.stop = NULL;
1528		readcmd = MMC_READ_SINGLE_BLOCK;
1529		writecmd = MMC_WRITE_BLOCK;
1530	}
1531	if (rq_data_dir(req) == READ) {
1532		brq->cmd.opcode = readcmd;
1533		brq->data.flags = MMC_DATA_READ;
1534		if (brq->mrq.stop)
1535			brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 |
1536					MMC_CMD_AC;
1537	} else {
1538		brq->cmd.opcode = writecmd;
1539		brq->data.flags = MMC_DATA_WRITE;
1540		if (brq->mrq.stop)
1541			brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B |
1542					MMC_CMD_AC;
1543	}
1544
1545	if (do_rel_wr)
1546		mmc_apply_rel_rw(brq, card, req);
1547
1548	/*
1549	 * Data tag is used only when writing meta data, to speed
1550	 * up the write and any subsequent read of that meta data
1551	 */
1552	do_data_tag = (card->ext_csd.data_tag_unit_size) &&
1553		(req->cmd_flags & REQ_META) &&
1554		(rq_data_dir(req) == WRITE) &&
1555		((brq->data.blocks * brq->data.blksz) >=
1556		 card->ext_csd.data_tag_unit_size);
1557
1558	/*
1559	 * Pre-defined multi-block transfers are preferable to
1560	 * open-ended ones (and necessary for reliable writes).
1561	 * However, it is not sufficient to just send CMD23,
1562	 * and avoid the final CMD12, as on an error condition
1563	 * CMD12 (stop) needs to be sent anyway. This, coupled
1564	 * with Auto-CMD23 enhancements provided by some
1565	 * hosts, means that the complexity of dealing
1566	 * with this is best left to the host. If CMD23 is
1567	 * supported by card and host, we'll fill sbc in and let
1568	 * the host deal with handling it correctly. This means
1569	 * that for hosts that don't expose MMC_CAP_CMD23, no
1570	 * change of behavior will be observed.
1571	 *
1572	 * N.B.: Some MMC cards experience performance degradation.
1573	 * We'll avoid using CMD23-bounded multiblock writes for
1574	 * these, while retaining features like reliable writes.
1575	 */
1576	if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
1577	    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
1578	     do_data_tag)) {
1579		brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
1580		brq->sbc.arg = brq->data.blocks |
1581			(do_rel_wr ? (1 << 31) : 0) |
1582			(do_data_tag ? (1 << 29) : 0);
1583		brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
1584		brq->mrq.sbc = &brq->sbc;
1585	}
1586
1587	mmc_set_data_timeout(&brq->data, card);
1588
1589	brq->data.sg = mqrq->sg;
1590	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
1591
1592	/*
1593	 * Adjust the sg list so it is the same size as the
1594	 * request.
1595	 */
1596	if (brq->data.blocks != blk_rq_sectors(req)) {
1597		int i, data_size = brq->data.blocks << 9;
1598		struct scatterlist *sg;
1599
1600		for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
1601			data_size -= sg->length;
1602			if (data_size <= 0) {
1603				sg->length += data_size;
1604				i++;
1605				break;
1606			}
1607		}
1608		brq->data.sg_len = i;
1609	}
1610
1611	mqrq->mmc_active.mrq = &brq->mrq;
1612	mqrq->mmc_active.err_check = mmc_blk_err_check;
1613
1614	mmc_queue_bounce_pre(mqrq);
1615}
1616
1617static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q,
1618					  struct mmc_card *card)
1619{
1620	unsigned int hdr_sz = mmc_large_sector(card) ? 4096 : 512;
1621	unsigned int max_seg_sz = queue_max_segment_size(q);
1622	unsigned int len, nr_segs = 0;
1623
1624	do {
1625		len = min(hdr_sz, max_seg_sz);
1626		hdr_sz -= len;
1627		nr_segs++;
1628	} while (hdr_sz);
1629
1630	return nr_segs;
1631}
1632
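    /*
     * Try to collect consecutive write requests from the queue into one
     * packed command, honouring the card's max_packed_writes limit, the
     * host's block count and segment limits, and reliable write rules.
     * Returns the number of packed entries, or 0 to fall back to a
     * normal transfer.
     */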
1633static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
1634{
1635	struct request_queue *q = mq->queue;
1636	struct mmc_card *card = mq->card;
1637	struct request *cur = req, *next = NULL;
1638	struct mmc_blk_data *md = mq->data;
1639	struct mmc_queue_req *mqrq = mq->mqrq_cur;
1640	bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
1641	unsigned int req_sectors = 0, phys_segments = 0;
1642	unsigned int max_blk_count, max_phys_segs;
1643	bool put_back = true;
1644	u8 max_packed_rw = 0;
1645	u8 reqs = 0;
1646
1647	if (!(md->flags & MMC_BLK_PACKED_CMD))
1648		goto no_packed;
1649
1650	if ((rq_data_dir(cur) == WRITE) &&
1651	    mmc_host_packed_wr(card->host))
1652		max_packed_rw = card->ext_csd.max_packed_writes;
1653
1654	if (max_packed_rw == 0)
1655		goto no_packed;
1656
1657	if (mmc_req_rel_wr(cur) &&
1658	    (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
1659		goto no_packed;
1660
1661	if (mmc_large_sector(card) &&
1662	    !IS_ALIGNED(blk_rq_sectors(cur), 8))
1663		goto no_packed;
1664
1665	mmc_blk_clear_packed(mqrq);
1666
1667	max_blk_count = min(card->host->max_blk_count,
1668			    card->host->max_req_size >> 9);
1669	if (unlikely(max_blk_count > 0xffff))
1670		max_blk_count = 0xffff;
1671
1672	max_phys_segs = queue_max_segments(q);
1673	req_sectors += blk_rq_sectors(cur);
1674	phys_segments += cur->nr_phys_segments;
1675
1676	if (rq_data_dir(cur) == WRITE) {
1677		req_sectors += mmc_large_sector(card) ? 8 : 1;
1678		phys_segments += mmc_calc_packed_hdr_segs(q, card);
1679	}
1680
1681	do {
1682		if (reqs >= max_packed_rw - 1) {
1683			put_back = false;
1684			break;
1685		}
1686
1687		spin_lock_irq(q->queue_lock);
1688		next = blk_fetch_request(q);
1689		spin_unlock_irq(q->queue_lock);
1690		if (!next) {
1691			put_back = false;
1692			break;
1693		}
1694
1695		if (mmc_large_sector(card) &&
1696		    !IS_ALIGNED(blk_rq_sectors(next), 8))
1697			break;
1698
1699		if (next->cmd_flags & REQ_DISCARD ||
1700		    next->cmd_flags & REQ_FLUSH)
1701			break;
1702
1703		if (rq_data_dir(cur) != rq_data_dir(next))
1704			break;
1705
1706		if (mmc_req_rel_wr(next) &&
1707		    (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
1708			break;
1709
1710		req_sectors += blk_rq_sectors(next);
1711		if (req_sectors > max_blk_count)
1712			break;
1713
1714		phys_segments +=  next->nr_phys_segments;
1715		if (phys_segments > max_phys_segs)
1716			break;
1717
1718		list_add_tail(&next->queuelist, &mqrq->packed->list);
1719		cur = next;
1720		reqs++;
1721	} while (1);
1722
1723	if (put_back) {
1724		spin_lock_irq(q->queue_lock);
1725		blk_requeue_request(q, next);
1726		spin_unlock_irq(q->queue_lock);
1727	}
1728
1729	if (reqs > 0) {
1730		list_add(&req->queuelist, &mqrq->packed->list);
1731		mqrq->packed->nr_entries = ++reqs;
1732		mqrq->packed->retries = reqs;
1733		return reqs;
1734	}
1735
1736no_packed:
1737	mqrq->cmd_type = MMC_PACKED_NONE;
1738	return 0;
1739}
1740
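    /*
     * Prepare a packed WRITE: build the packed command header (a CMD23
     * and CMD25 argument pair per queued request) plus a single
     * CMD23/CMD25 transfer that covers the header and all data blocks.
     */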
1741static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
1742					struct mmc_card *card,
1743					struct mmc_queue *mq)
1744{
1745	struct mmc_blk_request *brq = &mqrq->brq;
1746	struct request *req = mqrq->req;
1747	struct request *prq;
1748	struct mmc_blk_data *md = mq->data;
1749	struct mmc_packed *packed = mqrq->packed;
1750	bool do_rel_wr, do_data_tag;
1751	u32 *packed_cmd_hdr;
1752	u8 hdr_blocks;
1753	u8 i = 1;
1754
1755	BUG_ON(!packed);
1756
1757	mqrq->cmd_type = MMC_PACKED_WRITE;
1758	packed->blocks = 0;
1759	packed->idx_failure = MMC_PACKED_NR_IDX;
1760
1761	packed_cmd_hdr = packed->cmd_hdr;
1762	memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
1763	packed_cmd_hdr[0] = (packed->nr_entries << 16) |
1764		(PACKED_CMD_WR << 8) | PACKED_CMD_VER;
1765	hdr_blocks = mmc_large_sector(card) ? 8 : 1;
1766
1767	/*
1768	 * Argument for each entry of the packed group
1769	 */
1770	list_for_each_entry(prq, &packed->list, queuelist) {
1771		do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
1772		do_data_tag = (card->ext_csd.data_tag_unit_size) &&
1773			(prq->cmd_flags & REQ_META) &&
1774			(rq_data_dir(prq) == WRITE) &&
1775			((brq->data.blocks * brq->data.blksz) >=
1776			 card->ext_csd.data_tag_unit_size);
1777		/* Argument of CMD23 */
1778		packed_cmd_hdr[(i * 2)] =
1779			(do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
1780			(do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
1781			blk_rq_sectors(prq);
1782		/* Argument of CMD18 or CMD25 */
1783		packed_cmd_hdr[((i * 2)) + 1] =
1784			mmc_card_blockaddr(card) ?
1785			blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
1786		packed->blocks += blk_rq_sectors(prq);
1787		i++;
1788	}
1789
1790	memset(brq, 0, sizeof(struct mmc_blk_request));
1791	brq->mrq.cmd = &brq->cmd;
1792	brq->mrq.data = &brq->data;
1793	brq->mrq.sbc = &brq->sbc;
1794	brq->mrq.stop = &brq->stop;
1795
1796	brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
1797	brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks);
1798	brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
1799
1800	brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
1801	brq->cmd.arg = blk_rq_pos(req);
1802	if (!mmc_card_blockaddr(card))
1803		brq->cmd.arg <<= 9;
1804	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
1805
1806	brq->data.blksz = 512;
1807	brq->data.blocks = packed->blocks + hdr_blocks;
1808	brq->data.flags = MMC_DATA_WRITE;
1809
1810	brq->stop.opcode = MMC_STOP_TRANSMISSION;
1811	brq->stop.arg = 0;
1812	brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1813
1814	mmc_set_data_timeout(&brq->data, card);
1815
1816	brq->data.sg = mqrq->sg;
1817	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
1818
1819	mqrq->mmc_active.mrq = &brq->mrq;
1820	mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
1821
1822	mmc_queue_bounce_pre(mqrq);
1823}
1824
1825static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
1826			   struct mmc_blk_request *brq, struct request *req,
1827			   int ret)
1828{
1829	struct mmc_queue_req *mq_rq;
1830	mq_rq = container_of(brq, struct mmc_queue_req, brq);
1831
1832	/*
1833	 * If this is an SD card and we're writing, we can first
1834	 * mark the known good sectors as ok.
1835	 *
1836	 * If the card is not SD, we can still ok written sectors
1837	 * as reported by the controller (which might be less than
1838	 * the real number of written sectors, but never more).
1839	 */
1840	if (mmc_card_sd(card)) {
1841		u32 blocks;
1842
1843		blocks = mmc_sd_num_wr_blocks(card);
1844		if (blocks != (u32)-1) {
1845			ret = blk_end_request(req, 0, blocks << 9);
1846		}
1847	} else {
1848		if (!mmc_packed_cmd(mq_rq->cmd_type))
1849			ret = blk_end_request(req, 0, brq->data.bytes_xfered);
1850	}
1851	return ret;
1852}
1853
1854static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq)
1855{
1856	struct request *prq;
1857	struct mmc_packed *packed = mq_rq->packed;
1858	int idx = packed->idx_failure, i = 0;
1859	int ret = 0;
1860
1861	BUG_ON(!packed);
1862
1863	while (!list_empty(&packed->list)) {
1864		prq = list_entry_rq(packed->list.next);
1865		if (idx == i) {
1866			/* retry from error index */
1867			packed->nr_entries -= idx;
1868			mq_rq->req = prq;
1869			ret = 1;
1870
1871			if (packed->nr_entries == MMC_PACKED_NR_SINGLE) {
1872				list_del_init(&prq->queuelist);
1873				mmc_blk_clear_packed(mq_rq);
1874			}
1875			return ret;
1876		}
1877		list_del_init(&prq->queuelist);
1878		blk_end_request(prq, 0, blk_rq_bytes(prq));
1879		i++;
1880	}
1881
1882	mmc_blk_clear_packed(mq_rq);
1883	return ret;
1884}
1885
1886static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq)
1887{
1888	struct request *prq;
1889	struct mmc_packed *packed = mq_rq->packed;
1890
1891	BUG_ON(!packed);
1892
1893	while (!list_empty(&packed->list)) {
1894		prq = list_entry_rq(packed->list.next);
1895		list_del_init(&prq->queuelist);
1896		blk_end_request(prq, -EIO, blk_rq_bytes(prq));
1897	}
1898
1899	mmc_blk_clear_packed(mq_rq);
1900}
1901
1902static void mmc_blk_revert_packed_req(struct mmc_queue *mq,
1903				      struct mmc_queue_req *mq_rq)
1904{
1905	struct request *prq;
1906	struct request_queue *q = mq->queue;
1907	struct mmc_packed *packed = mq_rq->packed;
1908
1909	BUG_ON(!packed);
1910
1911	while (!list_empty(&packed->list)) {
1912		prq = list_entry_rq(packed->list.prev);
1913		if (prq->queuelist.prev != &packed->list) {
1914			list_del_init(&prq->queuelist);
1915			spin_lock_irq(q->queue_lock);
1916			blk_requeue_request(mq->queue, prq);
1917			spin_unlock_irq(q->queue_lock);
1918		} else {
1919			list_del_init(&prq->queuelist);
1920		}
1921	}
1922
1923	mmc_blk_clear_packed(mq_rq);
1924}
1925
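    /*
     * Main read/write issue path: prepare the current request (packed or
     * normal), start it asynchronously via mmc_start_req(), and handle
     * the completion status of the previous one, including retries,
     * resets and the single-block read fallback on errors.
     */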
1926static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
1927{
1928	struct mmc_blk_data *md = mq->data;
1929	struct mmc_card *card = md->queue.card;
1930	struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
1931	int ret = 1, disable_multi = 0, retry = 0, type, retune_retry_done = 0;
1932	enum mmc_blk_status status;
1933	struct mmc_queue_req *mq_rq;
1934	struct request *req = rqc;
1935	struct mmc_async_req *areq;
1936	const u8 packed_nr = 2;
1937	u8 reqs = 0;
1938
1939	if (!rqc && !mq->mqrq_prev->req)
1940		return 0;
1941
1942	if (rqc)
1943		reqs = mmc_blk_prep_packed_list(mq, rqc);
1944
1945	do {
1946		if (rqc) {
1947			/*
1948			 * When 4KB native sectors are enabled, only reads and
1949			 * writes that are a multiple of 8 blocks are allowed
1950			 */
1951			if ((brq->data.blocks & 0x07) &&
1952			    (card->ext_csd.data_sector_size == 4096)) {
1953				pr_err("%s: Transfer size is not 4KB sector size aligned\n",
1954					req->rq_disk->disk_name);
1955				mq_rq = mq->mqrq_cur;
1956				goto cmd_abort;
1957			}
1958
1959			if (reqs >= packed_nr)
1960				mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
1961							    card, mq);
1962			else
1963				mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
1964			areq = &mq->mqrq_cur->mmc_active;
1965		} else
1966			areq = NULL;
1967		areq = mmc_start_req(card->host, areq, (int *) &status);
1968		if (!areq) {
1969			if (status == MMC_BLK_NEW_REQUEST)
1970				mq->flags |= MMC_QUEUE_NEW_REQUEST;
1971			return 0;
1972		}
1973
1974		mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
1975		brq = &mq_rq->brq;
1976		req = mq_rq->req;
1977		type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
1978		mmc_queue_bounce_post(mq_rq);
1979
1980		switch (status) {
1981		case MMC_BLK_SUCCESS:
1982		case MMC_BLK_PARTIAL:
1983			/*
1984			 * A block was successfully transferred.
1985			 */
1986			mmc_blk_reset_success(md, type);
1987
1988			if (mmc_packed_cmd(mq_rq->cmd_type)) {
1989				ret = mmc_blk_end_packed_req(mq_rq);
1990				break;
1991			} else {
1992				ret = blk_end_request(req, 0,
1993						brq->data.bytes_xfered);
1994			}
1995
1996			/*
1997			 * If the blk_end_request function returns non-zero even
1998			 * though all data has been transferred and no errors
1999			 * were returned by the host controller, it's a bug.
2000			 */
2001			if (status == MMC_BLK_SUCCESS && ret) {
2002				pr_err("%s BUG rq_tot %d d_xfer %d\n",
2003				       __func__, blk_rq_bytes(req),
2004				       brq->data.bytes_xfered);
2005				rqc = NULL;
2006				goto cmd_abort;
2007			}
2008			break;
2009		case MMC_BLK_CMD_ERR:
2010			ret = mmc_blk_cmd_err(md, card, brq, req, ret);
2011			if (mmc_blk_reset(md, card->host, type))
2012				goto cmd_abort;
2013			if (!ret)
2014				goto start_new_req;
2015			break;
2016		case MMC_BLK_RETRY:
2017			retune_retry_done = brq->retune_retry_done;
2018			if (retry++ < 5)
2019				break;
2020			/* Fall through */
2021		case MMC_BLK_ABORT:
2022			if (!mmc_blk_reset(md, card->host, type))
2023				break;
2024			goto cmd_abort;
2025		case MMC_BLK_DATA_ERR: {
2026			int err;
2027
2028			err = mmc_blk_reset(md, card->host, type);
2029			if (!err)
2030				break;
2031			if (err == -ENODEV ||
2032				mmc_packed_cmd(mq_rq->cmd_type))
2033				goto cmd_abort;
2034			/* Fall through */
2035		}
2036		case MMC_BLK_ECC_ERR:
2037			if (brq->data.blocks > 1) {
2038				/* Redo read one sector at a time */
2039				pr_warn("%s: retrying using single block read\n",
2040					req->rq_disk->disk_name);
2041				disable_multi = 1;
2042				break;
2043			}
2044			/*
2045			 * After an error, we redo I/O one sector at a
2046			 * time, so we only reach here after trying to
2047			 * read a single sector.
2048			 */
2049			ret = blk_end_request(req, -EIO,
2050						brq->data.blksz);
2051			if (!ret)
2052				goto start_new_req;
2053			break;
2054		case MMC_BLK_NOMEDIUM:
2055			goto cmd_abort;
2056		default:
2057			pr_err("%s: Unhandled return value (%d)\n",
2058					req->rq_disk->disk_name, status);
2059			goto cmd_abort;
2060		}
2061
2062		if (ret) {
2063			if (mmc_packed_cmd(mq_rq->cmd_type)) {
2064				if (!mq_rq->packed->retries)
2065					goto cmd_abort;
2066				mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
2067				mmc_start_req(card->host,
2068					      &mq_rq->mmc_active, NULL);
2069			} else {
2070
2071				/*
2072				 * In case of an incomplete request,
2073				 * prepare it again and resend it.
2074				 */
2075				mmc_blk_rw_rq_prep(mq_rq, card,
2076						disable_multi, mq);
2077				mmc_start_req(card->host,
2078						&mq_rq->mmc_active, NULL);
2079			}
2080			mq_rq->brq.retune_retry_done = retune_retry_done;
2081		}
2082	} while (ret);
2083
2084	return 1;
2085
2086 cmd_abort:
2087	if (mmc_packed_cmd(mq_rq->cmd_type)) {
2088		mmc_blk_abort_packed_req(mq_rq);
2089	} else {
2090		if (mmc_card_removed(card))
2091			req->cmd_flags |= REQ_QUIET;
2092		while (ret)
2093			ret = blk_end_request(req, -EIO,
2094					blk_rq_cur_bytes(req));
2095	}
2096
2097 start_new_req:
2098	if (rqc) {
2099		if (mmc_card_removed(card)) {
2100			rqc->cmd_flags |= REQ_QUIET;
2101			blk_end_request_all(rqc, -EIO);
2102		} else {
2103			/*
2104			 * If the current request is packed, it needs to be put back.
2105			 */
2106			if (mmc_packed_cmd(mq->mqrq_cur->cmd_type))
2107				mmc_blk_revert_packed_req(mq, mq->mqrq_cur);
2108
2109			mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
2110			mmc_start_req(card->host,
2111				      &mq->mqrq_cur->mmc_active, NULL);
2112		}
2113	}
2114
2115	return 0;
2116}
2117
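/*
 * Queue issue entry point.  Claims the host for the first request of a
 * burst, switches to the partition backing this block device, and then
 * dispatches discard, secure discard, flush or regular read/write
 * handling.  The host is released again once no requests remain or after
 * a special (discard/flush) request has completed.
 */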
2118static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
2119{
2120	int ret;
2121	struct mmc_blk_data *md = mq->data;
2122	struct mmc_card *card = md->queue.card;
2123	struct mmc_host *host = card->host;
2124	unsigned long flags;
2125	unsigned int cmd_flags = req ? req->cmd_flags : 0;
2126
2127	if (req && !mq->mqrq_prev->req)
2128		/* claim host only for the first request */
2129		mmc_get_card(card);
2130
2131	ret = mmc_blk_part_switch(card, md);
2132	if (ret) {
2133		if (req) {
2134			blk_end_request_all(req, -EIO);
2135		}
2136		ret = 0;
2137		goto out;
2138	}
2139
2140	mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
2141	if (cmd_flags & REQ_DISCARD) {
2142		/* complete ongoing async transfer before issuing discard */
2143		if (card->host->areq)
2144			mmc_blk_issue_rw_rq(mq, NULL);
2145		if (req->cmd_flags & REQ_SECURE)
2146			ret = mmc_blk_issue_secdiscard_rq(mq, req);
2147		else
2148			ret = mmc_blk_issue_discard_rq(mq, req);
2149	} else if (cmd_flags & REQ_FLUSH) {
2150		/* complete ongoing async transfer before issuing flush */
2151		if (card->host->areq)
2152			mmc_blk_issue_rw_rq(mq, NULL);
2153		ret = mmc_blk_issue_flush(mq, req);
2154	} else {
2155		if (!req && host->areq) {
2156			spin_lock_irqsave(&host->context_info.lock, flags);
2157			host->context_info.is_waiting_last_req = true;
2158			spin_unlock_irqrestore(&host->context_info.lock, flags);
2159		}
2160		ret = mmc_blk_issue_rw_rq(mq, req);
2161	}
2162
2163out:
2164	if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
2165	     (cmd_flags & MMC_REQ_SPECIAL_MASK))
2166		/*
2167		 * Release the host when there are no more requests
2168		 * and after a special request (discard, flush) is done.
2169		 * In the special request case there is no reentry to
2170		 * 'mmc_blk_issue_rq' with 'mqrq_prev->req'.
2171		 */
2172		mmc_put_card(card);
2173	return ret;
2174}
2175
2176static inline int mmc_blk_readonly(struct mmc_card *card)
2177{
2178	return mmc_card_readonly(card) ||
2179	       !(card->csd.cmdclass & CCC_BLOCK_WRITE);
2180}
2181
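/*
 * Allocate the per-device state and gendisk for one card area (main data
 * area, boot, RPMB or general purpose partition): set up the request
 * queue, read-only state, logical block size and capacity, and enable
 * CMD23, reliable write and packed command support where the card and
 * host allow it.
 */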
2182static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
2183					      struct device *parent,
2184					      sector_t size,
2185					      bool default_ro,
2186					      const char *subname,
2187					      int area_type)
2188{
2189	struct mmc_blk_data *md;
2190	int devidx, ret;
2191
2192	devidx = find_first_zero_bit(dev_use, max_devices);
2193	if (devidx >= max_devices)
2194		return ERR_PTR(-ENOSPC);
2195	__set_bit(devidx, dev_use);
2196
2197	md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
2198	if (!md) {
2199		ret = -ENOMEM;
2200		goto out;
2201	}
2202
2203	md->area_type = area_type;
2204
2205	/*
2206	 * Set the read-only status based on the supported commands
2207	 * and the write protect switch.
2208	 */
2209	md->read_only = mmc_blk_readonly(card);
2210
2211	md->disk = alloc_disk(perdev_minors);
2212	if (md->disk == NULL) {
2213		ret = -ENOMEM;
2214		goto err_kfree;
2215	}
2216
2217	spin_lock_init(&md->lock);
2218	INIT_LIST_HEAD(&md->part);
2219	md->usage = 1;
2220
2221	ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
2222	if (ret)
2223		goto err_putdisk;
2224
2225	md->queue.issue_fn = mmc_blk_issue_rq;
2226	md->queue.data = md;
2227
2228	md->disk->major	= MMC_BLOCK_MAJOR;
2229	md->disk->first_minor = devidx * perdev_minors;
2230	md->disk->fops = &mmc_bdops;
2231	md->disk->private_data = md;
2232	md->disk->queue = md->queue.queue;
2233	md->disk->driverfs_dev = parent;
2234	set_disk_ro(md->disk, md->read_only || default_ro);
2235	md->disk->flags = GENHD_FL_EXT_DEVT;
2236	if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
2237		md->disk->flags |= GENHD_FL_NO_PART_SCAN;
2238
2239	/*
2240	 * As discussed on lkml, GENHD_FL_REMOVABLE should:
2241	 *
2242	 * - be set for removable media with permanent block devices
2243	 * - be unset for removable block devices with permanent media
2244	 *
2245	 * Since MMC block devices clearly fall under the second
2246	 * case, we do not set GENHD_FL_REMOVABLE.  Userspace
2247	 * should use the block device creation/destruction hotplug
2248	 * messages to tell when the card is present.
2249	 */
2250
2251	snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
2252		 "mmcblk%u%s", card->host->index, subname ? subname : "");
2253
2254	if (mmc_card_mmc(card))
2255		blk_queue_logical_block_size(md->queue.queue,
2256					     card->ext_csd.data_sector_size);
2257	else
2258		blk_queue_logical_block_size(md->queue.queue, 512);
2259
2260	set_capacity(md->disk, size);
2261
2262	if (mmc_host_cmd23(card->host)) {
2263		if (mmc_card_mmc(card) ||
2264		    (mmc_card_sd(card) &&
2265		     card->scr.cmds & SD_SCR_CMD23_SUPPORT))
2266			md->flags |= MMC_BLK_CMD23;
2267	}
2268
2269	if (mmc_card_mmc(card) &&
2270	    md->flags & MMC_BLK_CMD23 &&
2271	    ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
2272	     card->ext_csd.rel_sectors)) {
2273		md->flags |= MMC_BLK_REL_WR;
2274		blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
2275	}
2276
2277	if (mmc_card_mmc(card) &&
2278	    (area_type == MMC_BLK_DATA_AREA_MAIN) &&
2279	    (md->flags & MMC_BLK_CMD23) &&
2280	    card->ext_csd.packed_event_en) {
2281		if (!mmc_packed_init(&md->queue, card))
2282			md->flags |= MMC_BLK_PACKED_CMD;
2283	}
2284
2285	return md;
2286
2287 err_putdisk:
2288	put_disk(md->disk);
2289 err_kfree:
2290	kfree(md);
2291 out:
2292	return ERR_PTR(ret);
2293}
2294
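/*
 * Allocate the block device for the card's main data area.  The capacity
 * is taken from EXT_CSD for block-addressed (e)MMC cards and computed
 * from the CSD otherwise, in units of 512-byte sectors.
 */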
2295static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
2296{
2297	sector_t size;
2298
2299	if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
2300		/*
2301		 * The EXT_CSD sector count is in units of 512 byte
2302		 * sectors.
2303		 */
2304		size = card->ext_csd.sectors;
2305	} else {
2306		/*
2307		 * The CSD capacity field is in units of read_blkbits.
2308		 * set_capacity takes units of 512 bytes.
2309		 */
2310		size = (typeof(sector_t))card->csd.capacity
2311			<< (card->csd.read_blkbits - 9);
2312	}
2313
2314	return mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
2315					MMC_BLK_DATA_AREA_MAIN);
2316}
2317
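/*
 * Allocate a block device for a single physical partition and link it
 * into the main device's partition list.
 */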
2318static int mmc_blk_alloc_part(struct mmc_card *card,
2319			      struct mmc_blk_data *md,
2320			      unsigned int part_type,
2321			      sector_t size,
2322			      bool default_ro,
2323			      const char *subname,
2324			      int area_type)
2325{
2326	char cap_str[10];
2327	struct mmc_blk_data *part_md;
2328
2329	part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
2330				    subname, area_type);
2331	if (IS_ERR(part_md))
2332		return PTR_ERR(part_md);
2333	part_md->part_type = part_type;
2334	list_add(&part_md->part, &md->part);
2335
2336	string_get_size((u64)get_capacity(part_md->disk), 512, STRING_UNITS_2,
2337			cap_str, sizeof(cap_str));
2338	pr_info("%s: %s %s partition %u %s\n",
2339	       part_md->disk->disk_name, mmc_card_id(card),
2340	       mmc_card_name(card), part_md->part_type, cap_str);
2341	return 0;
2342}
2343
2344/* MMC physical partitions consist of two boot partitions and
2345 * up to four general purpose partitions.
2346 * For each partition enabled in EXT_CSD a block device will be allocated
2347 * to provide access to the partition.
2348 */
2349
2350static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
2351{
2352	int idx, ret = 0;
2353
2354	if (!mmc_card_mmc(card))
2355		return 0;
2356
2357	for (idx = 0; idx < card->nr_parts; idx++) {
2358		if (card->part[idx].size) {
2359			ret = mmc_blk_alloc_part(card, md,
2360				card->part[idx].part_cfg,
2361				card->part[idx].size >> 9,
2362				card->part[idx].force_ro,
2363				card->part[idx].name,
2364				card->part[idx].area_type);
2365			if (ret)
2366				return ret;
2367		}
2368	}
2369
2370	return ret;
2371}
2372
2373static void mmc_blk_remove_req(struct mmc_blk_data *md)
2374{
2375	struct mmc_card *card;
2376
2377	if (md) {
2378		/*
2379		 * Flush remaining requests and free queues. It
2380		 * is freeing the queue that stops new requests
2381		 * from being accepted.
2382		 */
2383		card = md->queue.card;
2384		mmc_cleanup_queue(&md->queue);
2385		if (md->flags & MMC_BLK_PACKED_CMD)
2386			mmc_packed_clean(&md->queue);
2387		if (md->disk->flags & GENHD_FL_UP) {
2388			device_remove_file(disk_to_dev(md->disk), &md->force_ro);
2389			if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
2390					card->ext_csd.boot_ro_lockable)
2391				device_remove_file(disk_to_dev(md->disk),
2392					&md->power_ro_lock);
2393
2394			del_gendisk(md->disk);
2395		}
2396		mmc_blk_put(md);
2397	}
2398}
2399
2400static void mmc_blk_remove_parts(struct mmc_card *card,
2401				 struct mmc_blk_data *md)
2402{
2403	struct list_head *pos, *q;
2404	struct mmc_blk_data *part_md;
2405
2406	list_for_each_safe(pos, q, &md->part) {
2407		part_md = list_entry(pos, struct mmc_blk_data, part);
2408		list_del(pos);
2409		mmc_blk_remove_req(part_md);
2410	}
2411}
2412
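/*
 * Register the gendisk and create the force_ro sysfs attribute.  Boot
 * areas of cards that support power-on write protection additionally get
 * the ro_lock_until_next_power_on attribute.
 */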
2413static int mmc_add_disk(struct mmc_blk_data *md)
2414{
2415	int ret;
2416	struct mmc_card *card = md->queue.card;
2417
2418	add_disk(md->disk);
2419	md->force_ro.show = force_ro_show;
2420	md->force_ro.store = force_ro_store;
2421	sysfs_attr_init(&md->force_ro.attr);
2422	md->force_ro.attr.name = "force_ro";
2423	md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
2424	ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
2425	if (ret)
2426		goto force_ro_fail;
2427
2428	if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
2429	     card->ext_csd.boot_ro_lockable) {
2430		umode_t mode;
2431
2432		if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
2433			mode = S_IRUGO;
2434		else
2435			mode = S_IRUGO | S_IWUSR;
2436
2437		md->power_ro_lock.show = power_ro_lock_show;
2438		md->power_ro_lock.store = power_ro_lock_store;
2439		sysfs_attr_init(&md->power_ro_lock.attr);
2440		md->power_ro_lock.attr.mode = mode;
2441		md->power_ro_lock.attr.name =
2442					"ro_lock_until_next_power_on";
2443		ret = device_create_file(disk_to_dev(md->disk),
2444				&md->power_ro_lock);
2445		if (ret)
2446			goto power_ro_lock_fail;
2447	}
2448	return ret;
2449
2450power_ro_lock_fail:
2451	device_remove_file(disk_to_dev(md->disk), &md->force_ro);
2452force_ro_fail:
2453	del_gendisk(md->disk);
2454
2455	return ret;
2456}
2457
2458#define CID_MANFID_SANDISK	0x2
2459#define CID_MANFID_TOSHIBA	0x11
2460#define CID_MANFID_MICRON	0x13
2461#define CID_MANFID_SAMSUNG	0x15
2462#define CID_MANFID_KINGSTON	0x70
2463
2464static const struct mmc_fixup blk_fixups[] =
2465{
2466	MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
2467		  MMC_QUIRK_INAND_CMD38),
2468	MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
2469		  MMC_QUIRK_INAND_CMD38),
2470	MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
2471		  MMC_QUIRK_INAND_CMD38),
2472	MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
2473		  MMC_QUIRK_INAND_CMD38),
2474	MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
2475		  MMC_QUIRK_INAND_CMD38),
2476
2477	/*
2478	 * Some MMC cards experience performance degradation with CMD23
2479	 * instead of CMD12-bounded multiblock transfers. For now we'll
2480	 * blacklist what's known to be bad:
2481	 * - certain SanDisk and Toshiba cards.
2482	 *
2483	 * N.B. This doesn't affect SD cards.
2484	 */
2485	MMC_FIXUP("SDMB-32", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
2486		  MMC_QUIRK_BLK_NO_CMD23),
2487	MMC_FIXUP("SDM032", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
2488		  MMC_QUIRK_BLK_NO_CMD23),
2489	MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
2490		  MMC_QUIRK_BLK_NO_CMD23),
2491	MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
2492		  MMC_QUIRK_BLK_NO_CMD23),
2493	MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
2494		  MMC_QUIRK_BLK_NO_CMD23),
2495
2496	/*
2497	 * Some Micron MMC cards need a longer data read timeout than
2498	 * indicated in the CSD.
2499	 */
2500	MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
2501		  MMC_QUIRK_LONG_READ_TIME),
2502
2503	/*
2504	 * On these Samsung MoviNAND parts, performing secure erase or
2505	 * secure trim can result in unrecoverable corruption due to a
2506	 * firmware bug.
2507	 */
2508	MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2509		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2510	MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2511		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2512	MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2513		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2514	MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2515		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2516	MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2517		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2518	MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2519		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2520	MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2521		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2522	MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2523		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2524
2525	/*
2526	 * On some Kingston eMMCs, performing trim can occasionally result in
2527	 * unrecoverable data corruption due to a firmware bug.
2528	 */
2529	MMC_FIXUP("V10008", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
2530		  MMC_QUIRK_TRIM_BROKEN),
2531	MMC_FIXUP("V10016", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
2532		  MMC_QUIRK_TRIM_BROKEN),
2533
2534	END_FIXUP
2535};
2536
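/*
 * Card probe: apply quirks, allocate block devices for the main area and
 * any enabled physical partitions, register them, and set up runtime PM
 * with a 3 second autosuspend delay (runtime PM is left disabled for
 * SD-combo cards).
 */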
2537static int mmc_blk_probe(struct mmc_card *card)
2538{
2539	struct mmc_blk_data *md, *part_md;
2540	char cap_str[10];
2541
2542	/*
2543	 * Check that the card supports the command class(es) we need.
2544	 */
2545	if (!(card->csd.cmdclass & CCC_BLOCK_READ))
2546		return -ENODEV;
2547
2548	mmc_fixup_device(card, blk_fixups);
2549
2550	md = mmc_blk_alloc(card);
2551	if (IS_ERR(md))
2552		return PTR_ERR(md);
2553
2554	string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2,
2555			cap_str, sizeof(cap_str));
2556	pr_info("%s: %s %s %s %s\n",
2557		md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
2558		cap_str, md->read_only ? "(ro)" : "");
2559
2560	if (mmc_blk_alloc_parts(card, md))
2561		goto out;
2562
2563	dev_set_drvdata(&card->dev, md);
2564
2565	if (mmc_add_disk(md))
2566		goto out;
2567
2568	list_for_each_entry(part_md, &md->part, part) {
2569		if (mmc_add_disk(part_md))
2570			goto out;
2571	}
2572
2573	pm_runtime_set_autosuspend_delay(&card->dev, 3000);
2574	pm_runtime_use_autosuspend(&card->dev);
2575
2576	/*
2577	 * Don't enable runtime PM for SD-combo cards here; leave that
2578	 * decision to the SDIO init sequence instead.
2579	 */
2580	if (card->type != MMC_TYPE_SD_COMBO) {
2581		pm_runtime_set_active(&card->dev);
2582		pm_runtime_enable(&card->dev);
2583	}
2584
2585	return 0;
2586
2587 out:
2588	mmc_blk_remove_parts(card, md);
2589	mmc_blk_remove_req(md);
2590	return 0;
2591}
2592
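/*
 * Card removal: remove the partition block devices, switch the card back
 * to its main area, disable runtime PM (except for SD-combo cards) and
 * finally release the main block device.
 */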
2593static void mmc_blk_remove(struct mmc_card *card)
2594{
2595	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
2596
2597	mmc_blk_remove_parts(card, md);
2598	pm_runtime_get_sync(&card->dev);
2599	mmc_claim_host(card->host);
2600	mmc_blk_part_switch(card, md);
2601	mmc_release_host(card->host);
2602	if (card->type != MMC_TYPE_SD_COMBO)
2603		pm_runtime_disable(&card->dev);
2604	pm_runtime_put_noidle(&card->dev);
2605	mmc_blk_remove_req(md);
2606	dev_set_drvdata(&card->dev, NULL);
2607}
2608
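/*
 * Suspend the request queues of the main device and all partitions so
 * that no new requests reach the card.
 */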
2609static int _mmc_blk_suspend(struct mmc_card *card)
2610{
2611	struct mmc_blk_data *part_md;
2612	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
2613
2614	if (md) {
2615		mmc_queue_suspend(&md->queue);
2616		list_for_each_entry(part_md, &md->part, part) {
2617			mmc_queue_suspend(&part_md->queue);
2618		}
2619	}
2620	return 0;
2621}
2622
2623static void mmc_blk_shutdown(struct mmc_card *card)
2624{
2625	_mmc_blk_suspend(card);
2626}
2627
2628#ifdef CONFIG_PM_SLEEP
2629static int mmc_blk_suspend(struct device *dev)
2630{
2631	struct mmc_card *card = mmc_dev_to_card(dev);
2632
2633	return _mmc_blk_suspend(card);
2634}
2635
2636static int mmc_blk_resume(struct device *dev)
2637{
2638	struct mmc_blk_data *part_md;
2639	struct mmc_blk_data *md = dev_get_drvdata(dev);
2640
2641	if (md) {
2642		/*
2643		 * Resume involves the card going into idle state,
2644		 * so the current partition is always the main one.
2645		 */
2646		md->part_curr = md->part_type;
2647		mmc_queue_resume(&md->queue);
2648		list_for_each_entry(part_md, &md->part, part) {
2649			mmc_queue_resume(&part_md->queue);
2650		}
2651	}
2652	return 0;
2653}
2654#endif
2655
2656static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume);
2657
2658static struct mmc_driver mmc_driver = {
2659	.drv		= {
2660		.name	= "mmcblk",
2661		.pm	= &mmc_blk_pm_ops,
2662	},
2663	.probe		= mmc_blk_probe,
2664	.remove		= mmc_blk_remove,
2665	.shutdown	= mmc_blk_shutdown,
2666};
2667
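/*
 * Module init: derive the device limit from the number of minors per
 * device, then register the block major and the MMC bus driver.
 */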
2668static int __init mmc_blk_init(void)
2669{
2670	int res;
2671
2672	if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
2673		pr_info("mmcblk: using %d minors per device\n", perdev_minors);
2674
2675	max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors);
2676
2677	res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
2678	if (res)
2679		goto out;
2680
2681	res = mmc_register_driver(&mmc_driver);
2682	if (res)
2683		goto out2;
2684
2685	return 0;
2686 out2:
2687	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
2688 out:
2689	return res;
2690}
2691
2692static void __exit mmc_blk_exit(void)
2693{
2694	mmc_unregister_driver(&mmc_driver);
2695	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
2696}
2697
2698module_init(mmc_blk_init);
2699module_exit(mmc_blk_exit);
2700
2701MODULE_LICENSE("GPL");
2702MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
2703