Linux Audio

Check our new training course

Loading...
v3.5.6
   1/*
   2 *  linux/drivers/mmc/card/mmc_test.c
   3 *
   4 *  Copyright 2007-2008 Pierre Ossman
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License as published by
   8 * the Free Software Foundation; either version 2 of the License, or (at
   9 * your option) any later version.
  10 */
  11
  12#include <linux/mmc/core.h>
  13#include <linux/mmc/card.h>
  14#include <linux/mmc/host.h>
  15#include <linux/mmc/mmc.h>
  16#include <linux/slab.h>
  17
  18#include <linux/scatterlist.h>
  19#include <linux/swap.h>		/* For nr_free_buffer_pages() */
  20#include <linux/list.h>
  21
  22#include <linux/debugfs.h>
  23#include <linux/uaccess.h>
  24#include <linux/seq_file.h>
  25#include <linux/module.h>
  26
  27#define RESULT_OK		0
  28#define RESULT_FAIL		1
  29#define RESULT_UNSUP_HOST	2
  30#define RESULT_UNSUP_CARD	3
  31
  32#define BUFFER_ORDER		2
  33#define BUFFER_SIZE		(PAGE_SIZE << BUFFER_ORDER)
  34
  35/*
  36 * Limit the test area size to the maximum MMC HC erase group size.  Note that
  37 * the maximum SD allocation unit size is just 4MiB.
  38 */
  39#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)
  40
/**
 * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
 * @page: first page in the allocation
 * @order: order of the number of pages allocated
 */
struct mmc_test_pages {
	struct page *page;
	unsigned int order;	/* allocation covers 2^order contiguous pages */
};
  50
/**
 * struct mmc_test_mem - allocated memory.
 * @arr: array of allocations
 * @cnt: number of allocations
 */
struct mmc_test_mem {
	struct mmc_test_pages *arr;	/* owned; freed by mmc_test_free_mem() */
	unsigned int cnt;
};
  60
/**
 * struct mmc_test_area - information for performance tests.
 * @max_sz: test area size (in bytes)
 * @dev_addr: address on card at which to do performance tests
 * @max_tfr: maximum transfer size allowed by driver (in bytes)
 * @max_segs: maximum segments allowed by driver in scatterlist @sg
 * @max_seg_sz: maximum segment size allowed by driver
 * @blocks: number of (512 byte) blocks currently mapped by @sg
 * @sg_len: length of currently mapped scatterlist @sg
 * @mem: allocated memory
 * @sg: scatterlist
 */
struct mmc_test_area {
	unsigned long max_sz;
	unsigned int dev_addr;
	unsigned int max_tfr;
	unsigned int max_segs;
	unsigned int max_seg_sz;
	unsigned int blocks;
	unsigned int sg_len;
	struct mmc_test_mem *mem;
	struct scatterlist *sg;
};
  84
/**
 * struct mmc_test_transfer_result - transfer results for performance tests.
 * @link: double-linked list
 * @count: amount of group of sectors to check
 * @sectors: amount of sectors to check in one group
 * @ts: time values of transfer
 * @rate: calculated transfer rate
 * @iops: I/O operations per second (times 100)
 */
struct mmc_test_transfer_result {
	struct list_head link;	/* linked into mmc_test_general_result.tr_lst */
	unsigned int count;
	unsigned int sectors;
	struct timespec ts;
	unsigned int rate;
	unsigned int iops;
};
 102
/**
 * struct mmc_test_general_result - results for tests.
 * @link: double-linked list
 * @card: card under test
 * @testcase: number of test case
 * @result: result of test run
 * @tr_lst: transfer measurements if any as mmc_test_transfer_result
 */
struct mmc_test_general_result {
	struct list_head link;
	struct mmc_card *card;
	int testcase;
	int result;	/* one of the RESULT_* codes or a negative errno */
	struct list_head tr_lst;
};
 118
/**
 * struct mmc_test_dbgfs_file - debugfs related file.
 * @link: double-linked list
 * @card: card under test
 * @file: file created under debugfs
 */
struct mmc_test_dbgfs_file {
	struct list_head link;
	struct mmc_card *card;
	struct dentry *file;
};
 130
/**
 * struct mmc_test_card - test information.
 * @card: card under test
 * @scratch: transfer buffer
 * @buffer: transfer buffer
 * @highmem: buffer for highmem tests
 * @area: information for performance tests
 * @gr: pointer to results of current testcase
 */
struct mmc_test_card {
	struct mmc_card	*card;

	u8		scratch[BUFFER_SIZE];
	u8		*buffer;
#ifdef CONFIG_HIGHMEM
	struct page	*highmem;
#endif
	struct mmc_test_area		area;
	/* NULL when no result collection is active (see save_transfer_result) */
	struct mmc_test_general_result	*gr;
};
 151
/* Bit flags selecting how to prepare the media before a test run. */
enum mmc_test_prep_media {
	MMC_TEST_PREP_NONE = 0,
	MMC_TEST_PREP_WRITE_FULL = 1 << 0,
	MMC_TEST_PREP_ERASE = 1 << 1,
};
 157
/*
 * Parameters for a multiple read/write test run.
 * NOTE(review): field semantics inferred from names only — the code using
 * this struct lies outside this chunk; presumably @sg_len and @bs are
 * per-iteration arrays of length @len. Verify against the callers.
 */
struct mmc_test_multiple_rw {
	unsigned int *sg_len;
	unsigned int *bs;
	unsigned int len;
	unsigned int size;
	bool do_write;
	bool do_nonblock_req;
	enum mmc_test_prep_media prepare;
};
 167
/*
 * Pairs an mmc core async request with its owning test context so the
 * err_check callback can recover the context via container_of().
 */
struct mmc_test_async_req {
	struct mmc_async_req areq;
	struct mmc_test_card *test;
};
 172
 173/*******************************************************************/
 174/*  General helper functions                                       */
 175/*******************************************************************/
 176
/*
 * Configure correct block size in card
 */
static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
{
	/* Thin wrapper over the core helper; returns 0 or a negative errno. */
	return mmc_set_blocklen(test->card, size);
}
 184
 185/*
 186 * Fill in the mmc_request structure given a set of transfer parameters.
 187 */
 188static void mmc_test_prepare_mrq(struct mmc_test_card *test,
 189	struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
 190	unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
 191{
 192	BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop);
 193
 194	if (blocks > 1) {
 195		mrq->cmd->opcode = write ?
 196			MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
 197	} else {
 198		mrq->cmd->opcode = write ?
 199			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
 200	}
 201
 202	mrq->cmd->arg = dev_addr;
 203	if (!mmc_card_blockaddr(test->card))
 204		mrq->cmd->arg <<= 9;
 205
 206	mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;
 207
 208	if (blocks == 1)
 209		mrq->stop = NULL;
 210	else {
 211		mrq->stop->opcode = MMC_STOP_TRANSMISSION;
 212		mrq->stop->arg = 0;
 213		mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
 214	}
 215
 216	mrq->data->blksz = blksz;
 217	mrq->data->blocks = blocks;
 218	mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
 219	mrq->data->sg = sg;
 220	mrq->data->sg_len = sg_len;
 221
 222	mmc_set_data_timeout(mrq->data, test->card);
 223}
 224
 225static int mmc_test_busy(struct mmc_command *cmd)
 226{
 227	return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
 228		(R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);
 229}
 230
 231/*
 232 * Wait for the card to finish the busy state
 233 */
 234static int mmc_test_wait_busy(struct mmc_test_card *test)
 235{
 236	int ret, busy;
 237	struct mmc_command cmd = {0};
 238
 239	busy = 0;
 240	do {
 241		memset(&cmd, 0, sizeof(struct mmc_command));
 242
 243		cmd.opcode = MMC_SEND_STATUS;
 244		cmd.arg = test->card->rca << 16;
 245		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 246
 247		ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
 248		if (ret)
 249			break;
 250
 251		if (!busy && mmc_test_busy(&cmd)) {
 252			busy = 1;
 253			if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
 254				pr_info("%s: Warning: Host did not "
 255					"wait for busy state to end.\n",
 256					mmc_hostname(test->card->host));
 257		}
 258	} while (mmc_test_busy(&cmd));
 259
 260	return ret;
 261}
 262
 263/*
 264 * Transfer a single sector of kernel addressable data
 265 */
 266static int mmc_test_buffer_transfer(struct mmc_test_card *test,
 267	u8 *buffer, unsigned addr, unsigned blksz, int write)
 268{
 269	int ret;
 270
 271	struct mmc_request mrq = {0};
 272	struct mmc_command cmd = {0};
 273	struct mmc_command stop = {0};
 274	struct mmc_data data = {0};
 275
 276	struct scatterlist sg;
 277
 278	mrq.cmd = &cmd;
 279	mrq.data = &data;
 280	mrq.stop = &stop;
 281
 282	sg_init_one(&sg, buffer, blksz);
 283
 284	mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);
 285
 286	mmc_wait_for_req(test->card->host, &mrq);
 287
 288	if (cmd.error)
 289		return cmd.error;
 290	if (data.error)
 291		return data.error;
 292
 293	ret = mmc_test_wait_busy(test);
 294	if (ret)
 295		return ret;
 296
 297	return 0;
 298}
 299
 300static void mmc_test_free_mem(struct mmc_test_mem *mem)
 301{
 302	if (!mem)
 303		return;
 304	while (mem->cnt--)
 305		__free_pages(mem->arr[mem->cnt].page,
 306			     mem->arr[mem->cnt].order);
 307	kfree(mem->arr);
 308	kfree(mem);
 309}
 310
 311/*
 312 * Allocate a lot of memory, preferably max_sz but at least min_sz.  In case
 313 * there isn't much memory do not exceed 1/16th total lowmem pages.  Also do
 314 * not exceed a maximum number of segments and try not to make segments much
 315 * bigger than maximum segment size.
 316 */
 317static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
 318					       unsigned long max_sz,
 319					       unsigned int max_segs,
 320					       unsigned int max_seg_sz)
 321{
 322	unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
 323	unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
 324	unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
 325	unsigned long page_cnt = 0;
 326	unsigned long limit = nr_free_buffer_pages() >> 4;
 327	struct mmc_test_mem *mem;
 328
 329	if (max_page_cnt > limit)
 330		max_page_cnt = limit;
 331	if (min_page_cnt > max_page_cnt)
 332		min_page_cnt = max_page_cnt;
 333
 334	if (max_seg_page_cnt > max_page_cnt)
 335		max_seg_page_cnt = max_page_cnt;
 336
 337	if (max_segs > max_page_cnt)
 338		max_segs = max_page_cnt;
 339
 340	mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
 341	if (!mem)
 342		return NULL;
 343
 344	mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_segs,
 345			   GFP_KERNEL);
 346	if (!mem->arr)
 347		goto out_free;
 348
 349	while (max_page_cnt) {
 350		struct page *page;
 351		unsigned int order;
 352		gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
 353				__GFP_NORETRY;
 354
 355		order = get_order(max_seg_page_cnt << PAGE_SHIFT);
 356		while (1) {
 357			page = alloc_pages(flags, order);
 358			if (page || !order)
 359				break;
 360			order -= 1;
 361		}
 362		if (!page) {
 363			if (page_cnt < min_page_cnt)
 364				goto out_free;
 365			break;
 366		}
 367		mem->arr[mem->cnt].page = page;
 368		mem->arr[mem->cnt].order = order;
 369		mem->cnt += 1;
 370		if (max_page_cnt <= (1UL << order))
 371			break;
 372		max_page_cnt -= 1UL << order;
 373		page_cnt += 1UL << order;
 374		if (mem->cnt >= max_segs) {
 375			if (page_cnt < min_page_cnt)
 376				goto out_free;
 377			break;
 378		}
 379	}
 380
 381	return mem;
 382
 383out_free:
 384	mmc_test_free_mem(mem);
 385	return NULL;
 386}
 387
/*
 * Map memory into a scatterlist.  Optionally allow the same memory to be
 * mapped more than once.
 */
static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size,
			   struct scatterlist *sglist, int repeat,
			   unsigned int max_segs, unsigned int max_seg_sz,
			   unsigned int *sg_len, int min_sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i;
	unsigned long sz = size;	/* bytes still to be mapped */

	sg_init_table(sglist, max_segs);
	/* Cannot produce more segments than the table can hold. */
	if (min_sg_len > max_segs)
		min_sg_len = max_segs;

	*sg_len = 0;
	do {
		for (i = 0; i < mem->cnt; i++) {
			unsigned long len = PAGE_SIZE << mem->arr[i].order;

			/*
			 * When a minimum segment count is requested, cap the
			 * segment length (512-byte aligned) so the transfer
			 * is spread over at least min_sg_len entries.  Note
			 * this uses the original 'size', not the remaining
			 * 'sz'.
			 */
			if (min_sg_len && (size / min_sg_len < len))
				len = ALIGN(size / min_sg_len, 512);
			if (len > sz)
				len = sz;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			/* Ran past the end of the table. */
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, mem->arr[i].page, len, 0);
			sz -= len;
			*sg_len += 1;
			if (!sz)
				break;
		}
		/* With 'repeat', cycle over the allocations until mapped. */
	} while (sz && repeat);

	if (sz)
		return -EINVAL;

	if (sg)
		sg_mark_end(sg);

	return 0;
}
 438
/*
 * Map memory into a scatterlist so that no pages are contiguous.  Allow the
 * same memory to be mapped more than once.
 */
static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
				       unsigned long sz,
				       struct scatterlist *sglist,
				       unsigned int max_segs,
				       unsigned int max_seg_sz,
				       unsigned int *sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i = mem->cnt, cnt;
	unsigned long len;
	void *base, *addr, *last_addr = NULL;

	sg_init_table(sglist, max_segs);

	*sg_len = 0;
	while (sz) {
		/* Walk the allocations in reverse order... */
		base = page_address(mem->arr[--i].page);
		cnt = 1 << mem->arr[i].order;
		/* ...and within each allocation, pages last-to-first. */
		while (sz && cnt) {
			addr = base + PAGE_SIZE * --cnt;
			/*
			 * Skip any page directly following the previous
			 * segment, so adjacent segments are never merged.
			 */
			if (last_addr && last_addr + PAGE_SIZE == addr)
				continue;
			last_addr = addr;
			len = PAGE_SIZE;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (len > sz)
				len = sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			/* Ran past the end of the table. */
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, virt_to_page(addr), len, 0);
			sz -= len;
			*sg_len += 1;
		}
		/* Wrap around: the same memory may be mapped repeatedly. */
		if (i == 0)
			i = mem->cnt;
	}

	if (sg)
		sg_mark_end(sg);

	return 0;
}
 490
 491/*
 492 * Calculate transfer rate in bytes per second.
 493 */
 494static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
 495{
 496	uint64_t ns;
 497
 498	ns = ts->tv_sec;
 499	ns *= 1000000000;
 500	ns += ts->tv_nsec;
 501
 502	bytes *= 1000000000;
 503
 504	while (ns > UINT_MAX) {
 505		bytes >>= 1;
 506		ns >>= 1;
 507	}
 508
 509	if (!ns)
 510		return 0;
 511
 512	do_div(bytes, (uint32_t)ns);
 513
 514	return bytes;
 515}
 516
 517/*
 518 * Save transfer results for future usage
 519 */
 520static void mmc_test_save_transfer_result(struct mmc_test_card *test,
 521	unsigned int count, unsigned int sectors, struct timespec ts,
 522	unsigned int rate, unsigned int iops)
 523{
 524	struct mmc_test_transfer_result *tr;
 525
 526	if (!test->gr)
 527		return;
 528
 529	tr = kmalloc(sizeof(struct mmc_test_transfer_result), GFP_KERNEL);
 530	if (!tr)
 531		return;
 532
 533	tr->count = count;
 534	tr->sectors = sectors;
 535	tr->ts = ts;
 536	tr->rate = rate;
 537	tr->iops = iops;
 538
 539	list_add_tail(&tr->link, &test->gr->tr_lst);
 540}
 541
 542/*
 543 * Print the transfer rate.
 544 */
 545static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
 546				struct timespec *ts1, struct timespec *ts2)
 547{
 548	unsigned int rate, iops, sectors = bytes >> 9;
 549	struct timespec ts;
 550
 551	ts = timespec_sub(*ts2, *ts1);
 552
 553	rate = mmc_test_rate(bytes, &ts);
 554	iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */
 555
 556	pr_info("%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
 557			 "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
 558			 mmc_hostname(test->card->host), sectors, sectors >> 1,
 559			 (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
 560			 (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024,
 561			 iops / 100, iops % 100);
 562
 563	mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
 564}
 565
 566/*
 567 * Print the average transfer rate.
 568 */
 569static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
 570				    unsigned int count, struct timespec *ts1,
 571				    struct timespec *ts2)
 572{
 573	unsigned int rate, iops, sectors = bytes >> 9;
 574	uint64_t tot = bytes * count;
 575	struct timespec ts;
 576
 577	ts = timespec_sub(*ts2, *ts1);
 578
 579	rate = mmc_test_rate(tot, &ts);
 580	iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */
 581
 582	pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
 583			 "%lu.%09lu seconds (%u kB/s, %u KiB/s, "
 584			 "%u.%02u IOPS, sg_len %d)\n",
 585			 mmc_hostname(test->card->host), count, sectors, count,
 586			 sectors >> 1, (sectors & 1 ? ".5" : ""),
 587			 (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
 588			 rate / 1000, rate / 1024, iops / 100, iops % 100,
 589			 test->area.sg_len);
 590
 591	mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
 592}
 593
 594/*
 595 * Return the card size in sectors.
 596 */
 597static unsigned int mmc_test_capacity(struct mmc_card *card)
 598{
 599	if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
 600		return card->ext_csd.sectors;
 601	else
 602		return card->csd.capacity << (card->csd.read_blkbits - 9);
 603}
 604
 605/*******************************************************************/
 606/*  Test preparation and cleanup                                   */
 607/*******************************************************************/
 608
 609/*
 610 * Fill the first couple of sectors of the card with known data
 611 * so that bad reads/writes can be detected
 612 */
 613static int __mmc_test_prepare(struct mmc_test_card *test, int write)
 614{
 615	int ret, i;
 616
 617	ret = mmc_test_set_blksize(test, 512);
 618	if (ret)
 619		return ret;
 620
 621	if (write)
 622		memset(test->buffer, 0xDF, 512);
 623	else {
 624		for (i = 0;i < 512;i++)
 625			test->buffer[i] = i;
 626	}
 627
 628	for (i = 0;i < BUFFER_SIZE / 512;i++) {
 629		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
 630		if (ret)
 631			return ret;
 632	}
 633
 634	return 0;
 635}
 636
/* Prepare the test area for a write test (fill with 0xDF). */
static int mmc_test_prepare_write(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 1);
}
 641
/* Prepare the test area for a read test (fill with a byte pattern). */
static int mmc_test_prepare_read(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 0);
}
 646
 647static int mmc_test_cleanup(struct mmc_test_card *test)
 648{
 649	int ret, i;
 650
 651	ret = mmc_test_set_blksize(test, 512);
 652	if (ret)
 653		return ret;
 654
 655	memset(test->buffer, 0, 512);
 656
 657	for (i = 0;i < BUFFER_SIZE / 512;i++) {
 658		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
 659		if (ret)
 660			return ret;
 661	}
 662
 663	return 0;
 664}
 665
 666/*******************************************************************/
 667/*  Test execution helpers                                         */
 668/*******************************************************************/
 669
 670/*
 671 * Modifies the mmc_request to perform the "short transfer" tests
 672 */
 673static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
 674	struct mmc_request *mrq, int write)
 675{
 676	BUG_ON(!mrq || !mrq->cmd || !mrq->data);
 677
 678	if (mrq->data->blocks > 1) {
 679		mrq->cmd->opcode = write ?
 680			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
 681		mrq->stop = NULL;
 682	} else {
 683		mrq->cmd->opcode = MMC_SEND_STATUS;
 684		mrq->cmd->arg = test->card->rca << 16;
 685	}
 686}
 687
 688/*
 689 * Checks that a normal transfer didn't have any errors
 690 */
 691static int mmc_test_check_result(struct mmc_test_card *test,
 692				 struct mmc_request *mrq)
 693{
 694	int ret;
 695
 696	BUG_ON(!mrq || !mrq->cmd || !mrq->data);
 697
 698	ret = 0;
 699
 700	if (!ret && mrq->cmd->error)
 701		ret = mrq->cmd->error;
 702	if (!ret && mrq->data->error)
 703		ret = mrq->data->error;
 704	if (!ret && mrq->stop && mrq->stop->error)
 705		ret = mrq->stop->error;
 706	if (!ret && mrq->data->bytes_xfered !=
 707		mrq->data->blocks * mrq->data->blksz)
 708		ret = RESULT_FAIL;
 709
 710	if (ret == -EINVAL)
 711		ret = RESULT_UNSUP_HOST;
 712
 713	return ret;
 714}
 715
/* Async completion check: wait out busy, then do the normal result check. */
static int mmc_test_check_result_async(struct mmc_card *card,
				       struct mmc_async_req *areq)
{
	/* Recover the test context embedded alongside the async request. */
	struct mmc_test_async_req *test_async =
		container_of(areq, struct mmc_test_async_req, areq);

	mmc_test_wait_busy(test_async->test);

	return mmc_test_check_result(test_async->test, areq->mrq);
}
 726
/*
 * Checks that a "short transfer" behaved as expected
 */
static int mmc_test_check_broken_result(struct mmc_test_card *test,
	struct mmc_request *mrq)
{
	int ret;

	BUG_ON(!mrq || !mrq->cmd || !mrq->data);

	ret = 0;

	if (!ret && mrq->cmd->error)
		ret = mrq->cmd->error;
	/* A deliberately broken transfer MUST produce a data error... */
	if (!ret && mrq->data->error == 0)
		ret = RESULT_FAIL;
	/* ...and the only acceptable one is a timeout. */
	if (!ret && mrq->data->error != -ETIMEDOUT)
		ret = mrq->data->error;
	if (!ret && mrq->stop && mrq->stop->error)
		ret = mrq->stop->error;
	if (mrq->data->blocks > 1) {
		/* Shortened multi-block: at most one block may transfer. */
		if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
			ret = RESULT_FAIL;
	} else {
		/* Replaced by a no-data command: nothing may transfer. */
		if (!ret && mrq->data->bytes_xfered > 0)
			ret = RESULT_FAIL;
	}

	/* -EINVAL from the host driver means "not supported". */
	if (ret == -EINVAL)
		ret = RESULT_UNSUP_HOST;

	return ret;
}
 760
 761/*
 762 * Tests nonblock transfer with certain parameters
 763 */
 764static void mmc_test_nonblock_reset(struct mmc_request *mrq,
 765				    struct mmc_command *cmd,
 766				    struct mmc_command *stop,
 767				    struct mmc_data *data)
 768{
 769	memset(mrq, 0, sizeof(struct mmc_request));
 770	memset(cmd, 0, sizeof(struct mmc_command));
 771	memset(data, 0, sizeof(struct mmc_data));
 772	memset(stop, 0, sizeof(struct mmc_command));
 773
 774	mrq->cmd = cmd;
 775	mrq->data = data;
 776	mrq->stop = stop;
 777}
/*
 * Tests non-blocking transfers: keeps two requests in flight, preparing the
 * next one while the host processes the previous.
 */
static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
				      struct scatterlist *sg, unsigned sg_len,
				      unsigned dev_addr, unsigned blocks,
				      unsigned blksz, int write, int count)
{
	/* Two sets of request structures, used alternately. */
	struct mmc_request mrq1;
	struct mmc_command cmd1;
	struct mmc_command stop1;
	struct mmc_data data1;

	struct mmc_request mrq2;
	struct mmc_command cmd2;
	struct mmc_command stop2;
	struct mmc_data data2;

	struct mmc_test_async_req test_areq[2];
	struct mmc_async_req *done_areq;
	struct mmc_async_req *cur_areq = &test_areq[0].areq;
	struct mmc_async_req *other_areq = &test_areq[1].areq;
	int i;
	int ret;

	test_areq[0].test = test;
	test_areq[1].test = test;

	mmc_test_nonblock_reset(&mrq1, &cmd1, &stop1, &data1);
	mmc_test_nonblock_reset(&mrq2, &cmd2, &stop2, &data2);

	cur_areq->mrq = &mrq1;
	cur_areq->err_check = mmc_test_check_result_async;
	other_areq->mrq = &mrq2;
	other_areq->err_check = mmc_test_check_result_async;

	for (i = 0; i < count; i++) {
		mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr,
				     blocks, blksz, write);
		/*
		 * Queue the current request; mmc_start_req() hands back the
		 * previously queued request once it completes (NULL on the
		 * first iteration, when nothing was in flight yet).
		 */
		done_areq = mmc_start_req(test->card->host, cur_areq, &ret);

		if (ret || (!done_areq && i > 0))
			goto err;

		/* Recycle the structures of the request that just finished. */
		if (done_areq) {
			if (done_areq->mrq == &mrq2)
				mmc_test_nonblock_reset(&mrq2, &cmd2,
							&stop2, &data2);
			else
				mmc_test_nonblock_reset(&mrq1, &cmd1,
							&stop1, &data1);
		}
		/* Swap current/other so the next iteration reuses the pair. */
		done_areq = cur_areq;
		cur_areq = other_areq;
		other_areq = done_areq;
		dev_addr += blocks;
	}

	/* Flush the last request still in flight. */
	done_areq = mmc_start_req(test->card->host, NULL, &ret);

	return ret;
err:
	return ret;
}
 839
 840/*
 841 * Tests a basic transfer with certain parameters
 842 */
 843static int mmc_test_simple_transfer(struct mmc_test_card *test,
 844	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
 845	unsigned blocks, unsigned blksz, int write)
 846{
 847	struct mmc_request mrq = {0};
 848	struct mmc_command cmd = {0};
 849	struct mmc_command stop = {0};
 850	struct mmc_data data = {0};
 851
 852	mrq.cmd = &cmd;
 853	mrq.data = &data;
 854	mrq.stop = &stop;
 855
 856	mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
 857		blocks, blksz, write);
 858
 859	mmc_wait_for_req(test->card->host, &mrq);
 860
 861	mmc_test_wait_busy(test);
 862
 863	return mmc_test_check_result(test, &mrq);
 864}
 865
 866/*
 867 * Tests a transfer where the card will fail completely or partly
 868 */
 869static int mmc_test_broken_transfer(struct mmc_test_card *test,
 870	unsigned blocks, unsigned blksz, int write)
 871{
 872	struct mmc_request mrq = {0};
 873	struct mmc_command cmd = {0};
 874	struct mmc_command stop = {0};
 875	struct mmc_data data = {0};
 876
 877	struct scatterlist sg;
 878
 879	mrq.cmd = &cmd;
 880	mrq.data = &data;
 881	mrq.stop = &stop;
 882
 883	sg_init_one(&sg, test->buffer, blocks * blksz);
 884
 885	mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
 886	mmc_test_prepare_broken_mrq(test, &mrq, write);
 887
 888	mmc_wait_for_req(test->card->host, &mrq);
 889
 890	mmc_test_wait_busy(test);
 891
 892	return mmc_test_check_broken_result(test, &mrq);
 893}
 894
/*
 * Does a complete transfer test where data is also validated
 *
 * Note: mmc_test_prepare() must have been done before this call
 */
static int mmc_test_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	int ret, i;
	unsigned long flags;

	if (write) {
		/* Fill the scratch buffer with a known incrementing pattern. */
		for (i = 0;i < blocks * blksz;i++)
			test->scratch[i] = i;
	} else {
		memset(test->scratch, 0, BUFFER_SIZE);
	}
	/*
	 * NOTE(review): irqs are disabled around sg_copy_*_buffer(),
	 * presumably because it may use atomic kmaps — confirm.
	 */
	local_irq_save(flags);
	sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
	local_irq_restore(flags);

	ret = mmc_test_set_blksize(test, blksz);
	if (ret)
		return ret;

	ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
		blocks, blksz, write);
	if (ret)
		return ret;

	if (write) {
		int sectors;

		ret = mmc_test_set_blksize(test, 512);
		if (ret)
			return ret;

		/* Round the transfer up to whole 512-byte sectors... */
		sectors = (blocks * blksz + 511) / 512;
		/*
		 * ...and when it is an exact multiple, read one extra sector
		 * so the 0xDF fill laid down by the prepare step can be
		 * verified beyond the written data.
		 */
		if ((sectors * 512) == (blocks * blksz))
			sectors++;

		if ((sectors * 512) > BUFFER_SIZE)
			return -EINVAL;

		memset(test->buffer, 0, sectors * 512);

		/* Read back what was just written, one sector at a time. */
		for (i = 0;i < sectors;i++) {
			ret = mmc_test_buffer_transfer(test,
				test->buffer + i * 512,
				dev_addr + i, 512, 0);
			if (ret)
				return ret;
		}

		/* The written range must hold the incrementing pattern... */
		for (i = 0;i < blocks * blksz;i++) {
			if (test->buffer[i] != (u8)i)
				return RESULT_FAIL;
		}

		/* ...and everything beyond it must still be 0xDF. */
		for (;i < sectors * 512;i++) {
			if (test->buffer[i] != 0xDF)
				return RESULT_FAIL;
		}
	} else {
		local_irq_save(flags);
		sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
		local_irq_restore(flags);
		/* A read must return the pattern written during prepare. */
		for (i = 0;i < blocks * blksz;i++) {
			if (test->scratch[i] != (u8)i)
				return RESULT_FAIL;
		}
	}

	return 0;
}
 971
 972/*******************************************************************/
 973/*  Tests                                                          */
 974/*******************************************************************/
 975
/**
 * struct mmc_test_case - one entry in the test-case table.
 * @name: human-readable test name
 * @prepare: optional setup run before @run (may be NULL)
 * @run: the test body; returns a RESULT_* code or a negative errno
 * @cleanup: optional teardown run after @run (may be NULL)
 */
struct mmc_test_case {
	const char *name;

	int (*prepare)(struct mmc_test_card *);
	int (*run)(struct mmc_test_card *);
	int (*cleanup)(struct mmc_test_card *);
};
 983
 984static int mmc_test_basic_write(struct mmc_test_card *test)
 985{
 986	int ret;
 987	struct scatterlist sg;
 988
 989	ret = mmc_test_set_blksize(test, 512);
 990	if (ret)
 991		return ret;
 992
 993	sg_init_one(&sg, test->buffer, 512);
 994
 995	ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
 996	if (ret)
 997		return ret;
 998
 999	return 0;
1000}
1001
1002static int mmc_test_basic_read(struct mmc_test_card *test)
1003{
1004	int ret;
1005	struct scatterlist sg;
1006
1007	ret = mmc_test_set_blksize(test, 512);
1008	if (ret)
1009		return ret;
1010
1011	sg_init_one(&sg, test->buffer, 512);
1012
1013	ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
1014	if (ret)
1015		return ret;
1016
1017	return 0;
1018}
1019
1020static int mmc_test_verify_write(struct mmc_test_card *test)
1021{
1022	int ret;
1023	struct scatterlist sg;
1024
1025	sg_init_one(&sg, test->buffer, 512);
1026
1027	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1028	if (ret)
1029		return ret;
1030
1031	return 0;
1032}
1033
1034static int mmc_test_verify_read(struct mmc_test_card *test)
1035{
1036	int ret;
1037	struct scatterlist sg;
1038
1039	sg_init_one(&sg, test->buffer, 512);
1040
1041	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1042	if (ret)
1043		return ret;
1044
1045	return 0;
1046}
1047
1048static int mmc_test_multi_write(struct mmc_test_card *test)
1049{
1050	int ret;
1051	unsigned int size;
1052	struct scatterlist sg;
1053
1054	if (test->card->host->max_blk_count == 1)
1055		return RESULT_UNSUP_HOST;
1056
1057	size = PAGE_SIZE * 2;
1058	size = min(size, test->card->host->max_req_size);
1059	size = min(size, test->card->host->max_seg_size);
1060	size = min(size, test->card->host->max_blk_count * 512);
1061
1062	if (size < 1024)
1063		return RESULT_UNSUP_HOST;
1064
1065	sg_init_one(&sg, test->buffer, size);
1066
1067	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
1068	if (ret)
1069		return ret;
1070
1071	return 0;
1072}
1073
1074static int mmc_test_multi_read(struct mmc_test_card *test)
1075{
1076	int ret;
1077	unsigned int size;
1078	struct scatterlist sg;
1079
1080	if (test->card->host->max_blk_count == 1)
1081		return RESULT_UNSUP_HOST;
1082
1083	size = PAGE_SIZE * 2;
1084	size = min(size, test->card->host->max_req_size);
1085	size = min(size, test->card->host->max_seg_size);
1086	size = min(size, test->card->host->max_blk_count * 512);
1087
1088	if (size < 1024)
1089		return RESULT_UNSUP_HOST;
1090
1091	sg_init_one(&sg, test->buffer, size);
1092
1093	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
1094	if (ret)
1095		return ret;
1096
1097	return 0;
1098}
1099
1100static int mmc_test_pow2_write(struct mmc_test_card *test)
1101{
1102	int ret, i;
1103	struct scatterlist sg;
1104
1105	if (!test->card->csd.write_partial)
1106		return RESULT_UNSUP_CARD;
1107
1108	for (i = 1; i < 512;i <<= 1) {
1109		sg_init_one(&sg, test->buffer, i);
1110		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
1111		if (ret)
1112			return ret;
1113	}
1114
1115	return 0;
1116}
1117
1118static int mmc_test_pow2_read(struct mmc_test_card *test)
1119{
1120	int ret, i;
1121	struct scatterlist sg;
1122
1123	if (!test->card->csd.read_partial)
1124		return RESULT_UNSUP_CARD;
1125
1126	for (i = 1; i < 512;i <<= 1) {
1127		sg_init_one(&sg, test->buffer, i);
1128		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
1129		if (ret)
1130			return ret;
1131	}
1132
1133	return 0;
1134}
1135
1136static int mmc_test_weird_write(struct mmc_test_card *test)
1137{
1138	int ret, i;
1139	struct scatterlist sg;
1140
1141	if (!test->card->csd.write_partial)
1142		return RESULT_UNSUP_CARD;
1143
1144	for (i = 3; i < 512;i += 7) {
1145		sg_init_one(&sg, test->buffer, i);
1146		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
1147		if (ret)
1148			return ret;
1149	}
1150
1151	return 0;
1152}
1153
1154static int mmc_test_weird_read(struct mmc_test_card *test)
1155{
1156	int ret, i;
1157	struct scatterlist sg;
1158
1159	if (!test->card->csd.read_partial)
1160		return RESULT_UNSUP_CARD;
1161
1162	for (i = 3; i < 512;i += 7) {
1163		sg_init_one(&sg, test->buffer, i);
1164		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
1165		if (ret)
1166			return ret;
1167	}
1168
1169	return 0;
1170}
1171
1172static int mmc_test_align_write(struct mmc_test_card *test)
1173{
1174	int ret, i;
1175	struct scatterlist sg;
1176
1177	for (i = 1;i < 4;i++) {
1178		sg_init_one(&sg, test->buffer + i, 512);
1179		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1180		if (ret)
1181			return ret;
1182	}
1183
1184	return 0;
1185}
1186
1187static int mmc_test_align_read(struct mmc_test_card *test)
1188{
1189	int ret, i;
1190	struct scatterlist sg;
1191
1192	for (i = 1;i < 4;i++) {
1193		sg_init_one(&sg, test->buffer + i, 512);
1194		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1195		if (ret)
1196			return ret;
1197	}
1198
1199	return 0;
1200}
1201
1202static int mmc_test_align_multi_write(struct mmc_test_card *test)
1203{
1204	int ret, i;
1205	unsigned int size;
1206	struct scatterlist sg;
1207
1208	if (test->card->host->max_blk_count == 1)
1209		return RESULT_UNSUP_HOST;
1210
1211	size = PAGE_SIZE * 2;
1212	size = min(size, test->card->host->max_req_size);
1213	size = min(size, test->card->host->max_seg_size);
1214	size = min(size, test->card->host->max_blk_count * 512);
1215
1216	if (size < 1024)
1217		return RESULT_UNSUP_HOST;
1218
1219	for (i = 1;i < 4;i++) {
1220		sg_init_one(&sg, test->buffer + i, size);
1221		ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
1222		if (ret)
1223			return ret;
1224	}
1225
1226	return 0;
1227}
1228
1229static int mmc_test_align_multi_read(struct mmc_test_card *test)
1230{
1231	int ret, i;
1232	unsigned int size;
1233	struct scatterlist sg;
1234
1235	if (test->card->host->max_blk_count == 1)
1236		return RESULT_UNSUP_HOST;
1237
1238	size = PAGE_SIZE * 2;
1239	size = min(size, test->card->host->max_req_size);
1240	size = min(size, test->card->host->max_seg_size);
1241	size = min(size, test->card->host->max_blk_count * 512);
1242
1243	if (size < 1024)
1244		return RESULT_UNSUP_HOST;
1245
1246	for (i = 1;i < 4;i++) {
1247		sg_init_one(&sg, test->buffer + i, size);
1248		ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
1249		if (ret)
1250			return ret;
1251	}
1252
1253	return 0;
1254}
1255
/*
 * Check that the reported transfer size is correct when a single-block
 * write fails to start.
 */
static int mmc_test_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 1, 512, 1);
}
1270
/*
 * Check that the reported transfer size is correct when a single-block
 * read fails to start.
 */
static int mmc_test_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 1, 512, 0);
}
1285
1286static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
1287{
1288	int ret;
1289
1290	if (test->card->host->max_blk_count == 1)
1291		return RESULT_UNSUP_HOST;
1292
1293	ret = mmc_test_set_blksize(test, 512);
1294	if (ret)
1295		return ret;
1296
1297	ret = mmc_test_broken_transfer(test, 2, 512, 1);
1298	if (ret)
1299		return ret;
1300
1301	return 0;
1302}
1303
1304static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
1305{
1306	int ret;
1307
1308	if (test->card->host->max_blk_count == 1)
1309		return RESULT_UNSUP_HOST;
1310
1311	ret = mmc_test_set_blksize(test, 512);
1312	if (ret)
1313		return ret;
1314
1315	ret = mmc_test_broken_transfer(test, 2, 512, 0);
1316	if (ret)
1317		return ret;
1318
1319	return 0;
1320}
1321
1322#ifdef CONFIG_HIGHMEM
1323
1324static int mmc_test_write_high(struct mmc_test_card *test)
1325{
1326	int ret;
1327	struct scatterlist sg;
1328
1329	sg_init_table(&sg, 1);
1330	sg_set_page(&sg, test->highmem, 512, 0);
1331
1332	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1333	if (ret)
1334		return ret;
1335
1336	return 0;
1337}
1338
1339static int mmc_test_read_high(struct mmc_test_card *test)
1340{
1341	int ret;
1342	struct scatterlist sg;
1343
1344	sg_init_table(&sg, 1);
1345	sg_set_page(&sg, test->highmem, 512, 0);
1346
1347	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1348	if (ret)
1349		return ret;
1350
1351	return 0;
1352}
1353
1354static int mmc_test_multi_write_high(struct mmc_test_card *test)
1355{
1356	int ret;
1357	unsigned int size;
1358	struct scatterlist sg;
1359
1360	if (test->card->host->max_blk_count == 1)
1361		return RESULT_UNSUP_HOST;
1362
1363	size = PAGE_SIZE * 2;
1364	size = min(size, test->card->host->max_req_size);
1365	size = min(size, test->card->host->max_seg_size);
1366	size = min(size, test->card->host->max_blk_count * 512);
1367
1368	if (size < 1024)
1369		return RESULT_UNSUP_HOST;
1370
1371	sg_init_table(&sg, 1);
1372	sg_set_page(&sg, test->highmem, size, 0);
1373
1374	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
1375	if (ret)
1376		return ret;
1377
1378	return 0;
1379}
1380
1381static int mmc_test_multi_read_high(struct mmc_test_card *test)
1382{
1383	int ret;
1384	unsigned int size;
1385	struct scatterlist sg;
1386
1387	if (test->card->host->max_blk_count == 1)
1388		return RESULT_UNSUP_HOST;
1389
1390	size = PAGE_SIZE * 2;
1391	size = min(size, test->card->host->max_req_size);
1392	size = min(size, test->card->host->max_seg_size);
1393	size = min(size, test->card->host->max_blk_count * 512);
1394
1395	if (size < 1024)
1396		return RESULT_UNSUP_HOST;
1397
1398	sg_init_table(&sg, 1);
1399	sg_set_page(&sg, test->highmem, size, 0);
1400
1401	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
1402	if (ret)
1403		return ret;
1404
1405	return 0;
1406}
1407
1408#else
1409
1410static int mmc_test_no_highmem(struct mmc_test_card *test)
1411{
1412	pr_info("%s: Highmem not configured - test skipped\n",
1413	       mmc_hostname(test->card->host));
1414	return 0;
1415}
1416
1417#endif /* CONFIG_HIGHMEM */
1418
1419/*
1420 * Map sz bytes so that it can be transferred.
1421 */
1422static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
1423			     int max_scatter, int min_sg_len)
1424{
1425	struct mmc_test_area *t = &test->area;
1426	int err;
1427
1428	t->blocks = sz >> 9;
1429
1430	if (max_scatter) {
1431		err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
1432						  t->max_segs, t->max_seg_sz,
1433				       &t->sg_len);
1434	} else {
1435		err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
1436				      t->max_seg_sz, &t->sg_len, min_sg_len);
1437	}
1438	if (err)
1439		pr_info("%s: Failed to map sg list\n",
1440		       mmc_hostname(test->card->host));
1441	return err;
1442}
1443
1444/*
1445 * Transfer bytes mapped by mmc_test_area_map().
1446 */
1447static int mmc_test_area_transfer(struct mmc_test_card *test,
1448				  unsigned int dev_addr, int write)
1449{
1450	struct mmc_test_area *t = &test->area;
1451
1452	return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
1453					t->blocks, 512, write);
1454}
1455
1456/*
1457 * Map and transfer bytes for multiple transfers.
1458 */
1459static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
1460				unsigned int dev_addr, int write,
1461				int max_scatter, int timed, int count,
1462				bool nonblock, int min_sg_len)
1463{
1464	struct timespec ts1, ts2;
1465	int ret = 0;
1466	int i;
1467	struct mmc_test_area *t = &test->area;
1468
1469	/*
1470	 * In the case of a maximally scattered transfer, the maximum transfer
1471	 * size is further limited by using PAGE_SIZE segments.
1472	 */
1473	if (max_scatter) {
1474		struct mmc_test_area *t = &test->area;
1475		unsigned long max_tfr;
1476
1477		if (t->max_seg_sz >= PAGE_SIZE)
1478			max_tfr = t->max_segs * PAGE_SIZE;
1479		else
1480			max_tfr = t->max_segs * t->max_seg_sz;
1481		if (sz > max_tfr)
1482			sz = max_tfr;
1483	}
1484
1485	ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len);
1486	if (ret)
1487		return ret;
1488
1489	if (timed)
1490		getnstimeofday(&ts1);
1491	if (nonblock)
1492		ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len,
1493				 dev_addr, t->blocks, 512, write, count);
1494	else
1495		for (i = 0; i < count && ret == 0; i++) {
1496			ret = mmc_test_area_transfer(test, dev_addr, write);
1497			dev_addr += sz >> 9;
1498		}
1499
1500	if (ret)
1501		return ret;
1502
1503	if (timed)
1504		getnstimeofday(&ts2);
1505
1506	if (timed)
1507		mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);
1508
1509	return 0;
1510}
1511
1512static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
1513			    unsigned int dev_addr, int write, int max_scatter,
1514			    int timed)
1515{
1516	return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter,
1517				    timed, 1, false, 0);
1518}
1519
1520/*
1521 * Write the test area entirely.
1522 */
1523static int mmc_test_area_fill(struct mmc_test_card *test)
1524{
1525	struct mmc_test_area *t = &test->area;
1526
1527	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0);
1528}
1529
1530/*
1531 * Erase the test area entirely.
1532 */
1533static int mmc_test_area_erase(struct mmc_test_card *test)
1534{
1535	struct mmc_test_area *t = &test->area;
1536
1537	if (!mmc_can_erase(test->card))
1538		return 0;
1539
1540	return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
1541			 MMC_ERASE_ARG);
1542}
1543
1544/*
1545 * Cleanup struct mmc_test_area.
1546 */
1547static int mmc_test_area_cleanup(struct mmc_test_card *test)
1548{
1549	struct mmc_test_area *t = &test->area;
1550
1551	kfree(t->sg);
1552	mmc_test_free_mem(t->mem);
1553
1554	return 0;
1555}
1556
1557/*
1558 * Initialize an area for testing large transfers.  The test area is set to the
1559 * middle of the card because cards may have different charateristics at the
1560 * front (for FAT file system optimization).  Optionally, the area is erased
1561 * (if the card supports it) which may improve write performance.  Optionally,
1562 * the area is filled with data for subsequent read tests.
1563 */
static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
{
	struct mmc_test_area *t = &test->area;
	unsigned long min_sz = 64 * 1024, sz;
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	/* Make the test area size about 4MiB */
	sz = (unsigned long)test->card->pref_erase << 9;
	t->max_sz = sz;
	/* Grow to at least 4MiB in whole preferred-erase-size steps ... */
	while (t->max_sz < 4 * 1024 * 1024)
		t->max_sz += sz;
	/* ... but keep it within TEST_AREA_MAX_SIZE where possible */
	while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
		t->max_sz -= sz;

	t->max_segs = test->card->host->max_segs;
	t->max_seg_sz = test->card->host->max_seg_size;
	/* Each segment must hold a whole number of 512-byte sectors */
	t->max_seg_sz -= t->max_seg_sz % 512;

	/*
	 * The maximum single-transfer size is the area size, further capped
	 * by the host's block count, request size and scatterlist limits.
	 */
	t->max_tfr = t->max_sz;
	if (t->max_tfr >> 9 > test->card->host->max_blk_count)
		t->max_tfr = test->card->host->max_blk_count << 9;
	if (t->max_tfr > test->card->host->max_req_size)
		t->max_tfr = test->card->host->max_req_size;
	if (t->max_tfr / t->max_seg_sz > t->max_segs)
		t->max_tfr = t->max_segs * t->max_seg_sz;

	/*
	 * Try to allocate enough memory for a max. sized transfer.  Less is OK
	 * because the same memory can be mapped into the scatterlist more than
	 * once.  Also, take into account the limits imposed on scatterlist
	 * segments by the host driver.
	 */
	t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
				    t->max_seg_sz);
	if (!t->mem)
		return -ENOMEM;

	t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
	if (!t->sg) {
		ret = -ENOMEM;
		goto out_free;
	}

	/* Place the area in the middle of the card, aligned to its own size */
	t->dev_addr = mmc_test_capacity(test->card) / 2;
	t->dev_addr -= t->dev_addr % (t->max_sz >> 9);

	if (erase) {
		ret = mmc_test_area_erase(test);
		if (ret)
			goto out_free;
	}

	if (fill) {
		ret = mmc_test_area_fill(test);
		if (ret)
			goto out_free;
	}

	return 0;

out_free:
	mmc_test_area_cleanup(test);
	return ret;
}
1632
1633/*
1634 * Prepare for large transfers.  Do not erase the test area.
1635 */
1636static int mmc_test_area_prepare(struct mmc_test_card *test)
1637{
1638	return mmc_test_area_init(test, 0, 0);
1639}
1640
1641/*
1642 * Prepare for large transfers.  Do erase the test area.
1643 */
1644static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
1645{
1646	return mmc_test_area_init(test, 1, 0);
1647}
1648
1649/*
1650 * Prepare for large transfers.  Erase and fill the test area.
1651 */
1652static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
1653{
1654	return mmc_test_area_init(test, 1, 1);
1655}
1656
1657/*
1658 * Test best-case performance.  Best-case performance is expected from
1659 * a single large transfer.
1660 *
1661 * An additional option (max_scatter) allows the measurement of the same
1662 * transfer but with no contiguous pages in the scatter list.  This tests
1663 * the efficiency of DMA to handle scattered pages.
1664 */
1665static int mmc_test_best_performance(struct mmc_test_card *test, int write,
1666				     int max_scatter)
1667{
1668	struct mmc_test_area *t = &test->area;
1669
1670	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write,
1671				max_scatter, 1);
1672}
1673
1674/*
1675 * Best-case read performance.
1676 */
1677static int mmc_test_best_read_performance(struct mmc_test_card *test)
1678{
1679	return mmc_test_best_performance(test, 0, 0);
1680}
1681
1682/*
1683 * Best-case write performance.
1684 */
1685static int mmc_test_best_write_performance(struct mmc_test_card *test)
1686{
1687	return mmc_test_best_performance(test, 1, 0);
1688}
1689
1690/*
1691 * Best-case read performance into scattered pages.
1692 */
1693static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
1694{
1695	return mmc_test_best_performance(test, 0, 1);
1696}
1697
1698/*
1699 * Best-case write performance from scattered pages.
1700 */
1701static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
1702{
1703	return mmc_test_best_performance(test, 1, 1);
1704}
1705
1706/*
1707 * Single read performance by transfer size.
1708 */
1709static int mmc_test_profile_read_perf(struct mmc_test_card *test)
1710{
1711	struct mmc_test_area *t = &test->area;
1712	unsigned long sz;
1713	unsigned int dev_addr;
1714	int ret;
1715
1716	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1717		dev_addr = t->dev_addr + (sz >> 9);
1718		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1719		if (ret)
1720			return ret;
1721	}
1722	sz = t->max_tfr;
1723	dev_addr = t->dev_addr;
1724	return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1725}
1726
1727/*
1728 * Single write performance by transfer size.
1729 */
1730static int mmc_test_profile_write_perf(struct mmc_test_card *test)
1731{
1732	struct mmc_test_area *t = &test->area;
1733	unsigned long sz;
1734	unsigned int dev_addr;
1735	int ret;
1736
1737	ret = mmc_test_area_erase(test);
1738	if (ret)
1739		return ret;
1740	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1741		dev_addr = t->dev_addr + (sz >> 9);
1742		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1743		if (ret)
1744			return ret;
1745	}
1746	ret = mmc_test_area_erase(test);
1747	if (ret)
1748		return ret;
1749	sz = t->max_tfr;
1750	dev_addr = t->dev_addr;
1751	return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1752}
1753
1754/*
1755 * Single trim performance by transfer size.
1756 */
static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	struct timespec ts1, ts2;
	int ret;

	/* Trim (MMC_TRIM_ARG) needs both trim and erase support */
	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	/* Time a trim of each power-of-two size below the area size */
	for (sz = 512; sz < t->max_sz; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		getnstimeofday(&ts1);
		ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
		if (ret)
			return ret;
		getnstimeofday(&ts2);
		mmc_test_print_rate(test, sz, &ts1, &ts2);
	}
	/*
	 * Final round at the start of the area.  NOTE(review): sz here is
	 * the first power of two >= max_sz, which can exceed the area size
	 * when max_sz is not a power of two — confirm this is intended.
	 */
	dev_addr = t->dev_addr;
	getnstimeofday(&ts1);
	ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
	if (ret)
		return ret;
	getnstimeofday(&ts2);
	mmc_test_print_rate(test, sz, &ts1, &ts2);
	return 0;
}
1789
1790static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
1791{
1792	struct mmc_test_area *t = &test->area;
1793	unsigned int dev_addr, i, cnt;
1794	struct timespec ts1, ts2;
1795	int ret;
1796
1797	cnt = t->max_sz / sz;
1798	dev_addr = t->dev_addr;
1799	getnstimeofday(&ts1);
1800	for (i = 0; i < cnt; i++) {
1801		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
1802		if (ret)
1803			return ret;
1804		dev_addr += (sz >> 9);
1805	}
1806	getnstimeofday(&ts2);
1807	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1808	return 0;
1809}
1810
1811/*
1812 * Consecutive read performance by transfer size.
1813 */
1814static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
1815{
1816	struct mmc_test_area *t = &test->area;
1817	unsigned long sz;
1818	int ret;
1819
1820	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1821		ret = mmc_test_seq_read_perf(test, sz);
1822		if (ret)
1823			return ret;
1824	}
1825	sz = t->max_tfr;
1826	return mmc_test_seq_read_perf(test, sz);
1827}
1828
1829static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
1830{
1831	struct mmc_test_area *t = &test->area;
1832	unsigned int dev_addr, i, cnt;
1833	struct timespec ts1, ts2;
1834	int ret;
1835
1836	ret = mmc_test_area_erase(test);
1837	if (ret)
1838		return ret;
1839	cnt = t->max_sz / sz;
1840	dev_addr = t->dev_addr;
1841	getnstimeofday(&ts1);
1842	for (i = 0; i < cnt; i++) {
1843		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
1844		if (ret)
1845			return ret;
1846		dev_addr += (sz >> 9);
1847	}
1848	getnstimeofday(&ts2);
1849	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1850	return 0;
1851}
1852
1853/*
1854 * Consecutive write performance by transfer size.
1855 */
1856static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
1857{
1858	struct mmc_test_area *t = &test->area;
1859	unsigned long sz;
1860	int ret;
1861
1862	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1863		ret = mmc_test_seq_write_perf(test, sz);
1864		if (ret)
1865			return ret;
1866	}
1867	sz = t->max_tfr;
1868	return mmc_test_seq_write_perf(test, sz);
1869}
1870
1871/*
1872 * Consecutive trim performance by transfer size.
1873 */
static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	/* Trim (MMC_TRIM_ARG) needs both trim and erase support */
	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	for (sz = 512; sz <= t->max_sz; sz <<= 1) {
		/* Erase then fill so each trim acts on freshly written data */
		ret = mmc_test_area_erase(test);
		if (ret)
			return ret;
		ret = mmc_test_area_fill(test);
		if (ret)
			return ret;
		/* Trim the area as cnt consecutive sz-byte chunks, timed */
		cnt = t->max_sz / sz;
		dev_addr = t->dev_addr;
		getnstimeofday(&ts1);
		for (i = 0; i < cnt; i++) {
			ret = mmc_erase(test->card, dev_addr, sz >> 9,
					MMC_TRIM_ARG);
			if (ret)
				return ret;
			dev_addr += (sz >> 9);
		}
		getnstimeofday(&ts2);
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	}
	return 0;
}
1910
/* State of the simple deterministic LCG below, seeded to 1. */
static unsigned int rnd_next = 1;

/*
 * Return a pseudo-random number uniformly distributed in [0, rnd_cnt).
 */
static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
{
	uint64_t sample;

	rnd_next = rnd_next * 1103515245 + 12345;
	sample = (rnd_next >> 16) & 0x7fff;

	return (sample * rnd_cnt) >> 15;
}
1921
/*
 * Repeat sz-byte transfers at pseudo-random addresses for 10 seconds,
 * then optionally print the average rate achieved.
 */
static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
			     unsigned long sz)
{
	unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
	unsigned int ssz;
	struct timespec ts1, ts2, ts;
	int ret;

	ssz = sz >> 9;	/* transfer size in 512-byte sectors */

	/* Addresses start at a quarter of the card's capacity */
	rnd_addr = mmc_test_capacity(test->card) / 4;
	range1 = rnd_addr / test->card->pref_erase;
	/* presumably the number of sz-sized slots to pick from — confirm */
	range2 = range1 / ssz;

	getnstimeofday(&ts1);
	for (cnt = 0; cnt < UINT_MAX; cnt++) {
		/* Time-bounded loop: stop after 10 seconds of wall time */
		getnstimeofday(&ts2);
		ts = timespec_sub(ts2, ts1);
		if (ts.tv_sec >= 10)
			break;
		/*
		 * Avoid picking the same erase group twice in a row.
		 * NOTE(review): if ea == last_ea == 0 the decrement wraps to
		 * UINT_MAX and dev_addr overflows — confirm this is benign.
		 */
		ea = mmc_test_rnd_num(range1);
		if (ea == last_ea)
			ea -= 1;
		last_ea = ea;
		dev_addr = rnd_addr + test->card->pref_erase * ea +
			   ssz * mmc_test_rnd_num(range2);
		ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
		if (ret)
			return ret;
	}
	if (print)
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}
1956
1957static int mmc_test_random_perf(struct mmc_test_card *test, int write)
1958{
1959	struct mmc_test_area *t = &test->area;
1960	unsigned int next;
1961	unsigned long sz;
1962	int ret;
1963
1964	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1965		/*
1966		 * When writing, try to get more consistent results by running
1967		 * the test twice with exactly the same I/O but outputting the
1968		 * results only for the 2nd run.
1969		 */
1970		if (write) {
1971			next = rnd_next;
1972			ret = mmc_test_rnd_perf(test, write, 0, sz);
1973			if (ret)
1974				return ret;
1975			rnd_next = next;
1976		}
1977		ret = mmc_test_rnd_perf(test, write, 1, sz);
1978		if (ret)
1979			return ret;
1980	}
1981	sz = t->max_tfr;
1982	if (write) {
1983		next = rnd_next;
1984		ret = mmc_test_rnd_perf(test, write, 0, sz);
1985		if (ret)
1986			return ret;
1987		rnd_next = next;
1988	}
1989	return mmc_test_rnd_perf(test, write, 1, sz);
1990}
1991
1992/*
1993 * Random read performance by transfer size.
1994 */
1995static int mmc_test_random_read_perf(struct mmc_test_card *test)
1996{
1997	return mmc_test_random_perf(test, 0);
1998}
1999
2000/*
2001 * Random write performance by transfer size.
2002 */
2003static int mmc_test_random_write_perf(struct mmc_test_card *test)
2004{
2005	return mmc_test_random_perf(test, 1);
2006}
2007
/*
 * Transfer tot_sz bytes as consecutive maximum-sized transfers starting
 * near a quarter of the card's capacity, and print the average rate.
 */
static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
			     unsigned int tot_sz, int max_scatter)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt, sz, ssz;
	struct timespec ts1, ts2;
	int ret;

	sz = t->max_tfr;

	/*
	 * In the case of a maximally scattered transfer, the maximum transfer
	 * size is further limited by using PAGE_SIZE segments.
	 */
	if (max_scatter) {
		unsigned long max_tfr;

		if (t->max_seg_sz >= PAGE_SIZE)
			max_tfr = t->max_segs * PAGE_SIZE;
		else
			max_tfr = t->max_segs * t->max_seg_sz;
		if (sz > max_tfr)
			sz = max_tfr;
	}

	ssz = sz >> 9;	/* transfer size in 512-byte sectors */
	dev_addr = mmc_test_capacity(test->card) / 4;
	/*
	 * Clamp the total to what fits.  NOTE(review): dev_addr << 9 is
	 * evaluated in unsigned int and could overflow for very large
	 * cards — confirm capacity limits make this safe here.
	 */
	if (tot_sz > dev_addr << 9)
		tot_sz = dev_addr << 9;
	cnt = tot_sz / sz;
	dev_addr &= 0xffff0000; /* Round to 64MiB boundary */

	getnstimeofday(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, write,
				       max_scatter, 0);
		if (ret)
			return ret;
		dev_addr += ssz;
	}
	getnstimeofday(&ts2);

	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);

	return 0;
}
2054
/*
 * Repeated maximally-scattered sequential transfers: ten runs of 10MiB,
 * five of 100MiB and three of 1000MiB.
 */
static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
{
	static const unsigned int tot_sz[] = {
		10 * 1024 * 1024, 100 * 1024 * 1024, 1000 * 1024 * 1024
	};
	static const int reps[] = { 10, 5, 3 };
	int ret = 0;
	int step, i;

	for (step = 0; step < 3; step++) {
		for (i = 0; i < reps[step]; i++) {
			ret = mmc_test_seq_perf(test, write,
						tot_sz[step], 1);
			if (ret)
				return ret;
		}
	}

	return ret;
}
2077
2078/*
2079 * Large sequential read performance.
2080 */
2081static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
2082{
2083	return mmc_test_large_seq_perf(test, 0);
2084}
2085
2086/*
2087 * Large sequential write performance.
2088 */
2089static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
2090{
2091	return mmc_test_large_seq_perf(test, 1);
2092}
2093
/*
 * Transfer "size" bytes as a sequence of "reqsize"-byte requests at a
 * rounded address in the second quarter of the card, optionally erasing
 * the region first as directed by tdata->prepare.
 */
static int mmc_test_rw_multiple(struct mmc_test_card *test,
				struct mmc_test_multiple_rw *tdata,
				unsigned int reqsize, unsigned int size,
				int min_sg_len)
{
	unsigned int dev_addr;
	struct mmc_test_area *t = &test->area;
	int ret = 0;

	/* Set up test area */
	if (size > mmc_test_capacity(test->card) / 2 * 512)
		size = mmc_test_capacity(test->card) / 2 * 512;
	if (reqsize > t->max_tfr)
		reqsize = t->max_tfr;
	dev_addr = mmc_test_capacity(test->card) / 4;
	if ((dev_addr & 0xffff0000))
		dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
	else
		dev_addr &= 0xfffff800; /* Round to 1MiB boundary */
	/*
	 * NOTE(review): on this path ret is still 0, so a zero dev_addr is
	 * logged as an error but reported as success — confirm whether an
	 * error code was intended.
	 */
	if (!dev_addr)
		goto err;

	if (reqsize > size)
		return 0;

	/* prepare test area */
	if (mmc_can_erase(test->card) &&
	    tdata->prepare & MMC_TEST_PREP_ERASE) {
		/* Prefer secure erase, falling back to a plain erase */
		ret = mmc_erase(test->card, dev_addr,
				size / 512, MMC_SECURE_ERASE_ARG);
		if (ret)
			ret = mmc_erase(test->card, dev_addr,
					size / 512, MMC_ERASE_ARG);
		if (ret)
			goto err;
	}

	/* Run test */
	ret = mmc_test_area_io_seq(test, reqsize, dev_addr,
				   tdata->do_write, 0, 1, size / reqsize,
				   tdata->do_nonblock_req, min_sg_len);
	if (ret)
		goto err;

	return ret;
 err:
	pr_info("[%s] error\n", __func__);
	return ret;
}
2143
2144static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
2145				     struct mmc_test_multiple_rw *rw)
2146{
2147	int ret = 0;
2148	int i;
2149	void *pre_req = test->card->host->ops->pre_req;
2150	void *post_req = test->card->host->ops->post_req;
2151
2152	if (rw->do_nonblock_req &&
2153	    ((!pre_req && post_req) || (pre_req && !post_req))) {
2154		pr_info("error: only one of pre/post is defined\n");
2155		return -EINVAL;
2156	}
2157
2158	for (i = 0 ; i < rw->len && ret == 0; i++) {
2159		ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0);
2160		if (ret)
2161			break;
2162	}
2163	return ret;
2164}
2165
2166static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test,
2167				       struct mmc_test_multiple_rw *rw)
2168{
2169	int ret = 0;
2170	int i;
2171
2172	for (i = 0 ; i < rw->len && ret == 0; i++) {
2173		ret = mmc_test_rw_multiple(test, rw, 512*1024, rw->size,
2174					   rw->sg_len[i]);
2175		if (ret)
2176			break;
2177	}
2178	return ret;
2179}
2180
2181/*
2182 * Multiple blocking write 4k to 4 MB chunks
2183 */
2184static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test)
2185{
2186	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2187			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2188	struct mmc_test_multiple_rw test_data = {
2189		.bs = bs,
2190		.size = TEST_AREA_MAX_SIZE,
2191		.len = ARRAY_SIZE(bs),
2192		.do_write = true,
2193		.do_nonblock_req = false,
2194		.prepare = MMC_TEST_PREP_ERASE,
2195	};
2196
2197	return mmc_test_rw_multiple_size(test, &test_data);
2198};
2199
2200/*
2201 * Multiple non-blocking write 4k to 4 MB chunks
2202 */
2203static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test)
2204{
2205	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2206			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2207	struct mmc_test_multiple_rw test_data = {
2208		.bs = bs,
2209		.size = TEST_AREA_MAX_SIZE,
2210		.len = ARRAY_SIZE(bs),
2211		.do_write = true,
2212		.do_nonblock_req = true,
2213		.prepare = MMC_TEST_PREP_ERASE,
2214	};
2215
2216	return mmc_test_rw_multiple_size(test, &test_data);
2217}
2218
2219/*
2220 * Multiple blocking read 4k to 4 MB chunks
2221 */
2222static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test)
2223{
2224	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2225			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2226	struct mmc_test_multiple_rw test_data = {
2227		.bs = bs,
2228		.size = TEST_AREA_MAX_SIZE,
2229		.len = ARRAY_SIZE(bs),
2230		.do_write = false,
2231		.do_nonblock_req = false,
2232		.prepare = MMC_TEST_PREP_NONE,
2233	};
2234
2235	return mmc_test_rw_multiple_size(test, &test_data);
2236}
2237
2238/*
2239 * Multiple non-blocking read 4k to 4 MB chunks
2240 */
2241static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test)
2242{
2243	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2244			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2245	struct mmc_test_multiple_rw test_data = {
2246		.bs = bs,
2247		.size = TEST_AREA_MAX_SIZE,
2248		.len = ARRAY_SIZE(bs),
2249		.do_write = false,
2250		.do_nonblock_req = true,
2251		.prepare = MMC_TEST_PREP_NONE,
2252	};
2253
2254	return mmc_test_rw_multiple_size(test, &test_data);
2255}
2256
2257/*
2258 * Multiple blocking write 1 to 512 sg elements
2259 */
2260static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test)
2261{
2262	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2263				 1 << 7, 1 << 8, 1 << 9};
2264	struct mmc_test_multiple_rw test_data = {
2265		.sg_len = sg_len,
2266		.size = TEST_AREA_MAX_SIZE,
2267		.len = ARRAY_SIZE(sg_len),
2268		.do_write = true,
2269		.do_nonblock_req = false,
2270		.prepare = MMC_TEST_PREP_ERASE,
2271	};
2272
2273	return mmc_test_rw_multiple_sg_len(test, &test_data);
2274};
2275
2276/*
2277 * Multiple non-blocking write 1 to 512 sg elements
2278 */
2279static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test)
2280{
2281	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2282				 1 << 7, 1 << 8, 1 << 9};
2283	struct mmc_test_multiple_rw test_data = {
2284		.sg_len = sg_len,
2285		.size = TEST_AREA_MAX_SIZE,
2286		.len = ARRAY_SIZE(sg_len),
2287		.do_write = true,
2288		.do_nonblock_req = true,
2289		.prepare = MMC_TEST_PREP_ERASE,
2290	};
2291
2292	return mmc_test_rw_multiple_sg_len(test, &test_data);
2293}
2294
2295/*
2296 * Multiple blocking read 1 to 512 sg elements
2297 */
2298static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test)
2299{
2300	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2301				 1 << 7, 1 << 8, 1 << 9};
2302	struct mmc_test_multiple_rw test_data = {
2303		.sg_len = sg_len,
2304		.size = TEST_AREA_MAX_SIZE,
2305		.len = ARRAY_SIZE(sg_len),
2306		.do_write = false,
2307		.do_nonblock_req = false,
2308		.prepare = MMC_TEST_PREP_NONE,
2309	};
2310
2311	return mmc_test_rw_multiple_sg_len(test, &test_data);
2312}
2313
2314/*
2315 * Multiple non-blocking read 1 to 512 sg elements
2316 */
2317static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
2318{
2319	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2320				 1 << 7, 1 << 8, 1 << 9};
2321	struct mmc_test_multiple_rw test_data = {
2322		.sg_len = sg_len,
2323		.size = TEST_AREA_MAX_SIZE,
2324		.len = ARRAY_SIZE(sg_len),
2325		.do_write = false,
2326		.do_nonblock_req = true,
2327		.prepare = MMC_TEST_PREP_NONE,
2328	};
2329
2330	return mmc_test_rw_multiple_sg_len(test, &test_data);
2331}
2332
2333/*
2334 * eMMC hardware reset.
2335 */
2336static int mmc_test_hw_reset(struct mmc_test_card *test)
2337{
2338	struct mmc_card *card = test->card;
2339	struct mmc_host *host = card->host;
2340	int err;
2341
2342	err = mmc_hw_reset_check(host);
2343	if (!err)
2344		return RESULT_OK;
2345
2346	if (err == -ENOSYS)
2347		return RESULT_FAIL;
2348
2349	if (err != -EOPNOTSUPP)
2350		return err;
2351
2352	if (!mmc_can_reset(card))
2353		return RESULT_UNSUP_CARD;
2354
2355	return RESULT_UNSUP_HOST;
2356}
2357
/*
 * Table of all available test cases.  Each entry has an optional @prepare
 * and @cleanup hook run around the mandatory @run callback; the 1-based
 * index of an entry is the test number accepted by the debugfs "test" file
 * and listed by "testlist".
 */
static const struct mmc_test_case mmc_test_cases[] = {
	/* Basic correctness tests. */
	{
		.name = "Basic write (no data verification)",
		.run = mmc_test_basic_write,
	},

	{
		.name = "Basic read (no data verification)",
		.run = mmc_test_basic_read,
	},

	{
		.name = "Basic write (with data verification)",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_verify_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Basic read (with data verification)",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_verify_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	/* Unusual transfer sizes and alignments. */
	{
		.name = "Power of two block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_pow2_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Power of two block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_pow2_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_weird_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_weird_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	/* Error-path tests: deliberately failing transfers. */
	{
		.name = "Correct xfer_size at write (start failure)",
		.run = mmc_test_xfersize_write,
	},

	{
		.name = "Correct xfer_size at read (start failure)",
		.run = mmc_test_xfersize_read,
	},

	{
		.name = "Correct xfer_size at write (midway failure)",
		.run = mmc_test_multi_xfersize_write,
	},

	{
		.name = "Correct xfer_size at read (midway failure)",
		.run = mmc_test_multi_xfersize_read,
	},

	/* Highmem tests; stubbed out when the kernel has no highmem. */
#ifdef CONFIG_HIGHMEM

	{
		.name = "Highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_read_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read_high,
		.cleanup = mmc_test_cleanup,
	},

#else

	{
		.name = "Highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Highmem read",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem read",
		.run = mmc_test_no_highmem,
	},

#endif /* CONFIG_HIGHMEM */

	/* Performance tests using the dedicated test area. */
	{
		.name = "Best-case read performance",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case read performance into scattered pages",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance from scattered pages",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single trim performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_seq_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive trim performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Random read performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_random_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Random write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_random_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Large sequential read into scattered pages",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_large_seq_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Large sequential write from scattered pages",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_large_seq_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	/* Blocking vs non-blocking (async) request performance. */
	{
		.name = "Write performance with blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_write_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance with non-blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_write_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance with blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_read_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance with non-blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_read_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_wr_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance non-blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_wr_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_r_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance non-blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_r_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "eMMC hardware reset",
		.run = mmc_test_hw_reset,
	},
};
2686
/* Serialises test runs and protects the global result and debugfs lists. */
static DEFINE_MUTEX(mmc_test_lock);

/* Results of completed runs, as struct mmc_test_general_result entries. */
static LIST_HEAD(mmc_test_result);
2690
2691static void mmc_test_run(struct mmc_test_card *test, int testcase)
2692{
2693	int i, ret;
2694
2695	pr_info("%s: Starting tests of card %s...\n",
2696		mmc_hostname(test->card->host), mmc_card_id(test->card));
2697
2698	mmc_claim_host(test->card->host);
2699
2700	for (i = 0;i < ARRAY_SIZE(mmc_test_cases);i++) {
2701		struct mmc_test_general_result *gr;
2702
2703		if (testcase && ((i + 1) != testcase))
2704			continue;
2705
2706		pr_info("%s: Test case %d. %s...\n",
2707			mmc_hostname(test->card->host), i + 1,
2708			mmc_test_cases[i].name);
2709
2710		if (mmc_test_cases[i].prepare) {
2711			ret = mmc_test_cases[i].prepare(test);
2712			if (ret) {
2713				pr_info("%s: Result: Prepare "
2714					"stage failed! (%d)\n",
2715					mmc_hostname(test->card->host),
2716					ret);
2717				continue;
2718			}
2719		}
2720
2721		gr = kzalloc(sizeof(struct mmc_test_general_result),
2722			GFP_KERNEL);
2723		if (gr) {
2724			INIT_LIST_HEAD(&gr->tr_lst);
2725
2726			/* Assign data what we know already */
2727			gr->card = test->card;
2728			gr->testcase = i;
2729
2730			/* Append container to global one */
2731			list_add_tail(&gr->link, &mmc_test_result);
2732
2733			/*
2734			 * Save the pointer to created container in our private
2735			 * structure.
2736			 */
2737			test->gr = gr;
2738		}
2739
2740		ret = mmc_test_cases[i].run(test);
2741		switch (ret) {
2742		case RESULT_OK:
2743			pr_info("%s: Result: OK\n",
2744				mmc_hostname(test->card->host));
2745			break;
2746		case RESULT_FAIL:
2747			pr_info("%s: Result: FAILED\n",
2748				mmc_hostname(test->card->host));
2749			break;
2750		case RESULT_UNSUP_HOST:
2751			pr_info("%s: Result: UNSUPPORTED "
2752				"(by host)\n",
2753				mmc_hostname(test->card->host));
2754			break;
2755		case RESULT_UNSUP_CARD:
2756			pr_info("%s: Result: UNSUPPORTED "
2757				"(by card)\n",
2758				mmc_hostname(test->card->host));
2759			break;
2760		default:
2761			pr_info("%s: Result: ERROR (%d)\n",
2762				mmc_hostname(test->card->host), ret);
2763		}
2764
2765		/* Save the result */
2766		if (gr)
2767			gr->result = ret;
2768
2769		if (mmc_test_cases[i].cleanup) {
2770			ret = mmc_test_cases[i].cleanup(test);
2771			if (ret) {
2772				pr_info("%s: Warning: Cleanup "
2773					"stage failed! (%d)\n",
2774					mmc_hostname(test->card->host),
2775					ret);
2776			}
2777		}
2778	}
2779
2780	mmc_release_host(test->card->host);
2781
2782	pr_info("%s: Tests completed.\n",
2783		mmc_hostname(test->card->host));
2784}
2785
2786static void mmc_test_free_result(struct mmc_card *card)
2787{
2788	struct mmc_test_general_result *gr, *grs;
2789
2790	mutex_lock(&mmc_test_lock);
2791
2792	list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
2793		struct mmc_test_transfer_result *tr, *trs;
2794
2795		if (card && gr->card != card)
2796			continue;
2797
2798		list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
2799			list_del(&tr->link);
2800			kfree(tr);
2801		}
2802
2803		list_del(&gr->link);
2804		kfree(gr);
2805	}
2806
2807	mutex_unlock(&mmc_test_lock);
2808}
2809
/* Debugfs files created by this driver, as struct mmc_test_dbgfs_file. */
static LIST_HEAD(mmc_test_file_test);
2811
/*
 * seq_file show callback for the debugfs "test" file: print the saved
 * result of every test case run on this card, followed by one line per
 * recorded transfer measurement.
 */
static int mtf_test_show(struct seq_file *sf, void *data)
{
	struct mmc_card *card = (struct mmc_card *)sf->private;
	struct mmc_test_general_result *gr;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry(gr, &mmc_test_result, link) {
		struct mmc_test_transfer_result *tr;

		/* The result list is global; report only this card's runs. */
		if (gr->card != card)
			continue;

		seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);

		/* count sectors time rate iops (iops stored as ops/s * 100) */
		list_for_each_entry(tr, &gr->tr_lst, link) {
			seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n",
				tr->count, tr->sectors,
				(unsigned long)tr->ts.tv_sec,
				(unsigned long)tr->ts.tv_nsec,
				tr->rate, tr->iops / 100, tr->iops % 100);
		}
	}

	mutex_unlock(&mmc_test_lock);

	return 0;
}
2840
/* Open the "test" debugfs file; i_private carries the mmc_card pointer. */
static int mtf_test_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtf_test_show, inode->i_private);
}
2845
2846static ssize_t mtf_test_write(struct file *file, const char __user *buf,
2847	size_t count, loff_t *pos)
2848{
2849	struct seq_file *sf = (struct seq_file *)file->private_data;
2850	struct mmc_card *card = (struct mmc_card *)sf->private;
2851	struct mmc_test_card *test;
2852	char lbuf[12];
2853	long testcase;
2854
2855	if (count >= sizeof(lbuf))
2856		return -EINVAL;
2857
2858	if (copy_from_user(lbuf, buf, count))
2859		return -EFAULT;
2860	lbuf[count] = '\0';
2861
2862	if (strict_strtol(lbuf, 10, &testcase))
2863		return -EINVAL;
2864
2865	test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
2866	if (!test)
2867		return -ENOMEM;
2868
2869	/*
2870	 * Remove all test cases associated with given card. Thus we have only
2871	 * actual data of the last run.
2872	 */
2873	mmc_test_free_result(card);
2874
2875	test->card = card;
2876
2877	test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
2878#ifdef CONFIG_HIGHMEM
2879	test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
2880#endif
2881
2882#ifdef CONFIG_HIGHMEM
2883	if (test->buffer && test->highmem) {
2884#else
2885	if (test->buffer) {
2886#endif
2887		mutex_lock(&mmc_test_lock);
2888		mmc_test_run(test, testcase);
2889		mutex_unlock(&mmc_test_lock);
2890	}
2891
2892#ifdef CONFIG_HIGHMEM
2893	__free_pages(test->highmem, BUFFER_ORDER);
2894#endif
2895	kfree(test->buffer);
2896	kfree(test);
2897
2898	return count;
2899}
2900
/* File operations for the read/write debugfs "test" control file. */
static const struct file_operations mmc_test_fops_test = {
	.open		= mtf_test_open,
	.read		= seq_read,
	.write		= mtf_test_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};
2908
2909static int mtf_testlist_show(struct seq_file *sf, void *data)
2910{
2911	int i;
2912
2913	mutex_lock(&mmc_test_lock);
2914
2915	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
2916		seq_printf(sf, "%d:\t%s\n", i+1, mmc_test_cases[i].name);
2917
2918	mutex_unlock(&mmc_test_lock);
2919
2920	return 0;
2921}
2922
/* Open the "testlist" debugfs file; i_private carries the mmc_card pointer. */
static int mtf_testlist_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtf_testlist_show, inode->i_private);
}
2927
/* File operations for the read-only debugfs "testlist" file. */
static const struct file_operations mmc_test_fops_testlist = {
	.open		= mtf_testlist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
2934
2935static void mmc_test_free_dbgfs_file(struct mmc_card *card)
2936{
2937	struct mmc_test_dbgfs_file *df, *dfs;
2938
2939	mutex_lock(&mmc_test_lock);
2940
2941	list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
2942		if (card && df->card != card)
2943			continue;
2944		debugfs_remove(df->file);
2945		list_del(&df->link);
2946		kfree(df);
2947	}
2948
2949	mutex_unlock(&mmc_test_lock);
2950}
2951
2952static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
2953	const char *name, umode_t mode, const struct file_operations *fops)
2954{
2955	struct dentry *file = NULL;
2956	struct mmc_test_dbgfs_file *df;
2957
2958	if (card->debugfs_root)
2959		file = debugfs_create_file(name, mode, card->debugfs_root,
2960			card, fops);
2961
2962	if (IS_ERR_OR_NULL(file)) {
2963		dev_err(&card->dev,
2964			"Can't create %s. Perhaps debugfs is disabled.\n",
2965			name);
2966		return -ENODEV;
2967	}
2968
2969	df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL);
2970	if (!df) {
2971		debugfs_remove(file);
2972		dev_err(&card->dev,
2973			"Can't allocate memory for internal usage.\n");
2974		return -ENOMEM;
2975	}
2976
2977	df->card = card;
2978	df->file = file;
2979
2980	list_add(&df->link, &mmc_test_file_test);
2981	return 0;
2982}
2983
2984static int mmc_test_register_dbgfs_file(struct mmc_card *card)
2985{
2986	int ret;
2987
2988	mutex_lock(&mmc_test_lock);
2989
2990	ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO,
2991		&mmc_test_fops_test);
2992	if (ret)
2993		goto err;
2994
2995	ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO,
2996		&mmc_test_fops_testlist);
2997	if (ret)
2998		goto err;
2999
3000err:
3001	mutex_unlock(&mmc_test_lock);
3002
3003	return ret;
3004}
3005
3006static int mmc_test_probe(struct mmc_card *card)
3007{
3008	int ret;
3009
3010	if (!mmc_card_mmc(card) && !mmc_card_sd(card))
3011		return -ENODEV;
3012
3013	ret = mmc_test_register_dbgfs_file(card);
3014	if (ret)
3015		return ret;
3016
3017	dev_info(&card->dev, "Card claimed for testing.\n");
3018
3019	return 0;
3020}
3021
/* Bus remove: drop saved results and debugfs files for the departing card. */
static void mmc_test_remove(struct mmc_card *card)
{
	mmc_test_free_result(card);
	mmc_test_free_dbgfs_file(card);
}
3027
/* MMC bus driver binding probe/remove for this test module. */
static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmc_test",
	},
	.probe		= mmc_test_probe,
	.remove		= mmc_test_remove,
};
3035
/* Module init: register the driver on the MMC bus. */
static int __init mmc_test_init(void)
{
	return mmc_register_driver(&mmc_driver);
}
3040
/* Module exit: free global state, then unregister from the MMC bus. */
static void __exit mmc_test_exit(void)
{
	/* Clear stalled data if card is still plugged */
	mmc_test_free_result(NULL);
	mmc_test_free_dbgfs_file(NULL);

	mmc_unregister_driver(&mmc_driver);
}
3049
3050module_init(mmc_test_init);
3051module_exit(mmc_test_exit);
3052
3053MODULE_LICENSE("GPL");
3054MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
3055MODULE_AUTHOR("Pierre Ossman");
v3.1
   1/*
   2 *  linux/drivers/mmc/card/mmc_test.c
   3 *
   4 *  Copyright 2007-2008 Pierre Ossman
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License as published by
   8 * the Free Software Foundation; either version 2 of the License, or (at
   9 * your option) any later version.
  10 */
  11
  12#include <linux/mmc/core.h>
  13#include <linux/mmc/card.h>
  14#include <linux/mmc/host.h>
  15#include <linux/mmc/mmc.h>
  16#include <linux/slab.h>
  17
  18#include <linux/scatterlist.h>
  19#include <linux/swap.h>		/* For nr_free_buffer_pages() */
  20#include <linux/list.h>
  21
  22#include <linux/debugfs.h>
  23#include <linux/uaccess.h>
  24#include <linux/seq_file.h>
 
  25
  26#define RESULT_OK		0
  27#define RESULT_FAIL		1
  28#define RESULT_UNSUP_HOST	2
  29#define RESULT_UNSUP_CARD	3
  30
  31#define BUFFER_ORDER		2
  32#define BUFFER_SIZE		(PAGE_SIZE << BUFFER_ORDER)
  33
  34/*
  35 * Limit the test area size to the maximum MMC HC erase group size.  Note that
  36 * the maximum SD allocation unit size is just 4MiB.
  37 */
  38#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)
  39
  40/**
  41 * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
  42 * @page: first page in the allocation
  43 * @order: order of the number of pages allocated
  44 */
  45struct mmc_test_pages {
  46	struct page *page;
  47	unsigned int order;
  48};
  49
  50/**
  51 * struct mmc_test_mem - allocated memory.
  52 * @arr: array of allocations
  53 * @cnt: number of allocations
  54 */
  55struct mmc_test_mem {
  56	struct mmc_test_pages *arr;
  57	unsigned int cnt;
  58};
  59
  60/**
  61 * struct mmc_test_area - information for performance tests.
  62 * @max_sz: test area size (in bytes)
  63 * @dev_addr: address on card at which to do performance tests
  64 * @max_tfr: maximum transfer size allowed by driver (in bytes)
  65 * @max_segs: maximum segments allowed by driver in scatterlist @sg
  66 * @max_seg_sz: maximum segment size allowed by driver
  67 * @blocks: number of (512 byte) blocks currently mapped by @sg
  68 * @sg_len: length of currently mapped scatterlist @sg
  69 * @mem: allocated memory
  70 * @sg: scatterlist
  71 */
  72struct mmc_test_area {
  73	unsigned long max_sz;
  74	unsigned int dev_addr;
  75	unsigned int max_tfr;
  76	unsigned int max_segs;
  77	unsigned int max_seg_sz;
  78	unsigned int blocks;
  79	unsigned int sg_len;
  80	struct mmc_test_mem *mem;
  81	struct scatterlist *sg;
  82};
  83
  84/**
  85 * struct mmc_test_transfer_result - transfer results for performance tests.
  86 * @link: double-linked list
  87 * @count: amount of group of sectors to check
  88 * @sectors: amount of sectors to check in one group
  89 * @ts: time values of transfer
  90 * @rate: calculated transfer rate
  91 * @iops: I/O operations per second (times 100)
  92 */
  93struct mmc_test_transfer_result {
  94	struct list_head link;
  95	unsigned int count;
  96	unsigned int sectors;
  97	struct timespec ts;
  98	unsigned int rate;
  99	unsigned int iops;
 100};
 101
 102/**
 103 * struct mmc_test_general_result - results for tests.
 104 * @link: double-linked list
 105 * @card: card under test
 106 * @testcase: number of test case
 107 * @result: result of test run
 108 * @tr_lst: transfer measurements if any as mmc_test_transfer_result
 109 */
 110struct mmc_test_general_result {
 111	struct list_head link;
 112	struct mmc_card *card;
 113	int testcase;
 114	int result;
 115	struct list_head tr_lst;
 116};
 117
 118/**
 119 * struct mmc_test_dbgfs_file - debugfs related file.
 120 * @link: double-linked list
 121 * @card: card under test
 122 * @file: file created under debugfs
 123 */
 124struct mmc_test_dbgfs_file {
 125	struct list_head link;
 126	struct mmc_card *card;
 127	struct dentry *file;
 128};
 129
 130/**
 131 * struct mmc_test_card - test information.
 132 * @card: card under test
 133 * @scratch: transfer buffer
 134 * @buffer: transfer buffer
 135 * @highmem: buffer for highmem tests
 136 * @area: information for performance tests
 137 * @gr: pointer to results of current testcase
 138 */
 139struct mmc_test_card {
 140	struct mmc_card	*card;
 141
 142	u8		scratch[BUFFER_SIZE];
 143	u8		*buffer;
 144#ifdef CONFIG_HIGHMEM
 145	struct page	*highmem;
 146#endif
 147	struct mmc_test_area		area;
 148	struct mmc_test_general_result	*gr;
 149};
 150
/* How the test area is prepared before a multiple read/write profile run. */
enum mmc_test_prep_media {
	MMC_TEST_PREP_NONE = 0,
	MMC_TEST_PREP_WRITE_FULL = 1 << 0,
	MMC_TEST_PREP_ERASE = 1 << 1,
};

/*
 * Parameters for a multiple read/write profiling run.  @len entries of
 * @sg_len (scatterlist lengths) or @bs (presumably transfer/block sizes —
 * TODO confirm against the *_rw_multiple_* callers) are each measured over
 * @size bytes.
 */
struct mmc_test_multiple_rw {
	unsigned int *sg_len;
	unsigned int *bs;
	unsigned int len;
	unsigned int size;
	bool do_write;		/* write (true) or read (false) */
	bool do_nonblock_req;	/* use the asynchronous request API */
	enum mmc_test_prep_media prepare;
};

/* Pairs an asynchronous mmc request with the test card it belongs to. */
struct mmc_test_async_req {
	struct mmc_async_req areq;
	struct mmc_test_card *test;
};
 171
 172/*******************************************************************/
 173/*  General helper functions                                       */
 174/*******************************************************************/
 175
/*
 * Configure correct block size in card.
 *
 * Thin wrapper around mmc_set_blocklen(); returns its error code.
 */
static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
{
	return mmc_set_blocklen(test->card, size);
}
 183
/*
 * Fill in the mmc_request structure given a set of transfer parameters.
 * @mrq must arrive with cmd/data/stop all wired up; @sg maps the data.
 */
static void mmc_test_prepare_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
	unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
{
	BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop);

	/* Pick the single- or multi-block variant of the read/write command. */
	if (blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
	} else {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
	}

	mrq->cmd->arg = dev_addr;
	/* Byte-addressed cards take the argument in bytes, not sectors. */
	if (!mmc_card_blockaddr(test->card))
		mrq->cmd->arg <<= 9;

	mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	/* Single-block transfers need no stop command (CMD12). */
	if (blocks == 1)
		mrq->stop = NULL;
	else {
		mrq->stop->opcode = MMC_STOP_TRANSMISSION;
		mrq->stop->arg = 0;
		mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	}

	mrq->data->blksz = blksz;
	mrq->data->blocks = blocks;
	mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	mrq->data->sg = sg;
	mrq->data->sg_len = sg_len;

	mmc_set_data_timeout(mrq->data, test->card);
}
 223
 224static int mmc_test_busy(struct mmc_command *cmd)
 225{
 226	return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
 227		(R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);
 228}
 229
 230/*
 231 * Wait for the card to finish the busy state
 232 */
 233static int mmc_test_wait_busy(struct mmc_test_card *test)
 234{
 235	int ret, busy;
 236	struct mmc_command cmd = {0};
 237
 238	busy = 0;
 239	do {
 240		memset(&cmd, 0, sizeof(struct mmc_command));
 241
 242		cmd.opcode = MMC_SEND_STATUS;
 243		cmd.arg = test->card->rca << 16;
 244		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 245
 246		ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
 247		if (ret)
 248			break;
 249
 250		if (!busy && mmc_test_busy(&cmd)) {
 251			busy = 1;
 252			if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
 253				printk(KERN_INFO "%s: Warning: Host did not "
 254					"wait for busy state to end.\n",
 255					mmc_hostname(test->card->host));
 256		}
 257	} while (mmc_test_busy(&cmd));
 258
 259	return ret;
 260}
 261
/*
 * Transfer a single sector of kernel addressable data.
 *
 * Issues one @blksz-byte read or write at card address @addr using @buffer,
 * then waits for the card to leave the busy (programming) state.
 * Returns 0 on success or a command/data error code.
 */
static int mmc_test_buffer_transfer(struct mmc_test_card *test,
	u8 *buffer, unsigned addr, unsigned blksz, int write)
{
	int ret;

	struct mmc_request mrq = {0};
	struct mmc_command cmd = {0};
	struct mmc_command stop = {0};
	struct mmc_data data = {0};

	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	/* One scatterlist entry covering the whole buffer. */
	sg_init_one(&sg, buffer, blksz);

	mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	ret = mmc_test_wait_busy(test);
	if (ret)
		return ret;

	return 0;
}
 298
 299static void mmc_test_free_mem(struct mmc_test_mem *mem)
 300{
 301	if (!mem)
 302		return;
 303	while (mem->cnt--)
 304		__free_pages(mem->arr[mem->cnt].page,
 305			     mem->arr[mem->cnt].order);
 306	kfree(mem->arr);
 307	kfree(mem);
 308}
 309
 310/*
 311 * Allocate a lot of memory, preferably max_sz but at least min_sz.  In case
 312 * there isn't much memory do not exceed 1/16th total lowmem pages.  Also do
 313 * not exceed a maximum number of segments and try not to make segments much
 314 * bigger than maximum segment size.
 315 */
 316static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
 317					       unsigned long max_sz,
 318					       unsigned int max_segs,
 319					       unsigned int max_seg_sz)
 320{
 321	unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
 322	unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
 323	unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
 324	unsigned long page_cnt = 0;
 325	unsigned long limit = nr_free_buffer_pages() >> 4;
 326	struct mmc_test_mem *mem;
 327
 328	if (max_page_cnt > limit)
 329		max_page_cnt = limit;
 330	if (min_page_cnt > max_page_cnt)
 331		min_page_cnt = max_page_cnt;
 332
 333	if (max_seg_page_cnt > max_page_cnt)
 334		max_seg_page_cnt = max_page_cnt;
 335
 336	if (max_segs > max_page_cnt)
 337		max_segs = max_page_cnt;
 338
 339	mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
 340	if (!mem)
 341		return NULL;
 342
 343	mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_segs,
 344			   GFP_KERNEL);
 345	if (!mem->arr)
 346		goto out_free;
 347
 348	while (max_page_cnt) {
 349		struct page *page;
 350		unsigned int order;
 351		gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
 352				__GFP_NORETRY;
 353
 354		order = get_order(max_seg_page_cnt << PAGE_SHIFT);
 355		while (1) {
 356			page = alloc_pages(flags, order);
 357			if (page || !order)
 358				break;
 359			order -= 1;
 360		}
 361		if (!page) {
 362			if (page_cnt < min_page_cnt)
 363				goto out_free;
 364			break;
 365		}
 366		mem->arr[mem->cnt].page = page;
 367		mem->arr[mem->cnt].order = order;
 368		mem->cnt += 1;
 369		if (max_page_cnt <= (1UL << order))
 370			break;
 371		max_page_cnt -= 1UL << order;
 372		page_cnt += 1UL << order;
 373		if (mem->cnt >= max_segs) {
 374			if (page_cnt < min_page_cnt)
 375				goto out_free;
 376			break;
 377		}
 378	}
 379
 380	return mem;
 381
 382out_free:
 383	mmc_test_free_mem(mem);
 384	return NULL;
 385}
 386
/*
 * Map memory into a scatterlist.  Optionally allow the same memory to be
 * mapped more than once.
 *
 * Fills @sglist with up to @max_segs entries covering @size bytes from
 * @mem, each entry at most @max_seg_sz bytes.  With @repeat, the memory is
 * cycled through again until @size is covered.  @min_sg_len, when non-zero,
 * shrinks segments so at least that many entries are produced.  The number
 * of entries used is returned in *@sg_len; -EINVAL if @size cannot be
 * mapped within the limits.
 */
static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size,
			   struct scatterlist *sglist, int repeat,
			   unsigned int max_segs, unsigned int max_seg_sz,
			   unsigned int *sg_len, int min_sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i;
	unsigned long sz = size;

	sg_init_table(sglist, max_segs);
	if (min_sg_len > max_segs)
		min_sg_len = max_segs;

	*sg_len = 0;
	do {
		for (i = 0; i < mem->cnt; i++) {
			unsigned long len = PAGE_SIZE << mem->arr[i].order;

			/* Shrink entries so at least min_sg_len are used. */
			if (min_sg_len && (size / min_sg_len < len))
				len = ALIGN(size / min_sg_len, 512);
			if (len > sz)
				len = sz;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			/* Ran out of scatterlist entries. */
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, mem->arr[i].page, len, 0);
			sz -= len;
			*sg_len += 1;
			if (!sz)
				break;
		}
	} while (sz && repeat);

	/* Memory exhausted before the requested size was mapped. */
	if (sz)
		return -EINVAL;

	if (sg)
		sg_mark_end(sg);

	return 0;
}
 437
/*
 * Map memory into a scatterlist so that no pages are contiguous.  Allow the
 * same memory to be mapped more than once.
 *
 * Walks the allocations and their pages backwards, skipping any page that
 * would be physically adjacent to the previously mapped one, to force the
 * worst-case (maximally scattered) DMA layout.  Entries are at most
 * PAGE_SIZE (further limited by @max_seg_sz); *@sg_len returns the count.
 */
static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
				       unsigned long sz,
				       struct scatterlist *sglist,
				       unsigned int max_segs,
				       unsigned int max_seg_sz,
				       unsigned int *sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i = mem->cnt, cnt;
	unsigned long len;
	void *base, *addr, *last_addr = NULL;

	sg_init_table(sglist, max_segs);

	*sg_len = 0;
	while (sz) {
		base = page_address(mem->arr[--i].page);
		cnt = 1 << mem->arr[i].order;
		while (sz && cnt) {
			addr = base + PAGE_SIZE * --cnt;
			/* Skip pages adjacent to the last mapped one. */
			if (last_addr && last_addr + PAGE_SIZE == addr)
				continue;
			last_addr = addr;
			len = PAGE_SIZE;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (len > sz)
				len = sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			/* Ran out of scatterlist entries. */
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, virt_to_page(addr), len, 0);
			sz -= len;
			*sg_len += 1;
		}
		/* Wrap around so the same memory may be mapped again. */
		if (i == 0)
			i = mem->cnt;
	}

	if (sg)
		sg_mark_end(sg);

	return 0;
}
 489
 490/*
 491 * Calculate transfer rate in bytes per second.
 492 */
 493static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
 494{
 495	uint64_t ns;
 496
 497	ns = ts->tv_sec;
 498	ns *= 1000000000;
 499	ns += ts->tv_nsec;
 500
 501	bytes *= 1000000000;
 502
 503	while (ns > UINT_MAX) {
 504		bytes >>= 1;
 505		ns >>= 1;
 506	}
 507
 508	if (!ns)
 509		return 0;
 510
 511	do_div(bytes, (uint32_t)ns);
 512
 513	return bytes;
 514}
 515
 516/*
 517 * Save transfer results for future usage
 518 */
 519static void mmc_test_save_transfer_result(struct mmc_test_card *test,
 520	unsigned int count, unsigned int sectors, struct timespec ts,
 521	unsigned int rate, unsigned int iops)
 522{
 523	struct mmc_test_transfer_result *tr;
 524
 525	if (!test->gr)
 526		return;
 527
 528	tr = kmalloc(sizeof(struct mmc_test_transfer_result), GFP_KERNEL);
 529	if (!tr)
 530		return;
 531
 532	tr->count = count;
 533	tr->sectors = sectors;
 534	tr->ts = ts;
 535	tr->rate = rate;
 536	tr->iops = iops;
 537
 538	list_add_tail(&tr->link, &test->gr->tr_lst);
 539}
 540
/*
 * Print the transfer rate.
 *
 * Logs throughput for a single transfer of @bytes between *@ts1 and *@ts2
 * and records it via mmc_test_save_transfer_result().  @iops is scaled by
 * 100 to keep two decimal places in integer arithmetic.
 */
static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
				struct timespec *ts1, struct timespec *ts2)
{
	unsigned int rate, iops, sectors = bytes >> 9;
	struct timespec ts;

	ts = timespec_sub(*ts2, *ts1);

	rate = mmc_test_rate(bytes, &ts);
	iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */

	printk(KERN_INFO "%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
			 "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
			 mmc_hostname(test->card->host), sectors, sectors >> 1,
			 (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
			 (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024,
			 iops / 100, iops % 100);

	mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
}
 564
 565/*
 566 * Print the average transfer rate.
 567 */
 568static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
 569				    unsigned int count, struct timespec *ts1,
 570				    struct timespec *ts2)
 571{
 572	unsigned int rate, iops, sectors = bytes >> 9;
 573	uint64_t tot = bytes * count;
 574	struct timespec ts;
 575
 576	ts = timespec_sub(*ts2, *ts1);
 577
 578	rate = mmc_test_rate(tot, &ts);
 579	iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */
 580
 581	printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
 582			 "%lu.%09lu seconds (%u kB/s, %u KiB/s, "
 583			 "%u.%02u IOPS, sg_len %d)\n",
 584			 mmc_hostname(test->card->host), count, sectors, count,
 585			 sectors >> 1, (sectors & 1 ? ".5" : ""),
 586			 (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
 587			 rate / 1000, rate / 1024, iops / 100, iops % 100,
 588			 test->area.sg_len);
 589
 590	mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
 591}
 592
 593/*
 594 * Return the card size in sectors.
 595 */
 596static unsigned int mmc_test_capacity(struct mmc_card *card)
 597{
 598	if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
 599		return card->ext_csd.sectors;
 600	else
 601		return card->csd.capacity << (card->csd.read_blkbits - 9);
 602}
 603
 604/*******************************************************************/
 605/*  Test preparation and cleanup                                   */
 606/*******************************************************************/
 607
 608/*
 609 * Fill the first couple of sectors of the card with known data
 610 * so that bad reads/writes can be detected
 611 */
 612static int __mmc_test_prepare(struct mmc_test_card *test, int write)
 613{
 614	int ret, i;
 615
 616	ret = mmc_test_set_blksize(test, 512);
 617	if (ret)
 618		return ret;
 619
 620	if (write)
 621		memset(test->buffer, 0xDF, 512);
 622	else {
 623		for (i = 0;i < 512;i++)
 624			test->buffer[i] = i;
 625	}
 626
 627	for (i = 0;i < BUFFER_SIZE / 512;i++) {
 628		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
 629		if (ret)
 630			return ret;
 631	}
 632
 633	return 0;
 634}
 635
 636static int mmc_test_prepare_write(struct mmc_test_card *test)
 637{
 638	return __mmc_test_prepare(test, 1);
 639}
 640
 641static int mmc_test_prepare_read(struct mmc_test_card *test)
 642{
 643	return __mmc_test_prepare(test, 0);
 644}
 645
 646static int mmc_test_cleanup(struct mmc_test_card *test)
 647{
 648	int ret, i;
 649
 650	ret = mmc_test_set_blksize(test, 512);
 651	if (ret)
 652		return ret;
 653
 654	memset(test->buffer, 0, 512);
 655
 656	for (i = 0;i < BUFFER_SIZE / 512;i++) {
 657		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
 658		if (ret)
 659			return ret;
 660	}
 661
 662	return 0;
 663}
 664
 665/*******************************************************************/
 666/*  Test execution helpers                                         */
 667/*******************************************************************/
 668
 669/*
 670 * Modifies the mmc_request to perform the "short transfer" tests
 671 */
 672static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
 673	struct mmc_request *mrq, int write)
 674{
 675	BUG_ON(!mrq || !mrq->cmd || !mrq->data);
 676
 677	if (mrq->data->blocks > 1) {
 678		mrq->cmd->opcode = write ?
 679			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
 680		mrq->stop = NULL;
 681	} else {
 682		mrq->cmd->opcode = MMC_SEND_STATUS;
 683		mrq->cmd->arg = test->card->rca << 16;
 684	}
 685}
 686
 687/*
 688 * Checks that a normal transfer didn't have any errors
 689 */
 690static int mmc_test_check_result(struct mmc_test_card *test,
 691				 struct mmc_request *mrq)
 692{
 693	int ret;
 694
 695	BUG_ON(!mrq || !mrq->cmd || !mrq->data);
 696
 697	ret = 0;
 698
 699	if (!ret && mrq->cmd->error)
 700		ret = mrq->cmd->error;
 701	if (!ret && mrq->data->error)
 702		ret = mrq->data->error;
 703	if (!ret && mrq->stop && mrq->stop->error)
 704		ret = mrq->stop->error;
 705	if (!ret && mrq->data->bytes_xfered !=
 706		mrq->data->blocks * mrq->data->blksz)
 707		ret = RESULT_FAIL;
 708
 709	if (ret == -EINVAL)
 710		ret = RESULT_UNSUP_HOST;
 711
 712	return ret;
 713}
 714
 715static int mmc_test_check_result_async(struct mmc_card *card,
 716				       struct mmc_async_req *areq)
 717{
 718	struct mmc_test_async_req *test_async =
 719		container_of(areq, struct mmc_test_async_req, areq);
 720
 721	mmc_test_wait_busy(test_async->test);
 722
 723	return mmc_test_check_result(test_async->test, areq->mrq);
 724}
 725
 726/*
 727 * Checks that a "short transfer" behaved as expected
 728 */
 729static int mmc_test_check_broken_result(struct mmc_test_card *test,
 730	struct mmc_request *mrq)
 731{
 732	int ret;
 733
 734	BUG_ON(!mrq || !mrq->cmd || !mrq->data);
 735
 736	ret = 0;
 737
 738	if (!ret && mrq->cmd->error)
 739		ret = mrq->cmd->error;
 740	if (!ret && mrq->data->error == 0)
 741		ret = RESULT_FAIL;
 742	if (!ret && mrq->data->error != -ETIMEDOUT)
 743		ret = mrq->data->error;
 744	if (!ret && mrq->stop && mrq->stop->error)
 745		ret = mrq->stop->error;
 746	if (mrq->data->blocks > 1) {
 747		if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
 748			ret = RESULT_FAIL;
 749	} else {
 750		if (!ret && mrq->data->bytes_xfered > 0)
 751			ret = RESULT_FAIL;
 752	}
 753
 754	if (ret == -EINVAL)
 755		ret = RESULT_UNSUP_HOST;
 756
 757	return ret;
 758}
 759
 760/*
 761 * Tests nonblock transfer with certain parameters
 762 */
 763static void mmc_test_nonblock_reset(struct mmc_request *mrq,
 764				    struct mmc_command *cmd,
 765				    struct mmc_command *stop,
 766				    struct mmc_data *data)
 767{
 768	memset(mrq, 0, sizeof(struct mmc_request));
 769	memset(cmd, 0, sizeof(struct mmc_command));
 770	memset(data, 0, sizeof(struct mmc_data));
 771	memset(stop, 0, sizeof(struct mmc_command));
 772
 773	mrq->cmd = cmd;
 774	mrq->data = data;
 775	mrq->stop = stop;
 776}
/*
 * Tests non-blocking transfers: issue 'count' back-to-back requests of
 * 'blocks' x 'blksz' bytes starting at 'dev_addr', ping-ponging between
 * two request structures so the next request is prepared while the
 * previous one is still in flight.
 */
static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
				      struct scatterlist *sg, unsigned sg_len,
				      unsigned dev_addr, unsigned blocks,
				      unsigned blksz, int write, int count)
{
	struct mmc_request mrq1;
	struct mmc_command cmd1;
	struct mmc_command stop1;
	struct mmc_data data1;

	struct mmc_request mrq2;
	struct mmc_command cmd2;
	struct mmc_command stop2;
	struct mmc_data data2;

	struct mmc_test_async_req test_areq[2];
	struct mmc_async_req *done_areq;
	struct mmc_async_req *cur_areq = &test_areq[0].areq;
	struct mmc_async_req *other_areq = &test_areq[1].areq;
	int i;
	int ret;

	test_areq[0].test = test;
	test_areq[1].test = test;

	mmc_test_nonblock_reset(&mrq1, &cmd1, &stop1, &data1);
	mmc_test_nonblock_reset(&mrq2, &cmd2, &stop2, &data2);

	cur_areq->mrq = &mrq1;
	cur_areq->err_check = mmc_test_check_result_async;
	other_areq->mrq = &mrq2;
	other_areq->err_check = mmc_test_check_result_async;

	for (i = 0; i < count; i++) {
		mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr,
				     blocks, blksz, write);
		/* Returns the previously issued request, if any, now complete */
		done_areq = mmc_start_req(test->card->host, cur_areq, &ret);

		/* After the first iteration a completed request is expected */
		if (ret || (!done_areq && i > 0))
			goto err;

		if (done_areq) {
			/* Re-zero the completed request so it can be reused */
			if (done_areq->mrq == &mrq2)
				mmc_test_nonblock_reset(&mrq2, &cmd2,
							&stop2, &data2);
			else
				mmc_test_nonblock_reset(&mrq1, &cmd1,
							&stop1, &data1);
		}
		/* Swap the two request slots for the next iteration */
		done_areq = cur_areq;
		cur_areq = other_areq;
		other_areq = done_areq;
		dev_addr += blocks;
	}

	/* Flush: wait for the last outstanding request to finish */
	done_areq = mmc_start_req(test->card->host, NULL, &ret);

	return ret;
err:
	return ret;
}
 838
 839/*
 840 * Tests a basic transfer with certain parameters
 841 */
 842static int mmc_test_simple_transfer(struct mmc_test_card *test,
 843	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
 844	unsigned blocks, unsigned blksz, int write)
 845{
 846	struct mmc_request mrq = {0};
 847	struct mmc_command cmd = {0};
 848	struct mmc_command stop = {0};
 849	struct mmc_data data = {0};
 850
 851	mrq.cmd = &cmd;
 852	mrq.data = &data;
 853	mrq.stop = &stop;
 854
 855	mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
 856		blocks, blksz, write);
 857
 858	mmc_wait_for_req(test->card->host, &mrq);
 859
 860	mmc_test_wait_busy(test);
 861
 862	return mmc_test_check_result(test, &mrq);
 863}
 864
 865/*
 866 * Tests a transfer where the card will fail completely or partly
 867 */
 868static int mmc_test_broken_transfer(struct mmc_test_card *test,
 869	unsigned blocks, unsigned blksz, int write)
 870{
 871	struct mmc_request mrq = {0};
 872	struct mmc_command cmd = {0};
 873	struct mmc_command stop = {0};
 874	struct mmc_data data = {0};
 875
 876	struct scatterlist sg;
 877
 878	mrq.cmd = &cmd;
 879	mrq.data = &data;
 880	mrq.stop = &stop;
 881
 882	sg_init_one(&sg, test->buffer, blocks * blksz);
 883
 884	mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
 885	mmc_test_prepare_broken_mrq(test, &mrq, write);
 886
 887	mmc_wait_for_req(test->card->host, &mrq);
 888
 889	mmc_test_wait_busy(test);
 890
 891	return mmc_test_check_broken_result(test, &mrq);
 892}
 893
 894/*
 895 * Does a complete transfer test where data is also validated
 896 *
 897 * Note: mmc_test_prepare() must have been done before this call
 898 */
 899static int mmc_test_transfer(struct mmc_test_card *test,
 900	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
 901	unsigned blocks, unsigned blksz, int write)
 902{
 903	int ret, i;
 904	unsigned long flags;
 905
 906	if (write) {
 907		for (i = 0;i < blocks * blksz;i++)
 908			test->scratch[i] = i;
 909	} else {
 910		memset(test->scratch, 0, BUFFER_SIZE);
 911	}
 912	local_irq_save(flags);
 913	sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
 914	local_irq_restore(flags);
 915
 916	ret = mmc_test_set_blksize(test, blksz);
 917	if (ret)
 918		return ret;
 919
 920	ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
 921		blocks, blksz, write);
 922	if (ret)
 923		return ret;
 924
 925	if (write) {
 926		int sectors;
 927
 928		ret = mmc_test_set_blksize(test, 512);
 929		if (ret)
 930			return ret;
 931
 932		sectors = (blocks * blksz + 511) / 512;
 933		if ((sectors * 512) == (blocks * blksz))
 934			sectors++;
 935
 936		if ((sectors * 512) > BUFFER_SIZE)
 937			return -EINVAL;
 938
 939		memset(test->buffer, 0, sectors * 512);
 940
 941		for (i = 0;i < sectors;i++) {
 942			ret = mmc_test_buffer_transfer(test,
 943				test->buffer + i * 512,
 944				dev_addr + i, 512, 0);
 945			if (ret)
 946				return ret;
 947		}
 948
 949		for (i = 0;i < blocks * blksz;i++) {
 950			if (test->buffer[i] != (u8)i)
 951				return RESULT_FAIL;
 952		}
 953
 954		for (;i < sectors * 512;i++) {
 955			if (test->buffer[i] != 0xDF)
 956				return RESULT_FAIL;
 957		}
 958	} else {
 959		local_irq_save(flags);
 960		sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
 961		local_irq_restore(flags);
 962		for (i = 0;i < blocks * blksz;i++) {
 963			if (test->scratch[i] != (u8)i)
 964				return RESULT_FAIL;
 965		}
 966	}
 967
 968	return 0;
 969}
 970
 971/*******************************************************************/
 972/*  Tests                                                          */
 973/*******************************************************************/
 974
/*
 * One entry in the test table.  prepare() runs before run() and
 * cleanup() after it; judging by the callbacks' names, prepare/cleanup
 * appear to be optional hooks — confirm against the test runner.
 */
struct mmc_test_case {
	const char *name;	/* human-readable test name */

	int (*prepare)(struct mmc_test_card *);
	int (*run)(struct mmc_test_card *);
	int (*cleanup)(struct mmc_test_card *);
};
 982
 983static int mmc_test_basic_write(struct mmc_test_card *test)
 984{
 985	int ret;
 986	struct scatterlist sg;
 987
 988	ret = mmc_test_set_blksize(test, 512);
 989	if (ret)
 990		return ret;
 991
 992	sg_init_one(&sg, test->buffer, 512);
 993
 994	ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
 995	if (ret)
 996		return ret;
 997
 998	return 0;
 999}
1000
1001static int mmc_test_basic_read(struct mmc_test_card *test)
1002{
1003	int ret;
1004	struct scatterlist sg;
1005
1006	ret = mmc_test_set_blksize(test, 512);
1007	if (ret)
1008		return ret;
1009
1010	sg_init_one(&sg, test->buffer, 512);
1011
1012	ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
1013	if (ret)
1014		return ret;
1015
1016	return 0;
1017}
1018
1019static int mmc_test_verify_write(struct mmc_test_card *test)
1020{
1021	int ret;
1022	struct scatterlist sg;
1023
1024	sg_init_one(&sg, test->buffer, 512);
1025
1026	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1027	if (ret)
1028		return ret;
1029
1030	return 0;
1031}
1032
1033static int mmc_test_verify_read(struct mmc_test_card *test)
1034{
1035	int ret;
1036	struct scatterlist sg;
1037
1038	sg_init_one(&sg, test->buffer, 512);
1039
1040	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1041	if (ret)
1042		return ret;
1043
1044	return 0;
1045}
1046
1047static int mmc_test_multi_write(struct mmc_test_card *test)
1048{
1049	int ret;
1050	unsigned int size;
1051	struct scatterlist sg;
1052
1053	if (test->card->host->max_blk_count == 1)
1054		return RESULT_UNSUP_HOST;
1055
1056	size = PAGE_SIZE * 2;
1057	size = min(size, test->card->host->max_req_size);
1058	size = min(size, test->card->host->max_seg_size);
1059	size = min(size, test->card->host->max_blk_count * 512);
1060
1061	if (size < 1024)
1062		return RESULT_UNSUP_HOST;
1063
1064	sg_init_one(&sg, test->buffer, size);
1065
1066	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
1067	if (ret)
1068		return ret;
1069
1070	return 0;
1071}
1072
1073static int mmc_test_multi_read(struct mmc_test_card *test)
1074{
1075	int ret;
1076	unsigned int size;
1077	struct scatterlist sg;
1078
1079	if (test->card->host->max_blk_count == 1)
1080		return RESULT_UNSUP_HOST;
1081
1082	size = PAGE_SIZE * 2;
1083	size = min(size, test->card->host->max_req_size);
1084	size = min(size, test->card->host->max_seg_size);
1085	size = min(size, test->card->host->max_blk_count * 512);
1086
1087	if (size < 1024)
1088		return RESULT_UNSUP_HOST;
1089
1090	sg_init_one(&sg, test->buffer, size);
1091
1092	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
1093	if (ret)
1094		return ret;
1095
1096	return 0;
1097}
1098
1099static int mmc_test_pow2_write(struct mmc_test_card *test)
1100{
1101	int ret, i;
1102	struct scatterlist sg;
1103
1104	if (!test->card->csd.write_partial)
1105		return RESULT_UNSUP_CARD;
1106
1107	for (i = 1; i < 512;i <<= 1) {
1108		sg_init_one(&sg, test->buffer, i);
1109		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
1110		if (ret)
1111			return ret;
1112	}
1113
1114	return 0;
1115}
1116
1117static int mmc_test_pow2_read(struct mmc_test_card *test)
1118{
1119	int ret, i;
1120	struct scatterlist sg;
1121
1122	if (!test->card->csd.read_partial)
1123		return RESULT_UNSUP_CARD;
1124
1125	for (i = 1; i < 512;i <<= 1) {
1126		sg_init_one(&sg, test->buffer, i);
1127		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
1128		if (ret)
1129			return ret;
1130	}
1131
1132	return 0;
1133}
1134
1135static int mmc_test_weird_write(struct mmc_test_card *test)
1136{
1137	int ret, i;
1138	struct scatterlist sg;
1139
1140	if (!test->card->csd.write_partial)
1141		return RESULT_UNSUP_CARD;
1142
1143	for (i = 3; i < 512;i += 7) {
1144		sg_init_one(&sg, test->buffer, i);
1145		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
1146		if (ret)
1147			return ret;
1148	}
1149
1150	return 0;
1151}
1152
1153static int mmc_test_weird_read(struct mmc_test_card *test)
1154{
1155	int ret, i;
1156	struct scatterlist sg;
1157
1158	if (!test->card->csd.read_partial)
1159		return RESULT_UNSUP_CARD;
1160
1161	for (i = 3; i < 512;i += 7) {
1162		sg_init_one(&sg, test->buffer, i);
1163		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
1164		if (ret)
1165			return ret;
1166	}
1167
1168	return 0;
1169}
1170
1171static int mmc_test_align_write(struct mmc_test_card *test)
1172{
1173	int ret, i;
1174	struct scatterlist sg;
1175
1176	for (i = 1;i < 4;i++) {
1177		sg_init_one(&sg, test->buffer + i, 512);
1178		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1179		if (ret)
1180			return ret;
1181	}
1182
1183	return 0;
1184}
1185
1186static int mmc_test_align_read(struct mmc_test_card *test)
1187{
1188	int ret, i;
1189	struct scatterlist sg;
1190
1191	for (i = 1;i < 4;i++) {
1192		sg_init_one(&sg, test->buffer + i, 512);
1193		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1194		if (ret)
1195			return ret;
1196	}
1197
1198	return 0;
1199}
1200
1201static int mmc_test_align_multi_write(struct mmc_test_card *test)
1202{
1203	int ret, i;
1204	unsigned int size;
1205	struct scatterlist sg;
1206
1207	if (test->card->host->max_blk_count == 1)
1208		return RESULT_UNSUP_HOST;
1209
1210	size = PAGE_SIZE * 2;
1211	size = min(size, test->card->host->max_req_size);
1212	size = min(size, test->card->host->max_seg_size);
1213	size = min(size, test->card->host->max_blk_count * 512);
1214
1215	if (size < 1024)
1216		return RESULT_UNSUP_HOST;
1217
1218	for (i = 1;i < 4;i++) {
1219		sg_init_one(&sg, test->buffer + i, size);
1220		ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
1221		if (ret)
1222			return ret;
1223	}
1224
1225	return 0;
1226}
1227
1228static int mmc_test_align_multi_read(struct mmc_test_card *test)
1229{
1230	int ret, i;
1231	unsigned int size;
1232	struct scatterlist sg;
1233
1234	if (test->card->host->max_blk_count == 1)
1235		return RESULT_UNSUP_HOST;
1236
1237	size = PAGE_SIZE * 2;
1238	size = min(size, test->card->host->max_req_size);
1239	size = min(size, test->card->host->max_seg_size);
1240	size = min(size, test->card->host->max_blk_count * 512);
1241
1242	if (size < 1024)
1243		return RESULT_UNSUP_HOST;
1244
1245	for (i = 1;i < 4;i++) {
1246		sg_init_one(&sg, test->buffer + i, size);
1247		ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
1248		if (ret)
1249			return ret;
1250	}
1251
1252	return 0;
1253}
1254
/* Deliberately shortened single-block write; expects a data timeout. */
static int mmc_test_xfersize_write(struct mmc_test_card *test)
{
	int ret = mmc_test_set_blksize(test, 512);

	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 1, 512, 1);
}
1269
/* Deliberately shortened single-block read; expects a data timeout. */
static int mmc_test_xfersize_read(struct mmc_test_card *test)
{
	int ret = mmc_test_set_blksize(test, 512);

	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 1, 512, 0);
}
1284
1285static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
1286{
1287	int ret;
1288
1289	if (test->card->host->max_blk_count == 1)
1290		return RESULT_UNSUP_HOST;
1291
1292	ret = mmc_test_set_blksize(test, 512);
1293	if (ret)
1294		return ret;
1295
1296	ret = mmc_test_broken_transfer(test, 2, 512, 1);
1297	if (ret)
1298		return ret;
1299
1300	return 0;
1301}
1302
1303static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
1304{
1305	int ret;
1306
1307	if (test->card->host->max_blk_count == 1)
1308		return RESULT_UNSUP_HOST;
1309
1310	ret = mmc_test_set_blksize(test, 512);
1311	if (ret)
1312		return ret;
1313
1314	ret = mmc_test_broken_transfer(test, 2, 512, 0);
1315	if (ret)
1316		return ret;
1317
1318	return 0;
1319}
1320
1321#ifdef CONFIG_HIGHMEM
1322
1323static int mmc_test_write_high(struct mmc_test_card *test)
1324{
1325	int ret;
1326	struct scatterlist sg;
1327
1328	sg_init_table(&sg, 1);
1329	sg_set_page(&sg, test->highmem, 512, 0);
1330
1331	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1332	if (ret)
1333		return ret;
1334
1335	return 0;
1336}
1337
1338static int mmc_test_read_high(struct mmc_test_card *test)
1339{
1340	int ret;
1341	struct scatterlist sg;
1342
1343	sg_init_table(&sg, 1);
1344	sg_set_page(&sg, test->highmem, 512, 0);
1345
1346	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1347	if (ret)
1348		return ret;
1349
1350	return 0;
1351}
1352
1353static int mmc_test_multi_write_high(struct mmc_test_card *test)
1354{
1355	int ret;
1356	unsigned int size;
1357	struct scatterlist sg;
1358
1359	if (test->card->host->max_blk_count == 1)
1360		return RESULT_UNSUP_HOST;
1361
1362	size = PAGE_SIZE * 2;
1363	size = min(size, test->card->host->max_req_size);
1364	size = min(size, test->card->host->max_seg_size);
1365	size = min(size, test->card->host->max_blk_count * 512);
1366
1367	if (size < 1024)
1368		return RESULT_UNSUP_HOST;
1369
1370	sg_init_table(&sg, 1);
1371	sg_set_page(&sg, test->highmem, size, 0);
1372
1373	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
1374	if (ret)
1375		return ret;
1376
1377	return 0;
1378}
1379
1380static int mmc_test_multi_read_high(struct mmc_test_card *test)
1381{
1382	int ret;
1383	unsigned int size;
1384	struct scatterlist sg;
1385
1386	if (test->card->host->max_blk_count == 1)
1387		return RESULT_UNSUP_HOST;
1388
1389	size = PAGE_SIZE * 2;
1390	size = min(size, test->card->host->max_req_size);
1391	size = min(size, test->card->host->max_seg_size);
1392	size = min(size, test->card->host->max_blk_count * 512);
1393
1394	if (size < 1024)
1395		return RESULT_UNSUP_HOST;
1396
1397	sg_init_table(&sg, 1);
1398	sg_set_page(&sg, test->highmem, size, 0);
1399
1400	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
1401	if (ret)
1402		return ret;
1403
1404	return 0;
1405}
1406
1407#else
1408
/* Stand-in used when CONFIG_HIGHMEM is off: report and skip. */
static int mmc_test_no_highmem(struct mmc_test_card *test)
{
	printk(KERN_INFO "%s: Highmem not configured - test skipped\n",
	       mmc_hostname(test->card->host));
	return 0;
}
1415
1416#endif /* CONFIG_HIGHMEM */
1417
1418/*
1419 * Map sz bytes so that it can be transferred.
1420 */
1421static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
1422			     int max_scatter, int min_sg_len)
1423{
1424	struct mmc_test_area *t = &test->area;
1425	int err;
1426
1427	t->blocks = sz >> 9;
1428
1429	if (max_scatter) {
1430		err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
1431						  t->max_segs, t->max_seg_sz,
1432				       &t->sg_len);
1433	} else {
1434		err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
1435				      t->max_seg_sz, &t->sg_len, min_sg_len);
1436	}
1437	if (err)
1438		printk(KERN_INFO "%s: Failed to map sg list\n",
1439		       mmc_hostname(test->card->host));
1440	return err;
1441}
1442
1443/*
1444 * Transfer bytes mapped by mmc_test_area_map().
1445 */
1446static int mmc_test_area_transfer(struct mmc_test_card *test,
1447				  unsigned int dev_addr, int write)
1448{
1449	struct mmc_test_area *t = &test->area;
1450
1451	return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
1452					t->blocks, 512, write);
1453}
1454
1455/*
1456 * Map and transfer bytes for multiple transfers.
1457 */
1458static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
1459				unsigned int dev_addr, int write,
1460				int max_scatter, int timed, int count,
1461				bool nonblock, int min_sg_len)
1462{
1463	struct timespec ts1, ts2;
1464	int ret = 0;
1465	int i;
1466	struct mmc_test_area *t = &test->area;
1467
1468	/*
1469	 * In the case of a maximally scattered transfer, the maximum transfer
1470	 * size is further limited by using PAGE_SIZE segments.
1471	 */
1472	if (max_scatter) {
1473		struct mmc_test_area *t = &test->area;
1474		unsigned long max_tfr;
1475
1476		if (t->max_seg_sz >= PAGE_SIZE)
1477			max_tfr = t->max_segs * PAGE_SIZE;
1478		else
1479			max_tfr = t->max_segs * t->max_seg_sz;
1480		if (sz > max_tfr)
1481			sz = max_tfr;
1482	}
1483
1484	ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len);
1485	if (ret)
1486		return ret;
1487
1488	if (timed)
1489		getnstimeofday(&ts1);
1490	if (nonblock)
1491		ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len,
1492				 dev_addr, t->blocks, 512, write, count);
1493	else
1494		for (i = 0; i < count && ret == 0; i++) {
1495			ret = mmc_test_area_transfer(test, dev_addr, write);
1496			dev_addr += sz >> 9;
1497		}
1498
1499	if (ret)
1500		return ret;
1501
1502	if (timed)
1503		getnstimeofday(&ts2);
1504
1505	if (timed)
1506		mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);
1507
1508	return 0;
1509}
1510
1511static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
1512			    unsigned int dev_addr, int write, int max_scatter,
1513			    int timed)
1514{
1515	return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter,
1516				    timed, 1, false, 0);
1517}
1518
1519/*
1520 * Write the test area entirely.
1521 */
1522static int mmc_test_area_fill(struct mmc_test_card *test)
1523{
1524	struct mmc_test_area *t = &test->area;
1525
1526	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0);
1527}
1528
1529/*
1530 * Erase the test area entirely.
1531 */
1532static int mmc_test_area_erase(struct mmc_test_card *test)
1533{
1534	struct mmc_test_area *t = &test->area;
1535
1536	if (!mmc_can_erase(test->card))
1537		return 0;
1538
1539	return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
1540			 MMC_ERASE_ARG);
1541}
1542
1543/*
1544 * Cleanup struct mmc_test_area.
1545 */
1546static int mmc_test_area_cleanup(struct mmc_test_card *test)
1547{
1548	struct mmc_test_area *t = &test->area;
1549
1550	kfree(t->sg);
1551	mmc_test_free_mem(t->mem);
1552
1553	return 0;
1554}
1555
1556/*
1557 * Initialize an area for testing large transfers.  The test area is set to the
1558 * middle of the card because cards may have different charateristics at the
1559 * front (for FAT file system optimization).  Optionally, the area is erased
1560 * (if the card supports it) which may improve write performance.  Optionally,
1561 * the area is filled with data for subsequent read tests.
1562 */
1563static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
1564{
1565	struct mmc_test_area *t = &test->area;
1566	unsigned long min_sz = 64 * 1024, sz;
1567	int ret;
1568
1569	ret = mmc_test_set_blksize(test, 512);
1570	if (ret)
1571		return ret;
1572
1573	/* Make the test area size about 4MiB */
1574	sz = (unsigned long)test->card->pref_erase << 9;
1575	t->max_sz = sz;
1576	while (t->max_sz < 4 * 1024 * 1024)
1577		t->max_sz += sz;
1578	while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
1579		t->max_sz -= sz;
1580
1581	t->max_segs = test->card->host->max_segs;
1582	t->max_seg_sz = test->card->host->max_seg_size;
 
1583
1584	t->max_tfr = t->max_sz;
1585	if (t->max_tfr >> 9 > test->card->host->max_blk_count)
1586		t->max_tfr = test->card->host->max_blk_count << 9;
1587	if (t->max_tfr > test->card->host->max_req_size)
1588		t->max_tfr = test->card->host->max_req_size;
1589	if (t->max_tfr / t->max_seg_sz > t->max_segs)
1590		t->max_tfr = t->max_segs * t->max_seg_sz;
1591
1592	/*
1593	 * Try to allocate enough memory for a max. sized transfer.  Less is OK
1594	 * because the same memory can be mapped into the scatterlist more than
1595	 * once.  Also, take into account the limits imposed on scatterlist
1596	 * segments by the host driver.
1597	 */
1598	t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
1599				    t->max_seg_sz);
1600	if (!t->mem)
1601		return -ENOMEM;
1602
1603	t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
1604	if (!t->sg) {
1605		ret = -ENOMEM;
1606		goto out_free;
1607	}
1608
1609	t->dev_addr = mmc_test_capacity(test->card) / 2;
1610	t->dev_addr -= t->dev_addr % (t->max_sz >> 9);
1611
1612	if (erase) {
1613		ret = mmc_test_area_erase(test);
1614		if (ret)
1615			goto out_free;
1616	}
1617
1618	if (fill) {
1619		ret = mmc_test_area_fill(test);
1620		if (ret)
1621			goto out_free;
1622	}
1623
1624	return 0;
1625
1626out_free:
1627	mmc_test_area_cleanup(test);
1628	return ret;
1629}
1630
1631/*
1632 * Prepare for large transfers.  Do not erase the test area.
1633 */
1634static int mmc_test_area_prepare(struct mmc_test_card *test)
1635{
1636	return mmc_test_area_init(test, 0, 0);
1637}
1638
1639/*
1640 * Prepare for large transfers.  Do erase the test area.
1641 */
1642static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
1643{
1644	return mmc_test_area_init(test, 1, 0);
1645}
1646
1647/*
1648 * Prepare for large transfers.  Erase and fill the test area.
1649 */
1650static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
1651{
1652	return mmc_test_area_init(test, 1, 1);
1653}
1654
1655/*
1656 * Test best-case performance.  Best-case performance is expected from
1657 * a single large transfer.
1658 *
1659 * An additional option (max_scatter) allows the measurement of the same
1660 * transfer but with no contiguous pages in the scatter list.  This tests
1661 * the efficiency of DMA to handle scattered pages.
1662 */
1663static int mmc_test_best_performance(struct mmc_test_card *test, int write,
1664				     int max_scatter)
1665{
1666	struct mmc_test_area *t = &test->area;
1667
1668	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write,
1669				max_scatter, 1);
1670}
1671
1672/*
1673 * Best-case read performance.
1674 */
1675static int mmc_test_best_read_performance(struct mmc_test_card *test)
1676{
1677	return mmc_test_best_performance(test, 0, 0);
1678}
1679
1680/*
1681 * Best-case write performance.
1682 */
1683static int mmc_test_best_write_performance(struct mmc_test_card *test)
1684{
1685	return mmc_test_best_performance(test, 1, 0);
1686}
1687
1688/*
1689 * Best-case read performance into scattered pages.
1690 */
1691static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
1692{
1693	return mmc_test_best_performance(test, 0, 1);
1694}
1695
1696/*
1697 * Best-case write performance from scattered pages.
1698 */
1699static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
1700{
1701	return mmc_test_best_performance(test, 1, 1);
1702}
1703
1704/*
1705 * Single read performance by transfer size.
1706 */
1707static int mmc_test_profile_read_perf(struct mmc_test_card *test)
1708{
1709	struct mmc_test_area *t = &test->area;
1710	unsigned long sz;
1711	unsigned int dev_addr;
1712	int ret;
1713
1714	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1715		dev_addr = t->dev_addr + (sz >> 9);
1716		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1717		if (ret)
1718			return ret;
1719	}
1720	sz = t->max_tfr;
1721	dev_addr = t->dev_addr;
1722	return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1723}
1724
1725/*
1726 * Single write performance by transfer size.
1727 */
1728static int mmc_test_profile_write_perf(struct mmc_test_card *test)
1729{
1730	struct mmc_test_area *t = &test->area;
1731	unsigned long sz;
1732	unsigned int dev_addr;
1733	int ret;
1734
1735	ret = mmc_test_area_erase(test);
1736	if (ret)
1737		return ret;
1738	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1739		dev_addr = t->dev_addr + (sz >> 9);
1740		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1741		if (ret)
1742			return ret;
1743	}
1744	ret = mmc_test_area_erase(test);
1745	if (ret)
1746		return ret;
1747	sz = t->max_tfr;
1748	dev_addr = t->dev_addr;
1749	return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1750}
1751
1752/*
1753 * Single trim performance by transfer size.
1754 */
1755static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
1756{
1757	struct mmc_test_area *t = &test->area;
1758	unsigned long sz;
1759	unsigned int dev_addr;
1760	struct timespec ts1, ts2;
1761	int ret;
1762
1763	if (!mmc_can_trim(test->card))
1764		return RESULT_UNSUP_CARD;
1765
1766	if (!mmc_can_erase(test->card))
1767		return RESULT_UNSUP_HOST;
1768
1769	for (sz = 512; sz < t->max_sz; sz <<= 1) {
1770		dev_addr = t->dev_addr + (sz >> 9);
1771		getnstimeofday(&ts1);
1772		ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1773		if (ret)
1774			return ret;
1775		getnstimeofday(&ts2);
1776		mmc_test_print_rate(test, sz, &ts1, &ts2);
1777	}
1778	dev_addr = t->dev_addr;
1779	getnstimeofday(&ts1);
1780	ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1781	if (ret)
1782		return ret;
1783	getnstimeofday(&ts2);
1784	mmc_test_print_rate(test, sz, &ts1, &ts2);
1785	return 0;
1786}
1787
1788static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
1789{
1790	struct mmc_test_area *t = &test->area;
1791	unsigned int dev_addr, i, cnt;
1792	struct timespec ts1, ts2;
1793	int ret;
1794
1795	cnt = t->max_sz / sz;
1796	dev_addr = t->dev_addr;
1797	getnstimeofday(&ts1);
1798	for (i = 0; i < cnt; i++) {
1799		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
1800		if (ret)
1801			return ret;
1802		dev_addr += (sz >> 9);
1803	}
1804	getnstimeofday(&ts2);
1805	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1806	return 0;
1807}
1808
1809/*
1810 * Consecutive read performance by transfer size.
1811 */
1812static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
1813{
1814	struct mmc_test_area *t = &test->area;
1815	unsigned long sz;
1816	int ret;
1817
1818	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1819		ret = mmc_test_seq_read_perf(test, sz);
1820		if (ret)
1821			return ret;
1822	}
1823	sz = t->max_tfr;
1824	return mmc_test_seq_read_perf(test, sz);
1825}
1826
1827static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
1828{
1829	struct mmc_test_area *t = &test->area;
1830	unsigned int dev_addr, i, cnt;
1831	struct timespec ts1, ts2;
1832	int ret;
1833
1834	ret = mmc_test_area_erase(test);
1835	if (ret)
1836		return ret;
1837	cnt = t->max_sz / sz;
1838	dev_addr = t->dev_addr;
1839	getnstimeofday(&ts1);
1840	for (i = 0; i < cnt; i++) {
1841		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
1842		if (ret)
1843			return ret;
1844		dev_addr += (sz >> 9);
1845	}
1846	getnstimeofday(&ts2);
1847	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1848	return 0;
1849}
1850
1851/*
1852 * Consecutive write performance by transfer size.
1853 */
1854static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
1855{
1856	struct mmc_test_area *t = &test->area;
1857	unsigned long sz;
1858	int ret;
1859
1860	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1861		ret = mmc_test_seq_write_perf(test, sz);
1862		if (ret)
1863			return ret;
1864	}
1865	sz = t->max_tfr;
1866	return mmc_test_seq_write_perf(test, sz);
1867}
1868
1869/*
1870 * Consecutive trim performance by transfer size.
1871 */
1872static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
1873{
1874	struct mmc_test_area *t = &test->area;
1875	unsigned long sz;
1876	unsigned int dev_addr, i, cnt;
1877	struct timespec ts1, ts2;
1878	int ret;
1879
1880	if (!mmc_can_trim(test->card))
1881		return RESULT_UNSUP_CARD;
1882
1883	if (!mmc_can_erase(test->card))
1884		return RESULT_UNSUP_HOST;
1885
1886	for (sz = 512; sz <= t->max_sz; sz <<= 1) {
1887		ret = mmc_test_area_erase(test);
1888		if (ret)
1889			return ret;
1890		ret = mmc_test_area_fill(test);
1891		if (ret)
1892			return ret;
1893		cnt = t->max_sz / sz;
1894		dev_addr = t->dev_addr;
1895		getnstimeofday(&ts1);
1896		for (i = 0; i < cnt; i++) {
1897			ret = mmc_erase(test->card, dev_addr, sz >> 9,
1898					MMC_TRIM_ARG);
1899			if (ret)
1900				return ret;
1901			dev_addr += (sz >> 9);
1902		}
1903		getnstimeofday(&ts2);
1904		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1905	}
1906	return 0;
1907}
1908
/* State for the pseudo-random address generator below */
static unsigned int rnd_next = 1;

/*
 * Return a pseudo-random number in [0, rnd_cnt).  Uses the classic LCG
 * constants (1103515245 / 12345) from the ISO C rand() example, so the
 * sequence is reproducible across test runs.
 */
static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
{
	uint64_t sample;

	rnd_next = rnd_next * 1103515245 + 12345;
	sample = (rnd_next >> 16) & 0x7fff;
	return (sample * rnd_cnt) >> 15;
}
1919
/*
 * Measure random-access I/O rate for transfer size @sz, running for up to
 * 10 seconds.  Addresses are picked at and beyond the quarter-capacity
 * mark, spread across preferred-erase-size chunks.
 */
static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
			     unsigned long sz)
{
	unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
	unsigned int ssz;
	struct timespec ts1, ts2, ts;
	int ret;

	ssz = sz >> 9;	/* transfer size in 512-byte sectors */

	rnd_addr = mmc_test_capacity(test->card) / 4;
	range1 = rnd_addr / test->card->pref_erase;
	range2 = range1 / ssz;

	getnstimeofday(&ts1);
	for (cnt = 0; cnt < UINT_MAX; cnt++) {
		getnstimeofday(&ts2);
		ts = timespec_sub(ts2, ts1);
		if (ts.tv_sec >= 10)
			break;
		ea = mmc_test_rnd_num(range1);
		/*
		 * Avoid using the same erase-size chunk twice in a row.
		 * NOTE(review): if ea is 0 here it wraps to UINT_MAX and the
		 * address computed below overflows -- confirm intended.
		 */
		if (ea == last_ea)
			ea -= 1;
		last_ea = ea;
		dev_addr = rnd_addr + test->card->pref_erase * ea +
			   ssz * mmc_test_rnd_num(range2);
		ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
		if (ret)
			return ret;
	}
	if (print)
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}
1954
1955static int mmc_test_random_perf(struct mmc_test_card *test, int write)
1956{
1957	struct mmc_test_area *t = &test->area;
1958	unsigned int next;
1959	unsigned long sz;
1960	int ret;
1961
1962	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1963		/*
1964		 * When writing, try to get more consistent results by running
1965		 * the test twice with exactly the same I/O but outputting the
1966		 * results only for the 2nd run.
1967		 */
1968		if (write) {
1969			next = rnd_next;
1970			ret = mmc_test_rnd_perf(test, write, 0, sz);
1971			if (ret)
1972				return ret;
1973			rnd_next = next;
1974		}
1975		ret = mmc_test_rnd_perf(test, write, 1, sz);
1976		if (ret)
1977			return ret;
1978	}
1979	sz = t->max_tfr;
1980	if (write) {
1981		next = rnd_next;
1982		ret = mmc_test_rnd_perf(test, write, 0, sz);
1983		if (ret)
1984			return ret;
1985		rnd_next = next;
1986	}
1987	return mmc_test_rnd_perf(test, write, 1, sz);
1988}
1989
1990/*
1991 * Random read performance by transfer size.
1992 */
1993static int mmc_test_random_read_perf(struct mmc_test_card *test)
1994{
1995	return mmc_test_random_perf(test, 0);
1996}
1997
1998/*
1999 * Random write performance by transfer size.
2000 */
2001static int mmc_test_random_write_perf(struct mmc_test_card *test)
2002{
2003	return mmc_test_random_perf(test, 1);
2004}
2005
/*
 * Time sequential maximum-size transfers totalling (up to) @tot_sz bytes,
 * starting near the card's quarter-capacity mark, and print the average
 * rate.
 */
static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
			     unsigned int tot_sz, int max_scatter)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt, sz, ssz;
	struct timespec ts1, ts2;
	int ret;

	sz = t->max_tfr;

	/*
	 * In the case of a maximally scattered transfer, the maximum transfer
	 * size is further limited by using PAGE_SIZE segments.
	 */
	if (max_scatter) {
		unsigned long max_tfr;

		if (t->max_seg_sz >= PAGE_SIZE)
			max_tfr = t->max_segs * PAGE_SIZE;
		else
			max_tfr = t->max_segs * t->max_seg_sz;
		if (sz > max_tfr)
			sz = max_tfr;
	}

	ssz = sz >> 9;	/* transfer size in 512-byte sectors */
	dev_addr = mmc_test_capacity(test->card) / 4;
	if (tot_sz > dev_addr << 9)
		tot_sz = dev_addr << 9;
	cnt = tot_sz / sz;
	dev_addr &= 0xffff0000; /* Round down to a 64Ki-sector (32MiB) boundary */

	getnstimeofday(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, write,
				       max_scatter, 0);
		if (ret)
			return ret;
		dev_addr += ssz;
	}
	getnstimeofday(&ts2);

	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);

	return 0;
}
2052
/*
 * Run the sequential benchmark over a series of large total sizes, with
 * fewer repetitions as the total size grows.
 */
static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
{
	/* (total transfer size, repetitions) pairs, run in order */
	static const struct {
		unsigned int tot_sz;
		int reps;
	} stages[] = {
		{   10 * 1024 * 1024, 10 },
		{  100 * 1024 * 1024,  5 },
		{ 1000 * 1024 * 1024,  3 },
	};
	unsigned int s;
	int ret = 0, i;

	for (s = 0; s < ARRAY_SIZE(stages); s++) {
		for (i = 0; i < stages[s].reps; i++) {
			ret = mmc_test_seq_perf(test, write,
						stages[s].tot_sz, 1);
			if (ret)
				return ret;
		}
	}

	return ret;
}
2075
2076/*
2077 * Large sequential read performance.
2078 */
2079static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
2080{
2081	return mmc_test_large_seq_perf(test, 0);
2082}
2083
2084/*
2085 * Large sequential write performance.
2086 */
2087static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
2088{
2089	return mmc_test_large_seq_perf(test, 1);
2090}
2091
2092static int mmc_test_rw_multiple(struct mmc_test_card *test,
2093				struct mmc_test_multiple_rw *tdata,
2094				unsigned int reqsize, unsigned int size,
2095				int min_sg_len)
2096{
2097	unsigned int dev_addr;
2098	struct mmc_test_area *t = &test->area;
2099	int ret = 0;
2100
2101	/* Set up test area */
2102	if (size > mmc_test_capacity(test->card) / 2 * 512)
2103		size = mmc_test_capacity(test->card) / 2 * 512;
2104	if (reqsize > t->max_tfr)
2105		reqsize = t->max_tfr;
2106	dev_addr = mmc_test_capacity(test->card) / 4;
2107	if ((dev_addr & 0xffff0000))
2108		dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
2109	else
2110		dev_addr &= 0xfffff800; /* Round to 1MiB boundary */
2111	if (!dev_addr)
2112		goto err;
2113
2114	if (reqsize > size)
2115		return 0;
2116
2117	/* prepare test area */
2118	if (mmc_can_erase(test->card) &&
2119	    tdata->prepare & MMC_TEST_PREP_ERASE) {
2120		ret = mmc_erase(test->card, dev_addr,
2121				size / 512, MMC_SECURE_ERASE_ARG);
2122		if (ret)
2123			ret = mmc_erase(test->card, dev_addr,
2124					size / 512, MMC_ERASE_ARG);
2125		if (ret)
2126			goto err;
2127	}
2128
2129	/* Run test */
2130	ret = mmc_test_area_io_seq(test, reqsize, dev_addr,
2131				   tdata->do_write, 0, 1, size / reqsize,
2132				   tdata->do_nonblock_req, min_sg_len);
2133	if (ret)
2134		goto err;
2135
2136	return ret;
2137 err:
2138	printk(KERN_INFO "[%s] error\n", __func__);
2139	return ret;
2140}
2141
2142static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
2143				     struct mmc_test_multiple_rw *rw)
2144{
2145	int ret = 0;
2146	int i;
2147	void *pre_req = test->card->host->ops->pre_req;
2148	void *post_req = test->card->host->ops->post_req;
2149
2150	if (rw->do_nonblock_req &&
2151	    ((!pre_req && post_req) || (pre_req && !post_req))) {
2152		printk(KERN_INFO "error: only one of pre/post is defined\n");
2153		return -EINVAL;
2154	}
2155
2156	for (i = 0 ; i < rw->len && ret == 0; i++) {
2157		ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0);
2158		if (ret)
2159			break;
2160	}
2161	return ret;
2162}
2163
2164static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test,
2165				       struct mmc_test_multiple_rw *rw)
2166{
2167	int ret = 0;
2168	int i;
2169
2170	for (i = 0 ; i < rw->len && ret == 0; i++) {
2171		ret = mmc_test_rw_multiple(test, rw, 512*1024, rw->size,
2172					   rw->sg_len[i]);
2173		if (ret)
2174			break;
2175	}
2176	return ret;
2177}
2178
2179/*
2180 * Multiple blocking write 4k to 4 MB chunks
2181 */
2182static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test)
2183{
2184	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2185			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2186	struct mmc_test_multiple_rw test_data = {
2187		.bs = bs,
2188		.size = TEST_AREA_MAX_SIZE,
2189		.len = ARRAY_SIZE(bs),
2190		.do_write = true,
2191		.do_nonblock_req = false,
2192		.prepare = MMC_TEST_PREP_ERASE,
2193	};
2194
2195	return mmc_test_rw_multiple_size(test, &test_data);
2196};
2197
2198/*
2199 * Multiple non-blocking write 4k to 4 MB chunks
2200 */
2201static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test)
2202{
2203	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2204			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2205	struct mmc_test_multiple_rw test_data = {
2206		.bs = bs,
2207		.size = TEST_AREA_MAX_SIZE,
2208		.len = ARRAY_SIZE(bs),
2209		.do_write = true,
2210		.do_nonblock_req = true,
2211		.prepare = MMC_TEST_PREP_ERASE,
2212	};
2213
2214	return mmc_test_rw_multiple_size(test, &test_data);
2215}
2216
2217/*
2218 * Multiple blocking read 4k to 4 MB chunks
2219 */
2220static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test)
2221{
2222	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2223			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2224	struct mmc_test_multiple_rw test_data = {
2225		.bs = bs,
2226		.size = TEST_AREA_MAX_SIZE,
2227		.len = ARRAY_SIZE(bs),
2228		.do_write = false,
2229		.do_nonblock_req = false,
2230		.prepare = MMC_TEST_PREP_NONE,
2231	};
2232
2233	return mmc_test_rw_multiple_size(test, &test_data);
2234}
2235
2236/*
2237 * Multiple non-blocking read 4k to 4 MB chunks
2238 */
2239static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test)
2240{
2241	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2242			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2243	struct mmc_test_multiple_rw test_data = {
2244		.bs = bs,
2245		.size = TEST_AREA_MAX_SIZE,
2246		.len = ARRAY_SIZE(bs),
2247		.do_write = false,
2248		.do_nonblock_req = true,
2249		.prepare = MMC_TEST_PREP_NONE,
2250	};
2251
2252	return mmc_test_rw_multiple_size(test, &test_data);
2253}
2254
2255/*
2256 * Multiple blocking write 1 to 512 sg elements
2257 */
2258static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test)
2259{
2260	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2261				 1 << 7, 1 << 8, 1 << 9};
2262	struct mmc_test_multiple_rw test_data = {
2263		.sg_len = sg_len,
2264		.size = TEST_AREA_MAX_SIZE,
2265		.len = ARRAY_SIZE(sg_len),
2266		.do_write = true,
2267		.do_nonblock_req = false,
2268		.prepare = MMC_TEST_PREP_ERASE,
2269	};
2270
2271	return mmc_test_rw_multiple_sg_len(test, &test_data);
2272};
2273
2274/*
2275 * Multiple non-blocking write 1 to 512 sg elements
2276 */
2277static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test)
2278{
2279	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2280				 1 << 7, 1 << 8, 1 << 9};
2281	struct mmc_test_multiple_rw test_data = {
2282		.sg_len = sg_len,
2283		.size = TEST_AREA_MAX_SIZE,
2284		.len = ARRAY_SIZE(sg_len),
2285		.do_write = true,
2286		.do_nonblock_req = true,
2287		.prepare = MMC_TEST_PREP_ERASE,
2288	};
2289
2290	return mmc_test_rw_multiple_sg_len(test, &test_data);
2291}
2292
2293/*
2294 * Multiple blocking read 1 to 512 sg elements
2295 */
2296static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test)
2297{
2298	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2299				 1 << 7, 1 << 8, 1 << 9};
2300	struct mmc_test_multiple_rw test_data = {
2301		.sg_len = sg_len,
2302		.size = TEST_AREA_MAX_SIZE,
2303		.len = ARRAY_SIZE(sg_len),
2304		.do_write = false,
2305		.do_nonblock_req = false,
2306		.prepare = MMC_TEST_PREP_NONE,
2307	};
2308
2309	return mmc_test_rw_multiple_sg_len(test, &test_data);
2310}
2311
2312/*
2313 * Multiple non-blocking read 1 to 512 sg elements
2314 */
2315static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
2316{
2317	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2318				 1 << 7, 1 << 8, 1 << 9};
2319	struct mmc_test_multiple_rw test_data = {
2320		.sg_len = sg_len,
2321		.size = TEST_AREA_MAX_SIZE,
2322		.len = ARRAY_SIZE(sg_len),
2323		.do_write = false,
2324		.do_nonblock_req = true,
2325		.prepare = MMC_TEST_PREP_NONE,
2326	};
2327
2328	return mmc_test_rw_multiple_sg_len(test, &test_data);
2329}
2330
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Table of all test cases.  The 1-based index into this array is the
 * number written to the debugfs "test" file, so the order defines the
 * user-visible test numbers -- only append new entries.  .prepare and
 * .cleanup are optional hooks run before and after .run.
 */
static const struct mmc_test_case mmc_test_cases[] = {
	{
		.name = "Basic write (no data verification)",
		.run = mmc_test_basic_write,
	},

	{
		.name = "Basic read (no data verification)",
		.run = mmc_test_basic_read,
	},

	{
		.name = "Basic write (with data verification)",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_verify_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Basic read (with data verification)",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_verify_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Power of two block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_pow2_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Power of two block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_pow2_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_weird_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_weird_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Correct xfer_size at write (start failure)",
		.run = mmc_test_xfersize_write,
	},

	{
		.name = "Correct xfer_size at read (start failure)",
		.run = mmc_test_xfersize_read,
	},

	{
		.name = "Correct xfer_size at write (midway failure)",
		.run = mmc_test_multi_xfersize_write,
	},

	{
		.name = "Correct xfer_size at read (midway failure)",
		.run = mmc_test_multi_xfersize_read,
	},

#ifdef CONFIG_HIGHMEM

	{
		.name = "Highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_read_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read_high,
		.cleanup = mmc_test_cleanup,
	},

#else

	/* Same slots, so test numbering matches the CONFIG_HIGHMEM build */
	{
		.name = "Highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Highmem read",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem read",
		.run = mmc_test_no_highmem,
	},

#endif /* CONFIG_HIGHMEM */

	{
		.name = "Best-case read performance",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case read performance into scattered pages",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance from scattered pages",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single trim performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_seq_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive trim performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Random read performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_random_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Random write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_random_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Large sequential read into scattered pages",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_large_seq_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Large sequential write from scattered pages",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_large_seq_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance with blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_write_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance with non-blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_write_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance with blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_read_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance with non-blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_read_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_wr_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance non-blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_wr_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_r_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance non-blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_r_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},
};
2654
/* Serialises test runs and protects the result and debugfs-file lists */
static DEFINE_MUTEX(mmc_test_lock);

/* Saved results of completed test runs, one entry per test case per card */
static LIST_HEAD(mmc_test_result);
2658
/*
 * Run one test case (1-based @testcase) or, if @testcase is 0, every
 * test case, against @test->card.  Results are logged via printk and
 * appended to the global mmc_test_result list.
 */
static void mmc_test_run(struct mmc_test_card *test, int testcase)
{
	int i, ret;

	printk(KERN_INFO "%s: Starting tests of card %s...\n",
		mmc_hostname(test->card->host), mmc_card_id(test->card));

	/* Hold the host for the whole run */
	mmc_claim_host(test->card->host);

	for (i = 0;i < ARRAY_SIZE(mmc_test_cases);i++) {
		struct mmc_test_general_result *gr;

		/* 0 runs everything; otherwise match the 1-based number */
		if (testcase && ((i + 1) != testcase))
			continue;

		printk(KERN_INFO "%s: Test case %d. %s...\n",
			mmc_hostname(test->card->host), i + 1,
			mmc_test_cases[i].name);

		if (mmc_test_cases[i].prepare) {
			ret = mmc_test_cases[i].prepare(test);
			if (ret) {
				printk(KERN_INFO "%s: Result: Prepare "
					"stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
				continue;
			}
		}

		/* Allocation failure here is tolerated: the test still runs,
		 * only the saved result is lost. */
		gr = kzalloc(sizeof(struct mmc_test_general_result),
			GFP_KERNEL);
		if (gr) {
			INIT_LIST_HEAD(&gr->tr_lst);

			/* Assign data what we know already */
			gr->card = test->card;
			gr->testcase = i;

			/* Append container to global one */
			list_add_tail(&gr->link, &mmc_test_result);

			/*
			 * Save the pointer to created container in our private
			 * structure.
			 */
			test->gr = gr;
		}

		ret = mmc_test_cases[i].run(test);
		switch (ret) {
		case RESULT_OK:
			printk(KERN_INFO "%s: Result: OK\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_FAIL:
			printk(KERN_INFO "%s: Result: FAILED\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_HOST:
			printk(KERN_INFO "%s: Result: UNSUPPORTED "
				"(by host)\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_CARD:
			printk(KERN_INFO "%s: Result: UNSUPPORTED "
				"(by card)\n",
				mmc_hostname(test->card->host));
			break;
		default:
			printk(KERN_INFO "%s: Result: ERROR (%d)\n",
				mmc_hostname(test->card->host), ret);
		}

		/* Save the result */
		if (gr)
			gr->result = ret;

		if (mmc_test_cases[i].cleanup) {
			ret = mmc_test_cases[i].cleanup(test);
			if (ret) {
				printk(KERN_INFO "%s: Warning: Cleanup "
					"stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
			}
		}
	}

	mmc_release_host(test->card->host);

	printk(KERN_INFO "%s: Tests completed.\n",
		mmc_hostname(test->card->host));
}
2753
/*
 * Free saved test results.  If @card is NULL all results are freed,
 * otherwise only those belonging to @card.
 */
static void mmc_test_free_result(struct mmc_card *card)
{
	struct mmc_test_general_result *gr, *grs;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
		struct mmc_test_transfer_result *tr, *trs;

		if (card && gr->card != card)
			continue;

		/* Free the per-transfer results hanging off this entry */
		list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
			list_del(&tr->link);
			kfree(tr);
		}

		list_del(&gr->link);
		kfree(gr);
	}

	mutex_unlock(&mmc_test_lock);
}
2777
/* Debugfs files created by this driver, one entry per file */
static LIST_HEAD(mmc_test_file_test);

/*
 * debugfs "test" read handler: print the saved result of every test case
 * run against this card, followed by its per-transfer measurements.
 */
static int mtf_test_show(struct seq_file *sf, void *data)
{
	struct mmc_card *card = (struct mmc_card *)sf->private;
	struct mmc_test_general_result *gr;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry(gr, &mmc_test_result, link) {
		struct mmc_test_transfer_result *tr;

		if (gr->card != card)
			continue;

		seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);

		/* One line per transfer: count sectors time rate iops */
		list_for_each_entry(tr, &gr->tr_lst, link) {
			seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n",
				tr->count, tr->sectors,
				(unsigned long)tr->ts.tv_sec,
				(unsigned long)tr->ts.tv_nsec,
				tr->rate, tr->iops / 100, tr->iops % 100);
		}
	}

	mutex_unlock(&mmc_test_lock);

	return 0;
}
2808
/* Bind the "test" debugfs file to the seq_file show routine above */
static int mtf_test_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtf_test_show, inode->i_private);
}
2813
2814static ssize_t mtf_test_write(struct file *file, const char __user *buf,
2815	size_t count, loff_t *pos)
2816{
2817	struct seq_file *sf = (struct seq_file *)file->private_data;
2818	struct mmc_card *card = (struct mmc_card *)sf->private;
2819	struct mmc_test_card *test;
2820	char lbuf[12];
2821	long testcase;
2822
2823	if (count >= sizeof(lbuf))
2824		return -EINVAL;
2825
2826	if (copy_from_user(lbuf, buf, count))
2827		return -EFAULT;
2828	lbuf[count] = '\0';
2829
2830	if (strict_strtol(lbuf, 10, &testcase))
2831		return -EINVAL;
2832
2833	test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
2834	if (!test)
2835		return -ENOMEM;
2836
2837	/*
2838	 * Remove all test cases associated with given card. Thus we have only
2839	 * actual data of the last run.
2840	 */
2841	mmc_test_free_result(card);
2842
2843	test->card = card;
2844
2845	test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
2846#ifdef CONFIG_HIGHMEM
2847	test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
2848#endif
2849
2850#ifdef CONFIG_HIGHMEM
2851	if (test->buffer && test->highmem) {
2852#else
2853	if (test->buffer) {
2854#endif
2855		mutex_lock(&mmc_test_lock);
2856		mmc_test_run(test, testcase);
2857		mutex_unlock(&mmc_test_lock);
2858	}
2859
2860#ifdef CONFIG_HIGHMEM
2861	__free_pages(test->highmem, BUFFER_ORDER);
2862#endif
2863	kfree(test->buffer);
2864	kfree(test);
2865
2866	return count;
2867}
2868
/* File operations for the per-card debugfs "test" file (read + write) */
static const struct file_operations mmc_test_fops_test = {
	.open		= mtf_test_open,
	.read		= seq_read,
	.write		= mtf_test_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};
2876
/* debugfs "testlist" read handler: print the 1-based list of test names */
static int mtf_testlist_show(struct seq_file *sf, void *data)
{
	int i;

	mutex_lock(&mmc_test_lock);

	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
		seq_printf(sf, "%d:\t%s\n", i+1, mmc_test_cases[i].name);

	mutex_unlock(&mmc_test_lock);

	return 0;
}
2890
/* Bind the "testlist" debugfs file to the seq_file show routine above */
static int mtf_testlist_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtf_testlist_show, inode->i_private);
}

/* File operations for the read-only debugfs "testlist" file */
static const struct file_operations mmc_test_fops_testlist = {
	.open		= mtf_testlist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
2902
/*
 * Remove debugfs files created by this driver.  If @card is NULL all
 * files are removed, otherwise only those belonging to @card.
 */
static void mmc_test_free_dbgfs_file(struct mmc_card *card)
{
	struct mmc_test_dbgfs_file *df, *dfs;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
		if (card && df->card != card)
			continue;
		debugfs_remove(df->file);
		list_del(&df->link);
		kfree(df);
	}

	mutex_unlock(&mmc_test_lock);
}
2919
2920static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
2921	const char *name, mode_t mode, const struct file_operations *fops)
2922{
2923	struct dentry *file = NULL;
2924	struct mmc_test_dbgfs_file *df;
2925
2926	if (card->debugfs_root)
2927		file = debugfs_create_file(name, mode, card->debugfs_root,
2928			card, fops);
2929
2930	if (IS_ERR_OR_NULL(file)) {
2931		dev_err(&card->dev,
2932			"Can't create %s. Perhaps debugfs is disabled.\n",
2933			name);
2934		return -ENODEV;
2935	}
2936
2937	df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL);
2938	if (!df) {
2939		debugfs_remove(file);
2940		dev_err(&card->dev,
2941			"Can't allocate memory for internal usage.\n");
2942		return -ENOMEM;
2943	}
2944
2945	df->card = card;
2946	df->file = file;
2947
2948	list_add(&df->link, &mmc_test_file_test);
2949	return 0;
2950}
2951
/*
 * Create the per-card "test" and "testlist" debugfs files.  Returns 0 on
 * success or the errno of the first failing registration.  On success
 * control falls through to the err label intentionally (ret is 0 there).
 */
static int mmc_test_register_dbgfs_file(struct mmc_card *card)
{
	int ret;

	mutex_lock(&mmc_test_lock);

	ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO,
		&mmc_test_fops_test);
	if (ret)
		goto err;

	ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO,
		&mmc_test_fops_testlist);
	if (ret)
		goto err;

err:
	mutex_unlock(&mmc_test_lock);

	return ret;
}
2973
/*
 * Driver probe: claim memory (MMC/SD) cards for testing and expose the
 * debugfs control files.
 */
static int mmc_test_probe(struct mmc_card *card)
{
	int ret;

	/* Only memory cards can be tested */
	if (!mmc_card_mmc(card) && !mmc_card_sd(card))
		return -ENODEV;

	ret = mmc_test_register_dbgfs_file(card);
	if (ret)
		return ret;

	dev_info(&card->dev, "Card claimed for testing.\n");

	return 0;
}

/* Driver remove: drop this card's saved results and debugfs files */
static void mmc_test_remove(struct mmc_card *card)
{
	mmc_test_free_result(card);
	mmc_test_free_dbgfs_file(card);
}
2995
/* MMC bus driver glue: bind to every inserted memory card */
static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmc_test",
	},
	.probe		= mmc_test_probe,
	.remove		= mmc_test_remove,
};

static int __init mmc_test_init(void)
{
	return mmc_register_driver(&mmc_driver);
}

static void __exit mmc_test_exit(void)
{
	/* Clear stalled data if card is still plugged */
	mmc_test_free_result(NULL);
	mmc_test_free_dbgfs_file(NULL);

	mmc_unregister_driver(&mmc_driver);
}
3017
3018module_init(mmc_test_init);
3019module_exit(mmc_test_exit);
3020
3021MODULE_LICENSE("GPL");
3022MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
3023MODULE_AUTHOR("Pierre Ossman");