   1/*
   2 *  linux/drivers/mmc/card/mmc_test.c
   3 *
   4 *  Copyright 2007-2008 Pierre Ossman
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License as published by
   8 * the Free Software Foundation; either version 2 of the License, or (at
   9 * your option) any later version.
  10 */
  11
  12#include <linux/mmc/core.h>
  13#include <linux/mmc/card.h>
  14#include <linux/mmc/host.h>
  15#include <linux/mmc/mmc.h>
  16#include <linux/slab.h>
  17
  18#include <linux/scatterlist.h>
  19#include <linux/swap.h>		/* For nr_free_buffer_pages() */
  20#include <linux/list.h>
  21
  22#include <linux/debugfs.h>
  23#include <linux/uaccess.h>
  24#include <linux/seq_file.h>
  25
  26#define RESULT_OK		0
  27#define RESULT_FAIL		1
  28#define RESULT_UNSUP_HOST	2
  29#define RESULT_UNSUP_CARD	3
  30
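/*
 * Scratch buffer sizing: PAGE_SIZE << BUFFER_ORDER, i.e. 16 KiB on systems
 * with 4 KiB pages, which holds 32 blocks of 512 bytes.
 */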
  31#define BUFFER_ORDER		2
  32#define BUFFER_SIZE		(PAGE_SIZE << BUFFER_ORDER)
  33
  34/*
  35 * Limit the test area size to the maximum MMC HC erase group size.  Note that
  36 * the maximum SD allocation unit size is just 4MiB.
  37 */
  38#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)
  39
  40/**
  41 * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
  42 * @page: first page in the allocation
   43 * @order: allocation order (log2 of the number of pages allocated)
  44 */
  45struct mmc_test_pages {
  46	struct page *page;
  47	unsigned int order;
  48};
  49
  50/**
  51 * struct mmc_test_mem - allocated memory.
  52 * @arr: array of allocations
  53 * @cnt: number of allocations
  54 */
  55struct mmc_test_mem {
  56	struct mmc_test_pages *arr;
  57	unsigned int cnt;
  58};
  59
  60/**
  61 * struct mmc_test_area - information for performance tests.
  62 * @max_sz: test area size (in bytes)
  63 * @dev_addr: address on card at which to do performance tests
  64 * @max_tfr: maximum transfer size allowed by driver (in bytes)
  65 * @max_segs: maximum segments allowed by driver in scatterlist @sg
  66 * @max_seg_sz: maximum segment size allowed by driver
  67 * @blocks: number of (512 byte) blocks currently mapped by @sg
  68 * @sg_len: length of currently mapped scatterlist @sg
  69 * @mem: allocated memory
  70 * @sg: scatterlist
  71 */
  72struct mmc_test_area {
  73	unsigned long max_sz;
  74	unsigned int dev_addr;
  75	unsigned int max_tfr;
  76	unsigned int max_segs;
  77	unsigned int max_seg_sz;
  78	unsigned int blocks;
  79	unsigned int sg_len;
  80	struct mmc_test_mem *mem;
  81	struct scatterlist *sg;
  82};
  83
  84/**
  85 * struct mmc_test_transfer_result - transfer results for performance tests.
  86 * @link: double-linked list
   87 * @count: number of groups of sectors to check
   88 * @sectors: number of sectors to check in one group
  89 * @ts: time values of transfer
  90 * @rate: calculated transfer rate
  91 * @iops: I/O operations per second (times 100)
  92 */
  93struct mmc_test_transfer_result {
  94	struct list_head link;
  95	unsigned int count;
  96	unsigned int sectors;
  97	struct timespec ts;
  98	unsigned int rate;
  99	unsigned int iops;
 100};
 101
 102/**
 103 * struct mmc_test_general_result - results for tests.
 104 * @link: double-linked list
 105 * @card: card under test
 106 * @testcase: number of test case
 107 * @result: result of test run
 108 * @tr_lst: transfer measurements if any as mmc_test_transfer_result
 109 */
 110struct mmc_test_general_result {
 111	struct list_head link;
 112	struct mmc_card *card;
 113	int testcase;
 114	int result;
 115	struct list_head tr_lst;
 116};
 117
 118/**
 119 * struct mmc_test_dbgfs_file - debugfs related file.
 120 * @link: double-linked list
 121 * @card: card under test
 122 * @file: file created under debugfs
 123 */
 124struct mmc_test_dbgfs_file {
 125	struct list_head link;
 126	struct mmc_card *card;
 127	struct dentry *file;
 128};
 129
 130/**
 131 * struct mmc_test_card - test information.
 132 * @card: card under test
 133 * @scratch: transfer buffer
 134 * @buffer: transfer buffer
 135 * @highmem: buffer for highmem tests
 136 * @area: information for performance tests
 137 * @gr: pointer to results of current testcase
 138 */
 139struct mmc_test_card {
 140	struct mmc_card	*card;
 141
 142	u8		scratch[BUFFER_SIZE];
 143	u8		*buffer;
 144#ifdef CONFIG_HIGHMEM
 145	struct page	*highmem;
 146#endif
 147	struct mmc_test_area		area;
 148	struct mmc_test_general_result	*gr;
 149};
 150
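/**
 * enum mmc_test_prep_media - preparation steps before a multiple R/W test.
 * @MMC_TEST_PREP_NONE: use the test area as it is
 * @MMC_TEST_PREP_WRITE_FULL: fill the test area with data before the test
 * @MMC_TEST_PREP_ERASE: erase the test area before the test
 */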
 151enum mmc_test_prep_media {
 152	MMC_TEST_PREP_NONE = 0,
 153	MMC_TEST_PREP_WRITE_FULL = 1 << 0,
 154	MMC_TEST_PREP_ERASE = 1 << 1,
 155};
 156
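/**
 * struct mmc_test_multiple_rw - parameters for a multiple read/write test.
 * @sg_len: table of scatterlist lengths to iterate over
 * @bs: table of request (chunk) sizes to iterate over
 * @len: number of entries in @bs (or @sg_len)
 * @size: total transfer size per test run (in bytes)
 * @do_write: write (true) or read (false)
 * @do_nonblock_req: use the non-blocking request API
 * @prepare: preparation steps for the test area
 */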
 157struct mmc_test_multiple_rw {
 158	unsigned int *sg_len;
 159	unsigned int *bs;
 160	unsigned int len;
 161	unsigned int size;
 162	bool do_write;
 163	bool do_nonblock_req;
 164	enum mmc_test_prep_media prepare;
 165};
 166
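/**
 * struct mmc_test_async_req - ties an asynchronous request to its test.
 * @areq: the asynchronous request
 * @test: the test card the request belongs to
 */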
 167struct mmc_test_async_req {
 168	struct mmc_async_req areq;
 169	struct mmc_test_card *test;
 170};
 171
 172/*******************************************************************/
 173/*  General helper functions                                       */
 174/*******************************************************************/
 175
 176/*
 177 * Configure correct block size in card
 178 */
 179static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
 180{
 181	return mmc_set_blocklen(test->card, size);
 182}
 183
 184/*
 185 * Fill in the mmc_request structure given a set of transfer parameters.
 186 */
 187static void mmc_test_prepare_mrq(struct mmc_test_card *test,
 188	struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
 189	unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
 190{
 191	BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop);
 192
 193	if (blocks > 1) {
 194		mrq->cmd->opcode = write ?
 195			MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
 196	} else {
 197		mrq->cmd->opcode = write ?
 198			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
 199	}
 200
 201	mrq->cmd->arg = dev_addr;
 202	if (!mmc_card_blockaddr(test->card))
 203		mrq->cmd->arg <<= 9;
 204
 205	mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;
 206
 207	if (blocks == 1)
 208		mrq->stop = NULL;
 209	else {
 210		mrq->stop->opcode = MMC_STOP_TRANSMISSION;
 211		mrq->stop->arg = 0;
 212		mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
 213	}
 214
 215	mrq->data->blksz = blksz;
 216	mrq->data->blocks = blocks;
 217	mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
 218	mrq->data->sg = sg;
 219	mrq->data->sg_len = sg_len;
 220
 221	mmc_set_data_timeout(mrq->data, test->card);
 222}
 223
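/*
 * Check if the card is still busy, i.e. not ready for data or still in the
 * programming state
 */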
 224static int mmc_test_busy(struct mmc_command *cmd)
 225{
 226	return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
 227		(R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);
 228}
 229
 230/*
 231 * Wait for the card to finish the busy state
 232 */
 233static int mmc_test_wait_busy(struct mmc_test_card *test)
 234{
 235	int ret, busy;
 236	struct mmc_command cmd = {0};
 237
 238	busy = 0;
 239	do {
 240		memset(&cmd, 0, sizeof(struct mmc_command));
 241
 242		cmd.opcode = MMC_SEND_STATUS;
 243		cmd.arg = test->card->rca << 16;
 244		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 245
 246		ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
 247		if (ret)
 248			break;
 249
 250		if (!busy && mmc_test_busy(&cmd)) {
 251			busy = 1;
 252			if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
 253				printk(KERN_INFO "%s: Warning: Host did not "
 254					"wait for busy state to end.\n",
 255					mmc_hostname(test->card->host));
 256		}
 257	} while (mmc_test_busy(&cmd));
 258
 259	return ret;
 260}
 261
 262/*
 263 * Transfer a single sector of kernel addressable data
 264 */
 265static int mmc_test_buffer_transfer(struct mmc_test_card *test,
 266	u8 *buffer, unsigned addr, unsigned blksz, int write)
 267{
 268	int ret;
 269
 270	struct mmc_request mrq = {0};
 271	struct mmc_command cmd = {0};
 272	struct mmc_command stop = {0};
 273	struct mmc_data data = {0};
 274
 275	struct scatterlist sg;
 276
 277	mrq.cmd = &cmd;
 278	mrq.data = &data;
 279	mrq.stop = &stop;
 280
 281	sg_init_one(&sg, buffer, blksz);
 282
 283	mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);
 284
 285	mmc_wait_for_req(test->card->host, &mrq);
 286
 287	if (cmd.error)
 288		return cmd.error;
 289	if (data.error)
 290		return data.error;
 291
 292	ret = mmc_test_wait_busy(test);
 293	if (ret)
 294		return ret;
 295
 296	return 0;
 297}
 298
 299static void mmc_test_free_mem(struct mmc_test_mem *mem)
 300{
 301	if (!mem)
 302		return;
 303	while (mem->cnt--)
 304		__free_pages(mem->arr[mem->cnt].page,
 305			     mem->arr[mem->cnt].order);
 306	kfree(mem->arr);
 307	kfree(mem);
 308}
 309
 310/*
 311 * Allocate a lot of memory, preferably max_sz but at least min_sz.  In case
 312 * there isn't much memory do not exceed 1/16th total lowmem pages.  Also do
 313 * not exceed a maximum number of segments and try not to make segments much
 314 * bigger than maximum segment size.
 315 */
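/*
 * For illustration: with 4 KiB pages, max_sz = 1 MiB and max_seg_sz = 64 KiB,
 * each iteration first attempts an order-4 allocation (16 pages = 64 KiB) and
 * falls back to smaller orders under memory pressure, stopping once either
 * max_sz is covered or max_segs entries have been used.
 */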
 316static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
 317					       unsigned long max_sz,
 318					       unsigned int max_segs,
 319					       unsigned int max_seg_sz)
 320{
 321	unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
 322	unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
 323	unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
 324	unsigned long page_cnt = 0;
 325	unsigned long limit = nr_free_buffer_pages() >> 4;
 326	struct mmc_test_mem *mem;
 327
 328	if (max_page_cnt > limit)
 329		max_page_cnt = limit;
 330	if (min_page_cnt > max_page_cnt)
 331		min_page_cnt = max_page_cnt;
 332
 333	if (max_seg_page_cnt > max_page_cnt)
 334		max_seg_page_cnt = max_page_cnt;
 335
 336	if (max_segs > max_page_cnt)
 337		max_segs = max_page_cnt;
 338
 339	mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
 340	if (!mem)
 341		return NULL;
 342
 343	mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_segs,
 344			   GFP_KERNEL);
 345	if (!mem->arr)
 346		goto out_free;
 347
 348	while (max_page_cnt) {
 349		struct page *page;
 350		unsigned int order;
 351		gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
 352				__GFP_NORETRY;
 353
 354		order = get_order(max_seg_page_cnt << PAGE_SHIFT);
 355		while (1) {
 356			page = alloc_pages(flags, order);
 357			if (page || !order)
 358				break;
 359			order -= 1;
 360		}
 361		if (!page) {
 362			if (page_cnt < min_page_cnt)
 363				goto out_free;
 364			break;
 365		}
 366		mem->arr[mem->cnt].page = page;
 367		mem->arr[mem->cnt].order = order;
 368		mem->cnt += 1;
 369		if (max_page_cnt <= (1UL << order))
 370			break;
 371		max_page_cnt -= 1UL << order;
 372		page_cnt += 1UL << order;
 373		if (mem->cnt >= max_segs) {
 374			if (page_cnt < min_page_cnt)
 375				goto out_free;
 376			break;
 377		}
 378	}
 379
 380	return mem;
 381
 382out_free:
 383	mmc_test_free_mem(mem);
 384	return NULL;
 385}
 386
 387/*
 388 * Map memory into a scatterlist.  Optionally allow the same memory to be
 389 * mapped more than once.
 390 */
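/*
 * Example (illustrative): size = 64 KiB with min_sg_len = 4 caps each segment
 * at ALIGN(64 KiB / 4, 512) = 16 KiB, so at least four scatterlist entries
 * are used even when larger contiguous allocations are available.
 */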
 391static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size,
 392			   struct scatterlist *sglist, int repeat,
 393			   unsigned int max_segs, unsigned int max_seg_sz,
 394			   unsigned int *sg_len, int min_sg_len)
 395{
 396	struct scatterlist *sg = NULL;
 397	unsigned int i;
 398	unsigned long sz = size;
 399
 400	sg_init_table(sglist, max_segs);
 401	if (min_sg_len > max_segs)
 402		min_sg_len = max_segs;
 403
 404	*sg_len = 0;
 405	do {
 406		for (i = 0; i < mem->cnt; i++) {
 407			unsigned long len = PAGE_SIZE << mem->arr[i].order;
 408
 409			if (min_sg_len && (size / min_sg_len < len))
 410				len = ALIGN(size / min_sg_len, 512);
 411			if (len > sz)
 412				len = sz;
 413			if (len > max_seg_sz)
 414				len = max_seg_sz;
 415			if (sg)
 416				sg = sg_next(sg);
 417			else
 418				sg = sglist;
 419			if (!sg)
 420				return -EINVAL;
 421			sg_set_page(sg, mem->arr[i].page, len, 0);
 422			sz -= len;
 423			*sg_len += 1;
 424			if (!sz)
 425				break;
 426		}
 427	} while (sz && repeat);
 428
 429	if (sz)
 430		return -EINVAL;
 431
 432	if (sg)
 433		sg_mark_end(sg);
 434
 435	return 0;
 436}
 437
 438/*
 439 * Map memory into a scatterlist so that no pages are contiguous.  Allow the
 440 * same memory to be mapped more than once.
 441 */
 442static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
 443				       unsigned long sz,
 444				       struct scatterlist *sglist,
 445				       unsigned int max_segs,
 446				       unsigned int max_seg_sz,
 447				       unsigned int *sg_len)
 448{
 449	struct scatterlist *sg = NULL;
 450	unsigned int i = mem->cnt, cnt;
 451	unsigned long len;
 452	void *base, *addr, *last_addr = NULL;
 453
 454	sg_init_table(sglist, max_segs);
 455
 456	*sg_len = 0;
 457	while (sz) {
 458		base = page_address(mem->arr[--i].page);
 459		cnt = 1 << mem->arr[i].order;
 460		while (sz && cnt) {
 461			addr = base + PAGE_SIZE * --cnt;
 462			if (last_addr && last_addr + PAGE_SIZE == addr)
 463				continue;
 464			last_addr = addr;
 465			len = PAGE_SIZE;
 466			if (len > max_seg_sz)
 467				len = max_seg_sz;
 468			if (len > sz)
 469				len = sz;
 470			if (sg)
 471				sg = sg_next(sg);
 472			else
 473				sg = sglist;
 474			if (!sg)
 475				return -EINVAL;
 476			sg_set_page(sg, virt_to_page(addr), len, 0);
 477			sz -= len;
 478			*sg_len += 1;
 479		}
 480		if (i == 0)
 481			i = mem->cnt;
 482	}
 483
 484	if (sg)
 485		sg_mark_end(sg);
 486
 487	return 0;
 488}
 489
 490/*
 491 * Calculate transfer rate in bytes per second.
 492 */
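/*
 * For example, 1 MiB moved in 0.25 s: bytes * 10^9 = 2^20 * 10^9 divided by
 * ns = 0.25 * 10^9 gives 4194304 B/s = 4 MiB/s.  Both values are halved only
 * while ns exceeds 32 bits, so that do_div() can be used.
 */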
 493static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
 494{
 495	uint64_t ns;
 496
 497	ns = ts->tv_sec;
 498	ns *= 1000000000;
 499	ns += ts->tv_nsec;
 500
 501	bytes *= 1000000000;
 502
 503	while (ns > UINT_MAX) {
 504		bytes >>= 1;
 505		ns >>= 1;
 506	}
 507
 508	if (!ns)
 509		return 0;
 510
 511	do_div(bytes, (uint32_t)ns);
 512
 513	return bytes;
 514}
 515
 516/*
 517 * Save transfer results for future usage
 518 */
 519static void mmc_test_save_transfer_result(struct mmc_test_card *test,
 520	unsigned int count, unsigned int sectors, struct timespec ts,
 521	unsigned int rate, unsigned int iops)
 522{
 523	struct mmc_test_transfer_result *tr;
 524
 525	if (!test->gr)
 526		return;
 527
 528	tr = kmalloc(sizeof(struct mmc_test_transfer_result), GFP_KERNEL);
 529	if (!tr)
 530		return;
 531
 532	tr->count = count;
 533	tr->sectors = sectors;
 534	tr->ts = ts;
 535	tr->rate = rate;
 536	tr->iops = iops;
 537
 538	list_add_tail(&tr->link, &test->gr->tr_lst);
 539}
 540
 541/*
 542 * Print the transfer rate.
 543 */
 544static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
 545				struct timespec *ts1, struct timespec *ts2)
 546{
 547	unsigned int rate, iops, sectors = bytes >> 9;
 548	struct timespec ts;
 549
 550	ts = timespec_sub(*ts2, *ts1);
 551
 552	rate = mmc_test_rate(bytes, &ts);
 553	iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */
 554
 555	printk(KERN_INFO "%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
 556			 "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
 557			 mmc_hostname(test->card->host), sectors, sectors >> 1,
 558			 (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
 559			 (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024,
 560			 iops / 100, iops % 100);
 561
 562	mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
 563}
 564
 565/*
 566 * Print the average transfer rate.
 567 */
 568static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
 569				    unsigned int count, struct timespec *ts1,
 570				    struct timespec *ts2)
 571{
 572	unsigned int rate, iops, sectors = bytes >> 9;
 573	uint64_t tot = bytes * count;
 574	struct timespec ts;
 575
 576	ts = timespec_sub(*ts2, *ts1);
 577
 578	rate = mmc_test_rate(tot, &ts);
 579	iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */
 580
 581	printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
 582			 "%lu.%09lu seconds (%u kB/s, %u KiB/s, "
 583			 "%u.%02u IOPS, sg_len %d)\n",
 584			 mmc_hostname(test->card->host), count, sectors, count,
 585			 sectors >> 1, (sectors & 1 ? ".5" : ""),
 586			 (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
 587			 rate / 1000, rate / 1024, iops / 100, iops % 100,
 588			 test->area.sg_len);
 589
 590	mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
 591}
 592
 593/*
 594 * Return the card size in sectors.
 595 */
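/*
 * Block-addressed (high capacity) MMC reports its size directly in EXT_CSD
 * sectors; otherwise the CSD capacity is scaled by the read block size,
 * e.g. read_blkbits = 10 (1 KiB blocks) doubles the sector count.
 */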
 596static unsigned int mmc_test_capacity(struct mmc_card *card)
 597{
 598	if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
 599		return card->ext_csd.sectors;
 600	else
 601		return card->csd.capacity << (card->csd.read_blkbits - 9);
 602}
 603
 604/*******************************************************************/
 605/*  Test preparation and cleanup                                   */
 606/*******************************************************************/
 607
  608/*
  609 * Fill the first BUFFER_SIZE / 512 sectors of the card with known data
  610 * so that bad reads/writes can be detected
  611 */
 612static int __mmc_test_prepare(struct mmc_test_card *test, int write)
 613{
 614	int ret, i;
 615
 616	ret = mmc_test_set_blksize(test, 512);
 617	if (ret)
 618		return ret;
 619
 620	if (write)
 621		memset(test->buffer, 0xDF, 512);
 622	else {
  623		for (i = 0; i < 512; i++)
  624			test->buffer[i] = i;
  625	}
  626
  627	for (i = 0; i < BUFFER_SIZE / 512; i++) {
 628		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
 629		if (ret)
 630			return ret;
 631	}
 632
 633	return 0;
 634}
 635
 636static int mmc_test_prepare_write(struct mmc_test_card *test)
 637{
 638	return __mmc_test_prepare(test, 1);
 639}
 640
 641static int mmc_test_prepare_read(struct mmc_test_card *test)
 642{
 643	return __mmc_test_prepare(test, 0);
 644}
 645
 646static int mmc_test_cleanup(struct mmc_test_card *test)
 647{
 648	int ret, i;
 649
 650	ret = mmc_test_set_blksize(test, 512);
 651	if (ret)
 652		return ret;
 653
 654	memset(test->buffer, 0, 512);
 655
  656	for (i = 0; i < BUFFER_SIZE / 512; i++) {
 657		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
 658		if (ret)
 659			return ret;
 660	}
 661
 662	return 0;
 663}
 664
 665/*******************************************************************/
 666/*  Test execution helpers                                         */
 667/*******************************************************************/
 668
 669/*
 670 * Modifies the mmc_request to perform the "short transfer" tests
 671 */
 672static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
 673	struct mmc_request *mrq, int write)
 674{
 675	BUG_ON(!mrq || !mrq->cmd || !mrq->data);
 676
 677	if (mrq->data->blocks > 1) {
 678		mrq->cmd->opcode = write ?
 679			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
 680		mrq->stop = NULL;
 681	} else {
 682		mrq->cmd->opcode = MMC_SEND_STATUS;
 683		mrq->cmd->arg = test->card->rca << 16;
 684	}
 685}
 686
 687/*
 688 * Checks that a normal transfer didn't have any errors
 689 */
 690static int mmc_test_check_result(struct mmc_test_card *test,
 691				 struct mmc_request *mrq)
 692{
 693	int ret;
 694
 695	BUG_ON(!mrq || !mrq->cmd || !mrq->data);
 696
 697	ret = 0;
 698
 699	if (!ret && mrq->cmd->error)
 700		ret = mrq->cmd->error;
 701	if (!ret && mrq->data->error)
 702		ret = mrq->data->error;
 703	if (!ret && mrq->stop && mrq->stop->error)
 704		ret = mrq->stop->error;
 705	if (!ret && mrq->data->bytes_xfered !=
 706		mrq->data->blocks * mrq->data->blksz)
 707		ret = RESULT_FAIL;
 708
 709	if (ret == -EINVAL)
 710		ret = RESULT_UNSUP_HOST;
 711
 712	return ret;
 713}
 714
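/*
 * Checks that a transfer submitted via the asynchronous request API
 * completed without errors
 */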
 715static int mmc_test_check_result_async(struct mmc_card *card,
 716				       struct mmc_async_req *areq)
 717{
 718	struct mmc_test_async_req *test_async =
 719		container_of(areq, struct mmc_test_async_req, areq);
 720
 721	mmc_test_wait_busy(test_async->test);
 722
 723	return mmc_test_check_result(test_async->test, areq->mrq);
 724}
 725
 726/*
 727 * Checks that a "short transfer" behaved as expected
 728 */
 729static int mmc_test_check_broken_result(struct mmc_test_card *test,
 730	struct mmc_request *mrq)
 731{
 732	int ret;
 733
 734	BUG_ON(!mrq || !mrq->cmd || !mrq->data);
 735
 736	ret = 0;
 737
 738	if (!ret && mrq->cmd->error)
 739		ret = mrq->cmd->error;
 740	if (!ret && mrq->data->error == 0)
 741		ret = RESULT_FAIL;
 742	if (!ret && mrq->data->error != -ETIMEDOUT)
 743		ret = mrq->data->error;
 744	if (!ret && mrq->stop && mrq->stop->error)
 745		ret = mrq->stop->error;
 746	if (mrq->data->blocks > 1) {
 747		if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
 748			ret = RESULT_FAIL;
 749	} else {
 750		if (!ret && mrq->data->bytes_xfered > 0)
 751			ret = RESULT_FAIL;
 752	}
 753
 754	if (ret == -EINVAL)
 755		ret = RESULT_UNSUP_HOST;
 756
 757	return ret;
 758}
 759
  760/*
  761 * Resets the request structures for reuse in the next non-blocking transfer
  762 */
 763static void mmc_test_nonblock_reset(struct mmc_request *mrq,
 764				    struct mmc_command *cmd,
 765				    struct mmc_command *stop,
 766				    struct mmc_data *data)
 767{
 768	memset(mrq, 0, sizeof(struct mmc_request));
 769	memset(cmd, 0, sizeof(struct mmc_command));
 770	memset(data, 0, sizeof(struct mmc_data));
 771	memset(stop, 0, sizeof(struct mmc_command));
 772
 773	mrq->cmd = cmd;
 774	mrq->data = data;
 775	mrq->stop = stop;
 776}
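
/*
 * Tests non-blocking transfer with certain parameters.  Two requests are
 * kept in flight alternately so that one can be prepared while the other
 * completes.
 */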
 777static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
 778				      struct scatterlist *sg, unsigned sg_len,
 779				      unsigned dev_addr, unsigned blocks,
 780				      unsigned blksz, int write, int count)
 781{
 782	struct mmc_request mrq1;
 783	struct mmc_command cmd1;
 784	struct mmc_command stop1;
 785	struct mmc_data data1;
 786
 787	struct mmc_request mrq2;
 788	struct mmc_command cmd2;
 789	struct mmc_command stop2;
 790	struct mmc_data data2;
 791
 792	struct mmc_test_async_req test_areq[2];
 793	struct mmc_async_req *done_areq;
 794	struct mmc_async_req *cur_areq = &test_areq[0].areq;
 795	struct mmc_async_req *other_areq = &test_areq[1].areq;
 796	int i;
 797	int ret;
 798
 799	test_areq[0].test = test;
 800	test_areq[1].test = test;
 801
 802	mmc_test_nonblock_reset(&mrq1, &cmd1, &stop1, &data1);
 803	mmc_test_nonblock_reset(&mrq2, &cmd2, &stop2, &data2);
 804
 805	cur_areq->mrq = &mrq1;
 806	cur_areq->err_check = mmc_test_check_result_async;
 807	other_areq->mrq = &mrq2;
 808	other_areq->err_check = mmc_test_check_result_async;
 809
 810	for (i = 0; i < count; i++) {
 811		mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr,
 812				     blocks, blksz, write);
 813		done_areq = mmc_start_req(test->card->host, cur_areq, &ret);
 814
 815		if (ret || (!done_areq && i > 0))
 816			goto err;
 817
 818		if (done_areq) {
 819			if (done_areq->mrq == &mrq2)
 820				mmc_test_nonblock_reset(&mrq2, &cmd2,
 821							&stop2, &data2);
 822			else
 823				mmc_test_nonblock_reset(&mrq1, &cmd1,
 824							&stop1, &data1);
 825		}
 826		done_areq = cur_areq;
 827		cur_areq = other_areq;
 828		other_areq = done_areq;
 829		dev_addr += blocks;
 830	}
 831
 832	done_areq = mmc_start_req(test->card->host, NULL, &ret);
 833
 834	return ret;
 835err:
 836	return ret;
 837}
 838
 839/*
 840 * Tests a basic transfer with certain parameters
 841 */
 842static int mmc_test_simple_transfer(struct mmc_test_card *test,
 843	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
 844	unsigned blocks, unsigned blksz, int write)
 845{
 846	struct mmc_request mrq = {0};
 847	struct mmc_command cmd = {0};
 848	struct mmc_command stop = {0};
 849	struct mmc_data data = {0};
 850
 851	mrq.cmd = &cmd;
 852	mrq.data = &data;
 853	mrq.stop = &stop;
 854
 855	mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
 856		blocks, blksz, write);
 857
 858	mmc_wait_for_req(test->card->host, &mrq);
 859
 860	mmc_test_wait_busy(test);
 861
 862	return mmc_test_check_result(test, &mrq);
 863}
 864
 865/*
 866 * Tests a transfer where the card will fail completely or partly
 867 */
 868static int mmc_test_broken_transfer(struct mmc_test_card *test,
 869	unsigned blocks, unsigned blksz, int write)
 870{
 871	struct mmc_request mrq = {0};
 872	struct mmc_command cmd = {0};
 873	struct mmc_command stop = {0};
 874	struct mmc_data data = {0};
 875
 876	struct scatterlist sg;
 877
 878	mrq.cmd = &cmd;
 879	mrq.data = &data;
 880	mrq.stop = &stop;
 881
 882	sg_init_one(&sg, test->buffer, blocks * blksz);
 883
 884	mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
 885	mmc_test_prepare_broken_mrq(test, &mrq, write);
 886
 887	mmc_wait_for_req(test->card->host, &mrq);
 888
 889	mmc_test_wait_busy(test);
 890
 891	return mmc_test_check_broken_result(test, &mrq);
 892}
 893
 894/*
 895 * Does a complete transfer test where data is also validated
 896 *
 897 * Note: mmc_test_prepare() must have been done before this call
 898 */
 899static int mmc_test_transfer(struct mmc_test_card *test,
 900	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
 901	unsigned blocks, unsigned blksz, int write)
 902{
 903	int ret, i;
 904	unsigned long flags;
 905
 906	if (write) {
  907		for (i = 0; i < blocks * blksz; i++)
 908			test->scratch[i] = i;
 909	} else {
 910		memset(test->scratch, 0, BUFFER_SIZE);
 911	}
 912	local_irq_save(flags);
 913	sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
 914	local_irq_restore(flags);
 915
 916	ret = mmc_test_set_blksize(test, blksz);
 917	if (ret)
 918		return ret;
 919
 920	ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
 921		blocks, blksz, write);
 922	if (ret)
 923		return ret;
 924
 925	if (write) {
 926		int sectors;
 927
 928		ret = mmc_test_set_blksize(test, 512);
 929		if (ret)
 930			return ret;
 931
 932		sectors = (blocks * blksz + 511) / 512;
 933		if ((sectors * 512) == (blocks * blksz))
 934			sectors++;
 935
 936		if ((sectors * 512) > BUFFER_SIZE)
 937			return -EINVAL;
 938
 939		memset(test->buffer, 0, sectors * 512);
 940
  941		for (i = 0; i < sectors; i++) {
 942			ret = mmc_test_buffer_transfer(test,
 943				test->buffer + i * 512,
 944				dev_addr + i, 512, 0);
 945			if (ret)
 946				return ret;
 947		}
 948
  949		for (i = 0; i < blocks * blksz; i++) {
 950			if (test->buffer[i] != (u8)i)
 951				return RESULT_FAIL;
 952		}
 953
  954		for (; i < sectors * 512; i++) {
 955			if (test->buffer[i] != 0xDF)
 956				return RESULT_FAIL;
 957		}
 958	} else {
 959		local_irq_save(flags);
 960		sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
 961		local_irq_restore(flags);
  962		for (i = 0; i < blocks * blksz; i++) {
 963			if (test->scratch[i] != (u8)i)
 964				return RESULT_FAIL;
 965		}
 966	}
 967
 968	return 0;
 969}
 970
 971/*******************************************************************/
 972/*  Tests                                                          */
 973/*******************************************************************/
 974
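/**
 * struct mmc_test_case - a single test case.
 * @name: human-readable test name
 * @prepare: optional hook run before @run
 * @run: the test itself
 * @cleanup: optional hook run after @run
 */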
 975struct mmc_test_case {
 976	const char *name;
 977
 978	int (*prepare)(struct mmc_test_card *);
 979	int (*run)(struct mmc_test_card *);
 980	int (*cleanup)(struct mmc_test_card *);
 981};
 982
 983static int mmc_test_basic_write(struct mmc_test_card *test)
 984{
 985	int ret;
 986	struct scatterlist sg;
 987
 988	ret = mmc_test_set_blksize(test, 512);
 989	if (ret)
 990		return ret;
 991
 992	sg_init_one(&sg, test->buffer, 512);
 993
 994	ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
 995	if (ret)
 996		return ret;
 997
 998	return 0;
 999}
1000
1001static int mmc_test_basic_read(struct mmc_test_card *test)
1002{
1003	int ret;
1004	struct scatterlist sg;
1005
1006	ret = mmc_test_set_blksize(test, 512);
1007	if (ret)
1008		return ret;
1009
1010	sg_init_one(&sg, test->buffer, 512);
1011
1012	ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
1013	if (ret)
1014		return ret;
1015
1016	return 0;
1017}
1018
1019static int mmc_test_verify_write(struct mmc_test_card *test)
1020{
1021	int ret;
1022	struct scatterlist sg;
1023
1024	sg_init_one(&sg, test->buffer, 512);
1025
1026	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1027	if (ret)
1028		return ret;
1029
1030	return 0;
1031}
1032
1033static int mmc_test_verify_read(struct mmc_test_card *test)
1034{
1035	int ret;
1036	struct scatterlist sg;
1037
1038	sg_init_one(&sg, test->buffer, 512);
1039
1040	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1041	if (ret)
1042		return ret;
1043
1044	return 0;
1045}
1046
1047static int mmc_test_multi_write(struct mmc_test_card *test)
1048{
1049	int ret;
1050	unsigned int size;
1051	struct scatterlist sg;
1052
1053	if (test->card->host->max_blk_count == 1)
1054		return RESULT_UNSUP_HOST;
1055
1056	size = PAGE_SIZE * 2;
1057	size = min(size, test->card->host->max_req_size);
1058	size = min(size, test->card->host->max_seg_size);
1059	size = min(size, test->card->host->max_blk_count * 512);
1060
1061	if (size < 1024)
1062		return RESULT_UNSUP_HOST;
1063
1064	sg_init_one(&sg, test->buffer, size);
1065
1066	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
1067	if (ret)
1068		return ret;
1069
1070	return 0;
1071}
1072
1073static int mmc_test_multi_read(struct mmc_test_card *test)
1074{
1075	int ret;
1076	unsigned int size;
1077	struct scatterlist sg;
1078
1079	if (test->card->host->max_blk_count == 1)
1080		return RESULT_UNSUP_HOST;
1081
1082	size = PAGE_SIZE * 2;
1083	size = min(size, test->card->host->max_req_size);
1084	size = min(size, test->card->host->max_seg_size);
1085	size = min(size, test->card->host->max_blk_count * 512);
1086
1087	if (size < 1024)
1088		return RESULT_UNSUP_HOST;
1089
1090	sg_init_one(&sg, test->buffer, size);
1091
1092	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
1093	if (ret)
1094		return ret;
1095
1096	return 0;
1097}
1098
1099static int mmc_test_pow2_write(struct mmc_test_card *test)
1100{
1101	int ret, i;
1102	struct scatterlist sg;
1103
1104	if (!test->card->csd.write_partial)
1105		return RESULT_UNSUP_CARD;
1106
 1107	for (i = 1; i < 512; i <<= 1) {
1108		sg_init_one(&sg, test->buffer, i);
1109		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
1110		if (ret)
1111			return ret;
1112	}
1113
1114	return 0;
1115}
1116
1117static int mmc_test_pow2_read(struct mmc_test_card *test)
1118{
1119	int ret, i;
1120	struct scatterlist sg;
1121
1122	if (!test->card->csd.read_partial)
1123		return RESULT_UNSUP_CARD;
1124
 1125	for (i = 1; i < 512; i <<= 1) {
1126		sg_init_one(&sg, test->buffer, i);
1127		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
1128		if (ret)
1129			return ret;
1130	}
1131
1132	return 0;
1133}
1134
1135static int mmc_test_weird_write(struct mmc_test_card *test)
1136{
1137	int ret, i;
1138	struct scatterlist sg;
1139
1140	if (!test->card->csd.write_partial)
1141		return RESULT_UNSUP_CARD;
1142
 1143	for (i = 3; i < 512; i += 7) {
1144		sg_init_one(&sg, test->buffer, i);
1145		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
1146		if (ret)
1147			return ret;
1148	}
1149
1150	return 0;
1151}
1152
1153static int mmc_test_weird_read(struct mmc_test_card *test)
1154{
1155	int ret, i;
1156	struct scatterlist sg;
1157
1158	if (!test->card->csd.read_partial)
1159		return RESULT_UNSUP_CARD;
1160
 1161	for (i = 3; i < 512; i += 7) {
1162		sg_init_one(&sg, test->buffer, i);
1163		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
1164		if (ret)
1165			return ret;
1166	}
1167
1168	return 0;
1169}
1170
1171static int mmc_test_align_write(struct mmc_test_card *test)
1172{
1173	int ret, i;
1174	struct scatterlist sg;
1175
 1176	for (i = 1; i < 4; i++) {
1177		sg_init_one(&sg, test->buffer + i, 512);
1178		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1179		if (ret)
1180			return ret;
1181	}
1182
1183	return 0;
1184}
1185
1186static int mmc_test_align_read(struct mmc_test_card *test)
1187{
1188	int ret, i;
1189	struct scatterlist sg;
1190
 1191	for (i = 1; i < 4; i++) {
1192		sg_init_one(&sg, test->buffer + i, 512);
1193		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1194		if (ret)
1195			return ret;
1196	}
1197
1198	return 0;
1199}
1200
1201static int mmc_test_align_multi_write(struct mmc_test_card *test)
1202{
1203	int ret, i;
1204	unsigned int size;
1205	struct scatterlist sg;
1206
1207	if (test->card->host->max_blk_count == 1)
1208		return RESULT_UNSUP_HOST;
1209
1210	size = PAGE_SIZE * 2;
1211	size = min(size, test->card->host->max_req_size);
1212	size = min(size, test->card->host->max_seg_size);
1213	size = min(size, test->card->host->max_blk_count * 512);
1214
1215	if (size < 1024)
1216		return RESULT_UNSUP_HOST;
1217
 1218	for (i = 1; i < 4; i++) {
1219		sg_init_one(&sg, test->buffer + i, size);
1220		ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
1221		if (ret)
1222			return ret;
1223	}
1224
1225	return 0;
1226}
1227
1228static int mmc_test_align_multi_read(struct mmc_test_card *test)
1229{
1230	int ret, i;
1231	unsigned int size;
1232	struct scatterlist sg;
1233
1234	if (test->card->host->max_blk_count == 1)
1235		return RESULT_UNSUP_HOST;
1236
1237	size = PAGE_SIZE * 2;
1238	size = min(size, test->card->host->max_req_size);
1239	size = min(size, test->card->host->max_seg_size);
1240	size = min(size, test->card->host->max_blk_count * 512);
1241
1242	if (size < 1024)
1243		return RESULT_UNSUP_HOST;
1244
 1245	for (i = 1; i < 4; i++) {
1246		sg_init_one(&sg, test->buffer + i, size);
1247		ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
1248		if (ret)
1249			return ret;
1250	}
1251
1252	return 0;
1253}
1254
1255static int mmc_test_xfersize_write(struct mmc_test_card *test)
1256{
1257	int ret;
1258
1259	ret = mmc_test_set_blksize(test, 512);
1260	if (ret)
1261		return ret;
1262
1263	ret = mmc_test_broken_transfer(test, 1, 512, 1);
1264	if (ret)
1265		return ret;
1266
1267	return 0;
1268}
1269
1270static int mmc_test_xfersize_read(struct mmc_test_card *test)
1271{
1272	int ret;
1273
1274	ret = mmc_test_set_blksize(test, 512);
1275	if (ret)
1276		return ret;
1277
1278	ret = mmc_test_broken_transfer(test, 1, 512, 0);
1279	if (ret)
1280		return ret;
1281
1282	return 0;
1283}
1284
1285static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
1286{
1287	int ret;
1288
1289	if (test->card->host->max_blk_count == 1)
1290		return RESULT_UNSUP_HOST;
1291
1292	ret = mmc_test_set_blksize(test, 512);
1293	if (ret)
1294		return ret;
1295
1296	ret = mmc_test_broken_transfer(test, 2, 512, 1);
1297	if (ret)
1298		return ret;
1299
1300	return 0;
1301}
1302
1303static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
1304{
1305	int ret;
1306
1307	if (test->card->host->max_blk_count == 1)
1308		return RESULT_UNSUP_HOST;
1309
1310	ret = mmc_test_set_blksize(test, 512);
1311	if (ret)
1312		return ret;
1313
1314	ret = mmc_test_broken_transfer(test, 2, 512, 0);
1315	if (ret)
1316		return ret;
1317
1318	return 0;
1319}
1320
1321#ifdef CONFIG_HIGHMEM
1322
1323static int mmc_test_write_high(struct mmc_test_card *test)
1324{
1325	int ret;
1326	struct scatterlist sg;
1327
1328	sg_init_table(&sg, 1);
1329	sg_set_page(&sg, test->highmem, 512, 0);
1330
1331	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1332	if (ret)
1333		return ret;
1334
1335	return 0;
1336}
1337
1338static int mmc_test_read_high(struct mmc_test_card *test)
1339{
1340	int ret;
1341	struct scatterlist sg;
1342
1343	sg_init_table(&sg, 1);
1344	sg_set_page(&sg, test->highmem, 512, 0);
1345
1346	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1347	if (ret)
1348		return ret;
1349
1350	return 0;
1351}
1352
1353static int mmc_test_multi_write_high(struct mmc_test_card *test)
1354{
1355	int ret;
1356	unsigned int size;
1357	struct scatterlist sg;
1358
1359	if (test->card->host->max_blk_count == 1)
1360		return RESULT_UNSUP_HOST;
1361
1362	size = PAGE_SIZE * 2;
1363	size = min(size, test->card->host->max_req_size);
1364	size = min(size, test->card->host->max_seg_size);
1365	size = min(size, test->card->host->max_blk_count * 512);
1366
1367	if (size < 1024)
1368		return RESULT_UNSUP_HOST;
1369
1370	sg_init_table(&sg, 1);
1371	sg_set_page(&sg, test->highmem, size, 0);
1372
1373	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
1374	if (ret)
1375		return ret;
1376
1377	return 0;
1378}
1379
1380static int mmc_test_multi_read_high(struct mmc_test_card *test)
1381{
1382	int ret;
1383	unsigned int size;
1384	struct scatterlist sg;
1385
1386	if (test->card->host->max_blk_count == 1)
1387		return RESULT_UNSUP_HOST;
1388
1389	size = PAGE_SIZE * 2;
1390	size = min(size, test->card->host->max_req_size);
1391	size = min(size, test->card->host->max_seg_size);
1392	size = min(size, test->card->host->max_blk_count * 512);
1393
1394	if (size < 1024)
1395		return RESULT_UNSUP_HOST;
1396
1397	sg_init_table(&sg, 1);
1398	sg_set_page(&sg, test->highmem, size, 0);
1399
1400	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
1401	if (ret)
1402		return ret;
1403
1404	return 0;
1405}
1406
1407#else
1408
1409static int mmc_test_no_highmem(struct mmc_test_card *test)
1410{
1411	printk(KERN_INFO "%s: Highmem not configured - test skipped\n",
1412	       mmc_hostname(test->card->host));
1413	return 0;
1414}
1415
1416#endif /* CONFIG_HIGHMEM */
1417
1418/*
1419 * Map sz bytes so that it can be transferred.
1420 */
1421static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
1422			     int max_scatter, int min_sg_len)
1423{
1424	struct mmc_test_area *t = &test->area;
1425	int err;
1426
1427	t->blocks = sz >> 9;
1428
1429	if (max_scatter) {
1430		err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
1431						  t->max_segs, t->max_seg_sz,
1432				       &t->sg_len);
1433	} else {
1434		err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
1435				      t->max_seg_sz, &t->sg_len, min_sg_len);
1436	}
1437	if (err)
1438		printk(KERN_INFO "%s: Failed to map sg list\n",
1439		       mmc_hostname(test->card->host));
1440	return err;
1441}
1442
1443/*
1444 * Transfer bytes mapped by mmc_test_area_map().
1445 */
1446static int mmc_test_area_transfer(struct mmc_test_card *test,
1447				  unsigned int dev_addr, int write)
1448{
1449	struct mmc_test_area *t = &test->area;
1450
1451	return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
1452					t->blocks, 512, write);
1453}
1454
1455/*
1456 * Map and transfer bytes for multiple transfers.
1457 */
1458static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
1459				unsigned int dev_addr, int write,
1460				int max_scatter, int timed, int count,
1461				bool nonblock, int min_sg_len)
1462{
1463	struct timespec ts1, ts2;
1464	int ret = 0;
1465	int i;
1466	struct mmc_test_area *t = &test->area;
1467
1468	/*
1469	 * In the case of a maximally scattered transfer, the maximum transfer
1470	 * size is further limited by using PAGE_SIZE segments.
1471	 */
1472	if (max_scatter) {
 1473		unsigned long max_tfr;
1475
1476		if (t->max_seg_sz >= PAGE_SIZE)
1477			max_tfr = t->max_segs * PAGE_SIZE;
1478		else
1479			max_tfr = t->max_segs * t->max_seg_sz;
1480		if (sz > max_tfr)
1481			sz = max_tfr;
1482	}
1483
1484	ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len);
1485	if (ret)
1486		return ret;
1487
1488	if (timed)
1489		getnstimeofday(&ts1);
1490	if (nonblock)
1491		ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len,
1492				 dev_addr, t->blocks, 512, write, count);
1493	else
1494		for (i = 0; i < count && ret == 0; i++) {
1495			ret = mmc_test_area_transfer(test, dev_addr, write);
1496			dev_addr += sz >> 9;
1497		}
1498
1499	if (ret)
1500		return ret;
1501
1502	if (timed)
1503		getnstimeofday(&ts2);
1504
1505	if (timed)
1506		mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);
1507
1508	return 0;
1509}
1510
1511static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
1512			    unsigned int dev_addr, int write, int max_scatter,
1513			    int timed)
1514{
1515	return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter,
1516				    timed, 1, false, 0);
1517}
1518
1519/*
1520 * Write the test area entirely.
1521 */
1522static int mmc_test_area_fill(struct mmc_test_card *test)
1523{
1524	struct mmc_test_area *t = &test->area;
1525
1526	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0);
1527}
1528
1529/*
1530 * Erase the test area entirely.
1531 */
1532static int mmc_test_area_erase(struct mmc_test_card *test)
1533{
1534	struct mmc_test_area *t = &test->area;
1535
1536	if (!mmc_can_erase(test->card))
1537		return 0;
1538
1539	return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
1540			 MMC_ERASE_ARG);
1541}
1542
1543/*
1544 * Cleanup struct mmc_test_area.
1545 */
1546static int mmc_test_area_cleanup(struct mmc_test_card *test)
1547{
1548	struct mmc_test_area *t = &test->area;
1549
1550	kfree(t->sg);
1551	mmc_test_free_mem(t->mem);
1552
1553	return 0;
1554}
1555
1556/*
1557 * Initialize an area for testing large transfers.  The test area is set to the
 1558 * middle of the card because cards may have different characteristics at the
1559 * front (for FAT file system optimization).  Optionally, the area is erased
1560 * (if the card supports it) which may improve write performance.  Optionally,
1561 * the area is filled with data for subsequent read tests.
1562 */
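/*
 * Sizing example (illustrative): a card with pref_erase = 8192 sectors has
 * sz = 4 MiB, so t->max_sz starts at 4 MiB and the loops leave it there;
 * with pref_erase worth 3 MiB, max_sz grows to 6 MiB instead.
 */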
1563static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
1564{
1565	struct mmc_test_area *t = &test->area;
1566	unsigned long min_sz = 64 * 1024, sz;
1567	int ret;
1568
1569	ret = mmc_test_set_blksize(test, 512);
1570	if (ret)
1571		return ret;
1572
1573	/* Make the test area size about 4MiB */
1574	sz = (unsigned long)test->card->pref_erase << 9;
1575	t->max_sz = sz;
1576	while (t->max_sz < 4 * 1024 * 1024)
1577		t->max_sz += sz;
1578	while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
1579		t->max_sz -= sz;
1580
1581	t->max_segs = test->card->host->max_segs;
1582	t->max_seg_sz = test->card->host->max_seg_size;
1583
1584	t->max_tfr = t->max_sz;
1585	if (t->max_tfr >> 9 > test->card->host->max_blk_count)
1586		t->max_tfr = test->card->host->max_blk_count << 9;
1587	if (t->max_tfr > test->card->host->max_req_size)
1588		t->max_tfr = test->card->host->max_req_size;
1589	if (t->max_tfr / t->max_seg_sz > t->max_segs)
1590		t->max_tfr = t->max_segs * t->max_seg_sz;
1591
1592	/*
1593	 * Try to allocate enough memory for a max. sized transfer.  Less is OK
1594	 * because the same memory can be mapped into the scatterlist more than
1595	 * once.  Also, take into account the limits imposed on scatterlist
1596	 * segments by the host driver.
1597	 */
1598	t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
1599				    t->max_seg_sz);
1600	if (!t->mem)
1601		return -ENOMEM;
1602
1603	t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
1604	if (!t->sg) {
1605		ret = -ENOMEM;
1606		goto out_free;
1607	}
1608
1609	t->dev_addr = mmc_test_capacity(test->card) / 2;
1610	t->dev_addr -= t->dev_addr % (t->max_sz >> 9);
1611
1612	if (erase) {
1613		ret = mmc_test_area_erase(test);
1614		if (ret)
1615			goto out_free;
1616	}
1617
1618	if (fill) {
1619		ret = mmc_test_area_fill(test);
1620		if (ret)
1621			goto out_free;
1622	}
1623
1624	return 0;
1625
1626out_free:
1627	mmc_test_area_cleanup(test);
1628	return ret;
1629}
1630
1631/*
1632 * Prepare for large transfers.  Do not erase the test area.
1633 */
1634static int mmc_test_area_prepare(struct mmc_test_card *test)
1635{
1636	return mmc_test_area_init(test, 0, 0);
1637}
1638
1639/*
1640 * Prepare for large transfers.  Do erase the test area.
1641 */
1642static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
1643{
1644	return mmc_test_area_init(test, 1, 0);
1645}
1646
1647/*
1648 * Prepare for large transfers.  Erase and fill the test area.
1649 */
1650static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
1651{
1652	return mmc_test_area_init(test, 1, 1);
1653}
1654
1655/*
1656 * Test best-case performance.  Best-case performance is expected from
1657 * a single large transfer.
1658 *
1659 * An additional option (max_scatter) allows the measurement of the same
1660 * transfer but with no contiguous pages in the scatter list.  This tests
1661 * the efficiency of DMA to handle scattered pages.
1662 */
1663static int mmc_test_best_performance(struct mmc_test_card *test, int write,
1664				     int max_scatter)
1665{
1666	struct mmc_test_area *t = &test->area;
1667
1668	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write,
1669				max_scatter, 1);
1670}
1671
1672/*
1673 * Best-case read performance.
1674 */
1675static int mmc_test_best_read_performance(struct mmc_test_card *test)
1676{
1677	return mmc_test_best_performance(test, 0, 0);
1678}
1679
1680/*
1681 * Best-case write performance.
1682 */
1683static int mmc_test_best_write_performance(struct mmc_test_card *test)
1684{
1685	return mmc_test_best_performance(test, 1, 0);
1686}
1687
1688/*
1689 * Best-case read performance into scattered pages.
1690 */
1691static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
1692{
1693	return mmc_test_best_performance(test, 0, 1);
1694}
1695
1696/*
1697 * Best-case write performance from scattered pages.
1698 */
1699static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
1700{
1701	return mmc_test_best_performance(test, 1, 1);
1702}
1703
1704/*
1705 * Single read performance by transfer size.
1706 */
1707static int mmc_test_profile_read_perf(struct mmc_test_card *test)
1708{
1709	struct mmc_test_area *t = &test->area;
1710	unsigned long sz;
1711	unsigned int dev_addr;
1712	int ret;
1713
1714	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1715		dev_addr = t->dev_addr + (sz >> 9);
1716		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1717		if (ret)
1718			return ret;
1719	}
1720	sz = t->max_tfr;
1721	dev_addr = t->dev_addr;
1722	return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1723}
1724
1725/*
1726 * Single write performance by transfer size.
1727 */
1728static int mmc_test_profile_write_perf(struct mmc_test_card *test)
1729{
1730	struct mmc_test_area *t = &test->area;
1731	unsigned long sz;
1732	unsigned int dev_addr;
1733	int ret;
1734
1735	ret = mmc_test_area_erase(test);
1736	if (ret)
1737		return ret;
1738	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1739		dev_addr = t->dev_addr + (sz >> 9);
1740		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1741		if (ret)
1742			return ret;
1743	}
1744	ret = mmc_test_area_erase(test);
1745	if (ret)
1746		return ret;
1747	sz = t->max_tfr;
1748	dev_addr = t->dev_addr;
1749	return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1750}
1751
1752/*
1753 * Single trim performance by transfer size.
1754 */
1755static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
1756{
1757	struct mmc_test_area *t = &test->area;
1758	unsigned long sz;
1759	unsigned int dev_addr;
1760	struct timespec ts1, ts2;
1761	int ret;
1762
1763	if (!mmc_can_trim(test->card))
1764		return RESULT_UNSUP_CARD;
1765
1766	if (!mmc_can_erase(test->card))
1767		return RESULT_UNSUP_HOST;
1768
1769	for (sz = 512; sz < t->max_sz; sz <<= 1) {
1770		dev_addr = t->dev_addr + (sz >> 9);
1771		getnstimeofday(&ts1);
1772		ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1773		if (ret)
1774			return ret;
1775		getnstimeofday(&ts2);
1776		mmc_test_print_rate(test, sz, &ts1, &ts2);
1777	}
1778	dev_addr = t->dev_addr;
1779	getnstimeofday(&ts1);
1780	ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1781	if (ret)
1782		return ret;
1783	getnstimeofday(&ts2);
1784	mmc_test_print_rate(test, sz, &ts1, &ts2);
1785	return 0;
1786}
1787
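/*
 * Read the whole test area sequentially in sz-sized requests and report
 * the average transfer rate
 */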
1788static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
1789{
1790	struct mmc_test_area *t = &test->area;
1791	unsigned int dev_addr, i, cnt;
1792	struct timespec ts1, ts2;
1793	int ret;
1794
1795	cnt = t->max_sz / sz;
1796	dev_addr = t->dev_addr;
1797	getnstimeofday(&ts1);
1798	for (i = 0; i < cnt; i++) {
1799		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
1800		if (ret)
1801			return ret;
1802		dev_addr += (sz >> 9);
1803	}
1804	getnstimeofday(&ts2);
1805	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1806	return 0;
1807}
1808
1809/*
1810 * Consecutive read performance by transfer size.
1811 */
1812static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
1813{
1814	struct mmc_test_area *t = &test->area;
1815	unsigned long sz;
1816	int ret;
1817
1818	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1819		ret = mmc_test_seq_read_perf(test, sz);
1820		if (ret)
1821			return ret;
1822	}
1823	sz = t->max_tfr;
1824	return mmc_test_seq_read_perf(test, sz);
1825}
1826
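/*
 * Erase the test area, then write it sequentially in sz-sized requests and
 * report the average transfer rate
 */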
1827static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
1828{
1829	struct mmc_test_area *t = &test->area;
1830	unsigned int dev_addr, i, cnt;
1831	struct timespec ts1, ts2;
1832	int ret;
1833
1834	ret = mmc_test_area_erase(test);
1835	if (ret)
1836		return ret;
1837	cnt = t->max_sz / sz;
1838	dev_addr = t->dev_addr;
1839	getnstimeofday(&ts1);
1840	for (i = 0; i < cnt; i++) {
1841		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
1842		if (ret)
1843			return ret;
1844		dev_addr += (sz >> 9);
1845	}
1846	getnstimeofday(&ts2);
1847	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1848	return 0;
1849}
1850
1851/*
1852 * Consecutive write performance by transfer size.
1853 */
1854static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
1855{
1856	struct mmc_test_area *t = &test->area;
1857	unsigned long sz;
1858	int ret;
1859
1860	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1861		ret = mmc_test_seq_write_perf(test, sz);
1862		if (ret)
1863			return ret;
1864	}
1865	sz = t->max_tfr;
1866	return mmc_test_seq_write_perf(test, sz);
1867}
1868
1869/*
1870 * Consecutive trim performance by transfer size.
1871 */
1872static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
1873{
1874	struct mmc_test_area *t = &test->area;
1875	unsigned long sz;
1876	unsigned int dev_addr, i, cnt;
1877	struct timespec ts1, ts2;
1878	int ret;
1879
1880	if (!mmc_can_trim(test->card))
1881		return RESULT_UNSUP_CARD;
1882
1883	if (!mmc_can_erase(test->card))
1884		return RESULT_UNSUP_HOST;
1885
1886	for (sz = 512; sz <= t->max_sz; sz <<= 1) {
1887		ret = mmc_test_area_erase(test);
1888		if (ret)
1889			return ret;
1890		ret = mmc_test_area_fill(test);
1891		if (ret)
1892			return ret;
1893		cnt = t->max_sz / sz;
1894		dev_addr = t->dev_addr;
1895		getnstimeofday(&ts1);
1896		for (i = 0; i < cnt; i++) {
1897			ret = mmc_erase(test->card, dev_addr, sz >> 9,
1898					MMC_TRIM_ARG);
1899			if (ret)
1900				return ret;
1901			dev_addr += (sz >> 9);
1902		}
1903		getnstimeofday(&ts2);
1904		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1905	}
1906	return 0;
1907}
1908
1909static unsigned int rnd_next = 1;
1910
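/*
 * Pseudo-random numbers via the classic ISO C rand() linear congruential
 * generator: rnd_next = rnd_next * 1103515245 + 12345.  Bits 16..30 give a
 * 15-bit value which is then scaled to the range [0, rnd_cnt).
 */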
1911static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
1912{
1913	uint64_t r;
1914
1915	rnd_next = rnd_next * 1103515245 + 12345;
1916	r = (rnd_next >> 16) & 0x7fff;
1917	return (r * rnd_cnt) >> 15;
1918}
1919
1920static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
1921			     unsigned long sz)
1922{
1923	unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
1924	unsigned int ssz;
1925	struct timespec ts1, ts2, ts;
1926	int ret;
1927
1928	ssz = sz >> 9;
1929
1930	rnd_addr = mmc_test_capacity(test->card) / 4;
1931	range1 = rnd_addr / test->card->pref_erase;
1932	range2 = range1 / ssz;
1933
1934	getnstimeofday(&ts1);
1935	for (cnt = 0; cnt < UINT_MAX; cnt++) {
1936		getnstimeofday(&ts2);
1937		ts = timespec_sub(ts2, ts1);
1938		if (ts.tv_sec >= 10)
1939			break;
1940		ea = mmc_test_rnd_num(range1);
1941		if (ea == last_ea)
1942			ea -= 1;
1943		last_ea = ea;
1944		dev_addr = rnd_addr + test->card->pref_erase * ea +
1945			   ssz * mmc_test_rnd_num(range2);
1946		ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
1947		if (ret)
1948			return ret;
1949	}
1950	if (print)
1951		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1952	return 0;
1953}
1954
1955static int mmc_test_random_perf(struct mmc_test_card *test, int write)
1956{
1957	struct mmc_test_area *t = &test->area;
1958	unsigned int next;
1959	unsigned long sz;
1960	int ret;
1961
1962	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1963		/*
1964		 * When writing, try to get more consistent results by running
1965		 * the test twice with exactly the same I/O but outputting the
1966		 * results only for the 2nd run.
1967		 */
1968		if (write) {
1969			next = rnd_next;
1970			ret = mmc_test_rnd_perf(test, write, 0, sz);
1971			if (ret)
1972				return ret;
1973			rnd_next = next;
1974		}
1975		ret = mmc_test_rnd_perf(test, write, 1, sz);
1976		if (ret)
1977			return ret;
1978	}
1979	sz = t->max_tfr;
1980	if (write) {
1981		next = rnd_next;
1982		ret = mmc_test_rnd_perf(test, write, 0, sz);
1983		if (ret)
1984			return ret;
1985		rnd_next = next;
1986	}
1987	return mmc_test_rnd_perf(test, write, 1, sz);
1988}
1989
1990/*
1991 * Random read performance by transfer size.
1992 */
1993static int mmc_test_random_read_perf(struct mmc_test_card *test)
1994{
1995	return mmc_test_random_perf(test, 0);
1996}
1997
1998/*
1999 * Random write performance by transfer size.
2000 */
2001static int mmc_test_random_write_perf(struct mmc_test_card *test)
2002{
2003	return mmc_test_random_perf(test, 1);
2004}
2005
2006static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
2007			     unsigned int tot_sz, int max_scatter)
2008{
2009	struct mmc_test_area *t = &test->area;
2010	unsigned int dev_addr, i, cnt, sz, ssz;
2011	struct timespec ts1, ts2;
2012	int ret;
2013
2014	sz = t->max_tfr;
2015
2016	/*
2017	 * In the case of a maximally scattered transfer, the maximum transfer
2018	 * size is further limited by using PAGE_SIZE segments.
2019	 */
2020	if (max_scatter) {
2021		unsigned long max_tfr;
2022
2023		if (t->max_seg_sz >= PAGE_SIZE)
2024			max_tfr = t->max_segs * PAGE_SIZE;
2025		else
2026			max_tfr = t->max_segs * t->max_seg_sz;
2027		if (sz > max_tfr)
2028			sz = max_tfr;
2029	}
2030
2031	ssz = sz >> 9;
2032	dev_addr = mmc_test_capacity(test->card) / 4;
2033	if (tot_sz > dev_addr << 9)
2034		tot_sz = dev_addr << 9;
2035	cnt = tot_sz / sz;
 2036	dev_addr &= 0xffff0000; /* Round down to a 32MiB (0x10000-sector) boundary */
2037
2038	getnstimeofday(&ts1);
2039	for (i = 0; i < cnt; i++) {
2040		ret = mmc_test_area_io(test, sz, dev_addr, write,
2041				       max_scatter, 0);
2042		if (ret)
2043			return ret;
2044		dev_addr += ssz;
2045	}
2046	getnstimeofday(&ts2);
2047
2048	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
2049
2050	return 0;
2051}
2052
2053static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
2054{
2055	int ret, i;
2056
2057	for (i = 0; i < 10; i++) {
2058		ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
2059		if (ret)
2060			return ret;
2061	}
2062	for (i = 0; i < 5; i++) {
2063		ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
2064		if (ret)
2065			return ret;
2066	}
2067	for (i = 0; i < 3; i++) {
2068		ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
2069		if (ret)
2070			return ret;
2071	}
2072
2073	return ret;
2074}
2075
2076/*
2077 * Large sequential read performance.
2078 */
2079static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
2080{
2081	return mmc_test_large_seq_perf(test, 0);
2082}
2083
2084/*
2085 * Large sequential write performance.
2086 */
2087static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
2088{
2089	return mmc_test_large_seq_perf(test, 1);
2090}
2091
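/*
 * Run one multiple-transfer test: clamp the request and total size to the
 * card and driver limits, optionally erase the test area, then perform
 * size / reqsize transfers of reqsize bytes each
 */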
2092static int mmc_test_rw_multiple(struct mmc_test_card *test,
2093				struct mmc_test_multiple_rw *tdata,
2094				unsigned int reqsize, unsigned int size,
2095				int min_sg_len)
2096{
2097	unsigned int dev_addr;
2098	struct mmc_test_area *t = &test->area;
2099	int ret = 0;
2100
2101	/* Set up test area */
2102	if (size > mmc_test_capacity(test->card) / 2 * 512)
2103		size = mmc_test_capacity(test->card) / 2 * 512;
2104	if (reqsize > t->max_tfr)
2105		reqsize = t->max_tfr;
2106	dev_addr = mmc_test_capacity(test->card) / 4;
2107	if ((dev_addr & 0xffff0000))
 2108		dev_addr &= 0xffff0000; /* Round to 32MiB boundary */
2109	else
2110		dev_addr &= 0xfffff800; /* Round to 1MiB boundary */
2111	if (!dev_addr)
2112		goto err;
2113
2114	if (reqsize > size)
2115		return 0;
2116
2117	/* prepare test area */
2118	if (mmc_can_erase(test->card) &&
2119	    tdata->prepare & MMC_TEST_PREP_ERASE) {
2120		ret = mmc_erase(test->card, dev_addr,
2121				size / 512, MMC_SECURE_ERASE_ARG);
2122		if (ret)
2123			ret = mmc_erase(test->card, dev_addr,
2124					size / 512, MMC_ERASE_ARG);
2125		if (ret)
2126			goto err;
2127	}
2128
2129	/* Run test */
2130	ret = mmc_test_area_io_seq(test, reqsize, dev_addr,
2131				   tdata->do_write, 0, 1, size / reqsize,
2132				   tdata->do_nonblock_req, min_sg_len);
2133	if (ret)
2134		goto err;
2135
2136	return ret;
2137 err:
2138	printk(KERN_INFO "[%s] error\n", __func__);
2139	return ret;
2140}
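
/*
 * Note on the alignment masks above: dev_addr counts 512-byte sectors,
 * so 0xffff0000 aligns to 0x10000 sectors (65536 * 512 = 32 MiB) and
 * 0xfffff800 aligns to 0x800 sectors (2048 * 512 = 1 MiB).  Cards
 * smaller than 128 MiB take the 1 MiB fallback, and below 4 MiB the
 * masked address becomes zero and the test bails out.
 */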
2141
2142static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
2143				     struct mmc_test_multiple_rw *rw)
2144{
2145	int ret = 0;
2146	int i;
2147	void *pre_req = test->card->host->ops->pre_req;
2148	void *post_req = test->card->host->ops->post_req;
2149
2150	if (rw->do_nonblock_req &&
2151	    ((!pre_req && post_req) || (pre_req && !post_req))) {
2152		printk(KERN_INFO "error: only one of the pre_req/post_req hooks is defined\n");
2153		return -EINVAL;
2154	}
2155
2156	for (i = 0; i < rw->len && ret == 0; i++) {
2157		ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0);
2158		if (ret)
2159			break;
2160	}
2161	return ret;
2162}
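
/*
 * The pre_req/post_req check above is effectively an XOR: the
 * non-blocking tests only proceed if the host driver defines both
 * hooks or neither.  Non-blocking requests rely on the pair to
 * prepare the next request (e.g. DMA-map its scatterlist) while the
 * current one is still in flight, so a lone hook indicates a broken
 * host driver.
 */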
2163
2164static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test,
2165				       struct mmc_test_multiple_rw *rw)
2166{
2167	int ret = 0;
2168	int i;
2169
2170	for (i = 0; i < rw->len && ret == 0; i++) {
2171		ret = mmc_test_rw_multiple(test, rw, 512 * 1024, rw->size,
2172					   rw->sg_len[i]);
2173		if (ret)
2174			break;
2175	}
2176	return ret;
2177}
2178
2179/*
2180 * Multiple blocking write 4k to 4 MB chunks
2181 */
2182static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test)
2183{
2184	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2185			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2186	struct mmc_test_multiple_rw test_data = {
2187		.bs = bs,
2188		.size = TEST_AREA_MAX_SIZE,
2189		.len = ARRAY_SIZE(bs),
2190		.do_write = true,
2191		.do_nonblock_req = false,
2192		.prepare = MMC_TEST_PREP_ERASE,
2193	};
2194
2195	return mmc_test_rw_multiple_size(test, &test_data);
2196}
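
/*
 * The bs[] table above covers power-of-two request sizes from
 * 1 << 12 = 4 KiB up to 1 << 22 = 4 MiB (2 MiB is skipped).  The
 * read/write and blocking/non-blocking variants below reuse the same
 * sizes and differ only in the do_write/do_nonblock_req flags.
 */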
2197
2198/*
2199 * Multiple non-blocking write 4k to 4 MB chunks
2200 */
2201static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test)
2202{
2203	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2204			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2205	struct mmc_test_multiple_rw test_data = {
2206		.bs = bs,
2207		.size = TEST_AREA_MAX_SIZE,
2208		.len = ARRAY_SIZE(bs),
2209		.do_write = true,
2210		.do_nonblock_req = true,
2211		.prepare = MMC_TEST_PREP_ERASE,
2212	};
2213
2214	return mmc_test_rw_multiple_size(test, &test_data);
2215}
2216
2217/*
2218 * Multiple blocking read 4k to 4 MB chunks
2219 */
2220static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test)
2221{
2222	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2223			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2224	struct mmc_test_multiple_rw test_data = {
2225		.bs = bs,
2226		.size = TEST_AREA_MAX_SIZE,
2227		.len = ARRAY_SIZE(bs),
2228		.do_write = false,
2229		.do_nonblock_req = false,
2230		.prepare = MMC_TEST_PREP_NONE,
2231	};
2232
2233	return mmc_test_rw_multiple_size(test, &test_data);
2234}
2235
2236/*
2237 * Multiple non-blocking read 4k to 4 MB chunks
2238 */
2239static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test)
2240{
2241	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2242			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2243	struct mmc_test_multiple_rw test_data = {
2244		.bs = bs,
2245		.size = TEST_AREA_MAX_SIZE,
2246		.len = ARRAY_SIZE(bs),
2247		.do_write = false,
2248		.do_nonblock_req = true,
2249		.prepare = MMC_TEST_PREP_NONE,
2250	};
2251
2252	return mmc_test_rw_multiple_size(test, &test_data);
2253}
2254
2255/*
2256 * Multiple blocking write 1 to 512 sg elements
2257 */
2258static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test)
2259{
2260	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2261				 1 << 7, 1 << 8, 1 << 9};
2262	struct mmc_test_multiple_rw test_data = {
2263		.sg_len = sg_len,
2264		.size = TEST_AREA_MAX_SIZE,
2265		.len = ARRAY_SIZE(sg_len),
2266		.do_write = true,
2267		.do_nonblock_req = false,
2268		.prepare = MMC_TEST_PREP_ERASE,
2269	};
2270
2271	return mmc_test_rw_multiple_sg_len(test, &test_data);
2272}
2273
2274/*
2275 * Multiple non-blocking write 1 to 512 sg elements
2276 */
2277static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test)
2278{
2279	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2280				 1 << 7, 1 << 8, 1 << 9};
2281	struct mmc_test_multiple_rw test_data = {
2282		.sg_len = sg_len,
2283		.size = TEST_AREA_MAX_SIZE,
2284		.len = ARRAY_SIZE(sg_len),
2285		.do_write = true,
2286		.do_nonblock_req = true,
2287		.prepare = MMC_TEST_PREP_ERASE,
2288	};
2289
2290	return mmc_test_rw_multiple_sg_len(test, &test_data);
2291}
2292
2293/*
2294 * Multiple blocking read 1 to 512 sg elements
2295 */
2296static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test)
2297{
2298	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2299				 1 << 7, 1 << 8, 1 << 9};
2300	struct mmc_test_multiple_rw test_data = {
2301		.sg_len = sg_len,
2302		.size = TEST_AREA_MAX_SIZE,
2303		.len = ARRAY_SIZE(sg_len),
2304		.do_write = false,
2305		.do_nonblock_req = false,
2306		.prepare = MMC_TEST_PREP_NONE,
2307	};
2308
2309	return mmc_test_rw_multiple_sg_len(test, &test_data);
2310}
2311
2312/*
2313 * Multiple non-blocking read 1 to 512 sg elements
2314 */
2315static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
2316{
2317	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2318				 1 << 7, 1 << 8, 1 << 9};
2319	struct mmc_test_multiple_rw test_data = {
2320		.sg_len = sg_len,
2321		.size = TEST_AREA_MAX_SIZE,
2322		.len = ARRAY_SIZE(sg_len),
2323		.do_write = false,
2324		.do_nonblock_req = true,
2325		.prepare = MMC_TEST_PREP_NONE,
2326	};
2327
2328	return mmc_test_rw_multiple_sg_len(test, &test_data);
2329}
2330
2331static const struct mmc_test_case mmc_test_cases[] = {
2332	{
2333		.name = "Basic write (no data verification)",
2334		.run = mmc_test_basic_write,
2335	},
2336
2337	{
2338		.name = "Basic read (no data verification)",
2339		.run = mmc_test_basic_read,
2340	},
2341
2342	{
2343		.name = "Basic write (with data verification)",
2344		.prepare = mmc_test_prepare_write,
2345		.run = mmc_test_verify_write,
2346		.cleanup = mmc_test_cleanup,
2347	},
2348
2349	{
2350		.name = "Basic read (with data verification)",
2351		.prepare = mmc_test_prepare_read,
2352		.run = mmc_test_verify_read,
2353		.cleanup = mmc_test_cleanup,
2354	},
2355
2356	{
2357		.name = "Multi-block write",
2358		.prepare = mmc_test_prepare_write,
2359		.run = mmc_test_multi_write,
2360		.cleanup = mmc_test_cleanup,
2361	},
2362
2363	{
2364		.name = "Multi-block read",
2365		.prepare = mmc_test_prepare_read,
2366		.run = mmc_test_multi_read,
2367		.cleanup = mmc_test_cleanup,
2368	},
2369
2370	{
2371		.name = "Power of two block writes",
2372		.prepare = mmc_test_prepare_write,
2373		.run = mmc_test_pow2_write,
2374		.cleanup = mmc_test_cleanup,
2375	},
2376
2377	{
2378		.name = "Power of two block reads",
2379		.prepare = mmc_test_prepare_read,
2380		.run = mmc_test_pow2_read,
2381		.cleanup = mmc_test_cleanup,
2382	},
2383
2384	{
2385		.name = "Weird sized block writes",
2386		.prepare = mmc_test_prepare_write,
2387		.run = mmc_test_weird_write,
2388		.cleanup = mmc_test_cleanup,
2389	},
2390
2391	{
2392		.name = "Weird sized block reads",
2393		.prepare = mmc_test_prepare_read,
2394		.run = mmc_test_weird_read,
2395		.cleanup = mmc_test_cleanup,
2396	},
2397
2398	{
2399		.name = "Badly aligned write",
2400		.prepare = mmc_test_prepare_write,
2401		.run = mmc_test_align_write,
2402		.cleanup = mmc_test_cleanup,
2403	},
2404
2405	{
2406		.name = "Badly aligned read",
2407		.prepare = mmc_test_prepare_read,
2408		.run = mmc_test_align_read,
2409		.cleanup = mmc_test_cleanup,
2410	},
2411
2412	{
2413		.name = "Badly aligned multi-block write",
2414		.prepare = mmc_test_prepare_write,
2415		.run = mmc_test_align_multi_write,
2416		.cleanup = mmc_test_cleanup,
2417	},
2418
2419	{
2420		.name = "Badly aligned multi-block read",
2421		.prepare = mmc_test_prepare_read,
2422		.run = mmc_test_align_multi_read,
2423		.cleanup = mmc_test_cleanup,
2424	},
2425
2426	{
2427		.name = "Correct xfer_size at write (start failure)",
2428		.run = mmc_test_xfersize_write,
2429	},
2430
2431	{
2432		.name = "Correct xfer_size at read (start failure)",
2433		.run = mmc_test_xfersize_read,
2434	},
2435
2436	{
2437		.name = "Correct xfer_size at write (midway failure)",
2438		.run = mmc_test_multi_xfersize_write,
2439	},
2440
2441	{
2442		.name = "Correct xfer_size at read (midway failure)",
2443		.run = mmc_test_multi_xfersize_read,
2444	},
2445
2446#ifdef CONFIG_HIGHMEM
2447
2448	{
2449		.name = "Highmem write",
2450		.prepare = mmc_test_prepare_write,
2451		.run = mmc_test_write_high,
2452		.cleanup = mmc_test_cleanup,
2453	},
2454
2455	{
2456		.name = "Highmem read",
2457		.prepare = mmc_test_prepare_read,
2458		.run = mmc_test_read_high,
2459		.cleanup = mmc_test_cleanup,
2460	},
2461
2462	{
2463		.name = "Multi-block highmem write",
2464		.prepare = mmc_test_prepare_write,
2465		.run = mmc_test_multi_write_high,
2466		.cleanup = mmc_test_cleanup,
2467	},
2468
2469	{
2470		.name = "Multi-block highmem read",
2471		.prepare = mmc_test_prepare_read,
2472		.run = mmc_test_multi_read_high,
2473		.cleanup = mmc_test_cleanup,
2474	},
2475
2476#else
2477
2478	{
2479		.name = "Highmem write",
2480		.run = mmc_test_no_highmem,
2481	},
2482
2483	{
2484		.name = "Highmem read",
2485		.run = mmc_test_no_highmem,
2486	},
2487
2488	{
2489		.name = "Multi-block highmem write",
2490		.run = mmc_test_no_highmem,
2491	},
2492
2493	{
2494		.name = "Multi-block highmem read",
2495		.run = mmc_test_no_highmem,
2496	},
2497
2498#endif /* CONFIG_HIGHMEM */
2499
2500	{
2501		.name = "Best-case read performance",
2502		.prepare = mmc_test_area_prepare_fill,
2503		.run = mmc_test_best_read_performance,
2504		.cleanup = mmc_test_area_cleanup,
2505	},
2506
2507	{
2508		.name = "Best-case write performance",
2509		.prepare = mmc_test_area_prepare_erase,
2510		.run = mmc_test_best_write_performance,
2511		.cleanup = mmc_test_area_cleanup,
2512	},
2513
2514	{
2515		.name = "Best-case read performance into scattered pages",
2516		.prepare = mmc_test_area_prepare_fill,
2517		.run = mmc_test_best_read_perf_max_scatter,
2518		.cleanup = mmc_test_area_cleanup,
2519	},
2520
2521	{
2522		.name = "Best-case write performance from scattered pages",
2523		.prepare = mmc_test_area_prepare_erase,
2524		.run = mmc_test_best_write_perf_max_scatter,
2525		.cleanup = mmc_test_area_cleanup,
2526	},
2527
2528	{
2529		.name = "Single read performance by transfer size",
2530		.prepare = mmc_test_area_prepare_fill,
2531		.run = mmc_test_profile_read_perf,
2532		.cleanup = mmc_test_area_cleanup,
2533	},
2534
2535	{
2536		.name = "Single write performance by transfer size",
2537		.prepare = mmc_test_area_prepare,
2538		.run = mmc_test_profile_write_perf,
2539		.cleanup = mmc_test_area_cleanup,
2540	},
2541
2542	{
2543		.name = "Single trim performance by transfer size",
2544		.prepare = mmc_test_area_prepare_fill,
2545		.run = mmc_test_profile_trim_perf,
2546		.cleanup = mmc_test_area_cleanup,
2547	},
2548
2549	{
2550		.name = "Consecutive read performance by transfer size",
2551		.prepare = mmc_test_area_prepare_fill,
2552		.run = mmc_test_profile_seq_read_perf,
2553		.cleanup = mmc_test_area_cleanup,
2554	},
2555
2556	{
2557		.name = "Consecutive write performance by transfer size",
2558		.prepare = mmc_test_area_prepare,
2559		.run = mmc_test_profile_seq_write_perf,
2560		.cleanup = mmc_test_area_cleanup,
2561	},
2562
2563	{
2564		.name = "Consecutive trim performance by transfer size",
2565		.prepare = mmc_test_area_prepare,
2566		.run = mmc_test_profile_seq_trim_perf,
2567		.cleanup = mmc_test_area_cleanup,
2568	},
2569
2570	{
2571		.name = "Random read performance by transfer size",
2572		.prepare = mmc_test_area_prepare,
2573		.run = mmc_test_random_read_perf,
2574		.cleanup = mmc_test_area_cleanup,
2575	},
2576
2577	{
2578		.name = "Random write performance by transfer size",
2579		.prepare = mmc_test_area_prepare,
2580		.run = mmc_test_random_write_perf,
2581		.cleanup = mmc_test_area_cleanup,
2582	},
2583
2584	{
2585		.name = "Large sequential read into scattered pages",
2586		.prepare = mmc_test_area_prepare,
2587		.run = mmc_test_large_seq_read_perf,
2588		.cleanup = mmc_test_area_cleanup,
2589	},
2590
2591	{
2592		.name = "Large sequential write from scattered pages",
2593		.prepare = mmc_test_area_prepare,
2594		.run = mmc_test_large_seq_write_perf,
2595		.cleanup = mmc_test_area_cleanup,
2596	},
2597
2598	{
2599		.name = "Write performance with blocking req 4k to 4MB",
2600		.prepare = mmc_test_area_prepare,
2601		.run = mmc_test_profile_mult_write_blocking_perf,
2602		.cleanup = mmc_test_area_cleanup,
2603	},
2604
2605	{
2606		.name = "Write performance with non-blocking req 4k to 4MB",
2607		.prepare = mmc_test_area_prepare,
2608		.run = mmc_test_profile_mult_write_nonblock_perf,
2609		.cleanup = mmc_test_area_cleanup,
2610	},
2611
2612	{
2613		.name = "Read performance with blocking req 4k to 4MB",
2614		.prepare = mmc_test_area_prepare,
2615		.run = mmc_test_profile_mult_read_blocking_perf,
2616		.cleanup = mmc_test_area_cleanup,
2617	},
2618
2619	{
2620		.name = "Read performance with non-blocking req 4k to 4MB",
2621		.prepare = mmc_test_area_prepare,
2622		.run = mmc_test_profile_mult_read_nonblock_perf,
2623		.cleanup = mmc_test_area_cleanup,
2624	},
2625
2626	{
2627		.name = "Write performance blocking req 1 to 512 sg elems",
2628		.prepare = mmc_test_area_prepare,
2629		.run = mmc_test_profile_sglen_wr_blocking_perf,
2630		.cleanup = mmc_test_area_cleanup,
2631	},
2632
2633	{
2634		.name = "Write performance non-blocking req 1 to 512 sg elems",
2635		.prepare = mmc_test_area_prepare,
2636		.run = mmc_test_profile_sglen_wr_nonblock_perf,
2637		.cleanup = mmc_test_area_cleanup,
2638	},
2639
2640	{
2641		.name = "Read performance blocking req 1 to 512 sg elems",
2642		.prepare = mmc_test_area_prepare,
2643		.run = mmc_test_profile_sglen_r_blocking_perf,
2644		.cleanup = mmc_test_area_cleanup,
2645	},
2646
2647	{
2648		.name = "Read performance non-blocking req 1 to 512 sg elems",
2649		.prepare = mmc_test_area_prepare,
2650		.run = mmc_test_profile_sglen_r_nonblock_perf,
2651		.cleanup = mmc_test_area_cleanup,
2652	},
2653};
2654
2655static DEFINE_MUTEX(mmc_test_lock);
2656
2657static LIST_HEAD(mmc_test_result);
2658
2659static void mmc_test_run(struct mmc_test_card *test, int testcase)
2660{
2661	int i, ret;
2662
2663	printk(KERN_INFO "%s: Starting tests of card %s...\n",
2664		mmc_hostname(test->card->host), mmc_card_id(test->card));
2665
2666	mmc_claim_host(test->card->host);
2667
2668	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) {
2669		struct mmc_test_general_result *gr;
2670
2671		if (testcase && ((i + 1) != testcase))
2672			continue;
2673
2674		printk(KERN_INFO "%s: Test case %d. %s...\n",
2675			mmc_hostname(test->card->host), i + 1,
2676			mmc_test_cases[i].name);
2677
2678		if (mmc_test_cases[i].prepare) {
2679			ret = mmc_test_cases[i].prepare(test);
2680			if (ret) {
2681				printk(KERN_INFO
2682					"%s: Result: Prepare stage failed! (%d)\n",
2683					mmc_hostname(test->card->host),
2684					ret);
2685				continue;
2686			}
2687		}
2688
2689		gr = kzalloc(sizeof(struct mmc_test_general_result),
2690			GFP_KERNEL);
2691		if (gr) {
2692			INIT_LIST_HEAD(&gr->tr_lst);
2693
2694			/* Record the data we already know */
2695			gr->card = test->card;
2696			gr->testcase = i;
2697
2698			/* Append the container to the global result list */
2699			list_add_tail(&gr->link, &mmc_test_result);
2700
2701			/*
2702			 * Save a pointer to the newly created container in
2703			 * our private structure.
2704			 */
2705			test->gr = gr;
2706		}
2707
2708		ret = mmc_test_cases[i].run(test);
2709		switch (ret) {
2710		case RESULT_OK:
2711			printk(KERN_INFO "%s: Result: OK\n",
2712				mmc_hostname(test->card->host));
2713			break;
2714		case RESULT_FAIL:
2715			printk(KERN_INFO "%s: Result: FAILED\n",
2716				mmc_hostname(test->card->host));
2717			break;
2718		case RESULT_UNSUP_HOST:
2719			printk(KERN_INFO
2720				"%s: Result: UNSUPPORTED (by host)\n",
2721				mmc_hostname(test->card->host));
2722			break;
2723		case RESULT_UNSUP_CARD:
2724			printk(KERN_INFO
2725				"%s: Result: UNSUPPORTED (by card)\n",
2726				mmc_hostname(test->card->host));
2727			break;
2728		default:
2729			printk(KERN_INFO "%s: Result: ERROR (%d)\n",
2730				mmc_hostname(test->card->host), ret);
2731		}
2732
2733		/* Save the result */
2734		if (gr)
2735			gr->result = ret;
2736
2737		if (mmc_test_cases[i].cleanup) {
2738			ret = mmc_test_cases[i].cleanup(test);
2739			if (ret) {
2740				printk(KERN_INFO
2741					"%s: Warning: Cleanup stage failed! (%d)\n",
2742					mmc_hostname(test->card->host),
2743					ret);
2744			}
2745		}
2746	}
2747
2748	mmc_release_host(test->card->host);
2749
2750	printk(KERN_INFO "%s: Tests completed.\n",
2751		mmc_hostname(test->card->host));
2752}
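
/*
 * Because the filter above only skips cases when testcase is non-zero,
 * writing 0 to the debugfs "test" file runs every test case in order.
 * Results accumulate on mmc_test_result and are reported later by
 * mtf_test_show().
 */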
2753
2754static void mmc_test_free_result(struct mmc_card *card)
2755{
2756	struct mmc_test_general_result *gr, *grs;
2757
2758	mutex_lock(&mmc_test_lock);
2759
2760	list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
2761		struct mmc_test_transfer_result *tr, *trs;
2762
2763		if (card && gr->card != card)
2764			continue;
2765
2766		list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
2767			list_del(&tr->link);
2768			kfree(tr);
2769		}
2770
2771		list_del(&gr->link);
2772		kfree(gr);
2773	}
2774
2775	mutex_unlock(&mmc_test_lock);
2776}
2777
2778static LIST_HEAD(mmc_test_file_test);
2779
2780static int mtf_test_show(struct seq_file *sf, void *data)
2781{
2782	struct mmc_card *card = (struct mmc_card *)sf->private;
2783	struct mmc_test_general_result *gr;
2784
2785	mutex_lock(&mmc_test_lock);
2786
2787	list_for_each_entry(gr, &mmc_test_result, link) {
2788		struct mmc_test_transfer_result *tr;
2789
2790		if (gr->card != card)
2791			continue;
2792
2793		seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);
2794
2795		list_for_each_entry(tr, &gr->tr_lst, link) {
2796			seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n",
2797				tr->count, tr->sectors,
2798				(unsigned long)tr->ts.tv_sec,
2799				(unsigned long)tr->ts.tv_nsec,
2800				tr->rate, tr->iops / 100, tr->iops % 100);
2801		}
2802	}
2803
2804	mutex_unlock(&mmc_test_lock);
2805
2806	return 0;
2807}
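
/*
 * Example output (values are illustrative only): one line per saved
 * test result, followed by one line per transfer measurement in the
 * format "count sectors sec.nsec rate iops":
 *
 *	Test 27: 0
 *	1 131072 1.234567890 54343424 0.81
 */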
2808
2809static int mtf_test_open(struct inode *inode, struct file *file)
2810{
2811	return single_open(file, mtf_test_show, inode->i_private);
2812}
2813
2814static ssize_t mtf_test_write(struct file *file, const char __user *buf,
2815	size_t count, loff_t *pos)
2816{
2817	struct seq_file *sf = (struct seq_file *)file->private_data;
2818	struct mmc_card *card = (struct mmc_card *)sf->private;
2819	struct mmc_test_card *test;
2820	char lbuf[12];
2821	long testcase;
2822
2823	if (count >= sizeof(lbuf))
2824		return -EINVAL;
2825
2826	if (copy_from_user(lbuf, buf, count))
2827		return -EFAULT;
2828	lbuf[count] = '\0';
2829
2830	if (strict_strtol(lbuf, 10, &testcase))
2831		return -EINVAL;
2832
2833	test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
2834	if (!test)
2835		return -ENOMEM;
2836
2837	/*
2838	 * Remove all test results associated with the given card, so that
2839	 * only data from the most recent run remains.
2840	 */
2841	mmc_test_free_result(card);
2842
2843	test->card = card;
2844
2845	test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
2846#ifdef CONFIG_HIGHMEM
2847	test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
2848#endif
2849
2850#ifdef CONFIG_HIGHMEM
2851	if (test->buffer && test->highmem) {
2852#else
2853	if (test->buffer) {
2854#endif
2855		mutex_lock(&mmc_test_lock);
2856		mmc_test_run(test, testcase);
2857		mutex_unlock(&mmc_test_lock);
2858	}
2859
2860#ifdef CONFIG_HIGHMEM
2861	if (test->highmem)	/* may be NULL if alloc_pages() failed */
		__free_pages(test->highmem, BUFFER_ORDER);
2862#endif
2863	kfree(test->buffer);
2864	kfree(test);
2865
2866	return count;
2867}
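
/*
 * Typical usage from userspace (the debugfs path is illustrative; it
 * depends on the host and card instance):
 *
 *	echo 6 > /sys/kernel/debug/mmc0/mmc0:0001/test	# run test case 6
 *	cat /sys/kernel/debug/mmc0/mmc0:0001/test	# read the results
 */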
2868
2869static const struct file_operations mmc_test_fops_test = {
2870	.open		= mtf_test_open,
2871	.read		= seq_read,
2872	.write		= mtf_test_write,
2873	.llseek		= seq_lseek,
2874	.release	= single_release,
2875};
2876
2877static int mtf_testlist_show(struct seq_file *sf, void *data)
2878{
2879	int i;
2880
2881	mutex_lock(&mmc_test_lock);
2882
2883	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
2884		seq_printf(sf, "%d:\t%s\n", i+1, mmc_test_cases[i].name);
2885
2886	mutex_unlock(&mmc_test_lock);
2887
2888	return 0;
2889}
2890
2891static int mtf_testlist_open(struct inode *inode, struct file *file)
2892{
2893	return single_open(file, mtf_testlist_show, inode->i_private);
2894}
2895
2896static const struct file_operations mmc_test_fops_testlist = {
2897	.open		= mtf_testlist_open,
2898	.read		= seq_read,
2899	.llseek		= seq_lseek,
2900	.release	= single_release,
2901};
2902
2903static void mmc_test_free_dbgfs_file(struct mmc_card *card)
2904{
2905	struct mmc_test_dbgfs_file *df, *dfs;
2906
2907	mutex_lock(&mmc_test_lock);
2908
2909	list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
2910		if (card && df->card != card)
2911			continue;
2912		debugfs_remove(df->file);
2913		list_del(&df->link);
2914		kfree(df);
2915	}
2916
2917	mutex_unlock(&mmc_test_lock);
2918}
2919
2920static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
2921	const char *name, mode_t mode, const struct file_operations *fops)
2922{
2923	struct dentry *file = NULL;
2924	struct mmc_test_dbgfs_file *df;
2925
2926	if (card->debugfs_root)
2927		file = debugfs_create_file(name, mode, card->debugfs_root,
2928			card, fops);
2929
2930	if (IS_ERR_OR_NULL(file)) {
2931		dev_err(&card->dev,
2932			"Can't create %s. Perhaps debugfs is disabled.\n",
2933			name);
2934		return -ENODEV;
2935	}
2936
2937	df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL);
2938	if (!df) {
2939		debugfs_remove(file);
2940		dev_err(&card->dev,
2941			"Can't allocate memory for internal usage.\n");
2942		return -ENOMEM;
2943	}
2944
2945	df->card = card;
2946	df->file = file;
2947
2948	list_add(&df->link, &mmc_test_file_test);
2949	return 0;
2950}
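
/*
 * IS_ERR_OR_NULL() is used above because debugfs_create_file() can
 * fail either way: it returns NULL on error when debugfs is enabled,
 * and the inline stub returns ERR_PTR(-ENODEV) when debugfs is not
 * compiled in.
 */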
2951
2952static int mmc_test_register_dbgfs_file(struct mmc_card *card)
2953{
2954	int ret;
2955
2956	mutex_lock(&mmc_test_lock);
2957
2958	ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO,
2959		&mmc_test_fops_test);
2960	if (ret)
2961		goto err;
2962
2963	ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO,
2964		&mmc_test_fops_testlist);
2965	if (ret)
2966		goto err;
2967
2968err:
2969	mutex_unlock(&mmc_test_lock);
2970
2971	return ret;
2972}
2973
2974static int mmc_test_probe(struct mmc_card *card)
2975{
2976	int ret;
2977
2978	if (!mmc_card_mmc(card) && !mmc_card_sd(card))
2979		return -ENODEV;
2980
2981	ret = mmc_test_register_dbgfs_file(card);
2982	if (ret)
2983		return ret;
2984
2985	dev_info(&card->dev, "Card claimed for testing.\n");
2986
2987	return 0;
2988}
2989
2990static void mmc_test_remove(struct mmc_card *card)
2991{
2992	mmc_test_free_result(card);
2993	mmc_test_free_dbgfs_file(card);
2994}
2995
2996static struct mmc_driver mmc_driver = {
2997	.drv		= {
2998		.name	= "mmc_test",
2999	},
3000	.probe		= mmc_test_probe,
3001	.remove		= mmc_test_remove,
3002};
3003
3004static int __init mmc_test_init(void)
3005{
3006	return mmc_register_driver(&mmc_driver);
3007}
3008
3009static void __exit mmc_test_exit(void)
3010{
3011	/* Clear stale data if a card is still plugged in */
3012	mmc_test_free_result(NULL);
3013	mmc_test_free_dbgfs_file(NULL);
3014
3015	mmc_unregister_driver(&mmc_driver);
3016}
3017
3018module_init(mmc_test_init);
3019module_exit(mmc_test_exit);
3020
3021MODULE_LICENSE("GPL");
3022MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
3023MODULE_AUTHOR("Pierre Ossman");