   1/*
   2 *  ms_block.c - Sony MemoryStick (legacy) storage support
   3
   4 *  Copyright (C) 2013 Maxim Levitsky <maximlevitsky@gmail.com>
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License version 2 as
   8 * published by the Free Software Foundation.
   9 *
  10 * Minor portions of the driver were copied from mspro_block.c which is
  11 * Copyright (C) 2007 Alex Dubov <oakad@yahoo.com>
  12 *
  13 */
  14#define DRIVER_NAME "ms_block"
  15#define pr_fmt(fmt) DRIVER_NAME ": " fmt
  16
  17#include <linux/module.h>
  18#include <linux/blkdev.h>
  19#include <linux/memstick.h>
  20#include <linux/idr.h>
  21#include <linux/hdreg.h>
  22#include <linux/delay.h>
  23#include <linux/slab.h>
  24#include <linux/random.h>
  25#include <linux/bitmap.h>
  26#include <linux/scatterlist.h>
  27#include <linux/jiffies.h>
  28#include <linux/workqueue.h>
  29#include <linux/mutex.h>
  30#include "ms_block.h"
  31
  32static int debug;
  33static int cache_flush_timeout = 1000;
  34static bool verify_writes;
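
/*
 * Tunables (illustrative summary, based on how they are used below):
 * 'debug' raises the verbosity of the dbg()/dbg_verbose() tracing,
 * 'cache_flush_timeout' is the write-back delay of the single-block
 * cache in milliseconds, and 'verify_writes' forces a read-back compare
 * after every block write.
 */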
  35
   36/*
   37 * Copies a section of 'sg_from', starting at offset 'offset' and of length
   38 * 'len', to another scatterlist of at most 'to_nents' entries.
   39 */
  40static size_t msb_sg_copy(struct scatterlist *sg_from,
  41	struct scatterlist *sg_to, int to_nents, size_t offset, size_t len)
  42{
  43	size_t copied = 0;
  44
  45	while (offset > 0) {
  46		if (offset >= sg_from->length) {
  47			if (sg_is_last(sg_from))
  48				return 0;
  49
  50			offset -= sg_from->length;
  51			sg_from = sg_next(sg_from);
  52			continue;
  53		}
  54
  55		copied = min(len, sg_from->length - offset);
  56		sg_set_page(sg_to, sg_page(sg_from),
  57			copied, sg_from->offset + offset);
  58
  59		len -= copied;
  60		offset = 0;
  61
  62		if (sg_is_last(sg_from) || !len)
  63			goto out;
  64
  65		sg_to = sg_next(sg_to);
  66		to_nents--;
  67		sg_from = sg_next(sg_from);
  68	}
  69
  70	while (len > sg_from->length && to_nents--) {
  71		len -= sg_from->length;
  72		copied += sg_from->length;
  73
  74		sg_set_page(sg_to, sg_page(sg_from),
  75				sg_from->length, sg_from->offset);
  76
  77		if (sg_is_last(sg_from) || !len)
  78			goto out;
  79
  80		sg_from = sg_next(sg_from);
  81		sg_to = sg_next(sg_to);
  82	}
  83
  84	if (len && to_nents) {
  85		sg_set_page(sg_to, sg_page(sg_from), len, sg_from->offset);
  86		copied += len;
  87	}
  88out:
  89	sg_mark_end(sg_to);
  90	return copied;
  91}
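
/*
 * Usage sketch (illustrative, mirroring the call sites further down):
 * carve one page-sized window out of the current request's scatterlist
 * before handing it to memstick_init_req_sg():
 *
 *	struct scatterlist sg[2];
 *
 *	sg_init_table(sg, ARRAY_SIZE(sg));
 *	msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
 *		    msb->current_sg_offset, msb->page_size);
 */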
  92
   93/*
   94 * Compares a section of 'sg', starting at offset 'offset' and of length
   95 * 'len', to a linear buffer of length 'len' at address 'buffer'.
   96 * Returns 0 if equal and -1 otherwise.
   97 */
  98static int msb_sg_compare_to_buffer(struct scatterlist *sg,
  99					size_t offset, u8 *buffer, size_t len)
 100{
 101	int retval = 0, cmplen;
 102	struct sg_mapping_iter miter;
 103
 104	sg_miter_start(&miter, sg, sg_nents(sg),
 105					SG_MITER_ATOMIC | SG_MITER_FROM_SG);
 106
 107	while (sg_miter_next(&miter) && len > 0) {
 108		if (offset >= miter.length) {
 109			offset -= miter.length;
 110			continue;
 111		}
 112
 113		cmplen = min(miter.length - offset, len);
 114		retval = memcmp(miter.addr + offset, buffer, cmplen) ? -1 : 0;
 115		if (retval)
 116			break;
 117
 118		buffer += cmplen;
 119		len -= cmplen;
 120		offset = 0;
 121	}
 122
 123	if (!retval && len)
 124		retval = -1;
 125
 126	sg_miter_stop(&miter);
 127	return retval;
 128}
 129
 130
  131/* Get the zone in which the block with logical address 'lba' lives.
  132 * Flash is broken into zones.
  133 * Each zone consists of 512 eraseblocks; the first zone hosts 494
  134 * logical blocks, and every following zone hosts 496.
  135 * Therefore zone #0 hosts lba 0-493, zone #1 lba 494-989, etc...
  136 */
 137static int msb_get_zone_from_lba(int lba)
 138{
 139	if (lba < 494)
 140		return 0;
 141	return ((lba - 494) / 496) + 1;
 142}
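
/*
 * Worked example (illustrative): msb_get_zone_from_lba(494) evaluates to
 * ((494 - 494) / 496) + 1 == 1, and lba 989 still maps to zone 1 since
 * (989 - 494) / 496 == 0; lba 990 is then the first block of zone 2.
 */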
 143
 144/* Get zone of physical block. Trivial */
 145static int msb_get_zone_from_pba(int pba)
 146{
 147	return pba / MS_BLOCKS_IN_ZONE;
 148}
 149
 150/* Debug test to validate free block counts */
 151static int msb_validate_used_block_bitmap(struct msb_data *msb)
 152{
 153	int total_free_blocks = 0;
 154	int i;
 155
 156	if (!debug)
 157		return 0;
 158
 159	for (i = 0; i < msb->zone_count; i++)
 160		total_free_blocks += msb->free_block_count[i];
 161
 162	if (msb->block_count - bitmap_weight(msb->used_blocks_bitmap,
 163					msb->block_count) == total_free_blocks)
 164		return 0;
 165
 166	pr_err("BUG: free block counts don't match the bitmap");
 167	msb->read_only = true;
 168	return -EINVAL;
 169}
 170
 171/* Mark physical block as used */
 172static void msb_mark_block_used(struct msb_data *msb, int pba)
 173{
 174	int zone = msb_get_zone_from_pba(pba);
 175
 176	if (test_bit(pba, msb->used_blocks_bitmap)) {
 177		pr_err(
 178		"BUG: attempt to mark already used pba %d as used", pba);
 179		msb->read_only = true;
 180		return;
 181	}
 182
 183	if (msb_validate_used_block_bitmap(msb))
 184		return;
 185
 186	/* No races because all IO is single threaded */
 187	__set_bit(pba, msb->used_blocks_bitmap);
 188	msb->free_block_count[zone]--;
 189}
 190
 191/* Mark physical block as free */
 192static void msb_mark_block_unused(struct msb_data *msb, int pba)
 193{
 194	int zone = msb_get_zone_from_pba(pba);
 195
 196	if (!test_bit(pba, msb->used_blocks_bitmap)) {
  197		pr_err("BUG: attempt to mark already unused pba %d as unused", pba);
 198		msb->read_only = true;
 199		return;
 200	}
 201
 202	if (msb_validate_used_block_bitmap(msb))
 203		return;
 204
 205	/* No races because all IO is single threaded */
 206	__clear_bit(pba, msb->used_blocks_bitmap);
 207	msb->free_block_count[zone]++;
 208}
 209
 210/* Invalidate current register window */
 211static void msb_invalidate_reg_window(struct msb_data *msb)
 212{
 213	msb->reg_addr.w_offset = offsetof(struct ms_register, id);
 214	msb->reg_addr.w_length = sizeof(struct ms_id_register);
 215	msb->reg_addr.r_offset = offsetof(struct ms_register, id);
 216	msb->reg_addr.r_length = sizeof(struct ms_id_register);
 217	msb->addr_valid = false;
 218}
 219
 220/* Start a state machine */
  221static int msb_run_state_machine(struct msb_data *msb, int (*state_func)
 222		(struct memstick_dev *card, struct memstick_request **req))
 223{
 224	struct memstick_dev *card = msb->card;
 225
 226	WARN_ON(msb->state != -1);
 227	msb->int_polling = false;
 228	msb->state = 0;
 229	msb->exit_error = 0;
 230
 231	memset(&card->current_mrq, 0, sizeof(card->current_mrq));
 232
 233	card->next_request = state_func;
 234	memstick_new_req(card->host);
 235	wait_for_completion(&card->mrq_complete);
 236
 237	WARN_ON(msb->state != -1);
 238	return msb->exit_error;
 239}
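
/*
 * Handler contract (illustrative summary): msb_run_state_machine() drives
 * one of the h_msb_* handlers below. On each call the handler fills
 * card->current_mrq and returns 0 to have the memstick core issue that
 * request and re-enter the handler with its result, until the handler
 * finishes by calling msb_exit_state_machine() with the final error code.
 */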
 240
  241/* State machine handlers call this to exit */
 242static int msb_exit_state_machine(struct msb_data *msb, int error)
 243{
 244	WARN_ON(msb->state == -1);
 245
 246	msb->state = -1;
 247	msb->exit_error = error;
 248	msb->card->next_request = h_msb_default_bad;
 249
 250	/* Invalidate reg window on errors */
 251	if (error)
 252		msb_invalidate_reg_window(msb);
 253
 254	complete(&msb->card->mrq_complete);
 255	return -ENXIO;
 256}
 257
 258/* read INT register */
 259static int msb_read_int_reg(struct msb_data *msb, long timeout)
 260{
 261	struct memstick_request *mrq = &msb->card->current_mrq;
 262
 263	WARN_ON(msb->state == -1);
 264
 265	if (!msb->int_polling) {
 266		msb->int_timeout = jiffies +
 267			msecs_to_jiffies(timeout == -1 ? 500 : timeout);
 268		msb->int_polling = true;
 269	} else if (time_after(jiffies, msb->int_timeout)) {
 270		mrq->data[0] = MEMSTICK_INT_CMDNAK;
 271		return 0;
 272	}
 273
 274	if ((msb->caps & MEMSTICK_CAP_AUTO_GET_INT) &&
 275				mrq->need_card_int && !mrq->error) {
 276		mrq->data[0] = mrq->int_reg;
 277		mrq->need_card_int = false;
 278		return 0;
 279	} else {
 280		memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
 281		return 1;
 282	}
 283}
 284
 285/* Read a register */
 286static int msb_read_regs(struct msb_data *msb, int offset, int len)
 287{
 288	struct memstick_request *req = &msb->card->current_mrq;
 289
 290	if (msb->reg_addr.r_offset != offset ||
 291	    msb->reg_addr.r_length != len || !msb->addr_valid) {
 292
 293		msb->reg_addr.r_offset = offset;
 294		msb->reg_addr.r_length = len;
 295		msb->addr_valid = true;
 296
 297		memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
 298			&msb->reg_addr, sizeof(msb->reg_addr));
 299		return 0;
 300	}
 301
 302	memstick_init_req(req, MS_TPC_READ_REG, NULL, len);
 303	return 1;
 304}
 305
 306/* Write a card register */
 307static int msb_write_regs(struct msb_data *msb, int offset, int len, void *buf)
 308{
 309	struct memstick_request *req = &msb->card->current_mrq;
 310
 311	if (msb->reg_addr.w_offset != offset ||
 312		msb->reg_addr.w_length != len  || !msb->addr_valid) {
 313
 314		msb->reg_addr.w_offset = offset;
 315		msb->reg_addr.w_length = len;
 316		msb->addr_valid = true;
 317
 318		memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
 319			&msb->reg_addr, sizeof(msb->reg_addr));
 320		return 0;
 321	}
 322
 323	memstick_init_req(req, MS_TPC_WRITE_REG, buf, len);
 324	return 1;
 325}
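
/*
 * Illustrative call pattern (matches the handlers below): both
 * msb_read_regs() and msb_write_regs() return 0 when they first had to
 * queue a MS_TPC_SET_RW_REG_ADRS request to move the register window.
 * A state machine handler therefore stays in its current state and
 * retries, e.g.:
 *
 *	if (!msb_write_regs(msb,
 *		offsetof(struct ms_register, param),
 *		sizeof(struct ms_param_register),
 *		&msb->regs.param))
 *		return 0;
 */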
 326
 327/* Handler for absence of IO */
 328static int h_msb_default_bad(struct memstick_dev *card,
 329						struct memstick_request **mrq)
 330{
 331	return -ENXIO;
 332}
 333
  334/*
  335 * This function is a handler for reads of one page from the device.
  336 * Writes output to msb->current_sg; takes the sector address from msb->regs.param.
  337 * Can also be used to read extra data only; set the params accordingly.
  338 */
 339static int h_msb_read_page(struct memstick_dev *card,
 340					struct memstick_request **out_mrq)
 341{
 342	struct msb_data *msb = memstick_get_drvdata(card);
 343	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
 344	struct scatterlist sg[2];
 345	u8 command, intreg;
 346
 347	if (mrq->error) {
 348		dbg("read_page, unknown error");
 349		return msb_exit_state_machine(msb, mrq->error);
 350	}
 351again:
 352	switch (msb->state) {
 353	case MSB_RP_SEND_BLOCK_ADDRESS:
  354		/* msb_write_regs sometimes "fails" because it needs to update
  355			the reg window, and thus returns a request for that.
  356			Then we stay in this state and retry */
 357		if (!msb_write_regs(msb,
 358			offsetof(struct ms_register, param),
 359			sizeof(struct ms_param_register),
 360			(unsigned char *)&msb->regs.param))
 361			return 0;
 362
 363		msb->state = MSB_RP_SEND_READ_COMMAND;
 364		return 0;
 365
 366	case MSB_RP_SEND_READ_COMMAND:
 367		command = MS_CMD_BLOCK_READ;
 368		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
 369		msb->state = MSB_RP_SEND_INT_REQ;
 370		return 0;
 371
 372	case MSB_RP_SEND_INT_REQ:
 373		msb->state = MSB_RP_RECEIVE_INT_REQ_RESULT;
  374		/* If we don't actually need to send the INT read request (needed
  375			only in serial mode), then just fall through */
 376		if (msb_read_int_reg(msb, -1))
 377			return 0;
 378		/* fallthrough */
 379
 380	case MSB_RP_RECEIVE_INT_REQ_RESULT:
 381		intreg = mrq->data[0];
 382		msb->regs.status.interrupt = intreg;
 383
 384		if (intreg & MEMSTICK_INT_CMDNAK)
 385			return msb_exit_state_machine(msb, -EIO);
 386
 387		if (!(intreg & MEMSTICK_INT_CED)) {
 388			msb->state = MSB_RP_SEND_INT_REQ;
 389			goto again;
 390		}
 391
 392		msb->int_polling = false;
 393		msb->state = (intreg & MEMSTICK_INT_ERR) ?
 394			MSB_RP_SEND_READ_STATUS_REG : MSB_RP_SEND_OOB_READ;
 395		goto again;
 396
 397	case MSB_RP_SEND_READ_STATUS_REG:
  398		/* read the status register to understand the source of the INT_ERR */
 399		if (!msb_read_regs(msb,
 400			offsetof(struct ms_register, status),
 401			sizeof(struct ms_status_register)))
 402			return 0;
 403
 404		msb->state = MSB_RP_RECEIVE_STATUS_REG;
 405		return 0;
 406
 407	case MSB_RP_RECEIVE_STATUS_REG:
 408		msb->regs.status = *(struct ms_status_register *)mrq->data;
 409		msb->state = MSB_RP_SEND_OOB_READ;
 410		/* fallthrough */
 411
 412	case MSB_RP_SEND_OOB_READ:
 413		if (!msb_read_regs(msb,
 414			offsetof(struct ms_register, extra_data),
 415			sizeof(struct ms_extra_data_register)))
 416			return 0;
 417
 418		msb->state = MSB_RP_RECEIVE_OOB_READ;
 419		return 0;
 420
 421	case MSB_RP_RECEIVE_OOB_READ:
 422		msb->regs.extra_data =
 423			*(struct ms_extra_data_register *) mrq->data;
 424		msb->state = MSB_RP_SEND_READ_DATA;
 425		/* fallthrough */
 426
 427	case MSB_RP_SEND_READ_DATA:
  428		/* Skip this state if we only read the OOB */
 429		if (msb->regs.param.cp == MEMSTICK_CP_EXTRA) {
 430			msb->state = MSB_RP_RECEIVE_READ_DATA;
 431			goto again;
 432		}
 433
 434		sg_init_table(sg, ARRAY_SIZE(sg));
 435		msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
 436			msb->current_sg_offset,
 437			msb->page_size);
 438
 439		memstick_init_req_sg(mrq, MS_TPC_READ_LONG_DATA, sg);
 440		msb->state = MSB_RP_RECEIVE_READ_DATA;
 441		return 0;
 442
 443	case MSB_RP_RECEIVE_READ_DATA:
 444		if (!(msb->regs.status.interrupt & MEMSTICK_INT_ERR)) {
 445			msb->current_sg_offset += msb->page_size;
 446			return msb_exit_state_machine(msb, 0);
 447		}
 448
 449		if (msb->regs.status.status1 & MEMSTICK_UNCORR_ERROR) {
 450			dbg("read_page: uncorrectable error");
 451			return msb_exit_state_machine(msb, -EBADMSG);
 452		}
 453
 454		if (msb->regs.status.status1 & MEMSTICK_CORR_ERROR) {
 455			dbg("read_page: correctable error");
 456			msb->current_sg_offset += msb->page_size;
 457			return msb_exit_state_machine(msb, -EUCLEAN);
 458		} else {
 459			dbg("read_page: INT error, but no status error bits");
 460			return msb_exit_state_machine(msb, -EIO);
 461		}
 462	}
 463
 464	BUG();
 465}
 466
  467/*
  468 * Handler for writes of exactly one block.
  469 * Takes the address from msb->regs.param.
  470 * Writes the same extra data to each page, also taken
  471 * from msb->regs.extra_data.
  472 * Returns -EBADMSG if the write fails due to an uncorrectable error, or
  473 * -EIO if the device refuses the command or some other error occurs.
  474 */
 475static int h_msb_write_block(struct memstick_dev *card,
 476					struct memstick_request **out_mrq)
 477{
 478	struct msb_data *msb = memstick_get_drvdata(card);
 479	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
 480	struct scatterlist sg[2];
 481	u8 intreg, command;
 482
 483	if (mrq->error)
 484		return msb_exit_state_machine(msb, mrq->error);
 485
 486again:
 487	switch (msb->state) {
 488
  489	/* HACK: JMicron handling of TPCs between 8 and
  490	 *	sizeof(memstick_request.data) bytes is broken due to a hardware
  491	 *	bug in the PIO mode that is used for these TPCs.
  492	 *	Therefore split the write.
  493	 */
 494
 495	case MSB_WB_SEND_WRITE_PARAMS:
 496		if (!msb_write_regs(msb,
 497			offsetof(struct ms_register, param),
 498			sizeof(struct ms_param_register),
 499			&msb->regs.param))
 500			return 0;
 501
 502		msb->state = MSB_WB_SEND_WRITE_OOB;
 503		return 0;
 504
 505	case MSB_WB_SEND_WRITE_OOB:
 506		if (!msb_write_regs(msb,
 507			offsetof(struct ms_register, extra_data),
 508			sizeof(struct ms_extra_data_register),
 509			&msb->regs.extra_data))
 510			return 0;
 511		msb->state = MSB_WB_SEND_WRITE_COMMAND;
 512		return 0;
 513
 514
 515	case MSB_WB_SEND_WRITE_COMMAND:
 516		command = MS_CMD_BLOCK_WRITE;
 517		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
 518		msb->state = MSB_WB_SEND_INT_REQ;
 519		return 0;
 520
 521	case MSB_WB_SEND_INT_REQ:
 522		msb->state = MSB_WB_RECEIVE_INT_REQ;
 523		if (msb_read_int_reg(msb, -1))
 524			return 0;
 525		/* fallthrough */
 526
 527	case MSB_WB_RECEIVE_INT_REQ:
 528		intreg = mrq->data[0];
 529		msb->regs.status.interrupt = intreg;
 530
 531		/* errors mean out of here, and fast... */
 532		if (intreg & (MEMSTICK_INT_CMDNAK))
 533			return msb_exit_state_machine(msb, -EIO);
 534
 535		if (intreg & MEMSTICK_INT_ERR)
 536			return msb_exit_state_machine(msb, -EBADMSG);
 537
 538
 539		/* for last page we need to poll CED */
 540		if (msb->current_page == msb->pages_in_block) {
 541			if (intreg & MEMSTICK_INT_CED)
 542				return msb_exit_state_machine(msb, 0);
 543			msb->state = MSB_WB_SEND_INT_REQ;
 544			goto again;
 545
 546		}
 547
 548		/* for non-last page we need BREQ before writing next chunk */
 549		if (!(intreg & MEMSTICK_INT_BREQ)) {
 550			msb->state = MSB_WB_SEND_INT_REQ;
 551			goto again;
 552		}
 553
 554		msb->int_polling = false;
 555		msb->state = MSB_WB_SEND_WRITE_DATA;
 556		/* fallthrough */
 557
 558	case MSB_WB_SEND_WRITE_DATA:
 559		sg_init_table(sg, ARRAY_SIZE(sg));
 560
 561		if (msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
 562			msb->current_sg_offset,
 563			msb->page_size) < msb->page_size)
 564			return msb_exit_state_machine(msb, -EIO);
 565
 566		memstick_init_req_sg(mrq, MS_TPC_WRITE_LONG_DATA, sg);
 567		mrq->need_card_int = 1;
 568		msb->state = MSB_WB_RECEIVE_WRITE_CONFIRMATION;
 569		return 0;
 570
 571	case MSB_WB_RECEIVE_WRITE_CONFIRMATION:
 572		msb->current_page++;
 573		msb->current_sg_offset += msb->page_size;
 574		msb->state = MSB_WB_SEND_INT_REQ;
 575		goto again;
 576	default:
 577		BUG();
 578	}
 579
 580	return 0;
 581}
 582
  583/*
  584 * This function is used to send simple IO requests to the device that
  585 * consist of a register write plus a command.
  586 */
 587static int h_msb_send_command(struct memstick_dev *card,
 588					struct memstick_request **out_mrq)
 589{
 590	struct msb_data *msb = memstick_get_drvdata(card);
 591	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
 592	u8 intreg;
 593
 594	if (mrq->error) {
 595		dbg("send_command: unknown error");
 596		return msb_exit_state_machine(msb, mrq->error);
 597	}
 598again:
 599	switch (msb->state) {
 600
 601	/* HACK: see h_msb_write_block */
 602	case MSB_SC_SEND_WRITE_PARAMS: /* write param register*/
 603		if (!msb_write_regs(msb,
 604			offsetof(struct ms_register, param),
 605			sizeof(struct ms_param_register),
 606			&msb->regs.param))
 607			return 0;
 608		msb->state = MSB_SC_SEND_WRITE_OOB;
 609		return 0;
 610
 611	case MSB_SC_SEND_WRITE_OOB:
 612		if (!msb->command_need_oob) {
 613			msb->state = MSB_SC_SEND_COMMAND;
 614			goto again;
 615		}
 616
 617		if (!msb_write_regs(msb,
 618			offsetof(struct ms_register, extra_data),
 619			sizeof(struct ms_extra_data_register),
 620			&msb->regs.extra_data))
 621			return 0;
 622
 623		msb->state = MSB_SC_SEND_COMMAND;
 624		return 0;
 625
 626	case MSB_SC_SEND_COMMAND:
 627		memstick_init_req(mrq, MS_TPC_SET_CMD, &msb->command_value, 1);
 628		msb->state = MSB_SC_SEND_INT_REQ;
 629		return 0;
 630
 631	case MSB_SC_SEND_INT_REQ:
 632		msb->state = MSB_SC_RECEIVE_INT_REQ;
 633		if (msb_read_int_reg(msb, -1))
 634			return 0;
 635		/* fallthrough */
 636
 637	case MSB_SC_RECEIVE_INT_REQ:
 638		intreg = mrq->data[0];
 639
 640		if (intreg & MEMSTICK_INT_CMDNAK)
 641			return msb_exit_state_machine(msb, -EIO);
 642		if (intreg & MEMSTICK_INT_ERR)
 643			return msb_exit_state_machine(msb, -EBADMSG);
 644
 645		if (!(intreg & MEMSTICK_INT_CED)) {
 646			msb->state = MSB_SC_SEND_INT_REQ;
 647			goto again;
 648		}
 649
 650		return msb_exit_state_machine(msb, 0);
 651	}
 652
 653	BUG();
 654}
 655
 656/* Small handler for card reset */
 657static int h_msb_reset(struct memstick_dev *card,
 658					struct memstick_request **out_mrq)
 659{
 660	u8 command = MS_CMD_RESET;
 661	struct msb_data *msb = memstick_get_drvdata(card);
 662	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
 663
 664	if (mrq->error)
 665		return msb_exit_state_machine(msb, mrq->error);
 666
 667	switch (msb->state) {
 668	case MSB_RS_SEND:
 669		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
 670		mrq->need_card_int = 0;
 671		msb->state = MSB_RS_CONFIRM;
 672		return 0;
 673	case MSB_RS_CONFIRM:
 674		return msb_exit_state_machine(msb, 0);
 675	}
 676	BUG();
 677}
 678
 679/* This handler is used to do serial->parallel switch */
 680static int h_msb_parallel_switch(struct memstick_dev *card,
 681					struct memstick_request **out_mrq)
 682{
 683	struct msb_data *msb = memstick_get_drvdata(card);
 684	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
 685	struct memstick_host *host = card->host;
 686
 687	if (mrq->error) {
 688		dbg("parallel_switch: error");
 689		msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
 690		return msb_exit_state_machine(msb, mrq->error);
 691	}
 692
 693	switch (msb->state) {
 694	case MSB_PS_SEND_SWITCH_COMMAND:
 695		/* Set the parallel interface on memstick side */
 696		msb->regs.param.system |= MEMSTICK_SYS_PAM;
 697
 698		if (!msb_write_regs(msb,
 699			offsetof(struct ms_register, param),
 700			1,
 701			(unsigned char *)&msb->regs.param))
 702			return 0;
 703
 704		msb->state = MSB_PS_SWICH_HOST;
 705		return 0;
 706
 707	case MSB_PS_SWICH_HOST:
  708		/* Set the parallel interface on our side + send a dummy request
  709			to see if the card responds */
 710		host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_PAR4);
 711		memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
 712		msb->state = MSB_PS_CONFIRM;
 713		return 0;
 714
 715	case MSB_PS_CONFIRM:
 716		return msb_exit_state_machine(msb, 0);
 717	}
 718
 719	BUG();
 720}
 721
 722static int msb_switch_to_parallel(struct msb_data *msb);
 723
  724/* Reset the card, to guard against hw errors being treated as bad blocks */
 725static int msb_reset(struct msb_data *msb, bool full)
 726{
 727
 728	bool was_parallel = msb->regs.param.system & MEMSTICK_SYS_PAM;
 729	struct memstick_dev *card = msb->card;
 730	struct memstick_host *host = card->host;
 731	int error;
 732
 733	/* Reset the card */
 734	msb->regs.param.system = MEMSTICK_SYS_BAMD;
 735
 736	if (full) {
 737		error =  host->set_param(host,
 738					MEMSTICK_POWER, MEMSTICK_POWER_OFF);
 739		if (error)
 740			goto out_error;
 741
 742		msb_invalidate_reg_window(msb);
 743
 744		error = host->set_param(host,
 745					MEMSTICK_POWER, MEMSTICK_POWER_ON);
 746		if (error)
 747			goto out_error;
 748
 749		error = host->set_param(host,
 750					MEMSTICK_INTERFACE, MEMSTICK_SERIAL);
 751		if (error) {
 752out_error:
 753			dbg("Failed to reset the host controller");
 754			msb->read_only = true;
 755			return -EFAULT;
 756		}
 757	}
 758
 759	error = msb_run_state_machine(msb, h_msb_reset);
 760	if (error) {
 761		dbg("Failed to reset the card");
 762		msb->read_only = true;
 763		return -ENODEV;
 764	}
 765
 766	/* Set parallel mode */
 767	if (was_parallel)
 768		msb_switch_to_parallel(msb);
 769	return 0;
 770}
 771
 772/* Attempts to switch interface to parallel mode */
 773static int msb_switch_to_parallel(struct msb_data *msb)
 774{
 775	int error;
 776
 777	error = msb_run_state_machine(msb, h_msb_parallel_switch);
 778	if (error) {
 779		pr_err("Switch to parallel failed");
 780		msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
 781		msb_reset(msb, true);
 782		return -EFAULT;
 783	}
 784
 785	msb->caps |= MEMSTICK_CAP_AUTO_GET_INT;
 786	return 0;
 787}
 788
 789/* Changes overwrite flag on a page */
 790static int msb_set_overwrite_flag(struct msb_data *msb,
 791						u16 pba, u8 page, u8 flag)
 792{
 793	if (msb->read_only)
 794		return -EROFS;
 795
 796	msb->regs.param.block_address = cpu_to_be16(pba);
 797	msb->regs.param.page_address = page;
 798	msb->regs.param.cp = MEMSTICK_CP_OVERWRITE;
 799	msb->regs.extra_data.overwrite_flag = flag;
 800	msb->command_value = MS_CMD_BLOCK_WRITE;
 801	msb->command_need_oob = true;
 802
 803	dbg_verbose("changing overwrite flag to %02x for sector %d, page %d",
 804							flag, pba, page);
 805	return msb_run_state_machine(msb, h_msb_send_command);
 806}
 807
 808static int msb_mark_bad(struct msb_data *msb, int pba)
 809{
 810	pr_notice("marking pba %d as bad", pba);
 811	msb_reset(msb, true);
 812	return msb_set_overwrite_flag(
 813			msb, pba, 0, 0xFF & ~MEMSTICK_OVERWRITE_BKST);
 814}
 815
 816static int msb_mark_page_bad(struct msb_data *msb, int pba, int page)
 817{
 818	dbg("marking page %d of pba %d as bad", page, pba);
 819	msb_reset(msb, true);
 820	return msb_set_overwrite_flag(msb,
 821		pba, page, ~MEMSTICK_OVERWRITE_PGST0);
 822}
 823
 824/* Erases one physical block */
 825static int msb_erase_block(struct msb_data *msb, u16 pba)
 826{
 827	int error, try;
 828	if (msb->read_only)
 829		return -EROFS;
 830
 831	dbg_verbose("erasing pba %d", pba);
 832
 833	for (try = 1; try < 3; try++) {
 834		msb->regs.param.block_address = cpu_to_be16(pba);
 835		msb->regs.param.page_address = 0;
 836		msb->regs.param.cp = MEMSTICK_CP_BLOCK;
 837		msb->command_value = MS_CMD_BLOCK_ERASE;
 838		msb->command_need_oob = false;
 839
 840
 841		error = msb_run_state_machine(msb, h_msb_send_command);
 842		if (!error || msb_reset(msb, true))
 843			break;
 844	}
 845
 846	if (error) {
 847		pr_err("erase failed, marking pba %d as bad", pba);
 848		msb_mark_bad(msb, pba);
 849	}
 850
 851	dbg_verbose("erase success, marking pba %d as unused", pba);
 852	msb_mark_block_unused(msb, pba);
 853	__set_bit(pba, msb->erased_blocks_bitmap);
 854	return error;
 855}
 856
 857/* Reads one page from device */
 858static int msb_read_page(struct msb_data *msb,
 859	u16 pba, u8 page, struct ms_extra_data_register *extra,
  860					struct scatterlist *sg, int offset)
 861{
 862	int try, error;
 863
 864	if (pba == MS_BLOCK_INVALID) {
 865		unsigned long flags;
 866		struct sg_mapping_iter miter;
 867		size_t len = msb->page_size;
 868
 869		dbg_verbose("read unmapped sector. returning 0xFF");
 870
 871		local_irq_save(flags);
 872		sg_miter_start(&miter, sg, sg_nents(sg),
 873				SG_MITER_ATOMIC | SG_MITER_TO_SG);
 874
 875		while (sg_miter_next(&miter) && len > 0) {
 876
 877			int chunklen;
 878
 879			if (offset && offset >= miter.length) {
 880				offset -= miter.length;
 881				continue;
 882			}
 883
 884			chunklen = min(miter.length - offset, len);
 885			memset(miter.addr + offset, 0xFF, chunklen);
 886			len -= chunklen;
 887			offset = 0;
 888		}
 889
 890		sg_miter_stop(&miter);
 891		local_irq_restore(flags);
 892
 893		if (offset)
 894			return -EFAULT;
 895
 896		if (extra)
 897			memset(extra, 0xFF, sizeof(*extra));
 898		return 0;
 899	}
 900
 901	if (pba >= msb->block_count) {
 902		pr_err("BUG: attempt to read beyond the end of the card at pba %d", pba);
 903		return -EINVAL;
 904	}
 905
 906	for (try = 1; try < 3; try++) {
 907		msb->regs.param.block_address = cpu_to_be16(pba);
 908		msb->regs.param.page_address = page;
 909		msb->regs.param.cp = MEMSTICK_CP_PAGE;
 910
 911		msb->current_sg = sg;
 912		msb->current_sg_offset = offset;
 913		error = msb_run_state_machine(msb, h_msb_read_page);
 914
 915
 916		if (error == -EUCLEAN) {
 917			pr_notice("correctable error on pba %d, page %d",
 918				pba, page);
 919			error = 0;
 920		}
 921
 922		if (!error && extra)
 923			*extra = msb->regs.extra_data;
 924
 925		if (!error || msb_reset(msb, true))
 926			break;
 927
 928	}
 929
 930	/* Mark bad pages */
 931	if (error == -EBADMSG) {
 932		pr_err("uncorrectable error on read of pba %d, page %d",
 933			pba, page);
 934
 935		if (msb->regs.extra_data.overwrite_flag &
 936					MEMSTICK_OVERWRITE_PGST0)
 937			msb_mark_page_bad(msb, pba, page);
 938		return -EBADMSG;
 939	}
 940
 941	if (error)
 942		pr_err("read of pba %d, page %d failed with error %d",
 943			pba, page, error);
 944	return error;
 945}
 946
 947/* Reads oob of page only */
 948static int msb_read_oob(struct msb_data *msb, u16 pba, u16 page,
 949	struct ms_extra_data_register *extra)
 950{
 951	int error;
 952
 953	BUG_ON(!extra);
 954	msb->regs.param.block_address = cpu_to_be16(pba);
 955	msb->regs.param.page_address = page;
 956	msb->regs.param.cp = MEMSTICK_CP_EXTRA;
 957
  958	if (pba >= msb->block_count) {
 959		pr_err("BUG: attempt to read beyond the end of card at pba %d", pba);
 960		return -EINVAL;
 961	}
 962
 963	error = msb_run_state_machine(msb, h_msb_read_page);
 964	*extra = msb->regs.extra_data;
 965
 966	if (error == -EUCLEAN) {
 967		pr_notice("correctable error on pba %d, page %d",
 968			pba, page);
 969		return 0;
 970	}
 971
 972	return error;
 973}
 974
 975/* Reads a block and compares it with data contained in scatterlist orig_sg */
 976static int msb_verify_block(struct msb_data *msb, u16 pba,
  977				struct scatterlist *orig_sg, int offset)
 978{
 979	struct scatterlist sg;
 980	int page = 0, error;
 981
 982	sg_init_one(&sg, msb->block_buffer, msb->block_size);
 983
 984	while (page < msb->pages_in_block) {
 985
 986		error = msb_read_page(msb, pba, page,
 987				NULL, &sg, page * msb->page_size);
 988		if (error)
 989			return error;
 990		page++;
 991	}
 992
 993	if (msb_sg_compare_to_buffer(orig_sg, offset,
 994				msb->block_buffer, msb->block_size))
 995		return -EIO;
 996	return 0;
 997}
 998
  999/* Writes exactly one block + oob */
1000static int msb_write_block(struct msb_data *msb,
1001			u16 pba, u32 lba, struct scatterlist *sg, int offset)
1002{
1003	int error, current_try = 1;
1004	BUG_ON(sg->length < msb->page_size);
1005
1006	if (msb->read_only)
1007		return -EROFS;
1008
1009	if (pba == MS_BLOCK_INVALID) {
1010		pr_err(
1011			"BUG: write: attempt to write MS_BLOCK_INVALID block");
1012		return -EINVAL;
1013	}
1014
1015	if (pba >= msb->block_count || lba >= msb->logical_block_count) {
1016		pr_err(
1017		"BUG: write: attempt to write beyond the end of device");
1018		return -EINVAL;
1019	}
1020
1021	if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
1022		pr_err("BUG: write: lba zone mismatch");
1023		return -EINVAL;
1024	}
1025
1026	if (pba == msb->boot_block_locations[0] ||
1027		pba == msb->boot_block_locations[1]) {
1028		pr_err("BUG: write: attempt to write to boot blocks!");
1029		return -EINVAL;
1030	}
1031
1032	while (1) {
1033
1034		if (msb->read_only)
1035			return -EROFS;
1036
1037		msb->regs.param.cp = MEMSTICK_CP_BLOCK;
1038		msb->regs.param.page_address = 0;
1039		msb->regs.param.block_address = cpu_to_be16(pba);
1040
1041		msb->regs.extra_data.management_flag = 0xFF;
1042		msb->regs.extra_data.overwrite_flag = 0xF8;
1043		msb->regs.extra_data.logical_address = cpu_to_be16(lba);
1044
1045		msb->current_sg = sg;
1046		msb->current_sg_offset = offset;
1047		msb->current_page = 0;
1048
1049		error = msb_run_state_machine(msb, h_msb_write_block);
1050
 1051		/* The block we just wrote is assumed to be erased, since its
 1052			pba was erased before. If it wasn't actually erased, the
 1053			write will "succeed" but merely clear bits that were
 1054			already set in the block, so verify that what we have
 1055			written matches what we expect.
 1056			Blocks that we erased ourselves are trusted. */
1057		if (!error && (verify_writes ||
1058				!test_bit(pba, msb->erased_blocks_bitmap)))
1059			error = msb_verify_block(msb, pba, sg, offset);
1060
1061		if (!error)
1062			break;
1063
1064		if (current_try > 1 || msb_reset(msb, true))
1065			break;
1066
1067		pr_err("write failed, trying to erase the pba %d", pba);
1068		error = msb_erase_block(msb, pba);
1069		if (error)
1070			break;
1071
1072		current_try++;
1073	}
1074	return error;
1075}
1076
1077/* Finds a free block for write replacement */
1078static u16 msb_get_free_block(struct msb_data *msb, int zone)
1079{
1080	u16 pos;
1081	int pba = zone * MS_BLOCKS_IN_ZONE;
1082	int i;
1083
1084	get_random_bytes(&pos, sizeof(pos));
1085
1086	if (!msb->free_block_count[zone]) {
 1087		pr_err("no free blocks left in zone %d to use for a write (media is worn out), switching to RO mode", zone);
1088		msb->read_only = true;
1089		return MS_BLOCK_INVALID;
1090	}
1091
1092	pos %= msb->free_block_count[zone];
1093
 1094	dbg_verbose("have %d choices for a free block, selected randomly: %d",
1095		msb->free_block_count[zone], pos);
1096
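	/*
	 * Illustrative walk-through: with, say, three free blocks left in
	 * the zone, 'pos' is now reduced to 0..2 and the scan below
	 * advances through that many clear bits of used_blocks_bitmap,
	 * spreading wear randomly across whatever free blocks remain.
	 */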
1097	pba = find_next_zero_bit(msb->used_blocks_bitmap,
1098							msb->block_count, pba);
1099	for (i = 0; i < pos; ++i)
1100		pba = find_next_zero_bit(msb->used_blocks_bitmap,
1101						msb->block_count, pba + 1);
1102
1103	dbg_verbose("result of the free blocks scan: pba %d", pba);
1104
1105	if (pba == msb->block_count || (msb_get_zone_from_pba(pba)) != zone) {
 1106		pr_err("BUG: can't get a free block");
1107		msb->read_only = true;
1108		return MS_BLOCK_INVALID;
1109	}
1110
1111	msb_mark_block_used(msb, pba);
1112	return pba;
1113}
1114
1115static int msb_update_block(struct msb_data *msb, u16 lba,
1116	struct scatterlist *sg, int offset)
1117{
1118	u16 pba, new_pba;
1119	int error, try;
1120
1121	pba = msb->lba_to_pba_table[lba];
 1122	dbg_verbose("start of a block update at lba %d, pba %d", lba, pba);
1123
1124	if (pba != MS_BLOCK_INVALID) {
1125		dbg_verbose("setting the update flag on the block");
1126		msb_set_overwrite_flag(msb, pba, 0,
1127				0xFF & ~MEMSTICK_OVERWRITE_UDST);
1128	}
1129
1130	for (try = 0; try < 3; try++) {
1131		new_pba = msb_get_free_block(msb,
1132			msb_get_zone_from_lba(lba));
1133
1134		if (new_pba == MS_BLOCK_INVALID) {
1135			error = -EIO;
1136			goto out;
1137		}
1138
1139		dbg_verbose("block update: writing updated block to the pba %d",
1140								new_pba);
1141		error = msb_write_block(msb, new_pba, lba, sg, offset);
1142		if (error == -EBADMSG) {
1143			msb_mark_bad(msb, new_pba);
1144			continue;
1145		}
1146
1147		if (error)
1148			goto out;
1149
1150		dbg_verbose("block update: erasing the old block");
1151		msb_erase_block(msb, pba);
1152		msb->lba_to_pba_table[lba] = new_pba;
1153		return 0;
1154	}
1155out:
1156	if (error) {
 1157		pr_err("block update error after %d tries, switching to r/o mode", try);
1158		msb->read_only = true;
1159	}
1160	return error;
1161}
1162
 1163/* Converts endianness in the boot block for easy use */
1164static void msb_fix_boot_page_endianness(struct ms_boot_page *p)
1165{
1166	p->header.block_id = be16_to_cpu(p->header.block_id);
1167	p->header.format_reserved = be16_to_cpu(p->header.format_reserved);
1168	p->entry.disabled_block.start_addr
1169		= be32_to_cpu(p->entry.disabled_block.start_addr);
1170	p->entry.disabled_block.data_size
1171		= be32_to_cpu(p->entry.disabled_block.data_size);
1172	p->entry.cis_idi.start_addr
1173		= be32_to_cpu(p->entry.cis_idi.start_addr);
1174	p->entry.cis_idi.data_size
1175		= be32_to_cpu(p->entry.cis_idi.data_size);
1176	p->attr.block_size = be16_to_cpu(p->attr.block_size);
1177	p->attr.number_of_blocks = be16_to_cpu(p->attr.number_of_blocks);
1178	p->attr.number_of_effective_blocks
1179		= be16_to_cpu(p->attr.number_of_effective_blocks);
1180	p->attr.page_size = be16_to_cpu(p->attr.page_size);
1181	p->attr.memory_manufacturer_code
1182		= be16_to_cpu(p->attr.memory_manufacturer_code);
1183	p->attr.memory_device_code = be16_to_cpu(p->attr.memory_device_code);
1184	p->attr.implemented_capacity
1185		= be16_to_cpu(p->attr.implemented_capacity);
1186	p->attr.controller_number = be16_to_cpu(p->attr.controller_number);
1187	p->attr.controller_function = be16_to_cpu(p->attr.controller_function);
1188}
1189
1190static int msb_read_boot_blocks(struct msb_data *msb)
1191{
1192	int pba = 0;
1193	struct scatterlist sg;
1194	struct ms_extra_data_register extra;
1195	struct ms_boot_page *page;
1196
1197	msb->boot_block_locations[0] = MS_BLOCK_INVALID;
1198	msb->boot_block_locations[1] = MS_BLOCK_INVALID;
1199	msb->boot_block_count = 0;
1200
1201	dbg_verbose("Start of a scan for the boot blocks");
1202
1203	if (!msb->boot_page) {
1204		page = kmalloc(sizeof(struct ms_boot_page)*2, GFP_KERNEL);
1205		if (!page)
1206			return -ENOMEM;
1207
1208		msb->boot_page = page;
1209	} else
1210		page = msb->boot_page;
1211
1212	msb->block_count = MS_BLOCK_MAX_BOOT_ADDR;
1213
1214	for (pba = 0; pba < MS_BLOCK_MAX_BOOT_ADDR; pba++) {
1215
1216		sg_init_one(&sg, page, sizeof(*page));
1217		if (msb_read_page(msb, pba, 0, &extra, &sg, 0)) {
1218			dbg("boot scan: can't read pba %d", pba);
1219			continue;
1220		}
1221
1222		if (extra.management_flag & MEMSTICK_MANAGEMENT_SYSFLG) {
 1223			dbg("management flag doesn't indicate boot block %d",
1224									pba);
1225			continue;
1226		}
1227
1228		if (be16_to_cpu(page->header.block_id) != MS_BLOCK_BOOT_ID) {
 1229			dbg("the pba at %d doesn't contain the boot block ID", pba);
1230			continue;
1231		}
1232
1233		msb_fix_boot_page_endianness(page);
1234		msb->boot_block_locations[msb->boot_block_count] = pba;
1235
1236		page++;
1237		msb->boot_block_count++;
1238
1239		if (msb->boot_block_count == 2)
1240			break;
1241	}
1242
1243	if (!msb->boot_block_count) {
1244		pr_err("media doesn't contain master page, aborting");
1245		return -EIO;
1246	}
1247
1248	dbg_verbose("End of scan for boot blocks");
1249	return 0;
1250}
1251
1252static int msb_read_bad_block_table(struct msb_data *msb, int block_nr)
1253{
1254	struct ms_boot_page *boot_block;
1255	struct scatterlist sg;
1256	u16 *buffer = NULL;
1257	int offset = 0;
1258	int i, error = 0;
1259	int data_size, data_offset, page, page_offset, size_to_read;
1260	u16 pba;
1261
1262	BUG_ON(block_nr > 1);
1263	boot_block = &msb->boot_page[block_nr];
1264	pba = msb->boot_block_locations[block_nr];
1265
1266	if (msb->boot_block_locations[block_nr] == MS_BLOCK_INVALID)
1267		return -EINVAL;
1268
1269	data_size = boot_block->entry.disabled_block.data_size;
1270	data_offset = sizeof(struct ms_boot_page) +
1271			boot_block->entry.disabled_block.start_addr;
1272	if (!data_size)
1273		return 0;
1274
1275	page = data_offset / msb->page_size;
1276	page_offset = data_offset % msb->page_size;
1277	size_to_read =
1278		DIV_ROUND_UP(data_size + page_offset, msb->page_size) *
1279			msb->page_size;
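
	/*
	 * Illustrative example: with 512-byte pages, data_offset 1050 and
	 * data_size 100 give page 2, page_offset 26, and size_to_read =
	 * DIV_ROUND_UP(126, 512) * 512 = 512 bytes.
	 */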
1280
 1281	dbg("reading bad block table of boot block at pba %d, offset %d len %d",
1282		pba, data_offset, data_size);
1283
1284	buffer = kzalloc(size_to_read, GFP_KERNEL);
1285	if (!buffer)
1286		return -ENOMEM;
1287
1288	/* Read the buffer */
1289	sg_init_one(&sg, buffer, size_to_read);
1290
1291	while (offset < size_to_read) {
1292		error = msb_read_page(msb, pba, page, NULL, &sg, offset);
1293		if (error)
1294			goto out;
1295
1296		page++;
1297		offset += msb->page_size;
1298
1299		if (page == msb->pages_in_block) {
1300			pr_err(
1301			"bad block table extends beyond the boot block");
1302			break;
1303		}
1304	}
1305
1306	/* Process the bad block table */
1307	for (i = page_offset; i < data_size / sizeof(u16); i++) {
1308
1309		u16 bad_block = be16_to_cpu(buffer[i]);
1310
1311		if (bad_block >= msb->block_count) {
1312			dbg("bad block table contains invalid block %d",
1313								bad_block);
1314			continue;
1315		}
1316
1317		if (test_bit(bad_block, msb->used_blocks_bitmap))  {
1318			dbg("duplicate bad block %d in the table",
1319				bad_block);
1320			continue;
1321		}
1322
1323		dbg("block %d is marked as factory bad", bad_block);
1324		msb_mark_block_used(msb, bad_block);
1325	}
1326out:
1327	kfree(buffer);
1328	return error;
1329}
1330
1331static int msb_ftl_initialize(struct msb_data *msb)
1332{
1333	int i;
1334
1335	if (msb->ftl_initialized)
1336		return 0;
1337
1338	msb->zone_count = msb->block_count / MS_BLOCKS_IN_ZONE;
1339	msb->logical_block_count = msb->zone_count * 496 - 2;
1340
1341	msb->used_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
1342	msb->erased_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
1343	msb->lba_to_pba_table =
1344		kmalloc(msb->logical_block_count * sizeof(u16), GFP_KERNEL);
1345
1346	if (!msb->used_blocks_bitmap || !msb->lba_to_pba_table ||
1347						!msb->erased_blocks_bitmap) {
1348		kfree(msb->used_blocks_bitmap);
1349		kfree(msb->lba_to_pba_table);
1350		kfree(msb->erased_blocks_bitmap);
1351		return -ENOMEM;
1352	}
1353
1354	for (i = 0; i < msb->zone_count; i++)
1355		msb->free_block_count[i] = MS_BLOCKS_IN_ZONE;
1356
1357	memset(msb->lba_to_pba_table, MS_BLOCK_INVALID,
1358			msb->logical_block_count * sizeof(u16));
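
	/*
	 * Note (illustrative): the memset above relies on MS_BLOCK_INVALID
	 * being 0xffff; memset() truncates it to the byte 0xff, which fills
	 * every u16 entry of the table with 0xffff.
	 */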
1359
1360	dbg("initial FTL tables created. Zone count = %d, Logical block count = %d",
1361		msb->zone_count, msb->logical_block_count);
1362
1363	msb->ftl_initialized = true;
1364	return 0;
1365}
1366
1367static int msb_ftl_scan(struct msb_data *msb)
1368{
1369	u16 pba, lba, other_block;
1370	u8 overwrite_flag, managment_flag, other_overwrite_flag;
1371	int error;
1372	struct ms_extra_data_register extra;
1373	u8 *overwrite_flags = kzalloc(msb->block_count, GFP_KERNEL);
1374
1375	if (!overwrite_flags)
1376		return -ENOMEM;
1377
1378	dbg("Start of media scanning");
1379	for (pba = 0; pba < msb->block_count; pba++) {
1380
1381		if (pba == msb->boot_block_locations[0] ||
1382			pba == msb->boot_block_locations[1]) {
1383			dbg_verbose("pba %05d -> [boot block]", pba);
1384			msb_mark_block_used(msb, pba);
1385			continue;
1386		}
1387
1388		if (test_bit(pba, msb->used_blocks_bitmap)) {
1389			dbg_verbose("pba %05d -> [factory bad]", pba);
1390			continue;
1391		}
1392
1393		memset(&extra, 0, sizeof(extra));
1394		error = msb_read_oob(msb, pba, 0, &extra);
1395
1396		/* can't trust the page if we can't read the oob */
1397		if (error == -EBADMSG) {
1398			pr_notice(
1399			"oob of pba %d damaged, will try to erase it", pba);
1400			msb_mark_block_used(msb, pba);
1401			msb_erase_block(msb, pba);
1402			continue;
1403		} else if (error) {
1404			pr_err("unknown error %d on read of oob of pba %d - aborting",
1405				error, pba);
1406
1407			kfree(overwrite_flags);
1408			return error;
1409		}
1410
1411		lba = be16_to_cpu(extra.logical_address);
1412		managment_flag = extra.management_flag;
1413		overwrite_flag = extra.overwrite_flag;
1414		overwrite_flags[pba] = overwrite_flag;
1415
1416		/* Skip bad blocks */
1417		if (!(overwrite_flag & MEMSTICK_OVERWRITE_BKST)) {
1418			dbg("pba %05d -> [BAD]", pba);
1419			msb_mark_block_used(msb, pba);
1420			continue;
1421		}
1422
1423		/* Skip system/drm blocks */
1424		if ((managment_flag & MEMSTICK_MANAGMENT_FLAG_NORMAL) !=
1425			MEMSTICK_MANAGMENT_FLAG_NORMAL) {
 1426			dbg("pba %05d -> [reserved management flag %02x]",
1427							pba, managment_flag);
1428			msb_mark_block_used(msb, pba);
1429			continue;
1430		}
1431
1432		/* Erase temporary tables */
1433		if (!(managment_flag & MEMSTICK_MANAGEMENT_ATFLG)) {
1434			dbg("pba %05d -> [temp table] - will erase", pba);
1435
1436			msb_mark_block_used(msb, pba);
1437			msb_erase_block(msb, pba);
1438			continue;
1439		}
1440
1441		if (lba == MS_BLOCK_INVALID) {
1442			dbg_verbose("pba %05d -> [free]", pba);
1443			continue;
1444		}
1445
1446		msb_mark_block_used(msb, pba);
1447
 1448		/* Block has an LBA that does not match its zone */
1449		if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
1450			pr_notice("pba %05d -> [bad lba %05d] - will erase",
1451								pba, lba);
1452			msb_erase_block(msb, pba);
1453			continue;
1454		}
1455
1456		/* No collisions - great */
1457		if (msb->lba_to_pba_table[lba] == MS_BLOCK_INVALID) {
1458			dbg_verbose("pba %05d -> [lba %05d]", pba, lba);
1459			msb->lba_to_pba_table[lba] = pba;
1460			continue;
1461		}
1462
1463		other_block = msb->lba_to_pba_table[lba];
1464		other_overwrite_flag = overwrite_flags[other_block];
1465
1466		pr_notice("Collision between pba %d and pba %d",
1467			pba, other_block);
1468
1469		if (!(overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
1470			pr_notice("pba %d is marked as stable, use it", pba);
1471			msb_erase_block(msb, other_block);
1472			msb->lba_to_pba_table[lba] = pba;
1473			continue;
1474		}
1475
1476		if (!(other_overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
1477			pr_notice("pba %d is marked as stable, use it",
1478								other_block);
1479			msb_erase_block(msb, pba);
1480			continue;
1481		}
1482
1483		pr_notice("collision between blocks %d and %d, without stable flag set on both, erasing pba %d",
1484				pba, other_block, other_block);
1485
1486		msb_erase_block(msb, other_block);
1487		msb->lba_to_pba_table[lba] = pba;
1488	}
1489
1490	dbg("End of media scanning");
1491	kfree(overwrite_flags);
1492	return 0;
1493}
1494
1495static void msb_cache_flush_timer(unsigned long data)
1496{
1497	struct msb_data *msb = (struct msb_data *)data;
1498	msb->need_flush_cache = true;
1499	queue_work(msb->io_queue, &msb->io_work);
1500}
1501
1502
1503static void msb_cache_discard(struct msb_data *msb)
1504{
1505	if (msb->cache_block_lba == MS_BLOCK_INVALID)
1506		return;
1507
1508	del_timer_sync(&msb->cache_flush_timer);
1509
1510	dbg_verbose("Discarding the write cache");
1511	msb->cache_block_lba = MS_BLOCK_INVALID;
1512	bitmap_zero(&msb->valid_cache_bitmap, msb->pages_in_block);
1513}
1514
1515static int msb_cache_init(struct msb_data *msb)
1516{
1517	setup_timer(&msb->cache_flush_timer, msb_cache_flush_timer,
1518		(unsigned long)msb);
1519
1520	if (!msb->cache)
1521		msb->cache = kzalloc(msb->block_size, GFP_KERNEL);
1522	if (!msb->cache)
1523		return -ENOMEM;
1524
1525	msb_cache_discard(msb);
1526	return 0;
1527}
1528
1529static int msb_cache_flush(struct msb_data *msb)
1530{
1531	struct scatterlist sg;
1532	struct ms_extra_data_register extra;
1533	int page, offset, error;
1534	u16 pba, lba;
1535
1536	if (msb->read_only)
1537		return -EROFS;
1538
1539	if (msb->cache_block_lba == MS_BLOCK_INVALID)
1540		return 0;
1541
1542	lba = msb->cache_block_lba;
1543	pba = msb->lba_to_pba_table[lba];
1544
1545	dbg_verbose("Flushing the write cache of pba %d (LBA %d)",
1546						pba, msb->cache_block_lba);
1547
 1548	sg_init_one(&sg, msb->cache, msb->block_size);
1549
1550	/* Read all missing pages in cache */
1551	for (page = 0; page < msb->pages_in_block; page++) {
1552
1553		if (test_bit(page, &msb->valid_cache_bitmap))
1554			continue;
1555
1556		offset = page * msb->page_size;
1557
1558		dbg_verbose("reading non-present sector %d of cache block %d",
1559			page, lba);
1560		error = msb_read_page(msb, pba, page, &extra, &sg, offset);
1561
1562		/* Bad pages are copied with 00 page status */
1563		if (error == -EBADMSG) {
1564			pr_err("read error on sector %d, contents probably damaged", page);
1565			continue;
1566		}
1567
1568		if (error)
1569			return error;
1570
1571		if ((extra.overwrite_flag & MEMSTICK_OV_PG_NORMAL) !=
1572							MEMSTICK_OV_PG_NORMAL) {
1573			dbg("page %d is marked as bad", page);
1574			continue;
1575		}
1576
1577		set_bit(page, &msb->valid_cache_bitmap);
1578	}
1579
1580	/* Write the cache now */
1581	error = msb_update_block(msb, msb->cache_block_lba, &sg, 0);
1582	pba = msb->lba_to_pba_table[msb->cache_block_lba];
1583
1584	/* Mark invalid pages */
1585	if (!error) {
1586		for (page = 0; page < msb->pages_in_block; page++) {
1587
1588			if (test_bit(page, &msb->valid_cache_bitmap))
1589				continue;
1590
1591			dbg("marking page %d as containing damaged data",
1592				page);
1593			msb_set_overwrite_flag(msb,
 1594				pba, page, 0xFF & ~MEMSTICK_OV_PG_NORMAL);
1595		}
1596	}
1597
1598	msb_cache_discard(msb);
1599	return error;
1600}
1601
1602static int msb_cache_write(struct msb_data *msb, int lba,
1603	int page, bool add_to_cache_only, struct scatterlist *sg, int offset)
1604{
1605	int error;
1606	struct scatterlist sg_tmp[10];
1607
1608	if (msb->read_only)
1609		return -EROFS;
1610
1611	if (msb->cache_block_lba == MS_BLOCK_INVALID ||
1612						lba != msb->cache_block_lba)
1613		if (add_to_cache_only)
1614			return 0;
1615
1616	/* If we need to write different block */
1617	if (msb->cache_block_lba != MS_BLOCK_INVALID &&
1618						lba != msb->cache_block_lba) {
1619		dbg_verbose("first flush the cache");
1620		error = msb_cache_flush(msb);
1621		if (error)
1622			return error;
1623	}
1624
 1625	if (msb->cache_block_lba == MS_BLOCK_INVALID) {
 1626		msb->cache_block_lba = lba;
1627		mod_timer(&msb->cache_flush_timer,
1628			jiffies + msecs_to_jiffies(cache_flush_timeout));
1629	}
1630
1631	dbg_verbose("Write of LBA %d page %d to cache ", lba, page);
1632
1633	sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
1634	msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp), offset, msb->page_size);
1635
1636	sg_copy_to_buffer(sg_tmp, sg_nents(sg_tmp),
1637		msb->cache + page * msb->page_size, msb->page_size);
1638
1639	set_bit(page, &msb->valid_cache_bitmap);
1640	return 0;
1641}
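
/*
 * Design note (illustrative): the cache holds exactly one block. A write
 * to a different lba first flushes the cached block, and the timer armed
 * in msb_cache_write() forces a flush after cache_flush_timeout ms even
 * if no further IO arrives.
 */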
1642
1643static int msb_cache_read(struct msb_data *msb, int lba,
1644				int page, struct scatterlist *sg, int offset)
1645{
1646	int pba = msb->lba_to_pba_table[lba];
1647	struct scatterlist sg_tmp[10];
1648	int error = 0;
1649
1650	if (lba == msb->cache_block_lba &&
1651			test_bit(page, &msb->valid_cache_bitmap)) {
1652
1653		dbg_verbose("Read of LBA %d (pba %d) sector %d from cache",
1654							lba, pba, page);
1655
1656		sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
1657		msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp),
1658			offset, msb->page_size);
1659		sg_copy_from_buffer(sg_tmp, sg_nents(sg_tmp),
1660			msb->cache + msb->page_size * page,
1661							msb->page_size);
1662	} else {
1663		dbg_verbose("Read of LBA %d (pba %d) sector %d from device",
1664							lba, pba, page);
1665
1666		error = msb_read_page(msb, pba, page, NULL, sg, offset);
1667		if (error)
1668			return error;
1669
1670		msb_cache_write(msb, lba, page, true, sg, offset);
1671	}
1672	return error;
1673}
1674
 1675/* Emulated geometry table
 1676 * The exact content of this table isn't that important;
 1677 * one could put different values here, provided that they still
 1678 * cover the whole disk.
 1679 * The 64 MB entry is what Windows reports for my 64M memstick */
1680
1681static const struct chs_entry chs_table[] = {
 1682/*        size sectors cylinders  heads */
1683	{ 4,    16,    247,       2  },
1684	{ 8,    16,    495,       2  },
1685	{ 16,   16,    495,       4  },
1686	{ 32,   16,    991,       4  },
1687	{ 64,   16,    991,       8  },
1688	{128,   16,    991,       16 },
1689	{ 0 }
1690};
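
/*
 * Sanity check (illustrative): for the 64 MB entry,
 * 991 cylinders * 8 heads * 16 sectors * 512 bytes = 64946176 bytes,
 * slightly under the 64 MB of raw flash, so the emulated geometry never
 * addresses past the end of the media.
 */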
1691
1692/* Load information about the card */
1693static int msb_init_card(struct memstick_dev *card)
1694{
1695	struct msb_data *msb = memstick_get_drvdata(card);
1696	struct memstick_host *host = card->host;
1697	struct ms_boot_page *boot_block;
1698	int error = 0, i, raw_size_in_megs;
1699
1700	msb->caps = 0;
1701
1702	if (card->id.class >= MEMSTICK_CLASS_ROM &&
1703				card->id.class <= MEMSTICK_CLASS_ROM)
1704		msb->read_only = true;
1705
1706	msb->state = -1;
1707	error = msb_reset(msb, false);
1708	if (error)
1709		return error;
1710
 1711	/* Due to a bug in the JMicron driver written by Alex Dubov,
 1712	 * its serial mode barely works,
 1713	 * so we switch to parallel mode right away */
1714	if (host->caps & MEMSTICK_CAP_PAR4)
1715		msb_switch_to_parallel(msb);
1716
1717	msb->page_size = sizeof(struct ms_boot_page);
1718
1719	/* Read the boot page */
1720	error = msb_read_boot_blocks(msb);
1721	if (error)
1722		return -EIO;
1723
1724	boot_block = &msb->boot_page[0];
1725
 1726	/* Save interesting attributes from the boot page */
1727	msb->block_count = boot_block->attr.number_of_blocks;
1728	msb->page_size = boot_block->attr.page_size;
1729
1730	msb->pages_in_block = boot_block->attr.block_size * 2;
1731	msb->block_size = msb->page_size * msb->pages_in_block;
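
	/*
	 * Illustrative reading of the math above: attr.block_size appears
	 * to be in KiB while a page is 512 bytes, hence the "* 2"; e.g. an
	 * 8 KiB eraseblock gives pages_in_block = 16 and, with 512-byte
	 * pages, block_size = 16 * 512 = 8192 bytes.
	 */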
1732
1733	if (msb->page_size > PAGE_SIZE) {
 1734		/* this isn't supported by Linux at all, anyway */
1735		dbg("device page %d size isn't supported", msb->page_size);
1736		return -EINVAL;
1737	}
1738
1739	msb->block_buffer = kzalloc(msb->block_size, GFP_KERNEL);
1740	if (!msb->block_buffer)
1741		return -ENOMEM;
1742
1743	raw_size_in_megs = (msb->block_size * msb->block_count) >> 20;
1744
1745	for (i = 0; chs_table[i].size; i++) {
1746
1747		if (chs_table[i].size != raw_size_in_megs)
1748			continue;
1749
1750		msb->geometry.cylinders = chs_table[i].cyl;
1751		msb->geometry.heads = chs_table[i].head;
1752		msb->geometry.sectors = chs_table[i].sec;
1753		break;
1754	}
1755
1756	if (boot_block->attr.transfer_supporting == 1)
1757		msb->caps |= MEMSTICK_CAP_PAR4;
1758
1759	if (boot_block->attr.device_type & 0x03)
1760		msb->read_only = true;
1761
1762	dbg("Total block count = %d", msb->block_count);
1763	dbg("Each block consists of %d pages", msb->pages_in_block);
1764	dbg("Page size = %d bytes", msb->page_size);
1765	dbg("Parallel mode supported: %d", !!(msb->caps & MEMSTICK_CAP_PAR4));
1766	dbg("Read only: %d", msb->read_only);
1767
1768#if 0
1769	/* Now we can switch the interface */
1770	if (host->caps & msb->caps & MEMSTICK_CAP_PAR4)
1771		msb_switch_to_parallel(msb);
1772#endif
1773
1774	error = msb_cache_init(msb);
1775	if (error)
1776		return error;
1777
1778	error = msb_ftl_initialize(msb);
1779	if (error)
1780		return error;
1781
1782
1783	/* Read the bad block table */
1784	error = msb_read_bad_block_table(msb, 0);
1785
1786	if (error && error != -ENOMEM) {
1787		dbg("failed to read bad block table from primary boot block, trying from backup");
1788		error = msb_read_bad_block_table(msb, 1);
1789	}
1790
1791	if (error)
1792		return error;
1793
1794	/* *drum roll* Scan the media */
1795	error = msb_ftl_scan(msb);
1796	if (error) {
1797		pr_err("Scan of media failed");
1798		return error;
1799	}
1800
1801	return 0;
1802
1803}
1804
1805static int msb_do_write_request(struct msb_data *msb, int lba,
1806	int page, struct scatterlist *sg, size_t len, int *sucessfuly_written)
1807{
1808	int error = 0;
1809	off_t offset = 0;
1810	*sucessfuly_written = 0;
1811
1812	while (offset < len) {
1813		if (page == 0 && len - offset >= msb->block_size) {
1814
1815			if (msb->cache_block_lba == lba)
1816				msb_cache_discard(msb);
1817
1818			dbg_verbose("Writing whole lba %d", lba);
1819			error = msb_update_block(msb, lba, sg, offset);
1820			if (error)
1821				return error;
1822
1823			offset += msb->block_size;
1824			*sucessfuly_written += msb->block_size;
1825			lba++;
1826			continue;
1827		}
1828
1829		error = msb_cache_write(msb, lba, page, false, sg, offset);
1830		if (error)
1831			return error;
1832
1833		offset += msb->page_size;
1834		*sucessfuly_written += msb->page_size;
1835
1836		page++;
1837		if (page == msb->pages_in_block) {
1838			page = 0;
1839			lba++;
1840		}
1841	}
1842	return 0;
1843}
1844
1845static int msb_do_read_request(struct msb_data *msb, int lba,
1846		int page, struct scatterlist *sg, int len, int *sucessfuly_read)
1847{
1848	int error = 0;
1849	int offset = 0;
1850	*sucessfuly_read = 0;
1851
1852	while (offset < len) {
1853
1854		error = msb_cache_read(msb, lba, page, sg, offset);
1855		if (error)
1856			return error;
1857
1858		offset += msb->page_size;
1859		*sucessfuly_read += msb->page_size;
1860
1861		page++;
1862		if (page == msb->pages_in_block) {
1863			page = 0;
1864			lba++;
1865		}
1866	}
1867	return 0;
1868}
1869
1870static void msb_io_work(struct work_struct *work)
1871{
1872	struct msb_data *msb = container_of(work, struct msb_data, io_work);
1873	int page, error, len;
1874	sector_t lba;
1875	unsigned long flags;
1876	struct scatterlist *sg = msb->prealloc_sg;
1877
1878	dbg_verbose("IO: work started");
1879
1880	while (1) {
1881		spin_lock_irqsave(&msb->q_lock, flags);
1882
1883		if (msb->need_flush_cache) {
1884			msb->need_flush_cache = false;
1885			spin_unlock_irqrestore(&msb->q_lock, flags);
1886			msb_cache_flush(msb);
1887			continue;
1888		}
1889
1890		if (!msb->req) {
1891			msb->req = blk_fetch_request(msb->queue);
1892			if (!msb->req) {
 1893				dbg_verbose("IO: no more requests, exiting");
1894				spin_unlock_irqrestore(&msb->q_lock, flags);
1895				return;
1896			}
1897		}
1898
1899		spin_unlock_irqrestore(&msb->q_lock, flags);
1900
1901		/* If card was removed meanwhile */
1902		if (!msb->req)
1903			return;
1904
1905		/* process the request */
1906		dbg_verbose("IO: processing new request");
1907		blk_rq_map_sg(msb->queue, msb->req, sg);
1908
1909		lba = blk_rq_pos(msb->req);
1910
1911		sector_div(lba, msb->page_size / 512);
1912		page = sector_div(lba, msb->pages_in_block);
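
		/*
		 * Illustrative example: with 512-byte pages and 16 pages
		 * per block, request sector 100 stays 100 pages
		 * (page_size / 512 == 1), giving lba = 100 / 16 = 6 and
		 * page = 100 % 16 = 4.
		 */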
1913
1914		if (rq_data_dir(msb->req) == READ)
1915			error = msb_do_read_request(msb, lba, page, sg,
1916				blk_rq_bytes(msb->req), &len);
1917		else
1918			error = msb_do_write_request(msb, lba, page, sg,
1919				blk_rq_bytes(msb->req), &len);
1920
1921		spin_lock_irqsave(&msb->q_lock, flags);
1922
1923		if (len)
1924			if (!__blk_end_request(msb->req, 0, len))
1925				msb->req = NULL;
1926
 1927		if (error && msb->req) {
 1928			dbg_verbose("IO: ending one sector of the request with error");
1929			if (!__blk_end_request(msb->req, error, msb->page_size))
1930				msb->req = NULL;
1931		}
1932
1933		if (msb->req)
1934			dbg_verbose("IO: request still pending");
1935
1936		spin_unlock_irqrestore(&msb->q_lock, flags);
1937	}
1938}
1939
 1940static DEFINE_IDR(msb_disk_idr); /* set of used disk numbers */
1941static DEFINE_MUTEX(msb_disk_lock); /* protects against races in open/release */
1942
1943static int msb_bd_open(struct block_device *bdev, fmode_t mode)
1944{
1945	struct gendisk *disk = bdev->bd_disk;
1946	struct msb_data *msb = disk->private_data;
1947
1948	dbg_verbose("block device open");
1949
1950	mutex_lock(&msb_disk_lock);
1951
1952	if (msb && msb->card)
1953		msb->usage_count++;
1954
1955	mutex_unlock(&msb_disk_lock);
1956	return 0;
1957}
1958
1959static void msb_data_clear(struct msb_data *msb)
1960{
1961	kfree(msb->boot_page);
1962	kfree(msb->used_blocks_bitmap);
1963	kfree(msb->lba_to_pba_table);
1964	kfree(msb->cache);
1965	msb->card = NULL;
1966}
1967
1968static int msb_disk_release(struct gendisk *disk)
1969{
1970	struct msb_data *msb = disk->private_data;
1971
1972	dbg_verbose("block device release");
1973	mutex_lock(&msb_disk_lock);
1974
1975	if (msb) {
1976		if (msb->usage_count)
1977			msb->usage_count--;
1978
1979		if (!msb->usage_count) {
1980			disk->private_data = NULL;
1981			idr_remove(&msb_disk_idr, msb->disk_id);
1982			put_disk(disk);
1983			kfree(msb);
1984		}
1985	}
1986	mutex_unlock(&msb_disk_lock);
1987	return 0;
1988}
1989
1990static void msb_bd_release(struct gendisk *disk, fmode_t mode)
1991{
1992	msb_disk_release(disk);
1993}
1994
1995static int msb_bd_getgeo(struct block_device *bdev,
1996				 struct hd_geometry *geo)
1997{
1998	struct msb_data *msb = bdev->bd_disk->private_data;
1999	*geo = msb->geometry;
2000	return 0;
2001}
2002
2003static int msb_prepare_req(struct request_queue *q, struct request *req)
2004{
2005	if (req->cmd_type != REQ_TYPE_FS &&
2006				req->cmd_type != REQ_TYPE_BLOCK_PC) {
2007		blk_dump_rq_flags(req, "MS unsupported request");
2008		return BLKPREP_KILL;
2009	}
2010	req->cmd_flags |= REQ_DONTPREP;
2011	return BLKPREP_OK;
2012}
2013
2014static void msb_submit_req(struct request_queue *q)
2015{
2016	struct memstick_dev *card = q->queuedata;
2017	struct msb_data *msb = memstick_get_drvdata(card);
2018	struct request *req = NULL;
2019
2020	dbg_verbose("Submit request");
2021
2022	if (msb->card_dead) {
2023		dbg("Refusing requests on removed card");
2024
2025		WARN_ON(!msb->io_queue_stopped);
2026
2027		while ((req = blk_fetch_request(q)) != NULL)
2028			__blk_end_request_all(req, -ENODEV);
2029		return;
2030	}
2031
2032	if (msb->req)
2033		return;
2034
2035	if (!msb->io_queue_stopped)
2036		queue_work(msb->io_queue, &msb->io_work);
2037}
2038
2039static int msb_check_card(struct memstick_dev *card)
2040{
2041	struct msb_data *msb = memstick_get_drvdata(card);
2042	return (msb->card_dead == 0);
2043}
2044
2045static void msb_stop(struct memstick_dev *card)
2046{
2047	struct msb_data *msb = memstick_get_drvdata(card);
2048	unsigned long flags;
2049
2050	dbg("Stopping all msblock IO");
2051
2052	spin_lock_irqsave(&msb->q_lock, flags);
2053	blk_stop_queue(msb->queue);
2054	msb->io_queue_stopped = true;
2055	spin_unlock_irqrestore(&msb->q_lock, flags);
2056
2057	del_timer_sync(&msb->cache_flush_timer);
2058	flush_workqueue(msb->io_queue);
2059
2060	if (msb->req) {
2061		spin_lock_irqsave(&msb->q_lock, flags);
2062		blk_requeue_request(msb->queue, msb->req);
2063		msb->req = NULL;
2064		spin_unlock_irqrestore(&msb->q_lock, flags);
2065	}
2066
2067}
2068
2069static void msb_start(struct memstick_dev *card)
2070{
2071	struct msb_data *msb = memstick_get_drvdata(card);
2072	unsigned long flags;
2073
2074	dbg("Resuming IO from msblock");
2075
2076	msb_invalidate_reg_window(msb);
2077
2078	spin_lock_irqsave(&msb->q_lock, flags);
2079	if (!msb->io_queue_stopped || msb->card_dead) {
2080		spin_unlock_irqrestore(&msb->q_lock, flags);
2081		return;
2082	}
2083	spin_unlock_irqrestore(&msb->q_lock, flags);
2084
2085	/* Kick a cache flush anyway; it's harmless */
2086	msb->need_flush_cache = true;
2087	msb->io_queue_stopped = false;
2088
2089	spin_lock_irqsave(&msb->q_lock, flags);
2090	blk_start_queue(msb->queue);
2091	spin_unlock_irqrestore(&msb->q_lock, flags);
2092
2093	queue_work(msb->io_queue, &msb->io_work);
2094
2095}
2096
2097static const struct block_device_operations msb_bdops = {
2098	.open    = msb_bd_open,
2099	.release = msb_bd_release,
2100	.getgeo  = msb_bd_getgeo,
2101	.owner   = THIS_MODULE
2102};
2103
2104/* Registers the block device */
2105static int msb_init_disk(struct memstick_dev *card)
2106{
2107	struct msb_data *msb = memstick_get_drvdata(card);
2108	struct memstick_host *host = card->host;
2109	int rc;
2110	u64 limit = BLK_BOUNCE_HIGH;
2111	unsigned long capacity;
2112
2113	if (host->dev.dma_mask && *(host->dev.dma_mask))
2114		limit = *(host->dev.dma_mask);
2115
2116	mutex_lock(&msb_disk_lock);
2117	msb->disk_id = idr_alloc(&msb_disk_idr, card, 0, 256, GFP_KERNEL);
2118	mutex_unlock(&msb_disk_lock);
2119
2120	if (msb->disk_id  < 0)
2121		return msb->disk_id;
2122
2123	msb->disk = alloc_disk(0);
2124	if (!msb->disk) {
2125		rc = -ENOMEM;
2126		goto out_release_id;
2127	}
2128
2129	msb->queue = blk_init_queue(msb_submit_req, &msb->q_lock);
2130	if (!msb->queue) {
2131		rc = -ENOMEM;
2132		goto out_put_disk;
2133	}
2134
2135	msb->queue->queuedata = card;
2136	blk_queue_prep_rq(msb->queue, msb_prepare_req);
2137
2138	blk_queue_bounce_limit(msb->queue, limit);
2139	blk_queue_max_hw_sectors(msb->queue, MS_BLOCK_MAX_PAGES);
2140	blk_queue_max_segments(msb->queue, MS_BLOCK_MAX_SEGS);
2141	blk_queue_max_segment_size(msb->queue,
2142				   MS_BLOCK_MAX_PAGES * msb->page_size);
2143	blk_queue_logical_block_size(msb->queue, msb->page_size);
2144
2145	sprintf(msb->disk->disk_name, "msblk%d", msb->disk_id);
2146	msb->disk->fops = &msb_bdops;
2147	msb->disk->private_data = msb;
2148	msb->disk->queue = msb->queue;
2149	msb->disk->driverfs_dev = &card->dev;
2150	msb->disk->flags |= GENHD_FL_EXT_DEVT;
2151
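    	/* Capacity in 512-byte sectors: pages per block * number of
    	 * logical blocks * sectors per page */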
2152	capacity = msb->pages_in_block * msb->logical_block_count;
2153	capacity *= (msb->page_size / 512);
2154	set_capacity(msb->disk, capacity);
2155	dbg("Set total disk size to %lu sectors", capacity);
2156
2157	msb->usage_count = 1;
2158	msb->io_queue = alloc_ordered_workqueue("ms_block", WQ_MEM_RECLAIM);
2159	INIT_WORK(&msb->io_work, msb_io_work);
2160	sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);
2161
2162	if (msb->read_only)
2163		set_disk_ro(msb->disk, 1);
2164
2165	msb_start(card);
2166	add_disk(msb->disk);
2167	dbg("Disk added");
2168	return 0;
2169
2170out_put_disk:
2171	put_disk(msb->disk);
2172out_release_id:
2173	mutex_lock(&msb_disk_lock);
2174	idr_remove(&msb_disk_idr, msb->disk_id);
2175	mutex_unlock(&msb_disk_lock);
2176	return rc;
2177}
2178
2179static int msb_probe(struct memstick_dev *card)
2180{
2181	struct msb_data *msb;
2182	int rc = 0;
2183
2184	msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
2185	if (!msb)
2186		return -ENOMEM;
2187	memstick_set_drvdata(card, msb);
2188	msb->card = card;
2189	spin_lock_init(&msb->q_lock);
2190
2191	rc = msb_init_card(card);
2192	if (rc)
2193		goto out_free;
2194
2195	rc = msb_init_disk(card);
2196	if (!rc) {
2197		card->check = msb_check_card;
2198		card->stop = msb_stop;
2199		card->start = msb_start;
2200		return 0;
2201	}
2202out_free:
2203	memstick_set_drvdata(card, NULL);
2204	msb_data_clear(msb);
2205	kfree(msb);
2206	return rc;
2207}
2208
2209static void msb_remove(struct memstick_dev *card)
2210{
2211	struct msb_data *msb = memstick_get_drvdata(card);
2212	unsigned long flags;
2213
2214	if (!msb->io_queue_stopped)
2215		msb_stop(card);
2216
2217	dbg("Removing the disk device");
2218
2219	/* Take care of unhandled + new requests from now on */
2220	spin_lock_irqsave(&msb->q_lock, flags);
2221	msb->card_dead = true;
2222	blk_start_queue(msb->queue);
2223	spin_unlock_irqrestore(&msb->q_lock, flags);
2224
2225	/* Remove the disk */
2226	del_gendisk(msb->disk);
2227	blk_cleanup_queue(msb->queue);
2228	msb->queue = NULL;
2229
2230	mutex_lock(&msb_disk_lock);
2231	msb_data_clear(msb);
2232	mutex_unlock(&msb_disk_lock);
2233
2234	msb_disk_release(msb->disk);
2235	memstick_set_drvdata(card, NULL);
2236}
2237
2238#ifdef CONFIG_PM
2239
2240static int msb_suspend(struct memstick_dev *card, pm_message_t state)
2241{
2242	msb_stop(card);
2243	return 0;
2244}
2245
2246static int msb_resume(struct memstick_dev *card)
2247{
2248	struct msb_data *msb = memstick_get_drvdata(card);
2249	struct msb_data *new_msb = NULL;
2250	bool card_dead = true;
2251
2252#ifndef CONFIG_MEMSTICK_UNSAFE_RESUME
2253	msb->card_dead = true;
2254	return 0;
2255#endif
2256	mutex_lock(&card->host->lock);
2257
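    	/* Re-initialize the card into a scratch msb_data and compare the
    	 * boot page, FTL table and used-blocks bitmap against the
    	 * pre-suspend state; any mismatch means the card was replaced
    	 * while we were suspended */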
2258	new_msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
2259	if (!new_msb)
2260		goto out;
2261
2262	new_msb->card = card;
2263	memstick_set_drvdata(card, new_msb);
2264	spin_lock_init(&new_msb->q_lock);
2265	sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);
2266
2267	if (msb_init_card(card))
2268		goto out;
2269
2270	if (msb->block_size != new_msb->block_size)
2271		goto out;
2272
2273	if (memcmp(msb->boot_page, new_msb->boot_page,
2274					sizeof(struct ms_boot_page)))
2275		goto out;
2276
2277	if (msb->logical_block_count != new_msb->logical_block_count ||
2278		memcmp(msb->lba_to_pba_table, new_msb->lba_to_pba_table,
2279						msb->logical_block_count))
2280		goto out;
2281
2282	if (msb->block_count != new_msb->block_count ||
2283		memcmp(msb->used_blocks_bitmap, new_msb->used_blocks_bitmap,
2284							msb->block_count / 8))
2285		goto out;
2286
2287	card_dead = false;
2288out:
2289	if (card_dead)
2290		dbg("Card was removed/replaced during suspend");
2291
2292	msb->card_dead = card_dead;
2293	memstick_set_drvdata(card, msb);
2294
2295	if (new_msb) {
2296		msb_data_clear(new_msb);
2297		kfree(new_msb);
2298	}
2299
2300	msb_start(card);
2301	mutex_unlock(&card->host->lock);
2302	return 0;
2303}
2304#else
2305
2306#define msb_suspend NULL
2307#define msb_resume NULL
2308
2309#endif /* CONFIG_PM */
2310
2311static struct memstick_device_id msb_id_tbl[] = {
2312	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
2313	 MEMSTICK_CLASS_FLASH},
2314
2315	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
2316	 MEMSTICK_CLASS_ROM},
2317
2318	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
2319	 MEMSTICK_CLASS_RO},
2320
2321	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
2322	 MEMSTICK_CLASS_WP},
2323
2324	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_DUO, MEMSTICK_CATEGORY_STORAGE_DUO,
2325	 MEMSTICK_CLASS_DUO},
2326	{}
2327};
2328MODULE_DEVICE_TABLE(memstick, msb_id_tbl);
2329
2330
2331static struct memstick_driver msb_driver = {
2332	.driver = {
2333		.name  = DRIVER_NAME,
2334		.owner = THIS_MODULE
2335	},
2336	.id_table = msb_id_tbl,
2337	.probe    = msb_probe,
2338	.remove   = msb_remove,
2339	.suspend  = msb_suspend,
2340	.resume   = msb_resume
2341};
2342
2343static int major;
2344
2345static int __init msb_init(void)
2346{
2347	int rc = register_blkdev(0, DRIVER_NAME);
2348
2349	if (rc < 0) {
2350		pr_err("failed to register major (error %d)\n", rc);
2351		return rc;
2352	}
2353
2354	major = rc;
2355	rc = memstick_register_driver(&msb_driver);
2356	if (rc) {
2357		unregister_blkdev(major, DRIVER_NAME);
2358		pr_err("failed to register memstick driver (error %d)\n", rc);
2359	}
2360
2361	return rc;
2362}
2363
2364static void __exit msb_exit(void)
2365{
2366	memstick_unregister_driver(&msb_driver);
2367	unregister_blkdev(major, DRIVER_NAME);
2368	idr_destroy(&msb_disk_idr);
2369}
2370
2371module_init(msb_init);
2372module_exit(msb_exit);
2373
2374module_param(cache_flush_timeout, int, S_IRUGO);
2375MODULE_PARM_DESC(cache_flush_timeout,
2376				"Cache flush timeout in msec (1000 default)");
2377module_param(debug, int, S_IRUGO | S_IWUSR);
2378MODULE_PARM_DESC(debug, "Debug level (0-2)");
2379
2380module_param(verify_writes, bool, S_IRUGO);
2381MODULE_PARM_DESC(verify_writes, "Read back and check all data that is written");
2382
2383MODULE_LICENSE("GPL");
2384MODULE_AUTHOR("Maxim Levitsky");
2385MODULE_DESCRIPTION("Sony MemoryStick block device driver");
v4.17
 131/* Get the zone in which a block with logical address 'lba' lives.
 132 * Flash is broken into zones.
 133 * Each zone consists of 512 eraseblocks; the first zone holds 494
 134 * logical blocks and every following zone holds 496.
 135 * Therefore zone #0 hosts blocks 0-493, zone #1 blocks 494-989, etc...
 136 */
 137static int msb_get_zone_from_lba(int lba)
 138{
 139	if (lba < 494)
 140		return 0;
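    	/* e.g. lba 0-493 -> zone 0, lba 494-989 -> zone 1, lba 990 -> zone 2 */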
 141	return ((lba - 494) / 496) + 1;
 142}
 143
 144/* Get zone of physical block. Trivial */
 145static int msb_get_zone_from_pba(int pba)
 146{
 147	return pba / MS_BLOCKS_IN_ZONE;
 148}
 149
 150/* Debug test to validate free block counts */
 151static int msb_validate_used_block_bitmap(struct msb_data *msb)
 152{
 153	int total_free_blocks = 0;
 154	int i;
 155
 156	if (!debug)
 157		return 0;
 158
 159	for (i = 0; i < msb->zone_count; i++)
 160		total_free_blocks += msb->free_block_count[i];
 161
 162	if (msb->block_count - bitmap_weight(msb->used_blocks_bitmap,
 163					msb->block_count) == total_free_blocks)
 164		return 0;
 165
 166	pr_err("BUG: free block counts don't match the bitmap");
 167	msb->read_only = true;
 168	return -EINVAL;
 169}
 170
 171/* Mark physical block as used */
 172static void msb_mark_block_used(struct msb_data *msb, int pba)
 173{
 174	int zone = msb_get_zone_from_pba(pba);
 175
 176	if (test_bit(pba, msb->used_blocks_bitmap)) {
 177		pr_err(
 178		"BUG: attempt to mark already used pba %d as used", pba);
 179		msb->read_only = true;
 180		return;
 181	}
 182
 183	if (msb_validate_used_block_bitmap(msb))
 184		return;
 185
 186	/* No races because all IO is single threaded */
 187	__set_bit(pba, msb->used_blocks_bitmap);
 188	msb->free_block_count[zone]--;
 189}
 190
 191/* Mark physical block as free */
 192static void msb_mark_block_unused(struct msb_data *msb, int pba)
 193{
 194	int zone = msb_get_zone_from_pba(pba);
 195
 196	if (!test_bit(pba, msb->used_blocks_bitmap)) {
 197		pr_err("BUG: attempt to mark already unused pba %d as unused" , pba);
 198		msb->read_only = true;
 199		return;
 200	}
 201
 202	if (msb_validate_used_block_bitmap(msb))
 203		return;
 204
 205	/* No races because all IO is single threaded */
 206	__clear_bit(pba, msb->used_blocks_bitmap);
 207	msb->free_block_count[zone]++;
 208}
 209
 210/* Invalidate current register window */
 211static void msb_invalidate_reg_window(struct msb_data *msb)
 212{
 213	msb->reg_addr.w_offset = offsetof(struct ms_register, id);
 214	msb->reg_addr.w_length = sizeof(struct ms_id_register);
 215	msb->reg_addr.r_offset = offsetof(struct ms_register, id);
 216	msb->reg_addr.r_length = sizeof(struct ms_id_register);
 217	msb->addr_valid = false;
 218}
 219
 220/* Start a state machine */
 221static int msb_run_state_machine(struct msb_data *msb, int   (*state_func)
 222		(struct memstick_dev *card, struct memstick_request **req))
 223{
 224	struct memstick_dev *card = msb->card;
 225
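    	/* msb->state == -1 means no state machine is running; the handler
    	 * advances msb->state until msb_exit_state_machine() sets it
    	 * back to -1 */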
 226	WARN_ON(msb->state != -1);
 227	msb->int_polling = false;
 228	msb->state = 0;
 229	msb->exit_error = 0;
 230
 231	memset(&card->current_mrq, 0, sizeof(card->current_mrq));
 232
 233	card->next_request = state_func;
 234	memstick_new_req(card->host);
 235	wait_for_completion(&card->mrq_complete);
 236
 237	WARN_ON(msb->state != -1);
 238	return msb->exit_error;
 239}
 240
 241/* State machine handlers call this to exit */
 242static int msb_exit_state_machine(struct msb_data *msb, int error)
 243{
 244	WARN_ON(msb->state == -1);
 245
 246	msb->state = -1;
 247	msb->exit_error = error;
 248	msb->card->next_request = h_msb_default_bad;
 249
 250	/* Invalidate reg window on errors */
 251	if (error)
 252		msb_invalidate_reg_window(msb);
 253
 254	complete(&msb->card->mrq_complete);
 255	return -ENXIO;
 256}
 257
 258/* read INT register */
 259static int msb_read_int_reg(struct msb_data *msb, long timeout)
 260{
 261	struct memstick_request *mrq = &msb->card->current_mrq;
 262
 263	WARN_ON(msb->state == -1);
 264
 265	if (!msb->int_polling) {
 266		msb->int_timeout = jiffies +
 267			msecs_to_jiffies(timeout == -1 ? 500 : timeout);
 268		msb->int_polling = true;
 269	} else if (time_after(jiffies, msb->int_timeout)) {
 270		mrq->data[0] = MEMSTICK_INT_CMDNAK;
 271		return 0;
 272	}
 273
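    	/* Hosts with MEMSTICK_CAP_AUTO_GET_INT have already latched the INT
    	 * register together with the previous TPC, so reuse that value
    	 * instead of issuing a separate MS_TPC_GET_INT */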
 274	if ((msb->caps & MEMSTICK_CAP_AUTO_GET_INT) &&
 275				mrq->need_card_int && !mrq->error) {
 276		mrq->data[0] = mrq->int_reg;
 277		mrq->need_card_int = false;
 278		return 0;
 279	} else {
 280		memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
 281		return 1;
 282	}
 283}
 284
 285/* Read a register */
 286static int msb_read_regs(struct msb_data *msb, int offset, int len)
 287{
 288	struct memstick_request *req = &msb->card->current_mrq;
 289
 290	if (msb->reg_addr.r_offset != offset ||
 291	    msb->reg_addr.r_length != len || !msb->addr_valid) {
 292
 293		msb->reg_addr.r_offset = offset;
 294		msb->reg_addr.r_length = len;
 295		msb->addr_valid = true;
 296
 297		memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
 298			&msb->reg_addr, sizeof(msb->reg_addr));
 299		return 0;
 300	}
 301
 302	memstick_init_req(req, MS_TPC_READ_REG, NULL, len);
 303	return 1;
 304}
 305
 306/* Write a card register */
 307static int msb_write_regs(struct msb_data *msb, int offset, int len, void *buf)
 308{
 309	struct memstick_request *req = &msb->card->current_mrq;
 310
 311	if (msb->reg_addr.w_offset != offset ||
 312		msb->reg_addr.w_length != len  || !msb->addr_valid) {
 313
 314		msb->reg_addr.w_offset = offset;
 315		msb->reg_addr.w_length = len;
 316		msb->addr_valid = true;
 317
 318		memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
 319			&msb->reg_addr, sizeof(msb->reg_addr));
 320		return 0;
 321	}
 322
 323	memstick_init_req(req, MS_TPC_WRITE_REG, buf, len);
 324	return 1;
 325}
 326
 327/* Handler for absence of IO */
 328static int h_msb_default_bad(struct memstick_dev *card,
 329						struct memstick_request **mrq)
 330{
 331	return -ENXIO;
 332}
 333
 334/*
 335 * This function is a handler for reads of one page from the device.
 336 * Writes output to msb->current_sg, takes sector address from msb->regs.param.
 337 * Can also be used to read extra data only. Set params accordingly.
 338 */
 339static int h_msb_read_page(struct memstick_dev *card,
 340					struct memstick_request **out_mrq)
 341{
 342	struct msb_data *msb = memstick_get_drvdata(card);
 343	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
 344	struct scatterlist sg[2];
 345	u8 command, intreg;
 346
 347	if (mrq->error) {
 348		dbg("read_page, unknown error");
 349		return msb_exit_state_machine(msb, mrq->error);
 350	}
 351again:
 352	switch (msb->state) {
 353	case MSB_RP_SEND_BLOCK_ADDRESS:
 354		/* msb_write_regs sometimes "fails" because it first needs to
 355			update the reg window, and thus returns a request for
 356			that. In that case we stay in this state and retry */
 357		if (!msb_write_regs(msb,
 358			offsetof(struct ms_register, param),
 359			sizeof(struct ms_param_register),
 360			(unsigned char *)&msb->regs.param))
 361			return 0;
 362
 363		msb->state = MSB_RP_SEND_READ_COMMAND;
 364		return 0;
 365
 366	case MSB_RP_SEND_READ_COMMAND:
 367		command = MS_CMD_BLOCK_READ;
 368		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
 369		msb->state = MSB_RP_SEND_INT_REQ;
 370		return 0;
 371
 372	case MSB_RP_SEND_INT_REQ:
 373		msb->state = MSB_RP_RECEIVE_INT_REQ_RESULT;
 374		/* If we don't actually need to send the INT read request
 375			(only needed in serial mode), then just fall through */
 376		if (msb_read_int_reg(msb, -1))
 377			return 0;
 378		/* fallthrough */
 379
 380	case MSB_RP_RECEIVE_INT_REQ_RESULT:
 381		intreg = mrq->data[0];
 382		msb->regs.status.interrupt = intreg;
 383
 384		if (intreg & MEMSTICK_INT_CMDNAK)
 385			return msb_exit_state_machine(msb, -EIO);
 386
 387		if (!(intreg & MEMSTICK_INT_CED)) {
 388			msb->state = MSB_RP_SEND_INT_REQ;
 389			goto again;
 390		}
 391
 392		msb->int_polling = false;
 393		msb->state = (intreg & MEMSTICK_INT_ERR) ?
 394			MSB_RP_SEND_READ_STATUS_REG : MSB_RP_SEND_OOB_READ;
 395		goto again;
 396
 397	case MSB_RP_SEND_READ_STATUS_REG:
 398		 /* read the status register to understand source of the INT_ERR */
 399		if (!msb_read_regs(msb,
 400			offsetof(struct ms_register, status),
 401			sizeof(struct ms_status_register)))
 402			return 0;
 403
 404		msb->state = MSB_RP_RECEIVE_STATUS_REG;
 405		return 0;
 406
 407	case MSB_RP_RECEIVE_STATUS_REG:
 408		msb->regs.status = *(struct ms_status_register *)mrq->data;
 409		msb->state = MSB_RP_SEND_OOB_READ;
 410		/* fallthrough */
 411
 412	case MSB_RP_SEND_OOB_READ:
 413		if (!msb_read_regs(msb,
 414			offsetof(struct ms_register, extra_data),
 415			sizeof(struct ms_extra_data_register)))
 416			return 0;
 417
 418		msb->state = MSB_RP_RECEIVE_OOB_READ;
 419		return 0;
 420
 421	case MSB_RP_RECEIVE_OOB_READ:
 422		msb->regs.extra_data =
 423			*(struct ms_extra_data_register *) mrq->data;
 424		msb->state = MSB_RP_SEND_READ_DATA;
 425		/* fallthrough */
 426
 427	case MSB_RP_SEND_READ_DATA:
 428		/* Skip that state if we only read the oob */
 429		if (msb->regs.param.cp == MEMSTICK_CP_EXTRA) {
 430			msb->state = MSB_RP_RECEIVE_READ_DATA;
 431			goto again;
 432		}
 433
 434		sg_init_table(sg, ARRAY_SIZE(sg));
 435		msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
 436			msb->current_sg_offset,
 437			msb->page_size);
 438
 439		memstick_init_req_sg(mrq, MS_TPC_READ_LONG_DATA, sg);
 440		msb->state = MSB_RP_RECEIVE_READ_DATA;
 441		return 0;
 442
 443	case MSB_RP_RECEIVE_READ_DATA:
 444		if (!(msb->regs.status.interrupt & MEMSTICK_INT_ERR)) {
 445			msb->current_sg_offset += msb->page_size;
 446			return msb_exit_state_machine(msb, 0);
 447		}
 448
 449		if (msb->regs.status.status1 & MEMSTICK_UNCORR_ERROR) {
 450			dbg("read_page: uncorrectable error");
 451			return msb_exit_state_machine(msb, -EBADMSG);
 452		}
 453
 454		if (msb->regs.status.status1 & MEMSTICK_CORR_ERROR) {
 455			dbg("read_page: correctable error");
 456			msb->current_sg_offset += msb->page_size;
 457			return msb_exit_state_machine(msb, -EUCLEAN);
 458		} else {
 459			dbg("read_page: INT error, but no status error bits");
 460			return msb_exit_state_machine(msb, -EIO);
 461		}
 462	}
 463
 464	BUG();
 465}
 466
 467/*
 468 * Handler of writes of exactly one block.
 469 * Takes address from msb->regs.param.
 470 * Writes the same extra data to every page of the block, taken
 471 * from msb->regs.extra_data.
 472 * Returns -EBADMSG if write fails due to uncorrectable error, or -EIO if
 473 * device refuses to take the command or something else
 474 */
 475static int h_msb_write_block(struct memstick_dev *card,
 476					struct memstick_request **out_mrq)
 477{
 478	struct msb_data *msb = memstick_get_drvdata(card);
 479	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
 480	struct scatterlist sg[2];
 481	u8 intreg, command;
 482
 483	if (mrq->error)
 484		return msb_exit_state_machine(msb, mrq->error);
 485
 486again:
 487	switch (msb->state) {
 488
 489	/* HACK: JMicron handling of TPCs between 8 and
 490	 *	sizeof(memstick_request.data) bytes is broken due to a
 491	 *	hardware bug in the PIO mode that is used for these TPCs.
 492	 *	Therefore split the write.
 493	 */
 494
 495	case MSB_WB_SEND_WRITE_PARAMS:
 496		if (!msb_write_regs(msb,
 497			offsetof(struct ms_register, param),
 498			sizeof(struct ms_param_register),
 499			&msb->regs.param))
 500			return 0;
 501
 502		msb->state = MSB_WB_SEND_WRITE_OOB;
 503		return 0;
 504
 505	case MSB_WB_SEND_WRITE_OOB:
 506		if (!msb_write_regs(msb,
 507			offsetof(struct ms_register, extra_data),
 508			sizeof(struct ms_extra_data_register),
 509			&msb->regs.extra_data))
 510			return 0;
 511		msb->state = MSB_WB_SEND_WRITE_COMMAND;
 512		return 0;
 513
 514
 515	case MSB_WB_SEND_WRITE_COMMAND:
 516		command = MS_CMD_BLOCK_WRITE;
 517		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
 518		msb->state = MSB_WB_SEND_INT_REQ;
 519		return 0;
 520
 521	case MSB_WB_SEND_INT_REQ:
 522		msb->state = MSB_WB_RECEIVE_INT_REQ;
 523		if (msb_read_int_reg(msb, -1))
 524			return 0;
 525		/* fallthrough */
 526
 527	case MSB_WB_RECEIVE_INT_REQ:
 528		intreg = mrq->data[0];
 529		msb->regs.status.interrupt = intreg;
 530
 531		/* errors mean we are out of here, and fast... */
 532		if (intreg & (MEMSTICK_INT_CMDNAK))
 533			return msb_exit_state_machine(msb, -EIO);
 534
 535		if (intreg & MEMSTICK_INT_ERR)
 536			return msb_exit_state_machine(msb, -EBADMSG);
 537
 538
 539		/* for last page we need to poll CED */
 540		if (msb->current_page == msb->pages_in_block) {
 541			if (intreg & MEMSTICK_INT_CED)
 542				return msb_exit_state_machine(msb, 0);
 543			msb->state = MSB_WB_SEND_INT_REQ;
 544			goto again;
 545
 546		}
 547
 548		/* for non-last page we need BREQ before writing next chunk */
 549		if (!(intreg & MEMSTICK_INT_BREQ)) {
 550			msb->state = MSB_WB_SEND_INT_REQ;
 551			goto again;
 552		}
 553
 554		msb->int_polling = false;
 555		msb->state = MSB_WB_SEND_WRITE_DATA;
 556		/* fallthrough */
 557
 558	case MSB_WB_SEND_WRITE_DATA:
 559		sg_init_table(sg, ARRAY_SIZE(sg));
 560
 561		if (msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
 562			msb->current_sg_offset,
 563			msb->page_size) < msb->page_size)
 564			return msb_exit_state_machine(msb, -EIO);
 565
 566		memstick_init_req_sg(mrq, MS_TPC_WRITE_LONG_DATA, sg);
 567		mrq->need_card_int = 1;
 568		msb->state = MSB_WB_RECEIVE_WRITE_CONFIRMATION;
 569		return 0;
 570
 571	case MSB_WB_RECEIVE_WRITE_CONFIRMATION:
 572		msb->current_page++;
 573		msb->current_sg_offset += msb->page_size;
 574		msb->state = MSB_WB_SEND_INT_REQ;
 575		goto again;
 576	default:
 577		BUG();
 578	}
 579
 580	return 0;
 581}
 582
 583/*
 584 * This function is used to send simple IO requests to device that consist
 585 * of register write + command
 586 */
 587static int h_msb_send_command(struct memstick_dev *card,
 588					struct memstick_request **out_mrq)
 589{
 590	struct msb_data *msb = memstick_get_drvdata(card);
 591	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
 592	u8 intreg;
 593
 594	if (mrq->error) {
 595		dbg("send_command: unknown error");
 596		return msb_exit_state_machine(msb, mrq->error);
 597	}
 598again:
 599	switch (msb->state) {
 600
 601	/* HACK: see h_msb_write_block */
 602	case MSB_SC_SEND_WRITE_PARAMS: /* write param register*/
 603		if (!msb_write_regs(msb,
 604			offsetof(struct ms_register, param),
 605			sizeof(struct ms_param_register),
 606			&msb->regs.param))
 607			return 0;
 608		msb->state = MSB_SC_SEND_WRITE_OOB;
 609		return 0;
 610
 611	case MSB_SC_SEND_WRITE_OOB:
 612		if (!msb->command_need_oob) {
 613			msb->state = MSB_SC_SEND_COMMAND;
 614			goto again;
 615		}
 616
 617		if (!msb_write_regs(msb,
 618			offsetof(struct ms_register, extra_data),
 619			sizeof(struct ms_extra_data_register),
 620			&msb->regs.extra_data))
 621			return 0;
 622
 623		msb->state = MSB_SC_SEND_COMMAND;
 624		return 0;
 625
 626	case MSB_SC_SEND_COMMAND:
 627		memstick_init_req(mrq, MS_TPC_SET_CMD, &msb->command_value, 1);
 628		msb->state = MSB_SC_SEND_INT_REQ;
 629		return 0;
 630
 631	case MSB_SC_SEND_INT_REQ:
 632		msb->state = MSB_SC_RECEIVE_INT_REQ;
 633		if (msb_read_int_reg(msb, -1))
 634			return 0;
 635		/* fallthrough */
 636
 637	case MSB_SC_RECEIVE_INT_REQ:
 638		intreg = mrq->data[0];
 639
 640		if (intreg & MEMSTICK_INT_CMDNAK)
 641			return msb_exit_state_machine(msb, -EIO);
 642		if (intreg & MEMSTICK_INT_ERR)
 643			return msb_exit_state_machine(msb, -EBADMSG);
 644
 645		if (!(intreg & MEMSTICK_INT_CED)) {
 646			msb->state = MSB_SC_SEND_INT_REQ;
 647			goto again;
 648		}
 649
 650		return msb_exit_state_machine(msb, 0);
 651	}
 652
 653	BUG();
 654}
 655
 656/* Small handler for card reset */
 657static int h_msb_reset(struct memstick_dev *card,
 658					struct memstick_request **out_mrq)
 659{
 660	u8 command = MS_CMD_RESET;
 661	struct msb_data *msb = memstick_get_drvdata(card);
 662	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
 663
 664	if (mrq->error)
 665		return msb_exit_state_machine(msb, mrq->error);
 666
 667	switch (msb->state) {
 668	case MSB_RS_SEND:
 669		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
 670		mrq->need_card_int = 0;
 671		msb->state = MSB_RS_CONFIRM;
 672		return 0;
 673	case MSB_RS_CONFIRM:
 674		return msb_exit_state_machine(msb, 0);
 675	}
 676	BUG();
 677}
 678
 679/* This handler is used to do the serial->parallel switch */
 680static int h_msb_parallel_switch(struct memstick_dev *card,
 681					struct memstick_request **out_mrq)
 682{
 683	struct msb_data *msb = memstick_get_drvdata(card);
 684	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
 685	struct memstick_host *host = card->host;
 686
 687	if (mrq->error) {
 688		dbg("parallel_switch: error");
 689		msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
 690		return msb_exit_state_machine(msb, mrq->error);
 691	}
 692
 693	switch (msb->state) {
 694	case MSB_PS_SEND_SWITCH_COMMAND:
 695		/* Set the parallel interface on memstick side */
 696		msb->regs.param.system |= MEMSTICK_SYS_PAM;
 697
 698		if (!msb_write_regs(msb,
 699			offsetof(struct ms_register, param),
 700			1,
 701			(unsigned char *)&msb->regs.param))
 702			return 0;
 703
 704		msb->state = MSB_PS_SWICH_HOST;
 705		return 0;
 706
 707	case MSB_PS_SWICH_HOST:
 708		 /* Set parallel interface on our side + send a dummy request
 709			to see if card responds */
 710		host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_PAR4);
 711		memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
 712		msb->state = MSB_PS_CONFIRM;
 713		return 0;
 714
 715	case MSB_PS_CONFIRM:
 716		return msb_exit_state_machine(msb, 0);
 717	}
 718
 719	BUG();
 720}
 721
 722static int msb_switch_to_parallel(struct msb_data *msb);
 723
 724/* Reset the card, to guard against hw errors being treated as bad blocks */
 725static int msb_reset(struct msb_data *msb, bool full)
 726{
 727
 728	bool was_parallel = msb->regs.param.system & MEMSTICK_SYS_PAM;
 729	struct memstick_dev *card = msb->card;
 730	struct memstick_host *host = card->host;
 731	int error;
 732
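    	/* A full reset also power-cycles the card and drops the interface
    	 * back to serial mode before issuing MS_CMD_RESET */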
 733	/* Reset the card */
 734	msb->regs.param.system = MEMSTICK_SYS_BAMD;
 735
 736	if (full) {
 737		error =  host->set_param(host,
 738					MEMSTICK_POWER, MEMSTICK_POWER_OFF);
 739		if (error)
 740			goto out_error;
 741
 742		msb_invalidate_reg_window(msb);
 743
 744		error = host->set_param(host,
 745					MEMSTICK_POWER, MEMSTICK_POWER_ON);
 746		if (error)
 747			goto out_error;
 748
 749		error = host->set_param(host,
 750					MEMSTICK_INTERFACE, MEMSTICK_SERIAL);
 751		if (error) {
 752out_error:
 753			dbg("Failed to reset the host controller");
 754			msb->read_only = true;
 755			return -EFAULT;
 756		}
 757	}
 758
 759	error = msb_run_state_machine(msb, h_msb_reset);
 760	if (error) {
 761		dbg("Failed to reset the card");
 762		msb->read_only = true;
 763		return -ENODEV;
 764	}
 765
 766	/* Set parallel mode */
 767	if (was_parallel)
 768		msb_switch_to_parallel(msb);
 769	return 0;
 770}
 771
 772/* Attempts to switch interface to parallel mode */
 773static int msb_switch_to_parallel(struct msb_data *msb)
 774{
 775	int error;
 776
 777	error = msb_run_state_machine(msb, h_msb_parallel_switch);
 778	if (error) {
 779		pr_err("Switch to parallel failed");
 780		msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
 781		msb_reset(msb, true);
 782		return -EFAULT;
 783	}
 784
 785	msb->caps |= MEMSTICK_CAP_AUTO_GET_INT;
 786	return 0;
 787}
 788
 789/* Changes overwrite flag on a page */
 790static int msb_set_overwrite_flag(struct msb_data *msb,
 791						u16 pba, u8 page, u8 flag)
 792{
 793	if (msb->read_only)
 794		return -EROFS;
 795
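    	/* The overwrite flag lives in the page's OOB area; callers clear
    	 * individual status bits (BKST, PGST0, UDST) to mark a block or
    	 * page as bad or unstable */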
 796	msb->regs.param.block_address = cpu_to_be16(pba);
 797	msb->regs.param.page_address = page;
 798	msb->regs.param.cp = MEMSTICK_CP_OVERWRITE;
 799	msb->regs.extra_data.overwrite_flag = flag;
 800	msb->command_value = MS_CMD_BLOCK_WRITE;
 801	msb->command_need_oob = true;
 802
 803	dbg_verbose("changing overwrite flag to %02x for sector %d, page %d",
 804							flag, pba, page);
 805	return msb_run_state_machine(msb, h_msb_send_command);
 806}
 807
 808static int msb_mark_bad(struct msb_data *msb, int pba)
 809{
 810	pr_notice("marking pba %d as bad", pba);
 811	msb_reset(msb, true);
 812	return msb_set_overwrite_flag(
 813			msb, pba, 0, 0xFF & ~MEMSTICK_OVERWRITE_BKST);
 814}
 815
 816static int msb_mark_page_bad(struct msb_data *msb, int pba, int page)
 817{
 818	dbg("marking page %d of pba %d as bad", page, pba);
 819	msb_reset(msb, true);
 820	return msb_set_overwrite_flag(msb,
 821		pba, page, ~MEMSTICK_OVERWRITE_PGST0);
 822}
 823
 824/* Erases one physical block */
 825static int msb_erase_block(struct msb_data *msb, u16 pba)
 826{
 827	int error, try;
 828	if (msb->read_only)
 829		return -EROFS;
 830
 831	dbg_verbose("erasing pba %d", pba);
 832
 833	for (try = 1; try < 3; try++) {
 834		msb->regs.param.block_address = cpu_to_be16(pba);
 835		msb->regs.param.page_address = 0;
 836		msb->regs.param.cp = MEMSTICK_CP_BLOCK;
 837		msb->command_value = MS_CMD_BLOCK_ERASE;
 838		msb->command_need_oob = false;
 839
 840
 841		error = msb_run_state_machine(msb, h_msb_send_command);
 842		if (!error || msb_reset(msb, true))
 843			break;
 844	}
 845
 846	if (error) {
 847		pr_err("erase failed, marking pba %d as bad", pba);
 848		msb_mark_bad(msb, pba);
 849	}
 850
 851	dbg_verbose("erase success, marking pba %d as unused", pba);
 852	msb_mark_block_unused(msb, pba);
 853	__set_bit(pba, msb->erased_blocks_bitmap);
 854	return error;
 855}
 856
 857/* Reads one page from device */
 858static int msb_read_page(struct msb_data *msb,
 859	u16 pba, u8 page, struct ms_extra_data_register *extra,
 860					struct scatterlist *sg,  int offset)
 861{
 862	int try, error;
 863
 864	if (pba == MS_BLOCK_INVALID) {
 865		unsigned long flags;
 866		struct sg_mapping_iter miter;
 867		size_t len = msb->page_size;
 868
 869		dbg_verbose("read unmapped sector. returning 0xFF");
 870
 871		local_irq_save(flags);
 872		sg_miter_start(&miter, sg, sg_nents(sg),
 873				SG_MITER_ATOMIC | SG_MITER_TO_SG);
 874
 875		while (sg_miter_next(&miter) && len > 0) {
 876
 877			int chunklen;
 878
 879			if (offset && offset >= miter.length) {
 880				offset -= miter.length;
 881				continue;
 882			}
 883
 884			chunklen = min(miter.length - offset, len);
 885			memset(miter.addr + offset, 0xFF, chunklen);
 886			len -= chunklen;
 887			offset = 0;
 888		}
 889
 890		sg_miter_stop(&miter);
 891		local_irq_restore(flags);
 892
 893		if (offset)
 894			return -EFAULT;
 895
 896		if (extra)
 897			memset(extra, 0xFF, sizeof(*extra));
 898		return 0;
 899	}
 900
 901	if (pba >= msb->block_count) {
 902		pr_err("BUG: attempt to read beyond the end of the card at pba %d", pba);
 903		return -EINVAL;
 904	}
 905
 906	for (try = 1; try < 3; try++) {
 907		msb->regs.param.block_address = cpu_to_be16(pba);
 908		msb->regs.param.page_address = page;
 909		msb->regs.param.cp = MEMSTICK_CP_PAGE;
 910
 911		msb->current_sg = sg;
 912		msb->current_sg_offset = offset;
 913		error = msb_run_state_machine(msb, h_msb_read_page);
 914
 915
 916		if (error == -EUCLEAN) {
 917			pr_notice("correctable error on pba %d, page %d",
 918				pba, page);
 919			error = 0;
 920		}
 921
 922		if (!error && extra)
 923			*extra = msb->regs.extra_data;
 924
 925		if (!error || msb_reset(msb, true))
 926			break;
 927
 928	}
 929
 930	/* Mark bad pages */
 931	if (error == -EBADMSG) {
 932		pr_err("uncorrectable error on read of pba %d, page %d",
 933			pba, page);
 934
 935		if (msb->regs.extra_data.overwrite_flag &
 936					MEMSTICK_OVERWRITE_PGST0)
 937			msb_mark_page_bad(msb, pba, page);
 938		return -EBADMSG;
 939	}
 940
 941	if (error)
 942		pr_err("read of pba %d, page %d failed with error %d",
 943			pba, page, error);
 944	return error;
 945}
 946
 947/* Reads oob of page only */
 948static int msb_read_oob(struct msb_data *msb, u16 pba, u16 page,
 949	struct ms_extra_data_register *extra)
 950{
 951	int error;
 952
 953	BUG_ON(!extra);
 954	msb->regs.param.block_address = cpu_to_be16(pba);
 955	msb->regs.param.page_address = page;
 956	msb->regs.param.cp = MEMSTICK_CP_EXTRA;
 957
 958	if (pba >= msb->block_count) {
 959		pr_err("BUG: attempt to read beyond the end of card at pba %d", pba);
 960		return -EINVAL;
 961	}
 962
 963	error = msb_run_state_machine(msb, h_msb_read_page);
 964	*extra = msb->regs.extra_data;
 965
 966	if (error == -EUCLEAN) {
 967		pr_notice("correctable error on pba %d, page %d",
 968			pba, page);
 969		return 0;
 970	}
 971
 972	return error;
 973}
 974
 975/* Reads a block and compares it with data contained in scatterlist orig_sg */
 976static int msb_verify_block(struct msb_data *msb, u16 pba,
 977				struct scatterlist *orig_sg,  int offset)
 978{
 979	struct scatterlist sg;
 980	int page = 0, error;
 981
 982	sg_init_one(&sg, msb->block_buffer, msb->block_size);
 983
 984	while (page < msb->pages_in_block) {
 985
 986		error = msb_read_page(msb, pba, page,
 987				NULL, &sg, page * msb->page_size);
 988		if (error)
 989			return error;
 990		page++;
 991	}
 992
 993	if (msb_sg_compare_to_buffer(orig_sg, offset,
 994				msb->block_buffer, msb->block_size))
 995		return -EIO;
 996	return 0;
 997}
 998
 999/* Writes exactly one block + oob */
1000static int msb_write_block(struct msb_data *msb,
1001			u16 pba, u32 lba, struct scatterlist *sg, int offset)
1002{
1003	int error, current_try = 1;
1004	BUG_ON(sg->length < msb->page_size);
1005
1006	if (msb->read_only)
1007		return -EROFS;
1008
1009	if (pba == MS_BLOCK_INVALID) {
1010		pr_err(
1011			"BUG: write: attempt to write MS_BLOCK_INVALID block");
1012		return -EINVAL;
1013	}
1014
1015	if (pba >= msb->block_count || lba >= msb->logical_block_count) {
1016		pr_err(
1017		"BUG: write: attempt to write beyond the end of device");
1018		return -EINVAL;
1019	}
1020
1021	if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
1022		pr_err("BUG: write: lba zone mismatch");
1023		return -EINVAL;
1024	}
1025
1026	if (pba == msb->boot_block_locations[0] ||
1027		pba == msb->boot_block_locations[1]) {
1028		pr_err("BUG: write: attempt to write to boot blocks!");
1029		return -EINVAL;
1030	}
1031
1032	while (1) {
1033
1034		if (msb->read_only)
1035			return -EROFS;
1036
1037		msb->regs.param.cp = MEMSTICK_CP_BLOCK;
1038		msb->regs.param.page_address = 0;
1039		msb->regs.param.block_address = cpu_to_be16(pba);
1040
1041		msb->regs.extra_data.management_flag = 0xFF;
1042		msb->regs.extra_data.overwrite_flag = 0xF8;
1043		msb->regs.extra_data.logical_address = cpu_to_be16(lba);
1044
1045		msb->current_sg = sg;
1046		msb->current_sg_offset = offset;
1047		msb->current_page = 0;
1048
1049		error = msb_run_state_machine(msb, h_msb_write_block);
1050
1051		/* The block we just wrote is assumed erased, since its pba
1052			was erased. If it wasn't actually erased, the write
1053			still succeeds but merely clears bits that were already
1054			set in the block, so verify that what we have written
1055			matches what we expect. We do trust the blocks that we
1056			erased ourselves */
1057		if (!error && (verify_writes ||
1058				!test_bit(pba, msb->erased_blocks_bitmap)))
1059			error = msb_verify_block(msb, pba, sg, offset);
1060
1061		if (!error)
1062			break;
1063
1064		if (current_try > 1 || msb_reset(msb, true))
1065			break;
1066
1067		pr_err("write failed, trying to erase the pba %d", pba);
1068		error = msb_erase_block(msb, pba);
1069		if (error)
1070			break;
1071
1072		current_try++;
1073	}
1074	return error;
1075}
1076
1077/* Finds a free block for write replacement */
1078static u16 msb_get_free_block(struct msb_data *msb, int zone)
1079{
1080	u16 pos;
1081	int pba = zone * MS_BLOCKS_IN_ZONE;
1082	int i;
1083
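    	/* Primitive wear leveling: pick a random index among the zone's
    	 * free blocks, then find that block by walking the used-blocks
    	 * bitmap */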
1084	get_random_bytes(&pos, sizeof(pos));
1085
1086	if (!msb->free_block_count[zone]) {
1087		pr_err("no free blocks in zone %d to use for a write (media is worn out), switching to RO mode", zone);
1088		msb->read_only = true;
1089		return MS_BLOCK_INVALID;
1090	}
1091
1092	pos %= msb->free_block_count[zone];
1093
1094	dbg_verbose("have %d choices for a free block, selected randomly: %d",
1095		msb->free_block_count[zone], pos);
1096
1097	pba = find_next_zero_bit(msb->used_blocks_bitmap,
1098							msb->block_count, pba);
1099	for (i = 0; i < pos; ++i)
1100		pba = find_next_zero_bit(msb->used_blocks_bitmap,
1101						msb->block_count, pba + 1);
1102
1103	dbg_verbose("result of the free blocks scan: pba %d", pba);
1104
1105	if (pba == msb->block_count || (msb_get_zone_from_pba(pba)) != zone) {
1106		pr_err("BUG: can't get a free block");
1107		msb->read_only = true;
1108		return MS_BLOCK_INVALID;
1109	}
1110
1111	msb_mark_block_used(msb, pba);
1112	return pba;
1113}
1114
1115static int msb_update_block(struct msb_data *msb, u16 lba,
1116	struct scatterlist *sg, int offset)
1117{
1118	u16 pba, new_pba;
1119	int error, try;
1120
1121	pba = msb->lba_to_pba_table[lba];
1122	dbg_verbose("start of a block update at lba %d, pba %d", lba, pba);
1123
1124	if (pba != MS_BLOCK_INVALID) {
1125		dbg_verbose("setting the update flag on the block");
1126		msb_set_overwrite_flag(msb, pba, 0,
1127				0xFF & ~MEMSTICK_OVERWRITE_UDST);
1128	}
1129
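    	/* Copy-on-write update: write the new data to a fresh block in the
    	 * same zone, then erase the old block and remap the LBA */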
1130	for (try = 0; try < 3; try++) {
1131		new_pba = msb_get_free_block(msb,
1132			msb_get_zone_from_lba(lba));
1133
1134		if (new_pba == MS_BLOCK_INVALID) {
1135			error = -EIO;
1136			goto out;
1137		}
1138
1139		dbg_verbose("block update: writing updated block to the pba %d",
1140								new_pba);
1141		error = msb_write_block(msb, new_pba, lba, sg, offset);
1142		if (error == -EBADMSG) {
1143			msb_mark_bad(msb, new_pba);
1144			continue;
1145		}
1146
1147		if (error)
1148			goto out;
1149
1150		dbg_verbose("block update: erasing the old block");
1151		msb_erase_block(msb, pba);
1152		msb->lba_to_pba_table[lba] = new_pba;
1153		return 0;
1154	}
1155out:
1156	if (error) {
1157		pr_err("block update error after %d tries, switching to r/o mode", try);
1158		msb->read_only = true;
1159	}
1160	return error;
1161}
1162
1163/* Converts endianness in the boot block for easy use */
1164static void msb_fix_boot_page_endianness(struct ms_boot_page *p)
1165{
1166	p->header.block_id = be16_to_cpu(p->header.block_id);
1167	p->header.format_reserved = be16_to_cpu(p->header.format_reserved);
1168	p->entry.disabled_block.start_addr
1169		= be32_to_cpu(p->entry.disabled_block.start_addr);
1170	p->entry.disabled_block.data_size
1171		= be32_to_cpu(p->entry.disabled_block.data_size);
1172	p->entry.cis_idi.start_addr
1173		= be32_to_cpu(p->entry.cis_idi.start_addr);
1174	p->entry.cis_idi.data_size
1175		= be32_to_cpu(p->entry.cis_idi.data_size);
1176	p->attr.block_size = be16_to_cpu(p->attr.block_size);
1177	p->attr.number_of_blocks = be16_to_cpu(p->attr.number_of_blocks);
1178	p->attr.number_of_effective_blocks
1179		= be16_to_cpu(p->attr.number_of_effective_blocks);
1180	p->attr.page_size = be16_to_cpu(p->attr.page_size);
1181	p->attr.memory_manufacturer_code
1182		= be16_to_cpu(p->attr.memory_manufacturer_code);
1183	p->attr.memory_device_code = be16_to_cpu(p->attr.memory_device_code);
1184	p->attr.implemented_capacity
1185		= be16_to_cpu(p->attr.implemented_capacity);
1186	p->attr.controller_number = be16_to_cpu(p->attr.controller_number);
1187	p->attr.controller_function = be16_to_cpu(p->attr.controller_function);
1188}
1189
1190static int msb_read_boot_blocks(struct msb_data *msb)
1191{
1192	int pba = 0;
1193	struct scatterlist sg;
1194	struct ms_extra_data_register extra;
1195	struct ms_boot_page *page;
1196
1197	msb->boot_block_locations[0] = MS_BLOCK_INVALID;
1198	msb->boot_block_locations[1] = MS_BLOCK_INVALID;
1199	msb->boot_block_count = 0;
1200
1201	dbg_verbose("Start of a scan for the boot blocks");
1202
1203	if (!msb->boot_page) {
1204		page = kmalloc(sizeof(struct ms_boot_page)*2, GFP_KERNEL);
1205		if (!page)
1206			return -ENOMEM;
1207
1208		msb->boot_page = page;
1209	} else
1210		page = msb->boot_page;
1211
1212	msb->block_count = MS_BLOCK_MAX_BOOT_ADDR;
1213
1214	for (pba = 0; pba < MS_BLOCK_MAX_BOOT_ADDR; pba++) {
1215
1216		sg_init_one(&sg, page, sizeof(*page));
1217		if (msb_read_page(msb, pba, 0, &extra, &sg, 0)) {
1218			dbg("boot scan: can't read pba %d", pba);
1219			continue;
1220		}
1221
1222		if (extra.management_flag & MEMSTICK_MANAGEMENT_SYSFLG) {
1223			dbg("management flag doesn't indicate boot block %d",
1224									pba);
1225			continue;
1226		}
1227
1228		if (be16_to_cpu(page->header.block_id) != MS_BLOCK_BOOT_ID) {
1229			dbg("the pba at %d doesn't contain boot block ID", pba);
1230			continue;
1231		}
1232
1233		msb_fix_boot_page_endianness(page);
1234		msb->boot_block_locations[msb->boot_block_count] = pba;
1235
1236		page++;
1237		msb->boot_block_count++;
1238
1239		if (msb->boot_block_count == 2)
1240			break;
1241	}
1242
1243	if (!msb->boot_block_count) {
1244		pr_err("media doesn't contain master page, aborting");
1245		return -EIO;
1246	}
1247
1248	dbg_verbose("End of scan for boot blocks");
1249	return 0;
1250}
1251
1252static int msb_read_bad_block_table(struct msb_data *msb, int block_nr)
1253{
1254	struct ms_boot_page *boot_block;
1255	struct scatterlist sg;
1256	u16 *buffer = NULL;
1257	int offset = 0;
1258	int i, error = 0;
1259	int data_size, data_offset, page, page_offset, size_to_read;
1260	u16 pba;
1261
1262	BUG_ON(block_nr > 1);
1263	boot_block = &msb->boot_page[block_nr];
1264	pba = msb->boot_block_locations[block_nr];
1265
1266	if (msb->boot_block_locations[block_nr] == MS_BLOCK_INVALID)
1267		return -EINVAL;
1268
1269	data_size = boot_block->entry.disabled_block.data_size;
1270	data_offset = sizeof(struct ms_boot_page) +
1271			boot_block->entry.disabled_block.start_addr;
1272	if (!data_size)
1273		return 0;
1274
1275	page = data_offset / msb->page_size;
1276	page_offset = data_offset % msb->page_size;
1277	size_to_read =
1278		DIV_ROUND_UP(data_size + page_offset, msb->page_size) *
1279			msb->page_size;
1280
1281	dbg("reading bad block table of boot block at pba %d, offset %d len %d",
1282		pba, data_offset, data_size);
1283
1284	buffer = kzalloc(size_to_read, GFP_KERNEL);
1285	if (!buffer)
1286		return -ENOMEM;
1287
1288	/* Read the buffer */
1289	sg_init_one(&sg, buffer, size_to_read);
1290
1291	while (offset < size_to_read) {
1292		error = msb_read_page(msb, pba, page, NULL, &sg, offset);
1293		if (error)
1294			goto out;
1295
1296		page++;
1297		offset += msb->page_size;
1298
1299		if (page == msb->pages_in_block) {
1300			pr_err(
1301			"bad block table extends beyond the boot block");
1302			break;
1303		}
1304	}
1305
1306	/* Process the bad block table */
1307	for (i = page_offset; i < data_size / sizeof(u16); i++) {
1308
1309		u16 bad_block = be16_to_cpu(buffer[i]);
1310
1311		if (bad_block >= msb->block_count) {
1312			dbg("bad block table contains invalid block %d",
1313								bad_block);
1314			continue;
1315		}
1316
1317		if (test_bit(bad_block, msb->used_blocks_bitmap))  {
1318			dbg("duplicate bad block %d in the table",
1319				bad_block);
1320			continue;
1321		}
1322
1323		dbg("block %d is marked as factory bad", bad_block);
1324		msb_mark_block_used(msb, bad_block);
1325	}
1326out:
1327	kfree(buffer);
1328	return error;
1329}
1330
1331static int msb_ftl_initialize(struct msb_data *msb)
1332{
1333	int i;
1334
1335	if (msb->ftl_initialized)
1336		return 0;
1337
1338	msb->zone_count = msb->block_count / MS_BLOCKS_IN_ZONE;
1339	msb->logical_block_count = msb->zone_count * 496 - 2;
1340
1341	msb->used_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
1342	msb->erased_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
1343	msb->lba_to_pba_table =
1344		kmalloc(msb->logical_block_count * sizeof(u16), GFP_KERNEL);
1345
1346	if (!msb->used_blocks_bitmap || !msb->lba_to_pba_table ||
1347						!msb->erased_blocks_bitmap) {
1348		kfree(msb->used_blocks_bitmap);
1349		kfree(msb->lba_to_pba_table);
1350		kfree(msb->erased_blocks_bitmap);
1351		return -ENOMEM;
1352	}
1353
1354	for (i = 0; i < msb->zone_count; i++)
1355		msb->free_block_count[i] = MS_BLOCKS_IN_ZONE;
1356
1357	memset(msb->lba_to_pba_table, MS_BLOCK_INVALID,
1358			msb->logical_block_count * sizeof(u16));
1359
1360	dbg("initial FTL tables created. Zone count = %d, Logical block count = %d",
1361		msb->zone_count, msb->logical_block_count);
1362
1363	msb->ftl_initialized = true;
1364	return 0;
1365}
1366
1367static int msb_ftl_scan(struct msb_data *msb)
1368{
1369	u16 pba, lba, other_block;
1370	u8 overwrite_flag, management_flag, other_overwrite_flag;
1371	int error;
1372	struct ms_extra_data_register extra;
1373	u8 *overwrite_flags = kzalloc(msb->block_count, GFP_KERNEL);
1374
1375	if (!overwrite_flags)
1376		return -ENOMEM;
1377
1378	dbg("Start of media scanning");
1379	for (pba = 0; pba < msb->block_count; pba++) {
1380
1381		if (pba == msb->boot_block_locations[0] ||
1382			pba == msb->boot_block_locations[1]) {
1383			dbg_verbose("pba %05d -> [boot block]", pba);
1384			msb_mark_block_used(msb, pba);
1385			continue;
1386		}
1387
1388		if (test_bit(pba, msb->used_blocks_bitmap)) {
1389			dbg_verbose("pba %05d -> [factory bad]", pba);
1390			continue;
1391		}
1392
1393		memset(&extra, 0, sizeof(extra));
1394		error = msb_read_oob(msb, pba, 0, &extra);
1395
1396		/* can't trust the page if we can't read the oob */
1397		if (error == -EBADMSG) {
1398			pr_notice(
1399			"oob of pba %d damaged, will try to erase it", pba);
1400			msb_mark_block_used(msb, pba);
1401			msb_erase_block(msb, pba);
1402			continue;
1403		} else if (error) {
1404			pr_err("unknown error %d on read of oob of pba %d - aborting",
1405				error, pba);
1406
1407			kfree(overwrite_flags);
1408			return error;
1409		}
1410
1411		lba = be16_to_cpu(extra.logical_address);
1412		management_flag = extra.management_flag;
1413		overwrite_flag = extra.overwrite_flag;
1414		overwrite_flags[pba] = overwrite_flag;
1415
1416		/* Skip bad blocks */
1417		if (!(overwrite_flag & MEMSTICK_OVERWRITE_BKST)) {
1418			dbg("pba %05d -> [BAD]", pba);
1419			msb_mark_block_used(msb, pba);
1420			continue;
1421		}
1422
1423		/* Skip system/drm blocks */
1424		if ((management_flag & MEMSTICK_MANAGEMENT_FLAG_NORMAL) !=
1425			MEMSTICK_MANAGEMENT_FLAG_NORMAL) {
1426			dbg("pba %05d -> [reserved management flag %02x]",
1427							pba, management_flag);
1428			msb_mark_block_used(msb, pba);
1429			continue;
1430		}
1431
1432		/* Erase temporary tables */
1433		if (!(management_flag & MEMSTICK_MANAGEMENT_ATFLG)) {
1434			dbg("pba %05d -> [temp table] - will erase", pba);
1435
1436			msb_mark_block_used(msb, pba);
1437			msb_erase_block(msb, pba);
1438			continue;
1439		}
1440
1441		if (lba == MS_BLOCK_INVALID) {
1442			dbg_verbose("pba %05d -> [free]", pba);
1443			continue;
1444		}
1445
1446		msb_mark_block_used(msb, pba);
1447
1448		/* Block has an LBA not matching its zone */
1449		if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
1450			pr_notice("pba %05d -> [bad lba %05d] - will erase",
1451								pba, lba);
1452			msb_erase_block(msb, pba);
1453			continue;
1454		}
1455
1456		/* No collisions - great */
1457		if (msb->lba_to_pba_table[lba] == MS_BLOCK_INVALID) {
1458			dbg_verbose("pba %05d -> [lba %05d]", pba, lba);
1459			msb->lba_to_pba_table[lba] = pba;
1460			continue;
1461		}
1462
1463		other_block = msb->lba_to_pba_table[lba];
1464		other_overwrite_flag = overwrite_flags[other_block];
1465
1466		pr_notice("Collision between pba %d and pba %d",
1467			pba, other_block);
1468
1469		if (!(overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
1470			pr_notice("pba %d is marked as stable, use it", pba);
1471			msb_erase_block(msb, other_block);
1472			msb->lba_to_pba_table[lba] = pba;
1473			continue;
1474		}
1475
1476		if (!(other_overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
1477			pr_notice("pba %d is marked as stable, use it",
1478								other_block);
1479			msb_erase_block(msb, pba);
1480			continue;
1481		}
1482
1483		pr_notice("collision between blocks %d and %d, without stable flag set on both, erasing pba %d",
1484				pba, other_block, other_block);
1485
1486		msb_erase_block(msb, other_block);
1487		msb->lba_to_pba_table[lba] = pba;
1488	}
1489
1490	dbg("End of media scanning");
1491	kfree(overwrite_flags);
1492	return 0;
1493}
1494
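    /* Deferred cache flush: fires cache_flush_timeout ms after the first
     * page was cached; the actual flush runs from the IO work item */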
1495static void msb_cache_flush_timer(struct timer_list *t)
1496{
1497	struct msb_data *msb = from_timer(msb, t, cache_flush_timer);
1498	msb->need_flush_cache = true;
1499	queue_work(msb->io_queue, &msb->io_work);
1500}
1501
1502
1503static void msb_cache_discard(struct msb_data *msb)
1504{
1505	if (msb->cache_block_lba == MS_BLOCK_INVALID)
1506		return;
1507
1508	del_timer_sync(&msb->cache_flush_timer);
1509
1510	dbg_verbose("Discarding the write cache");
1511	msb->cache_block_lba = MS_BLOCK_INVALID;
1512	bitmap_zero(&msb->valid_cache_bitmap, msb->pages_in_block);
1513}
1514
1515static int msb_cache_init(struct msb_data *msb)
1516{
1517	timer_setup(&msb->cache_flush_timer, msb_cache_flush_timer, 0);
1518
1519	if (!msb->cache)
1520		msb->cache = kzalloc(msb->block_size, GFP_KERNEL);
1521	if (!msb->cache)
1522		return -ENOMEM;
1523
1524	msb_cache_discard(msb);
1525	return 0;
1526}
1527
1528static int msb_cache_flush(struct msb_data *msb)
1529{
1530	struct scatterlist sg;
1531	struct ms_extra_data_register extra;
1532	int page, offset, error;
1533	u16 pba, lba;
1534
1535	if (msb->read_only)
1536		return -EROFS;
1537
1538	if (msb->cache_block_lba == MS_BLOCK_INVALID)
1539		return 0;
1540
1541	lba = msb->cache_block_lba;
1542	pba = msb->lba_to_pba_table[lba];
1543
1544	dbg_verbose("Flushing the write cache of pba %d (LBA %d)",
1545						pba, msb->cache_block_lba);
1546
1547	sg_init_one(&sg, msb->cache , msb->block_size);
1548
1549	/* Read all missing pages in cache */
1550	for (page = 0; page < msb->pages_in_block; page++) {
1551
1552		if (test_bit(page, &msb->valid_cache_bitmap))
1553			continue;
1554
1555		offset = page * msb->page_size;
1556
1557		dbg_verbose("reading non-present sector %d of cache block %d",
1558			page, lba);
1559		error = msb_read_page(msb, pba, page, &extra, &sg, offset);
1560
1561		/* Bad pages are copied with 00 page status */
1562		if (error == -EBADMSG) {
1563			pr_err("read error on sector %d, contents probably damaged", page);
1564			continue;
1565		}
1566
1567		if (error)
1568			return error;
1569
1570		if ((extra.overwrite_flag & MEMSTICK_OV_PG_NORMAL) !=
1571							MEMSTICK_OV_PG_NORMAL) {
1572			dbg("page %d is marked as bad", page);
1573			continue;
1574		}
1575
1576		set_bit(page, &msb->valid_cache_bitmap);
1577	}
1578
1579	/* Write the cache now */
1580	error = msb_update_block(msb, msb->cache_block_lba, &sg, 0);
1581	pba = msb->lba_to_pba_table[msb->cache_block_lba];
1582
1583	/* Mark invalid pages */
1584	if (!error) {
1585		for (page = 0; page < msb->pages_in_block; page++) {
1586
1587			if (test_bit(page, &msb->valid_cache_bitmap))
1588				continue;
1589
1590			dbg("marking page %d as containing damaged data",
1591				page);
1592			msb_set_overwrite_flag(msb,
1593				pba, page, 0xFF & ~MEMSTICK_OV_PG_NORMAL);
1594		}
1595	}
1596
1597	msb_cache_discard(msb);
1598	return error;
1599}
1600
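/*
 * Write one page into the cache.  If the cache currently holds a
 * different logical block, it is flushed first and then re-bound to
 * 'lba'.  With add_to_cache_only set, the page is only stored when
 * 'lba' is already cached - used to opportunistically populate the
 * cache on reads without forcing a flush.
 */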
1601static int msb_cache_write(struct msb_data *msb, int lba,
1602	int page, bool add_to_cache_only, struct scatterlist *sg, int offset)
1603{
1604	int error;
1605	struct scatterlist sg_tmp[10];
1606
1607	if (msb->read_only)
1608		return -EROFS;
1609
1610	if (msb->cache_block_lba == MS_BLOCK_INVALID ||
1611						lba != msb->cache_block_lba)
1612		if (add_to_cache_only)
1613			return 0;
1614
1615	/* If we need to write a different block */
1616	if (msb->cache_block_lba != MS_BLOCK_INVALID &&
1617						lba != msb->cache_block_lba) {
1618		dbg_verbose("first flush the cache");
1619		error = msb_cache_flush(msb);
1620		if (error)
1621			return error;
1622	}
1623
1624	if (msb->cache_block_lba == MS_BLOCK_INVALID) {
1625		msb->cache_block_lba = lba;
1626		mod_timer(&msb->cache_flush_timer,
1627			jiffies + msecs_to_jiffies(cache_flush_timeout));
1628	}
1629
1630	dbg_verbose("Write of LBA %d page %d to cache", lba, page);
1631
1632	sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
1633	msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp), offset, msb->page_size);
1634
1635	sg_copy_to_buffer(sg_tmp, sg_nents(sg_tmp),
1636		msb->cache + page * msb->page_size, msb->page_size);
1637
1638	set_bit(page, &msb->valid_cache_bitmap);
1639	return 0;
1640}
1641
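/*
 * Read one page, serving it from the cache when it holds a valid copy,
 * otherwise from the device - in which case the freshly read page is
 * also offered to the cache (add_to_cache_only) so repeated reads of a
 * cached block stay cheap.
 */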
1642static int msb_cache_read(struct msb_data *msb, int lba,
1643				int page, struct scatterlist *sg, int offset)
1644{
1645	int pba = msb->lba_to_pba_table[lba];
1646	struct scatterlist sg_tmp[10];
1647	int error = 0;
1648
1649	if (lba == msb->cache_block_lba &&
1650			test_bit(page, &msb->valid_cache_bitmap)) {
1651
1652		dbg_verbose("Read of LBA %d (pba %d) sector %d from cache",
1653							lba, pba, page);
1654
1655		sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
1656		msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp),
1657			offset, msb->page_size);
1658		sg_copy_from_buffer(sg_tmp, sg_nents(sg_tmp),
1659			msb->cache + msb->page_size * page,
1660							msb->page_size);
1661	} else {
1662		dbg_verbose("Read of LBA %d (pba %d) sector %d from device",
1663							lba, pba, page);
1664
1665		error = msb_read_page(msb, pba, page, NULL, sg, offset);
1666		if (error)
1667			return error;
1668
1669		msb_cache_write(msb, lba, page, true, sg, offset);
1670	}
1671	return error;
1672}
1673
1674/* Emulated geometry table.
1675 * The exact contents aren't that important; one could put
1676 * different values here, provided they still cover the
1677 * whole disk.
1678 * The 64 MB entry is what Windows reports for my 64M memstick */
1679
1680static const struct chs_entry chs_table[] = {
1681/*        size sectors cylinders  heads */
1682	{ 4,    16,    247,       2  },
1683	{ 8,    16,    495,       2  },
1684	{ 16,   16,    495,       4  },
1685	{ 32,   16,    991,       4  },
1686	{ 64,   16,    991,       8  },
1687	{128,   16,    991,       16 },
1688	{ 0 }
1689};
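/*
 * Sanity check of the 64 MB entry (illustrative arithmetic, not from
 * any datasheet): 991 cylinders * 8 heads * 16 sectors * 512 bytes
 * = 64946176 bytes, i.e. just under the 64 MB of raw media, as CHS
 * geometries conventionally round down.
 */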
1690
1691/* Load information about the card */
1692static int msb_init_card(struct memstick_dev *card)
1693{
1694	struct msb_data *msb = memstick_get_drvdata(card);
1695	struct memstick_host *host = card->host;
1696	struct ms_boot_page *boot_block;
1697	int error = 0, i, raw_size_in_megs;
1698
1699	msb->caps = 0;
1700
1701	if (card->id.class == MEMSTICK_CLASS_ROM)
1703		msb->read_only = true;
1704
1705	msb->state = -1;
1706	error = msb_reset(msb, false);
1707	if (error)
1708		return error;
1709
1710	/* Due to a bug in the JMicron driver written by Alex Dubov,
1711	 * its serial mode barely works, so we switch to parallel
1712	 * mode right away */
1713	if (host->caps & MEMSTICK_CAP_PAR4)
1714		msb_switch_to_parallel(msb);
1715
1716	msb->page_size = sizeof(struct ms_boot_page);
1717
1718	/* Read the boot page */
1719	error = msb_read_boot_blocks(msb);
1720	if (error)
1721		return -EIO;
1722
1723	boot_block = &msb->boot_page[0];
1724
1725	/* Save interesting attributes from the boot page */
1726	msb->block_count = boot_block->attr.number_of_blocks;
1727	msb->page_size = boot_block->attr.page_size;
1728
1729	msb->pages_in_block = boot_block->attr.block_size * 2;
1730	msb->block_size = msb->page_size * msb->pages_in_block;
1731
1732	if (msb->page_size > PAGE_SIZE) {
1733		/* this isn't supported by Linux at all, anyway */
1734		dbg("device page %d size isn't supported", msb->page_size);
1735		return -EINVAL;
1736	}
1737
1738	msb->block_buffer = kzalloc(msb->block_size, GFP_KERNEL);
1739	if (!msb->block_buffer)
1740		return -ENOMEM;
1741
1742	raw_size_in_megs = (msb->block_size * msb->block_count) >> 20;
1743
1744	for (i = 0; chs_table[i].size; i++) {
1745
1746		if (chs_table[i].size != raw_size_in_megs)
1747			continue;
1748
1749		msb->geometry.cylinders = chs_table[i].cyl;
1750		msb->geometry.heads = chs_table[i].head;
1751		msb->geometry.sectors = chs_table[i].sec;
1752		break;
1753	}
1754
1755	if (boot_block->attr.transfer_supporting == 1)
1756		msb->caps |= MEMSTICK_CAP_PAR4;
1757
1758	if (boot_block->attr.device_type & 0x03)
1759		msb->read_only = true;
1760
1761	dbg("Total block count = %d", msb->block_count);
1762	dbg("Each block consists of %d pages", msb->pages_in_block);
1763	dbg("Page size = %d bytes", msb->page_size);
1764	dbg("Parallel mode supported: %d", !!(msb->caps & MEMSTICK_CAP_PAR4));
1765	dbg("Read only: %d", msb->read_only);
1766
1767#if 0
1768	/* Now we can switch the interface */
1769	if (host->caps & msb->caps & MEMSTICK_CAP_PAR4)
1770		msb_switch_to_parallel(msb);
1771#endif
1772
1773	error = msb_cache_init(msb);
1774	if (error)
1775		return error;
1776
1777	error = msb_ftl_initialize(msb);
1778	if (error)
1779		return error;
1780
1781
1782	/* Read the bad block table */
1783	error = msb_read_bad_block_table(msb, 0);
1784
1785	if (error && error != -ENOMEM) {
1786		dbg("failed to read bad block table from primary boot block, trying from backup");
1787		error = msb_read_bad_block_table(msb, 1);
1788	}
1789
1790	if (error)
1791		return error;
1792
1793	/* *drum roll* Scan the media */
1794	error = msb_ftl_scan(msb);
1795	if (error) {
1796		pr_err("Scan of media failed");
1797		return error;
1798	}
1799
1800	return 0;
1801
1802}
1803
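/*
 * Split a write request into block-sized and page-sized pieces: a write
 * that covers a whole logical block starting at page 0 bypasses the
 * cache and goes straight to msb_update_block(); everything else is
 * staged page by page through the write cache.
 */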
1804static int msb_do_write_request(struct msb_data *msb, int lba,
1805	int page, struct scatterlist *sg, size_t len, int *successfully_written)
1806{
1807	int error = 0;
1808	off_t offset = 0;
1809	*successfully_written = 0;
1810
1811	while (offset < len) {
1812		if (page == 0 && len - offset >= msb->block_size) {
1813
1814			if (msb->cache_block_lba == lba)
1815				msb_cache_discard(msb);
1816
1817			dbg_verbose("Writing whole lba %d", lba);
1818			error = msb_update_block(msb, lba, sg, offset);
1819			if (error)
1820				return error;
1821
1822			offset += msb->block_size;
1823			*successfully_written += msb->block_size;
1824			lba++;
1825			continue;
1826		}
1827
1828		error = msb_cache_write(msb, lba, page, false, sg, offset);
1829		if (error)
1830			return error;
1831
1832		offset += msb->page_size;
1833		*successfully_written += msb->page_size;
1834
1835		page++;
1836		if (page == msb->pages_in_block) {
1837			page = 0;
1838			lba++;
1839		}
1840	}
1841	return 0;
1842}
1843
1844static int msb_do_read_request(struct msb_data *msb, int lba,
1845		int page, struct scatterlist *sg, int len, int *successfully_read)
1846{
1847	int error = 0;
1848	int offset = 0;
1849	*successfully_read = 0;
1850
1851	while (offset < len) {
1852
1853		error = msb_cache_read(msb, lba, page, sg, offset);
1854		if (error)
1855			return error;
1856
1857		offset += msb->page_size;
1858		*successfully_read += msb->page_size;
1859
1860		page++;
1861		if (page == msb->pages_in_block) {
1862			page = 0;
1863			lba++;
1864		}
1865	}
1866	return 0;
1867}
1868
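/*
 * The single work item behind the io workqueue: it first services any
 * pending cache flush, then fetches block requests one at a time, maps
 * each into the preallocated scatterlist and converts its sector
 * address into a (logical block, page) pair before dispatching to the
 * read/write helpers above.
 */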
1869static void msb_io_work(struct work_struct *work)
1870{
1871	struct msb_data *msb = container_of(work, struct msb_data, io_work);
1872	int page, error, len;
1873	sector_t lba;
1874	unsigned long flags;
1875	struct scatterlist *sg = msb->prealloc_sg;
1876
1877	dbg_verbose("IO: work started");
1878
1879	while (1) {
1880		spin_lock_irqsave(&msb->q_lock, flags);
1881
1882		if (msb->need_flush_cache) {
1883			msb->need_flush_cache = false;
1884			spin_unlock_irqrestore(&msb->q_lock, flags);
1885			msb_cache_flush(msb);
1886			continue;
1887		}
1888
1889		if (!msb->req) {
1890			msb->req = blk_fetch_request(msb->queue);
1891			if (!msb->req) {
1892				dbg_verbose("IO: no more requests exiting");
1893				spin_unlock_irqrestore(&msb->q_lock, flags);
1894				return;
1895			}
1896		}
1897
1898		spin_unlock_irqrestore(&msb->q_lock, flags);
1899
1900		/* If card was removed meanwhile */
1901		if (!msb->req)
1902			return;
1903
1904		/* process the request */
1905		dbg_verbose("IO: processing new request");
1906		blk_rq_map_sg(msb->queue, msb->req, sg);
1907
1908		lba = blk_rq_pos(msb->req);
1909
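		/*
		 * Convert the 512-byte sector address into (lba, page):
		 * divide by sectors-per-page, then split off the page
		 * index within the block; sector_div() divides in place
		 * and returns the remainder.  For example (illustrative
		 * numbers): with 512-byte pages and 32 pages per block,
		 * sector 100 maps to lba 3, page 4.
		 */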
1910		sector_div(lba, msb->page_size / 512);
1911		page = sector_div(lba, msb->pages_in_block);
1912
1913		if (rq_data_dir(msb->req) == READ)
1914			error = msb_do_read_request(msb, lba, page, sg,
1915				blk_rq_bytes(msb->req), &len);
1916		else
1917			error = msb_do_write_request(msb, lba, page, sg,
1918				blk_rq_bytes(msb->req), &len);
1919
1920		spin_lock_irqsave(&msb->q_lock, flags);
1921
1922		if (len)
1923			if (!__blk_end_request(msb->req, BLK_STS_OK, len))
1924				msb->req = NULL;
1925
1926		if (error && msb->req) {
1927			blk_status_t ret = errno_to_blk_status(error);
1928			dbg_verbose("IO: ending one sector of the request with error");
1929			if (!__blk_end_request(msb->req, ret, msb->page_size))
1930				msb->req = NULL;
1931		}
1932
1933		if (msb->req)
1934			dbg_verbose("IO: request still pending");
1935
1936		spin_unlock_irqrestore(&msb->q_lock, flags);
1937	}
1938}
1939
1940static DEFINE_IDR(msb_disk_idr); /* set of used disk numbers */
1941static DEFINE_MUTEX(msb_disk_lock); /* protects against races in open/release */
1942
1943static int msb_bd_open(struct block_device *bdev, fmode_t mode)
1944{
1945	struct gendisk *disk = bdev->bd_disk;
1946	struct msb_data *msb = disk->private_data;
1947
1948	dbg_verbose("block device open");
1949
1950	mutex_lock(&msb_disk_lock);
1951
1952	if (msb && msb->card)
1953		msb->usage_count++;
1954
1955	mutex_unlock(&msb_disk_lock);
1956	return 0;
1957}
1958
1959static void msb_data_clear(struct msb_data *msb)
1960{
1961	kfree(msb->boot_page);
1962	kfree(msb->used_blocks_bitmap);
1963	kfree(msb->lba_to_pba_table);
1964	kfree(msb->cache);
1965	msb->card = NULL;
1966}
1967
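/*
 * Drop one reference to the disk.  usage_count is taken on open and at
 * disk creation; when it reaches zero the disk number is returned to
 * the IDR and the msb_data is freed.  msb_disk_lock serializes this
 * with open and with card removal.
 */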
1968static int msb_disk_release(struct gendisk *disk)
1969{
1970	struct msb_data *msb = disk->private_data;
1971
1972	dbg_verbose("block device release");
1973	mutex_lock(&msb_disk_lock);
1974
1975	if (msb) {
1976		if (msb->usage_count)
1977			msb->usage_count--;
1978
1979		if (!msb->usage_count) {
1980			disk->private_data = NULL;
1981			idr_remove(&msb_disk_idr, msb->disk_id);
1982			put_disk(disk);
1983			kfree(msb);
1984		}
1985	}
1986	mutex_unlock(&msb_disk_lock);
1987	return 0;
1988}
1989
1990static void msb_bd_release(struct gendisk *disk, fmode_t mode)
1991{
1992	msb_disk_release(disk);
1993}
1994
1995static int msb_bd_getgeo(struct block_device *bdev,
1996				 struct hd_geometry *geo)
1997{
1998	struct msb_data *msb = bdev->bd_disk->private_data;
1999	*geo = msb->geometry;
2000	return 0;
2001}
2002
2003static void msb_submit_req(struct request_queue *q)
2004{
2005	struct memstick_dev *card = q->queuedata;
2006	struct msb_data *msb = memstick_get_drvdata(card);
2007	struct request *req = NULL;
2008
2009	dbg_verbose("Submit request");
2010
2011	if (msb->card_dead) {
2012		dbg("Refusing requests on removed card");
2013
2014		WARN_ON(!msb->io_queue_stopped);
2015
2016		while ((req = blk_fetch_request(q)) != NULL)
2017			__blk_end_request_all(req, BLK_STS_IOERR);
2018		return;
2019	}
2020
2021	if (msb->req)
2022		return;
2023
2024	if (!msb->io_queue_stopped)
2025		queue_work(msb->io_queue, &msb->io_work);
2026}
2027
2028static int msb_check_card(struct memstick_dev *card)
2029{
2030	struct msb_data *msb = memstick_get_drvdata(card);
2031	return (msb->card_dead == 0);
2032}
2033
2034static void msb_stop(struct memstick_dev *card)
2035{
2036	struct msb_data *msb = memstick_get_drvdata(card);
2037	unsigned long flags;
2038
2039	dbg("Stopping all msblock IO");
2040
2041	spin_lock_irqsave(&msb->q_lock, flags);
2042	blk_stop_queue(msb->queue);
2043	msb->io_queue_stopped = true;
2044	spin_unlock_irqrestore(&msb->q_lock, flags);
2045
2046	del_timer_sync(&msb->cache_flush_timer);
2047	flush_workqueue(msb->io_queue);
2048
2049	if (msb->req) {
2050		spin_lock_irqsave(&msb->q_lock, flags);
2051		blk_requeue_request(msb->queue, msb->req);
2052		msb->req = NULL;
2053		spin_unlock_irqrestore(&msb->q_lock, flags);
2054	}
2055
2056}
2057
2058static void msb_start(struct memstick_dev *card)
2059{
2060	struct msb_data *msb = memstick_get_drvdata(card);
2061	unsigned long flags;
2062
2063	dbg("Resuming IO from msblock");
2064
2065	msb_invalidate_reg_window(msb);
2066
2067	spin_lock_irqsave(&msb->q_lock, flags);
2068	if (!msb->io_queue_stopped || msb->card_dead) {
2069		spin_unlock_irqrestore(&msb->q_lock, flags);
2070		return;
2071	}
2072	spin_unlock_irqrestore(&msb->q_lock, flags);
2073
2074	/* Kick a cache flush anyway, it's harmless */
2075	msb->need_flush_cache = true;
2076	msb->io_queue_stopped = false;
2077
2078	spin_lock_irqsave(&msb->q_lock, flags);
2079	blk_start_queue(msb->queue);
2080	spin_unlock_irqrestore(&msb->q_lock, flags);
2081
2082	queue_work(msb->io_queue, &msb->io_work);
2083
2084}
2085
2086static const struct block_device_operations msb_bdops = {
2087	.open    = msb_bd_open,
2088	.release = msb_bd_release,
2089	.getgeo  = msb_bd_getgeo,
2090	.owner   = THIS_MODULE
2091};
2092
2093/* Registers the block device */
2094static int msb_init_disk(struct memstick_dev *card)
2095{
2096	struct msb_data *msb = memstick_get_drvdata(card);
2097	struct memstick_host *host = card->host;
2098	int rc;
2099	u64 limit = BLK_BOUNCE_HIGH;
2100	unsigned long capacity;
2101
2102	if (host->dev.dma_mask && *(host->dev.dma_mask))
2103		limit = *(host->dev.dma_mask);
2104
2105	mutex_lock(&msb_disk_lock);
2106	msb->disk_id = idr_alloc(&msb_disk_idr, card, 0, 256, GFP_KERNEL);
2107	mutex_unlock(&msb_disk_lock);
2108
2109	if (msb->disk_id < 0)
2110		return msb->disk_id;
2111
2112	msb->disk = alloc_disk(0);
2113	if (!msb->disk) {
2114		rc = -ENOMEM;
2115		goto out_release_id;
2116	}
2117
2118	msb->queue = blk_init_queue(msb_submit_req, &msb->q_lock);
2119	if (!msb->queue) {
2120		rc = -ENOMEM;
2121		goto out_put_disk;
2122	}
2123
2124	msb->queue->queuedata = card;
2125
2126	blk_queue_bounce_limit(msb->queue, limit);
2127	blk_queue_max_hw_sectors(msb->queue, MS_BLOCK_MAX_PAGES);
2128	blk_queue_max_segments(msb->queue, MS_BLOCK_MAX_SEGS);
2129	blk_queue_max_segment_size(msb->queue,
2130				   MS_BLOCK_MAX_PAGES * msb->page_size);
2131	blk_queue_logical_block_size(msb->queue, msb->page_size);
2132
2133	sprintf(msb->disk->disk_name, "msblk%d", msb->disk_id);
2134	msb->disk->fops = &msb_bdops;
2135	msb->disk->private_data = msb;
2136	msb->disk->queue = msb->queue;
2137	msb->disk->flags |= GENHD_FL_EXT_DEVT;
2138
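	/*
	 * Capacity in 512-byte sectors: only logical blocks count (the
	 * rest are spare/bad), each holding pages_in_block pages of
	 * page_size bytes.  E.g. (illustrative numbers) 16 pages of
	 * 512 bytes over 4096 logical blocks gives 65536 sectors.
	 */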
2139	capacity = msb->pages_in_block * msb->logical_block_count;
2140	capacity *= (msb->page_size / 512);
2141	set_capacity(msb->disk, capacity);
2142	dbg("Set total disk size to %lu sectors", capacity);
2143
2144	msb->usage_count = 1;
2145	msb->io_queue = alloc_ordered_workqueue("ms_block", WQ_MEM_RECLAIM);
2146	INIT_WORK(&msb->io_work, msb_io_work);
2147	sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);
2148
2149	if (msb->read_only)
2150		set_disk_ro(msb->disk, 1);
2151
2152	msb_start(card);
2153	device_add_disk(&card->dev, msb->disk);
2154	dbg("Disk added");
2155	return 0;
2156
2157out_put_disk:
2158	put_disk(msb->disk);
2159out_release_id:
2160	mutex_lock(&msb_disk_lock);
2161	idr_remove(&msb_disk_idr, msb->disk_id);
2162	mutex_unlock(&msb_disk_lock);
2163	return rc;
2164}
2165
2166static int msb_probe(struct memstick_dev *card)
2167{
2168	struct msb_data *msb;
2169	int rc = 0;
2170
2171	msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
2172	if (!msb)
2173		return -ENOMEM;
2174	memstick_set_drvdata(card, msb);
2175	msb->card = card;
2176	spin_lock_init(&msb->q_lock);
2177
2178	rc = msb_init_card(card);
2179	if (rc)
2180		goto out_free;
2181
2182	rc = msb_init_disk(card);
2183	if (!rc) {
2184		card->check = msb_check_card;
2185		card->stop = msb_stop;
2186		card->start = msb_start;
2187		return 0;
2188	}
2189out_free:
2190	memstick_set_drvdata(card, NULL);
2191	msb_data_clear(msb);
2192	kfree(msb);
2193	return rc;
2194}
2195
2196static void msb_remove(struct memstick_dev *card)
2197{
2198	struct msb_data *msb = memstick_get_drvdata(card);
2199	unsigned long flags;
2200
2201	if (!msb->io_queue_stopped)
2202		msb_stop(card);
2203
2204	dbg("Removing the disk device");
2205
2206	/* Take care of unhandled + new requests from now on */
2207	spin_lock_irqsave(&msb->q_lock, flags);
2208	msb->card_dead = true;
2209	blk_start_queue(msb->queue);
2210	spin_unlock_irqrestore(&msb->q_lock, flags);
2211
2212	/* Remove the disk */
2213	del_gendisk(msb->disk);
2214	blk_cleanup_queue(msb->queue);
2215	msb->queue = NULL;
2216
2217	mutex_lock(&msb_disk_lock);
2218	msb_data_clear(msb);
2219	mutex_unlock(&msb_disk_lock);
2220
2221	msb_disk_release(msb->disk);
2222	memstick_set_drvdata(card, NULL);
2223}
2224
2225#ifdef CONFIG_PM
2226
2227static int msb_suspend(struct memstick_dev *card, pm_message_t state)
2228{
2229	msb_stop(card);
2230	return 0;
2231}
2232
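/*
 * On resume the card may have been swapped while we slept.  Re-read it
 * into a scratch msb_data and compare the boot page, the FTL tables and
 * the used-block bitmap against the pre-suspend state; any mismatch
 * marks the card dead.  Without CONFIG_MEMSTICK_UNSAFE_RESUME the card
 * is unconditionally treated as removed.
 */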
2233static int msb_resume(struct memstick_dev *card)
2234{
2235	struct msb_data *msb = memstick_get_drvdata(card);
2236	struct msb_data *new_msb = NULL;
2237	bool card_dead = true;
2238
2239#ifndef CONFIG_MEMSTICK_UNSAFE_RESUME
2240	msb->card_dead = true;
2241	return 0;
2242#endif
2243	mutex_lock(&card->host->lock);
2244
2245	new_msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
2246	if (!new_msb)
2247		goto out;
2248
2249	new_msb->card = card;
2250	memstick_set_drvdata(card, new_msb);
2251	spin_lock_init(&new_msb->q_lock);
2252	sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);
2253
2254	if (msb_init_card(card))
2255		goto out;
2256
2257	if (msb->block_size != new_msb->block_size)
2258		goto out;
2259
2260	if (memcmp(msb->boot_page, new_msb->boot_page,
2261					sizeof(struct ms_boot_page)))
2262		goto out;
2263
2264	if (msb->logical_block_count != new_msb->logical_block_count ||
2265		memcmp(msb->lba_to_pba_table, new_msb->lba_to_pba_table,
2266						msb->logical_block_count))
2267		goto out;
2268
2269	if (msb->block_count != new_msb->block_count ||
2270		memcmp(msb->used_blocks_bitmap, new_msb->used_blocks_bitmap,
2271							msb->block_count / 8))
2272		goto out;
2273
2274	card_dead = false;
2275out:
2276	if (card_dead)
2277		dbg("Card was removed/replaced during suspend");
2278
2279	msb->card_dead = card_dead;
2280	memstick_set_drvdata(card, msb);
2281
2282	if (new_msb) {
2283		msb_data_clear(new_msb);
2284		kfree(new_msb);
2285	}
2286
2287	msb_start(card);
2288	mutex_unlock(&card->host->lock);
2289	return 0;
2290}
2291#else
2292
2293#define msb_suspend NULL
2294#define msb_resume NULL
2295
2296#endif /* CONFIG_PM */
2297
2298static struct memstick_device_id msb_id_tbl[] = {
2299	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
2300	 MEMSTICK_CLASS_FLASH},
2301
2302	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
2303	 MEMSTICK_CLASS_ROM},
2304
2305	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
2306	 MEMSTICK_CLASS_RO},
2307
2308	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
2309	 MEMSTICK_CLASS_WP},
2310
2311	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_DUO, MEMSTICK_CATEGORY_STORAGE_DUO,
2312	 MEMSTICK_CLASS_DUO},
2313	{}
2314};
2315MODULE_DEVICE_TABLE(memstick, msb_id_tbl);
2316
2317
2318static struct memstick_driver msb_driver = {
2319	.driver = {
2320		.name  = DRIVER_NAME,
2321		.owner = THIS_MODULE
2322	},
2323	.id_table = msb_id_tbl,
2324	.probe    = msb_probe,
2325	.remove   = msb_remove,
2326	.suspend  = msb_suspend,
2327	.resume   = msb_resume
2328};
2329
2330static int __init msb_init(void)
2331{
2332	int rc = memstick_register_driver(&msb_driver);
2333	if (rc)
2334		pr_err("failed to register memstick driver (error %d)\n", rc);
2335
2336	return rc;
2337}
2338
2339static void __exit msb_exit(void)
2340{
2341	memstick_unregister_driver(&msb_driver);
2342	idr_destroy(&msb_disk_idr);
2343}
2344
2345module_init(msb_init);
2346module_exit(msb_exit);
2347
2348module_param(cache_flush_timeout, int, S_IRUGO);
2349MODULE_PARM_DESC(cache_flush_timeout,
2350				"Cache flush timeout in msec (1000 default)");
2351module_param(debug, int, S_IRUGO | S_IWUSR);
2352MODULE_PARM_DESC(debug, "Debug level (0-2)");
2353
2354module_param(verify_writes, bool, S_IRUGO);
2355MODULE_PARM_DESC(verify_writes, "Read back and check all data that is written");
2356
2357MODULE_LICENSE("GPL");
2358MODULE_AUTHOR("Maxim Levitsky");
2359MODULE_DESCRIPTION("Sony MemoryStick block device driver");