   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  ms_block.c - Sony MemoryStick (legacy) storage support
   4 *
   5 *  Copyright (C) 2013 Maxim Levitsky <maximlevitsky@gmail.com>
   6 *
   7 * Minor portions of the driver were copied from mspro_block.c which is
   8 * Copyright (C) 2007 Alex Dubov <oakad@yahoo.com>
   9 */
  10#define DRIVER_NAME "ms_block"
  11#define pr_fmt(fmt) DRIVER_NAME ": " fmt
  12
  13#include <linux/module.h>
  14#include <linux/blk-mq.h>
  15#include <linux/memstick.h>
  16#include <linux/idr.h>
  17#include <linux/hdreg.h>
  18#include <linux/delay.h>
  19#include <linux/slab.h>
  20#include <linux/random.h>
  21#include <linux/bitmap.h>
  22#include <linux/scatterlist.h>
  23#include <linux/jiffies.h>
  24#include <linux/workqueue.h>
  25#include <linux/mutex.h>
  26#include "ms_block.h"
  27
  28static int debug;
  29static int cache_flush_timeout = 1000;
  30static bool verify_writes;
  31
  32/*
  33 * Copies a section of 'sg_from', starting at offset 'offset' and of length
  34 * 'len', to another scatterlist of 'to_nents' entries
  35 */
  36static size_t msb_sg_copy(struct scatterlist *sg_from,
  37	struct scatterlist *sg_to, int to_nents, size_t offset, size_t len)
  38{
  39	size_t copied = 0;
  40
  41	while (offset > 0) {
  42		if (offset >= sg_from->length) {
  43			if (sg_is_last(sg_from))
  44				return 0;
  45
  46			offset -= sg_from->length;
  47			sg_from = sg_next(sg_from);
  48			continue;
  49		}
  50
  51		copied = min(len, sg_from->length - offset);
  52		sg_set_page(sg_to, sg_page(sg_from),
  53			copied, sg_from->offset + offset);
  54
  55		len -= copied;
  56		offset = 0;
  57
  58		if (sg_is_last(sg_from) || !len)
  59			goto out;
  60
  61		sg_to = sg_next(sg_to);
  62		to_nents--;
  63		sg_from = sg_next(sg_from);
  64	}
  65
  66	while (len > sg_from->length && to_nents--) {
  67		len -= sg_from->length;
  68		copied += sg_from->length;
  69
  70		sg_set_page(sg_to, sg_page(sg_from),
  71				sg_from->length, sg_from->offset);
  72
  73		if (sg_is_last(sg_from) || !len)
  74			goto out;
  75
  76		sg_from = sg_next(sg_from);
  77		sg_to = sg_next(sg_to);
  78	}
  79
  80	if (len && to_nents) {
  81		sg_set_page(sg_to, sg_page(sg_from), len, sg_from->offset);
  82		copied += len;
  83	}
  84out:
  85	sg_mark_end(sg_to);
  86	return copied;
  87}
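
/*
 * Illustrative sketch (not part of the driver): the page handlers below
 * use msb_sg_copy() to carve one page-sized window out of the request
 * scatterlist before handing it to the host. The wrapper function here is
 * hypothetical; the calls inside mirror what h_msb_read_page() does.
 */
#if 0
static void example_carve_one_page(struct msb_data *msb,
				   struct memstick_request *mrq)
{
	struct scatterlist sg[2];

	sg_init_table(sg, ARRAY_SIZE(sg));
	/* copy a window of msb->page_size bytes at the current offset */
	msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
		    msb->current_sg_offset, msb->page_size);
	memstick_init_req_sg(mrq, MS_TPC_READ_LONG_DATA, sg);
}
#endif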
  88
  89/*
  90 * Compares a section of 'sg', starting at offset 'offset' and of length 'len',
  91 * to a linear buffer of length 'len' at address 'buffer'.
  92 * Returns 0 if equal and -1 otherwise
  93 */
  94static int msb_sg_compare_to_buffer(struct scatterlist *sg,
  95					size_t offset, u8 *buffer, size_t len)
  96{
  97	int retval = 0, cmplen;
  98	struct sg_mapping_iter miter;
  99
 100	sg_miter_start(&miter, sg, sg_nents(sg),
 101					SG_MITER_ATOMIC | SG_MITER_FROM_SG);
 102
 103	while (sg_miter_next(&miter) && len > 0) {
 104		if (offset >= miter.length) {
 105			offset -= miter.length;
 106			continue;
 107		}
 108
 109		cmplen = min(miter.length - offset, len);
 110		retval = memcmp(miter.addr + offset, buffer, cmplen) ? -1 : 0;
 111		if (retval)
 112			break;
 113
 114		buffer += cmplen;
 115		len -= cmplen;
 116		offset = 0;
 117	}
 118
 119	if (!retval && len)
 120		retval = -1;
 121
 122	sg_miter_stop(&miter);
 123	return retval;
 124}
 125
 126
 127/* Get the zone in which the block with logical address 'lba' lives.
 128 * Flash is broken into zones.
 129 * Each zone consists of 512 eraseblocks; 494 of them are usable in the
 130 * first zone and 496 in every following zone.
 131 * Therefore zone #0 hosts lba 0-493, zone #1 hosts lba 494-989, etc...
 132 */
 133static int msb_get_zone_from_lba(int lba)
 134{
 135	if (lba < 494)
 136		return 0;
 137	return ((lba - 494) / 496) + 1;
 138}
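
/*
 * Worked example of the formula above: lba 0..493 map to zone 0;
 * lba 494 -> (494 - 494) / 496 + 1 = 1; lba 989 -> 495 / 496 + 1 = 1;
 * lba 990 -> 496 / 496 + 1 = 2.
 */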
 139
 140/* Get zone of physical block. Trivial */
 141static int msb_get_zone_from_pba(int pba)
 142{
 143	return pba / MS_BLOCKS_IN_ZONE;
 144}
 145
 146/* Debug test to validate free block counts */
 147static int msb_validate_used_block_bitmap(struct msb_data *msb)
 148{
 149	int total_free_blocks = 0;
 150	int i;
 151
 152	if (!debug)
 153		return 0;
 154
 155	for (i = 0; i < msb->zone_count; i++)
 156		total_free_blocks += msb->free_block_count[i];
 157
 158	if (msb->block_count - bitmap_weight(msb->used_blocks_bitmap,
 159					msb->block_count) == total_free_blocks)
 160		return 0;
 161
 162	pr_err("BUG: free block counts don't match the bitmap");
 163	msb->read_only = true;
 164	return -EINVAL;
 165}
 166
 167/* Mark physical block as used */
 168static void msb_mark_block_used(struct msb_data *msb, int pba)
 169{
 170	int zone = msb_get_zone_from_pba(pba);
 171
 172	if (test_bit(pba, msb->used_blocks_bitmap)) {
 173		pr_err(
 174		"BUG: attempt to mark already used pba %d as used", pba);
 175		msb->read_only = true;
 176		return;
 177	}
 178
 179	if (msb_validate_used_block_bitmap(msb))
 180		return;
 181
 182	/* No races because all IO is single threaded */
 183	__set_bit(pba, msb->used_blocks_bitmap);
 184	msb->free_block_count[zone]--;
 185}
 186
 187/* Mark physical block as free */
 188static void msb_mark_block_unused(struct msb_data *msb, int pba)
 189{
 190	int zone = msb_get_zone_from_pba(pba);
 191
 192	if (!test_bit(pba, msb->used_blocks_bitmap)) {
 193		pr_err("BUG: attempt to mark already unused pba %d as unused", pba);
 194		msb->read_only = true;
 195		return;
 196	}
 197
 198	if (msb_validate_used_block_bitmap(msb))
 199		return;
 200
 201	/* No races because all IO is single threaded */
 202	__clear_bit(pba, msb->used_blocks_bitmap);
 203	msb->free_block_count[zone]++;
 204}
 205
 206/* Invalidate current register window */
 207static void msb_invalidate_reg_window(struct msb_data *msb)
 208{
 209	msb->reg_addr.w_offset = offsetof(struct ms_register, id);
 210	msb->reg_addr.w_length = sizeof(struct ms_id_register);
 211	msb->reg_addr.r_offset = offsetof(struct ms_register, id);
 212	msb->reg_addr.r_length = sizeof(struct ms_id_register);
 213	msb->addr_valid = false;
 214}
 215
 216/* Start a state machine */
 217static int msb_run_state_machine(struct msb_data *msb, int (*state_func)
 218		(struct memstick_dev *card, struct memstick_request **req))
 219{
 220	struct memstick_dev *card = msb->card;
 221
 222	WARN_ON(msb->state != -1);
 223	msb->int_polling = false;
 224	msb->state = 0;
 225	msb->exit_error = 0;
 226
 227	memset(&card->current_mrq, 0, sizeof(card->current_mrq));
 228
 229	card->next_request = state_func;
 230	memstick_new_req(card->host);
 231	wait_for_completion(&card->mrq_complete);
 232
 233	WARN_ON(msb->state != -1);
 234	return msb->exit_error;
 235}
 236
 237/* State machine handlers call this to exit */
 238static int msb_exit_state_machine(struct msb_data *msb, int error)
 239{
 240	WARN_ON(msb->state == -1);
 241
 242	msb->state = -1;
 243	msb->exit_error = error;
 244	msb->card->next_request = h_msb_default_bad;
 245
 246	/* Invalidate reg window on errors */
 247	if (error)
 248		msb_invalidate_reg_window(msb);
 249
 250	complete(&msb->card->mrq_complete);
 251	return -ENXIO;
 252}
 253
 254/* read INT register */
 255static int msb_read_int_reg(struct msb_data *msb, long timeout)
 256{
 257	struct memstick_request *mrq = &msb->card->current_mrq;
 258
 259	WARN_ON(msb->state == -1);
 260
 261	if (!msb->int_polling) {
 262		msb->int_timeout = jiffies +
 263			msecs_to_jiffies(timeout == -1 ? 500 : timeout);
 264		msb->int_polling = true;
 265	} else if (time_after(jiffies, msb->int_timeout)) {
 266		mrq->data[0] = MEMSTICK_INT_CMDNAK;
 267		return 0;
 268	}
 269
 270	if ((msb->caps & MEMSTICK_CAP_AUTO_GET_INT) &&
 271				mrq->need_card_int && !mrq->error) {
 272		mrq->data[0] = mrq->int_reg;
 273		mrq->need_card_int = false;
 274		return 0;
 275	} else {
 276		memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
 277		return 1;
 278	}
 279}
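
/*
 * Contract of msb_read_int_reg(): a return of 1 means a MS_TPC_GET_INT
 * request was queued and the caller's state will be re-entered with the
 * result in mrq->data[0]; a return of 0 means mrq->data[0] already holds
 * the INT value (or MEMSTICK_INT_CMDNAK once the polling timeout, 500 ms
 * by default, has expired).
 */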
 280
 281/* Read a register */
 282static int msb_read_regs(struct msb_data *msb, int offset, int len)
 283{
 284	struct memstick_request *req = &msb->card->current_mrq;
 285
 286	if (msb->reg_addr.r_offset != offset ||
 287	    msb->reg_addr.r_length != len || !msb->addr_valid) {
 288
 289		msb->reg_addr.r_offset = offset;
 290		msb->reg_addr.r_length = len;
 291		msb->addr_valid = true;
 292
 293		memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
 294			&msb->reg_addr, sizeof(msb->reg_addr));
 295		return 0;
 296	}
 297
 298	memstick_init_req(req, MS_TPC_READ_REG, NULL, len);
 299	return 1;
 300}
 301
 302/* Write a card register */
 303static int msb_write_regs(struct msb_data *msb, int offset, int len, void *buf)
 304{
 305	struct memstick_request *req = &msb->card->current_mrq;
 306
 307	if (msb->reg_addr.w_offset != offset ||
 308		msb->reg_addr.w_length != len  || !msb->addr_valid) {
 309
 310		msb->reg_addr.w_offset = offset;
 311		msb->reg_addr.w_length = len;
 312		msb->addr_valid = true;
 313
 314		memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
 315			&msb->reg_addr, sizeof(msb->reg_addr));
 316		return 0;
 317	}
 318
 319	memstick_init_req(req, MS_TPC_WRITE_REG, buf, len);
 320	return 1;
 321}
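
/*
 * Illustrative sketch (not part of the driver): msb_read_regs() and
 * msb_write_regs() return 0 when they first had to queue a request that
 * (re)programs the register window, so the state machine handlers below
 * call them in a "stay in this state and retry" pattern. EXAMPLE_STATE
 * and EXAMPLE_NEXT_STATE are hypothetical names.
 */
#if 0
	case EXAMPLE_STATE:
		if (!msb_write_regs(msb,
			offsetof(struct ms_register, param),
			sizeof(struct ms_param_register),
			&msb->regs.param))
			return 0; /* window programmed; state unchanged */

		msb->state = EXAMPLE_NEXT_STATE;
		return 0;
#endif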
 322
 323/* Handler for absence of IO */
 324static int h_msb_default_bad(struct memstick_dev *card,
 325						struct memstick_request **mrq)
 326{
 327	return -ENXIO;
 328}
 329
 330/*
 331 * This function is a handler for reads of one page from the device.
 332 * Writes output to msb->current_sg, takes the sector address from msb->regs.param.
 333 * Can also be used to read extra data only. Set params accordingly.
 334 */
 335static int h_msb_read_page(struct memstick_dev *card,
 336					struct memstick_request **out_mrq)
 337{
 338	struct msb_data *msb = memstick_get_drvdata(card);
 339	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
 340	struct scatterlist sg[2];
 341	u8 command, intreg;
 342
 343	if (mrq->error) {
 344		dbg("read_page, unknown error");
 345		return msb_exit_state_machine(msb, mrq->error);
 346	}
 347again:
 348	switch (msb->state) {
 349	case MSB_RP_SEND_BLOCK_ADDRESS:
 350		/* msb_write_regs sometimes "fails" because it needs to update
 351			the reg window first, in which case it issues a request
 352			for that. Then we stay in this state and retry */
 353		if (!msb_write_regs(msb,
 354			offsetof(struct ms_register, param),
 355			sizeof(struct ms_param_register),
 356			(unsigned char *)&msb->regs.param))
 357			return 0;
 358
 359		msb->state = MSB_RP_SEND_READ_COMMAND;
 360		return 0;
 361
 362	case MSB_RP_SEND_READ_COMMAND:
 363		command = MS_CMD_BLOCK_READ;
 364		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
 365		msb->state = MSB_RP_SEND_INT_REQ;
 366		return 0;
 367
 368	case MSB_RP_SEND_INT_REQ:
 369		msb->state = MSB_RP_RECEIVE_INT_REQ_RESULT;
 370		/* If we don't actually need to send the INT read request (it is
 371			needed only in serial mode), then just fall through */
 372		if (msb_read_int_reg(msb, -1))
 373			return 0;
 374		/* fallthrough */
 375
 376	case MSB_RP_RECEIVE_INT_REQ_RESULT:
 377		intreg = mrq->data[0];
 378		msb->regs.status.interrupt = intreg;
 379
 380		if (intreg & MEMSTICK_INT_CMDNAK)
 381			return msb_exit_state_machine(msb, -EIO);
 382
 383		if (!(intreg & MEMSTICK_INT_CED)) {
 384			msb->state = MSB_RP_SEND_INT_REQ;
 385			goto again;
 386		}
 387
 388		msb->int_polling = false;
 389		msb->state = (intreg & MEMSTICK_INT_ERR) ?
 390			MSB_RP_SEND_READ_STATUS_REG : MSB_RP_SEND_OOB_READ;
 391		goto again;
 392
 393	case MSB_RP_SEND_READ_STATUS_REG:
 394		/* read the status register to determine the source of the INT_ERR */
 395		if (!msb_read_regs(msb,
 396			offsetof(struct ms_register, status),
 397			sizeof(struct ms_status_register)))
 398			return 0;
 399
 400		msb->state = MSB_RP_RECEIVE_STATUS_REG;
 401		return 0;
 402
 403	case MSB_RP_RECEIVE_STATUS_REG:
 404		msb->regs.status = *(struct ms_status_register *)mrq->data;
 405		msb->state = MSB_RP_SEND_OOB_READ;
 406		/* fallthrough */
 407
 408	case MSB_RP_SEND_OOB_READ:
 409		if (!msb_read_regs(msb,
 410			offsetof(struct ms_register, extra_data),
 411			sizeof(struct ms_extra_data_register)))
 412			return 0;
 413
 414		msb->state = MSB_RP_RECEIVE_OOB_READ;
 415		return 0;
 416
 417	case MSB_RP_RECEIVE_OOB_READ:
 418		msb->regs.extra_data =
 419			*(struct ms_extra_data_register *) mrq->data;
 420		msb->state = MSB_RP_SEND_READ_DATA;
 421		/* fallthrough */
 422
 423	case MSB_RP_SEND_READ_DATA:
 424		/* Skip this state if we only read the OOB */
 425		if (msb->regs.param.cp == MEMSTICK_CP_EXTRA) {
 426			msb->state = MSB_RP_RECEIVE_READ_DATA;
 427			goto again;
 428		}
 429
 430		sg_init_table(sg, ARRAY_SIZE(sg));
 431		msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
 432			msb->current_sg_offset,
 433			msb->page_size);
 434
 435		memstick_init_req_sg(mrq, MS_TPC_READ_LONG_DATA, sg);
 436		msb->state = MSB_RP_RECEIVE_READ_DATA;
 437		return 0;
 438
 439	case MSB_RP_RECEIVE_READ_DATA:
 440		if (!(msb->regs.status.interrupt & MEMSTICK_INT_ERR)) {
 441			msb->current_sg_offset += msb->page_size;
 442			return msb_exit_state_machine(msb, 0);
 443		}
 444
 445		if (msb->regs.status.status1 & MEMSTICK_UNCORR_ERROR) {
 446			dbg("read_page: uncorrectable error");
 447			return msb_exit_state_machine(msb, -EBADMSG);
 448		}
 449
 450		if (msb->regs.status.status1 & MEMSTICK_CORR_ERROR) {
 451			dbg("read_page: correctable error");
 452			msb->current_sg_offset += msb->page_size;
 453			return msb_exit_state_machine(msb, -EUCLEAN);
 454		} else {
 455			dbg("read_page: INT error, but no status error bits");
 456			return msb_exit_state_machine(msb, -EIO);
 457		}
 458	}
 459
 460	BUG();
 461}
 462
 463/*
 464 * Handler for writes of exactly one block.
 465 * Takes the address from msb->regs.param.
 466 * Writes the same extra data to every page of the block, also taken
 467 * from msb->regs.extra_data.
 468 * Returns -EBADMSG if the write fails due to an uncorrectable error, or -EIO
 469 * if the device refuses to take the command or some other error occurs
 470 */
 471static int h_msb_write_block(struct memstick_dev *card,
 472					struct memstick_request **out_mrq)
 473{
 474	struct msb_data *msb = memstick_get_drvdata(card);
 475	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
 476	struct scatterlist sg[2];
 477	u8 intreg, command;
 478
 479	if (mrq->error)
 480		return msb_exit_state_machine(msb, mrq->error);
 481
 482again:
 483	switch (msb->state) {
 484
 485	/* HACK: JMicron handling of TPCs between 8 and
 486	 *	sizeof(memstick_request.data) bytes is broken due to a hardware
 487	 *	bug in the PIO mode that is used for these TPCs.
 488	 *	Therefore split the write
 489	 */
 490
 491	case MSB_WB_SEND_WRITE_PARAMS:
 492		if (!msb_write_regs(msb,
 493			offsetof(struct ms_register, param),
 494			sizeof(struct ms_param_register),
 495			&msb->regs.param))
 496			return 0;
 497
 498		msb->state = MSB_WB_SEND_WRITE_OOB;
 499		return 0;
 500
 501	case MSB_WB_SEND_WRITE_OOB:
 502		if (!msb_write_regs(msb,
 503			offsetof(struct ms_register, extra_data),
 504			sizeof(struct ms_extra_data_register),
 505			&msb->regs.extra_data))
 506			return 0;
 507		msb->state = MSB_WB_SEND_WRITE_COMMAND;
 508		return 0;
 509
 510
 511	case MSB_WB_SEND_WRITE_COMMAND:
 512		command = MS_CMD_BLOCK_WRITE;
 513		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
 514		msb->state = MSB_WB_SEND_INT_REQ;
 515		return 0;
 516
 517	case MSB_WB_SEND_INT_REQ:
 518		msb->state = MSB_WB_RECEIVE_INT_REQ;
 519		if (msb_read_int_reg(msb, -1))
 520			return 0;
 521		/* fallthrough */
 522
 523	case MSB_WB_RECEIVE_INT_REQ:
 524		intreg = mrq->data[0];
 525		msb->regs.status.interrupt = intreg;
 526
 527		/* errors mean we get out of here, and fast... */
 528		if (intreg & MEMSTICK_INT_CMDNAK)
 529			return msb_exit_state_machine(msb, -EIO);
 530
 531		if (intreg & MEMSTICK_INT_ERR)
 532			return msb_exit_state_machine(msb, -EBADMSG);
 533
 534
 535		/* for the last page we need to poll CED */
 536		if (msb->current_page == msb->pages_in_block) {
 537			if (intreg & MEMSTICK_INT_CED)
 538				return msb_exit_state_machine(msb, 0);
 539			msb->state = MSB_WB_SEND_INT_REQ;
 540			goto again;
 541
 542		}
 543
 544		/* for a non-last page we need BREQ before writing the next chunk */
 545		if (!(intreg & MEMSTICK_INT_BREQ)) {
 546			msb->state = MSB_WB_SEND_INT_REQ;
 547			goto again;
 548		}
 549
 550		msb->int_polling = false;
 551		msb->state = MSB_WB_SEND_WRITE_DATA;
 552		/* fallthrough */
 553
 554	case MSB_WB_SEND_WRITE_DATA:
 555		sg_init_table(sg, ARRAY_SIZE(sg));
 556
 557		if (msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
 558			msb->current_sg_offset,
 559			msb->page_size) < msb->page_size)
 560			return msb_exit_state_machine(msb, -EIO);
 561
 562		memstick_init_req_sg(mrq, MS_TPC_WRITE_LONG_DATA, sg);
 563		mrq->need_card_int = 1;
 564		msb->state = MSB_WB_RECEIVE_WRITE_CONFIRMATION;
 565		return 0;
 566
 567	case MSB_WB_RECEIVE_WRITE_CONFIRMATION:
 568		msb->current_page++;
 569		msb->current_sg_offset += msb->page_size;
 570		msb->state = MSB_WB_SEND_INT_REQ;
 571		goto again;
 572	default:
 573		BUG();
 574	}
 575
 576	return 0;
 577}
 578
 579/*
 580 * This function is used to send simple IO requests to the device, consisting
 581 * of a register write + command
 582 */
 583static int h_msb_send_command(struct memstick_dev *card,
 584					struct memstick_request **out_mrq)
 585{
 586	struct msb_data *msb = memstick_get_drvdata(card);
 587	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
 588	u8 intreg;
 589
 590	if (mrq->error) {
 591		dbg("send_command: unknown error");
 592		return msb_exit_state_machine(msb, mrq->error);
 593	}
 594again:
 595	switch (msb->state) {
 596
 597	/* HACK: see h_msb_write_block */
 598	case MSB_SC_SEND_WRITE_PARAMS: /* write param register */
 599		if (!msb_write_regs(msb,
 600			offsetof(struct ms_register, param),
 601			sizeof(struct ms_param_register),
 602			&msb->regs.param))
 603			return 0;
 604		msb->state = MSB_SC_SEND_WRITE_OOB;
 605		return 0;
 606
 607	case MSB_SC_SEND_WRITE_OOB:
 608		if (!msb->command_need_oob) {
 609			msb->state = MSB_SC_SEND_COMMAND;
 610			goto again;
 611		}
 612
 613		if (!msb_write_regs(msb,
 614			offsetof(struct ms_register, extra_data),
 615			sizeof(struct ms_extra_data_register),
 616			&msb->regs.extra_data))
 617			return 0;
 618
 619		msb->state = MSB_SC_SEND_COMMAND;
 620		return 0;
 621
 622	case MSB_SC_SEND_COMMAND:
 623		memstick_init_req(mrq, MS_TPC_SET_CMD, &msb->command_value, 1);
 624		msb->state = MSB_SC_SEND_INT_REQ;
 625		return 0;
 626
 627	case MSB_SC_SEND_INT_REQ:
 628		msb->state = MSB_SC_RECEIVE_INT_REQ;
 629		if (msb_read_int_reg(msb, -1))
 630			return 0;
 631		/* fallthrough */
 632
 633	case MSB_SC_RECEIVE_INT_REQ:
 634		intreg = mrq->data[0];
 635
 636		if (intreg & MEMSTICK_INT_CMDNAK)
 637			return msb_exit_state_machine(msb, -EIO);
 638		if (intreg & MEMSTICK_INT_ERR)
 639			return msb_exit_state_machine(msb, -EBADMSG);
 640
 641		if (!(intreg & MEMSTICK_INT_CED)) {
 642			msb->state = MSB_SC_SEND_INT_REQ;
 643			goto again;
 644		}
 645
 646		return msb_exit_state_machine(msb, 0);
 647	}
 648
 649	BUG();
 650}
 651
 652/* Small handler for card reset */
 653static int h_msb_reset(struct memstick_dev *card,
 654					struct memstick_request **out_mrq)
 655{
 656	u8 command = MS_CMD_RESET;
 657	struct msb_data *msb = memstick_get_drvdata(card);
 658	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
 659
 660	if (mrq->error)
 661		return msb_exit_state_machine(msb, mrq->error);
 662
 663	switch (msb->state) {
 664	case MSB_RS_SEND:
 665		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
 666		mrq->need_card_int = 0;
 667		msb->state = MSB_RS_CONFIRM;
 668		return 0;
 669	case MSB_RS_CONFIRM:
 670		return msb_exit_state_machine(msb, 0);
 671	}
 672	BUG();
 673}
 674
 675/* This handler is used to do the serial->parallel switch */
 676static int h_msb_parallel_switch(struct memstick_dev *card,
 677					struct memstick_request **out_mrq)
 678{
 679	struct msb_data *msb = memstick_get_drvdata(card);
 680	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
 681	struct memstick_host *host = card->host;
 682
 683	if (mrq->error) {
 684		dbg("parallel_switch: error");
 685		msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
 686		return msb_exit_state_machine(msb, mrq->error);
 687	}
 688
 689	switch (msb->state) {
 690	case MSB_PS_SEND_SWITCH_COMMAND:
 691		/* Set the parallel interface on memstick side */
 692		msb->regs.param.system |= MEMSTICK_SYS_PAM;
 693
 694		if (!msb_write_regs(msb,
 695			offsetof(struct ms_register, param),
 696			1,
 697			(unsigned char *)&msb->regs.param))
 698			return 0;
 699
 700		msb->state = MSB_PS_SWICH_HOST;
 701		return 0;
 702
 703	case MSB_PS_SWICH_HOST:
 704		/* Set the parallel interface on our side + send a dummy request
 705			to see if the card responds */
 706		host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_PAR4);
 707		memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
 708		msb->state = MSB_PS_CONFIRM;
 709		return 0;
 710
 711	case MSB_PS_CONFIRM:
 712		return msb_exit_state_machine(msb, 0);
 713	}
 714
 715	BUG();
 716}
 717
 718static int msb_switch_to_parallel(struct msb_data *msb);
 719
 720/* Reset the card, to guard against hw errors being treated as bad blocks */
 721static int msb_reset(struct msb_data *msb, bool full)
 722{
 723
 724	bool was_parallel = msb->regs.param.system & MEMSTICK_SYS_PAM;
 725	struct memstick_dev *card = msb->card;
 726	struct memstick_host *host = card->host;
 727	int error;
 728
 729	/* Reset the card */
 730	msb->regs.param.system = MEMSTICK_SYS_BAMD;
 731
 732	if (full) {
 733		error = host->set_param(host,
 734					MEMSTICK_POWER, MEMSTICK_POWER_OFF);
 735		if (error)
 736			goto out_error;
 737
 738		msb_invalidate_reg_window(msb);
 739
 740		error = host->set_param(host,
 741					MEMSTICK_POWER, MEMSTICK_POWER_ON);
 742		if (error)
 743			goto out_error;
 744
 745		error = host->set_param(host,
 746					MEMSTICK_INTERFACE, MEMSTICK_SERIAL);
 747		if (error) {
 748out_error:
 749			dbg("Failed to reset the host controller");
 750			msb->read_only = true;
 751			return -EFAULT;
 752		}
 753	}
 754
 755	error = msb_run_state_machine(msb, h_msb_reset);
 756	if (error) {
 757		dbg("Failed to reset the card");
 758		msb->read_only = true;
 759		return -ENODEV;
 760	}
 761
 762	/* Set parallel mode */
 763	if (was_parallel)
 764		msb_switch_to_parallel(msb);
 765	return 0;
 766}
 767
 768/* Attempts to switch interface to parallel mode */
 769static int msb_switch_to_parallel(struct msb_data *msb)
 770{
 771	int error;
 772
 773	error = msb_run_state_machine(msb, h_msb_parallel_switch);
 774	if (error) {
 775		pr_err("Switch to parallel failed");
 776		msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
 777		msb_reset(msb, true);
 778		return -EFAULT;
 779	}
 780
 781	msb->caps |= MEMSTICK_CAP_AUTO_GET_INT;
 782	return 0;
 783}
 784
 785/* Changes overwrite flag on a page */
 786static int msb_set_overwrite_flag(struct msb_data *msb,
 787						u16 pba, u8 page, u8 flag)
 788{
 789	if (msb->read_only)
 790		return -EROFS;
 791
 792	msb->regs.param.block_address = cpu_to_be16(pba);
 793	msb->regs.param.page_address = page;
 794	msb->regs.param.cp = MEMSTICK_CP_OVERWRITE;
 795	msb->regs.extra_data.overwrite_flag = flag;
 796	msb->command_value = MS_CMD_BLOCK_WRITE;
 797	msb->command_need_oob = true;
 798
 799	dbg_verbose("changing overwrite flag to %02x for pba %d, page %d",
 800							flag, pba, page);
 801	return msb_run_state_machine(msb, h_msb_send_command);
 802}
 803
 804static int msb_mark_bad(struct msb_data *msb, int pba)
 805{
 806	pr_notice("marking pba %d as bad", pba);
 807	msb_reset(msb, true);
 808	return msb_set_overwrite_flag(
 809			msb, pba, 0, 0xFF & ~MEMSTICK_OVERWRITE_BKST);
 810}
 811
 812static int msb_mark_page_bad(struct msb_data *msb, int pba, int page)
 813{
 814	dbg("marking page %d of pba %d as bad", page, pba);
 815	msb_reset(msb, true);
 816	return msb_set_overwrite_flag(msb,
 817		pba, page, ~MEMSTICK_OVERWRITE_PGST0);
 818}
 819
 820/* Erases one physical block */
 821static int msb_erase_block(struct msb_data *msb, u16 pba)
 822{
 823	int error, try;
 824	if (msb->read_only)
 825		return -EROFS;
 826
 827	dbg_verbose("erasing pba %d", pba);
 828
 829	for (try = 1; try < 3; try++) {
 830		msb->regs.param.block_address = cpu_to_be16(pba);
 831		msb->regs.param.page_address = 0;
 832		msb->regs.param.cp = MEMSTICK_CP_BLOCK;
 833		msb->command_value = MS_CMD_BLOCK_ERASE;
 834		msb->command_need_oob = false;
 835
 836
 837		error = msb_run_state_machine(msb, h_msb_send_command);
 838		if (!error || msb_reset(msb, true))
 839			break;
 840	}
 841
 842	if (error) {
 843		pr_err("erase failed, marking pba %d as bad", pba);
 844		msb_mark_bad(msb, pba);
 845	}
 846
 847	dbg_verbose("marking pba %d as unused", pba);
 848	msb_mark_block_unused(msb, pba);
 849	__set_bit(pba, msb->erased_blocks_bitmap);
 850	return error;
 851}
 852
 853/* Reads one page from the device */
 854static int msb_read_page(struct msb_data *msb,
 855	u16 pba, u8 page, struct ms_extra_data_register *extra,
 856					struct scatterlist *sg,  int offset)
 857{
 858	int try, error;
 859
 860	if (pba == MS_BLOCK_INVALID) {
 861		unsigned long flags;
 862		struct sg_mapping_iter miter;
 863		size_t len = msb->page_size;
 864
 865		dbg_verbose("read unmapped sector. returning 0xFF");
 866
 867		local_irq_save(flags);
 868		sg_miter_start(&miter, sg, sg_nents(sg),
 869				SG_MITER_ATOMIC | SG_MITER_TO_SG);
 870
 871		while (sg_miter_next(&miter) && len > 0) {
 872
 873			int chunklen;
 874
 875			if (offset && offset >= miter.length) {
 876				offset -= miter.length;
 877				continue;
 878			}
 879
 880			chunklen = min(miter.length - offset, len);
 881			memset(miter.addr + offset, 0xFF, chunklen);
 882			len -= chunklen;
 883			offset = 0;
 884		}
 885
 886		sg_miter_stop(&miter);
 887		local_irq_restore(flags);
 888
 889		if (offset)
 890			return -EFAULT;
 891
 892		if (extra)
 893			memset(extra, 0xFF, sizeof(*extra));
 894		return 0;
 895	}
 896
 897	if (pba >= msb->block_count) {
 898		pr_err("BUG: attempt to read beyond the end of the card at pba %d", pba);
 899		return -EINVAL;
 900	}
 901
 902	for (try = 1; try < 3; try++) {
 903		msb->regs.param.block_address = cpu_to_be16(pba);
 904		msb->regs.param.page_address = page;
 905		msb->regs.param.cp = MEMSTICK_CP_PAGE;
 906
 907		msb->current_sg = sg;
 908		msb->current_sg_offset = offset;
 909		error = msb_run_state_machine(msb, h_msb_read_page);
 910
 911
 912		if (error == -EUCLEAN) {
 913			pr_notice("correctable error on pba %d, page %d",
 914				pba, page);
 915			error = 0;
 916		}
 917
 918		if (!error && extra)
 919			*extra = msb->regs.extra_data;
 920
 921		if (!error || msb_reset(msb, true))
 922			break;
 923
 924	}
 925
 926	/* Mark bad pages */
 927	if (error == -EBADMSG) {
 928		pr_err("uncorrectable error on read of pba %d, page %d",
 929			pba, page);
 930
 931		if (msb->regs.extra_data.overwrite_flag &
 932					MEMSTICK_OVERWRITE_PGST0)
 933			msb_mark_page_bad(msb, pba, page);
 934		return -EBADMSG;
 935	}
 936
 937	if (error)
 938		pr_err("read of pba %d, page %d failed with error %d",
 939			pba, page, error);
 940	return error;
 941}
 942
 943/* Reads the OOB of a page only */
 944static int msb_read_oob(struct msb_data *msb, u16 pba, u16 page,
 945	struct ms_extra_data_register *extra)
 946{
 947	int error;
 948
 949	BUG_ON(!extra);
 950	msb->regs.param.block_address = cpu_to_be16(pba);
 951	msb->regs.param.page_address = page;
 952	msb->regs.param.cp = MEMSTICK_CP_EXTRA;
 953
 954	if (pba >= msb->block_count) {
 955		pr_err("BUG: attempt to read beyond the end of card at pba %d", pba);
 956		return -EINVAL;
 957	}
 958
 959	error = msb_run_state_machine(msb, h_msb_read_page);
 960	*extra = msb->regs.extra_data;
 961
 962	if (error == -EUCLEAN) {
 963		pr_notice("correctable error on pba %d, page %d",
 964			pba, page);
 965		return 0;
 966	}
 967
 968	return error;
 969}
 970
 971/* Reads a block and compares it with data contained in scatterlist orig_sg */
 972static int msb_verify_block(struct msb_data *msb, u16 pba,
 973				struct scatterlist *orig_sg,  int offset)
 974{
 975	struct scatterlist sg;
 976	int page = 0, error;
 977
 978	sg_init_one(&sg, msb->block_buffer, msb->block_size);
 979
 980	while (page < msb->pages_in_block) {
 981
 982		error = msb_read_page(msb, pba, page,
 983				NULL, &sg, page * msb->page_size);
 984		if (error)
 985			return error;
 986		page++;
 987	}
 988
 989	if (msb_sg_compare_to_buffer(orig_sg, offset,
 990				msb->block_buffer, msb->block_size))
 991		return -EIO;
 992	return 0;
 993}
 994
 995/* Writes exactly one block + OOB */
 996static int msb_write_block(struct msb_data *msb,
 997			u16 pba, u32 lba, struct scatterlist *sg, int offset)
 998{
 999	int error, current_try = 1;
1000	BUG_ON(sg->length < msb->page_size);
1001
1002	if (msb->read_only)
1003		return -EROFS;
1004
1005	if (pba == MS_BLOCK_INVALID) {
1006		pr_err(
1007			"BUG: write: attempt to write MS_BLOCK_INVALID block");
1008		return -EINVAL;
1009	}
1010
1011	if (pba >= msb->block_count || lba >= msb->logical_block_count) {
1012		pr_err(
1013		"BUG: write: attempt to write beyond the end of device");
1014		return -EINVAL;
1015	}
1016
1017	if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
1018		pr_err("BUG: write: lba zone mismatch");
1019		return -EINVAL;
1020	}
1021
1022	if (pba == msb->boot_block_locations[0] ||
1023		pba == msb->boot_block_locations[1]) {
1024		pr_err("BUG: write: attempt to write to boot blocks!");
1025		return -EINVAL;
1026	}
1027
1028	while (1) {
1029
1030		if (msb->read_only)
1031			return -EROFS;
1032
1033		msb->regs.param.cp = MEMSTICK_CP_BLOCK;
1034		msb->regs.param.page_address = 0;
1035		msb->regs.param.block_address = cpu_to_be16(pba);
1036
1037		msb->regs.extra_data.management_flag = 0xFF;
1038		msb->regs.extra_data.overwrite_flag = 0xF8;
1039		msb->regs.extra_data.logical_address = cpu_to_be16(lba);
1040
1041		msb->current_sg = sg;
1042		msb->current_sg_offset = offset;
1043		msb->current_page = 0;
1044
1045		error = msb_run_state_machine(msb, h_msb_write_block);
1046
1047		/* The block we just wrote to is assumed to be erased, since its
1048			pba was erased. If it wasn't actually erased, the write
1049			will still succeed and will just clear the bits that were
1050			set in the block; thus we test that what we have written
1051			matches what we expect.
1052			We do trust the blocks that we erased ourselves */
1053		if (!error && (verify_writes ||
1054				!test_bit(pba, msb->erased_blocks_bitmap)))
1055			error = msb_verify_block(msb, pba, sg, offset);
1056
1057		if (!error)
1058			break;
1059
1060		if (current_try > 1 || msb_reset(msb, true))
1061			break;
1062
1063		pr_err("write failed, trying to erase the pba %d", pba);
1064		error = msb_erase_block(msb, pba);
1065		if (error)
1066			break;
1067
1068		current_try++;
1069	}
1070	return error;
1071}
1072
1073/* Finds a free block for write replacement */
1074static u16 msb_get_free_block(struct msb_data *msb, int zone)
1075{
1076	u16 pos;
1077	int pba = zone * MS_BLOCKS_IN_ZONE;
1078	int i;
1079
1080	get_random_bytes(&pos, sizeof(pos));
1081
1082	if (!msb->free_block_count[zone]) {
1083		pr_err("no free blocks in zone %d to use for a write (media is worn out), switching to RO mode", zone);
1084		msb->read_only = true;
1085		return MS_BLOCK_INVALID;
1086	}
1087
1088	pos %= msb->free_block_count[zone];
1089
1090	dbg_verbose("have %d choices for a free block, selected randomly: %d",
1091		msb->free_block_count[zone], pos);
1092
1093	pba = find_next_zero_bit(msb->used_blocks_bitmap,
1094							msb->block_count, pba);
1095	for (i = 0; i < pos; ++i)
1096		pba = find_next_zero_bit(msb->used_blocks_bitmap,
1097						msb->block_count, pba + 1);
1098
1099	dbg_verbose("result of the free blocks scan: pba %d", pba);
1100
1101	if (pba == msb->block_count || (msb_get_zone_from_pba(pba)) != zone) {
1102		pr_err("BUG: can't get a free block");
1103		msb->read_only = true;
1104		return MS_BLOCK_INVALID;
1105	}
1106
1107	msb_mark_block_used(msb, pba);
1108	return pba;
1109}
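
/*
 * Worked example of the selection above: for zone 1 the scan starts at
 * pba 512 (zone * MS_BLOCKS_IN_ZONE). If the zone has, say, three free
 * blocks and the random pos comes out as 1, the first call to
 * find_next_zero_bit() lands on the zone's first free pba and the loop
 * advances past one more, so the second free block is picked.
 */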
1110
1111static int msb_update_block(struct msb_data *msb, u16 lba,
1112	struct scatterlist *sg, int offset)
1113{
1114	u16 pba, new_pba;
1115	int error, try;
1116
1117	pba = msb->lba_to_pba_table[lba];
1118	dbg_verbose("start of a block update at lba %d, pba %d", lba, pba);
1119
1120	if (pba != MS_BLOCK_INVALID) {
1121		dbg_verbose("setting the update flag on the block");
1122		msb_set_overwrite_flag(msb, pba, 0,
1123				0xFF & ~MEMSTICK_OVERWRITE_UDST);
1124	}
1125
1126	for (try = 0; try < 3; try++) {
1127		new_pba = msb_get_free_block(msb,
1128			msb_get_zone_from_lba(lba));
1129
1130		if (new_pba == MS_BLOCK_INVALID) {
1131			error = -EIO;
1132			goto out;
1133		}
1134
1135		dbg_verbose("block update: writing updated block to the pba %d",
1136								new_pba);
1137		error = msb_write_block(msb, new_pba, lba, sg, offset);
1138		if (error == -EBADMSG) {
1139			msb_mark_bad(msb, new_pba);
1140			continue;
1141		}
1142
1143		if (error)
1144			goto out;
1145
1146		dbg_verbose("block update: erasing the old block");
1147		msb_erase_block(msb, pba);
1148		msb->lba_to_pba_table[lba] = new_pba;
1149		return 0;
1150	}
1151out:
1152	if (error) {
1153		pr_err("block update error after %d tries, switching to r/o mode", try);
1154		msb->read_only = true;
1155	}
1156	return error;
1157}
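
/*
 * The sequence above is the driver's wear-leveling path: clear the old
 * block's UDST bit, write the data into a freshly picked free block of
 * the same zone, erase the old block, then update lba_to_pba_table.
 * A power loss between the write and the erase leaves two blocks carrying
 * the same lba; the collision handling in msb_ftl_scan() later resolves
 * that using the same UDST flag.
 */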
1158
1159/* Converts endianness of the boot block fields for easy use */
1160static void msb_fix_boot_page_endianness(struct ms_boot_page *p)
1161{
1162	p->header.block_id = be16_to_cpu(p->header.block_id);
1163	p->header.format_reserved = be16_to_cpu(p->header.format_reserved);
1164	p->entry.disabled_block.start_addr
1165		= be32_to_cpu(p->entry.disabled_block.start_addr);
1166	p->entry.disabled_block.data_size
1167		= be32_to_cpu(p->entry.disabled_block.data_size);
1168	p->entry.cis_idi.start_addr
1169		= be32_to_cpu(p->entry.cis_idi.start_addr);
1170	p->entry.cis_idi.data_size
1171		= be32_to_cpu(p->entry.cis_idi.data_size);
1172	p->attr.block_size = be16_to_cpu(p->attr.block_size);
1173	p->attr.number_of_blocks = be16_to_cpu(p->attr.number_of_blocks);
1174	p->attr.number_of_effective_blocks
1175		= be16_to_cpu(p->attr.number_of_effective_blocks);
1176	p->attr.page_size = be16_to_cpu(p->attr.page_size);
1177	p->attr.memory_manufacturer_code
1178		= be16_to_cpu(p->attr.memory_manufacturer_code);
1179	p->attr.memory_device_code = be16_to_cpu(p->attr.memory_device_code);
1180	p->attr.implemented_capacity
1181		= be16_to_cpu(p->attr.implemented_capacity);
1182	p->attr.controller_number = be16_to_cpu(p->attr.controller_number);
1183	p->attr.controller_function = be16_to_cpu(p->attr.controller_function);
1184}
1185
1186static int msb_read_boot_blocks(struct msb_data *msb)
1187{
1188	int pba = 0;
1189	struct scatterlist sg;
1190	struct ms_extra_data_register extra;
1191	struct ms_boot_page *page;
1192
1193	msb->boot_block_locations[0] = MS_BLOCK_INVALID;
1194	msb->boot_block_locations[1] = MS_BLOCK_INVALID;
1195	msb->boot_block_count = 0;
1196
1197	dbg_verbose("Start of a scan for the boot blocks");
1198
1199	if (!msb->boot_page) {
1200		page = kmalloc_array(2, sizeof(struct ms_boot_page),
1201				     GFP_KERNEL);
1202		if (!page)
1203			return -ENOMEM;
1204
1205		msb->boot_page = page;
1206	} else
1207		page = msb->boot_page;
1208
1209	msb->block_count = MS_BLOCK_MAX_BOOT_ADDR;
1210
1211	for (pba = 0; pba < MS_BLOCK_MAX_BOOT_ADDR; pba++) {
1212
1213		sg_init_one(&sg, page, sizeof(*page));
1214		if (msb_read_page(msb, pba, 0, &extra, &sg, 0)) {
1215			dbg("boot scan: can't read pba %d", pba);
1216			continue;
1217		}
1218
1219		if (extra.management_flag & MEMSTICK_MANAGEMENT_SYSFLG) {
1220			dbg("management flag doesn't indicate boot block %d",
1221									pba);
1222			continue;
1223		}
1224
1225		if (be16_to_cpu(page->header.block_id) != MS_BLOCK_BOOT_ID) {
1226			dbg("the pba at %d doesn't contain boot block ID", pba);
1227			continue;
1228		}
1229
1230		msb_fix_boot_page_endianness(page);
1231		msb->boot_block_locations[msb->boot_block_count] = pba;
1232
1233		page++;
1234		msb->boot_block_count++;
1235
1236		if (msb->boot_block_count == 2)
1237			break;
1238	}
1239
1240	if (!msb->boot_block_count) {
1241		pr_err("media doesn't contain master page, aborting");
1242		return -EIO;
1243	}
1244
1245	dbg_verbose("End of scan for boot blocks");
1246	return 0;
1247}
1248
1249static int msb_read_bad_block_table(struct msb_data *msb, int block_nr)
1250{
1251	struct ms_boot_page *boot_block;
1252	struct scatterlist sg;
1253	u16 *buffer = NULL;
1254	int offset = 0;
1255	int i, error = 0;
1256	int data_size, data_offset, page, page_offset, size_to_read;
1257	u16 pba;
1258
1259	BUG_ON(block_nr > 1);
1260	boot_block = &msb->boot_page[block_nr];
1261	pba = msb->boot_block_locations[block_nr];
1262
1263	if (msb->boot_block_locations[block_nr] == MS_BLOCK_INVALID)
1264		return -EINVAL;
1265
1266	data_size = boot_block->entry.disabled_block.data_size;
1267	data_offset = sizeof(struct ms_boot_page) +
1268			boot_block->entry.disabled_block.start_addr;
1269	if (!data_size)
1270		return 0;
1271
1272	page = data_offset / msb->page_size;
1273	page_offset = data_offset % msb->page_size;
1274	size_to_read =
1275		DIV_ROUND_UP(data_size + page_offset, msb->page_size) *
1276			msb->page_size;
1277
1278	dbg("reading bad block table of boot block at pba %d, offset %d, len %d",
1279		pba, data_offset, data_size);
1280
1281	buffer = kzalloc(size_to_read, GFP_KERNEL);
1282	if (!buffer)
1283		return -ENOMEM;
1284
1285	/* Read the buffer */
1286	sg_init_one(&sg, buffer, size_to_read);
1287
1288	while (offset < size_to_read) {
1289		error = msb_read_page(msb, pba, page, NULL, &sg, offset);
1290		if (error)
1291			goto out;
1292
1293		page++;
1294		offset += msb->page_size;
1295
1296		if (page == msb->pages_in_block) {
1297			pr_err(
1298			"bad block table extends beyond the boot block");
1299			break;
1300		}
1301	}
1302
1303	/* Process the bad block table */
1304	for (i = page_offset; i < data_size / sizeof(u16); i++) {
1305
1306		u16 bad_block = be16_to_cpu(buffer[i]);
1307
1308		if (bad_block >= msb->block_count) {
1309			dbg("bad block table contains invalid block %d",
1310								bad_block);
1311			continue;
1312		}
1313
1314		if (test_bit(bad_block, msb->used_blocks_bitmap)) {
1315			dbg("duplicate bad block %d in the table",
1316				bad_block);
1317			continue;
1318		}
1319
1320		dbg("block %d is marked as factory bad", bad_block);
1321		msb_mark_block_used(msb, bad_block);
1322	}
1323out:
1324	kfree(buffer);
1325	return error;
1326}
1327
1328static int msb_ftl_initialize(struct msb_data *msb)
1329{
1330	int i;
1331
1332	if (msb->ftl_initialized)
1333		return 0;
1334
1335	msb->zone_count = msb->block_count / MS_BLOCKS_IN_ZONE;
1336	msb->logical_block_count = msb->zone_count * 496 - 2;
1337
1338	msb->used_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
1339	msb->erased_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
1340	msb->lba_to_pba_table =
1341		kmalloc_array(msb->logical_block_count, sizeof(u16),
1342			      GFP_KERNEL);
1343
1344	if (!msb->used_blocks_bitmap || !msb->lba_to_pba_table ||
1345						!msb->erased_blocks_bitmap) {
1346		kfree(msb->used_blocks_bitmap);
1347		kfree(msb->lba_to_pba_table);
1348		kfree(msb->erased_blocks_bitmap);
1349		return -ENOMEM;
1350	}
1351
1352	for (i = 0; i < msb->zone_count; i++)
1353		msb->free_block_count[i] = MS_BLOCKS_IN_ZONE;
1354
1355	memset(msb->lba_to_pba_table, MS_BLOCK_INVALID,
1356			msb->logical_block_count * sizeof(u16));
1357
1358	dbg("initial FTL tables created. Zone count = %d, Logical block count = %d",
1359		msb->zone_count, msb->logical_block_count);
1360
1361	msb->ftl_initialized = true;
1362	return 0;
1363}
1364
1365static int msb_ftl_scan(struct msb_data *msb)
1366{
1367	u16 pba, lba, other_block;
1368	u8 overwrite_flag, management_flag, other_overwrite_flag;
1369	int error;
1370	struct ms_extra_data_register extra;
1371	u8 *overwrite_flags = kzalloc(msb->block_count, GFP_KERNEL);
1372
1373	if (!overwrite_flags)
1374		return -ENOMEM;
1375
1376	dbg("Start of media scanning");
1377	for (pba = 0; pba < msb->block_count; pba++) {
1378
1379		if (pba == msb->boot_block_locations[0] ||
1380			pba == msb->boot_block_locations[1]) {
1381			dbg_verbose("pba %05d -> [boot block]", pba);
1382			msb_mark_block_used(msb, pba);
1383			continue;
1384		}
1385
1386		if (test_bit(pba, msb->used_blocks_bitmap)) {
1387			dbg_verbose("pba %05d -> [factory bad]", pba);
1388			continue;
1389		}
1390
1391		memset(&extra, 0, sizeof(extra));
1392		error = msb_read_oob(msb, pba, 0, &extra);
1393
1394		/* can't trust the page if we can't read the oob */
1395		if (error == -EBADMSG) {
1396			pr_notice(
1397			"oob of pba %d damaged, will try to erase it", pba);
1398			msb_mark_block_used(msb, pba);
1399			msb_erase_block(msb, pba);
1400			continue;
1401		} else if (error) {
1402			pr_err("unknown error %d on read of oob of pba %d - aborting",
1403				error, pba);
1404
1405			kfree(overwrite_flags);
1406			return error;
1407		}
1408
1409		lba = be16_to_cpu(extra.logical_address);
1410		management_flag = extra.management_flag;
1411		overwrite_flag = extra.overwrite_flag;
1412		overwrite_flags[pba] = overwrite_flag;
1413
1414		/* Skip bad blocks */
1415		if (!(overwrite_flag & MEMSTICK_OVERWRITE_BKST)) {
1416			dbg("pba %05d -> [BAD]", pba);
1417			msb_mark_block_used(msb, pba);
1418			continue;
1419		}
1420
1421		/* Skip system/drm blocks */
1422		if ((management_flag & MEMSTICK_MANAGEMENT_FLAG_NORMAL) !=
1423			MEMSTICK_MANAGEMENT_FLAG_NORMAL) {
1424			dbg("pba %05d -> [reserved management flag %02x]",
1425							pba, management_flag);
1426			msb_mark_block_used(msb, pba);
1427			continue;
1428		}
1429
1430		/* Erase temporary tables */
1431		if (!(management_flag & MEMSTICK_MANAGEMENT_ATFLG)) {
1432			dbg("pba %05d -> [temp table] - will erase", pba);
1433
1434			msb_mark_block_used(msb, pba);
1435			msb_erase_block(msb, pba);
1436			continue;
1437		}
1438
1439		if (lba == MS_BLOCK_INVALID) {
1440			dbg_verbose("pba %05d -> [free]", pba);
1441			continue;
1442		}
1443
1444		msb_mark_block_used(msb, pba);
1445
1446		/* Block has an LBA that violates the zoning */
1447		if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
1448			pr_notice("pba %05d -> [bad lba %05d] - will erase",
1449								pba, lba);
1450			msb_erase_block(msb, pba);
1451			continue;
1452		}
1453
1454		/* No collisions - great */
1455		if (msb->lba_to_pba_table[lba] == MS_BLOCK_INVALID) {
1456			dbg_verbose("pba %05d -> [lba %05d]", pba, lba);
1457			msb->lba_to_pba_table[lba] = pba;
1458			continue;
1459		}
1460
1461		other_block = msb->lba_to_pba_table[lba];
1462		other_overwrite_flag = overwrite_flags[other_block];
1463
1464		pr_notice("Collision between pba %d and pba %d",
1465			pba, other_block);
1466
1467		if (!(overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
1468			pr_notice("pba %d is marked as stable, use it", pba);
1469			msb_erase_block(msb, other_block);
1470			msb->lba_to_pba_table[lba] = pba;
1471			continue;
1472		}
1473
1474		if (!(other_overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
1475			pr_notice("pba %d is marked as stable, use it",
1476								other_block);
1477			msb_erase_block(msb, pba);
1478			continue;
1479		}
1480
1481		pr_notice("collision between pba %d and pba %d, neither marked stable, erasing pba %d",
1482				pba, other_block, other_block);
1483
1484		msb_erase_block(msb, other_block);
1485		msb->lba_to_pba_table[lba] = pba;
1486	}
1487
1488	dbg("End of media scanning");
1489	kfree(overwrite_flags);
1490	return 0;
1491}
1492
1493static void msb_cache_flush_timer(struct timer_list *t)
1494{
1495	struct msb_data *msb = from_timer(msb, t, cache_flush_timer);
1496	msb->need_flush_cache = true;
1497	queue_work(msb->io_queue, &msb->io_work);
1498}
1499
1500
1501static void msb_cache_discard(struct msb_data *msb)
1502{
1503	if (msb->cache_block_lba == MS_BLOCK_INVALID)
1504		return;
1505
1506	del_timer_sync(&msb->cache_flush_timer);
1507
1508	dbg_verbose("Discarding the write cache");
1509	msb->cache_block_lba = MS_BLOCK_INVALID;
1510	bitmap_zero(&msb->valid_cache_bitmap, msb->pages_in_block);
1511}
1512
1513static int msb_cache_init(struct msb_data *msb)
1514{
1515	timer_setup(&msb->cache_flush_timer, msb_cache_flush_timer, 0);
1516
1517	if (!msb->cache)
1518		msb->cache = kzalloc(msb->block_size, GFP_KERNEL);
1519	if (!msb->cache)
1520		return -ENOMEM;
1521
1522	msb_cache_discard(msb);
1523	return 0;
1524}
1525
1526static int msb_cache_flush(struct msb_data *msb)
1527{
1528	struct scatterlist sg;
1529	struct ms_extra_data_register extra;
1530	int page, offset, error;
1531	u16 pba, lba;
1532
1533	if (msb->read_only)
1534		return -EROFS;
1535
1536	if (msb->cache_block_lba == MS_BLOCK_INVALID)
1537		return 0;
1538
1539	lba = msb->cache_block_lba;
1540	pba = msb->lba_to_pba_table[lba];
1541
1542	dbg_verbose("Flushing the write cache of pba %d (LBA %d)",
1543						pba, msb->cache_block_lba);
1544
1545	sg_init_one(&sg, msb->cache, msb->block_size);
1546
1547	/* Read all missing pages in cache */
1548	for (page = 0; page < msb->pages_in_block; page++) {
1549
1550		if (test_bit(page, &msb->valid_cache_bitmap))
1551			continue;
1552
1553		offset = page * msb->page_size;
1554
1555		dbg_verbose("reading non-present sector %d of cache block %d",
1556			page, lba);
1557		error = msb_read_page(msb, pba, page, &extra, &sg, offset);
1558
1559		/* Bad pages are copied with 00 page status */
1560		if (error == -EBADMSG) {
1561			pr_err("read error on sector %d, contents probably damaged", page);
1562			continue;
1563		}
1564
1565		if (error)
1566			return error;
1567
1568		if ((extra.overwrite_flag & MEMSTICK_OV_PG_NORMAL) !=
1569							MEMSTICK_OV_PG_NORMAL) {
1570			dbg("page %d is marked as bad", page);
1571			continue;
1572		}
1573
1574		set_bit(page, &msb->valid_cache_bitmap);
1575	}
1576
1577	/* Write the cache now */
1578	error = msb_update_block(msb, msb->cache_block_lba, &sg, 0);
1579	pba = msb->lba_to_pba_table[msb->cache_block_lba];
1580
1581	/* Mark invalid pages */
1582	if (!error) {
1583		for (page = 0; page < msb->pages_in_block; page++) {
1584
1585			if (test_bit(page, &msb->valid_cache_bitmap))
1586				continue;
1587
1588			dbg("marking page %d as containing damaged data",
1589				page);
1590			msb_set_overwrite_flag(msb,
1591				pba, page, 0xFF & ~MEMSTICK_OV_PG_NORMAL);
1592		}
1593	}
1594
1595	msb_cache_discard(msb);
1596	return error;
1597}
1598
1599static int msb_cache_write(struct msb_data *msb, int lba,
1600	int page, bool add_to_cache_only, struct scatterlist *sg, int offset)
1601{
1602	int error;
1603	struct scatterlist sg_tmp[10];
1604
1605	if (msb->read_only)
1606		return -EROFS;
1607
1608	if (msb->cache_block_lba == MS_BLOCK_INVALID ||
1609						lba != msb->cache_block_lba)
1610		if (add_to_cache_only)
1611			return 0;
1612
1613	/* If we need to write a different block */
1614	if (msb->cache_block_lba != MS_BLOCK_INVALID &&
1615						lba != msb->cache_block_lba) {
1616		dbg_verbose("first flush the cache");
1617		error = msb_cache_flush(msb);
1618		if (error)
1619			return error;
1620	}
1621
1622	if (msb->cache_block_lba == MS_BLOCK_INVALID) {
1623		msb->cache_block_lba = lba;
1624		mod_timer(&msb->cache_flush_timer,
1625			jiffies + msecs_to_jiffies(cache_flush_timeout));
1626	}
1627
1628	dbg_verbose("Write of LBA %d page %d to cache", lba, page);
1629
1630	sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
1631	msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp), offset, msb->page_size);
1632
1633	sg_copy_to_buffer(sg_tmp, sg_nents(sg_tmp),
1634		msb->cache + page * msb->page_size, msb->page_size);
1635
1636	set_bit(page, &msb->valid_cache_bitmap);
1637	return 0;
1638}
1639
1640static int msb_cache_read(struct msb_data *msb, int lba,
1641				int page, struct scatterlist *sg, int offset)
1642{
1643	int pba = msb->lba_to_pba_table[lba];
1644	struct scatterlist sg_tmp[10];
1645	int error = 0;
1646
1647	if (lba == msb->cache_block_lba &&
1648			test_bit(page, &msb->valid_cache_bitmap)) {
1649
1650		dbg_verbose("Read of LBA %d (pba %d) sector %d from cache",
1651							lba, pba, page);
1652
1653		sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
1654		msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp),
1655			offset, msb->page_size);
1656		sg_copy_from_buffer(sg_tmp, sg_nents(sg_tmp),
1657			msb->cache + msb->page_size * page,
1658							msb->page_size);
1659	} else {
1660		dbg_verbose("Read of LBA %d (pba %d) sector %d from device",
1661							lba, pba, page);
1662
1663		error = msb_read_page(msb, pba, page, NULL, sg, offset);
1664		if (error)
1665			return error;
1666
1667		msb_cache_write(msb, lba, page, true, sg, offset);
1668	}
1669	return error;
1670}
1671
1672/* Emulated geometry table
1673 * This table's content isn't that important;
1674 * one could put different values here, provided that they still
1675 * cover the whole disk.
1676 * The 64 MB entry is what Windows reports for my 64M memstick */
1677
1678static const struct chs_entry chs_table[] = {
1679/*        size sectors cylinders  heads */
1680	{ 4,    16,    247,       2  },
1681	{ 8,    16,    495,       2  },
1682	{ 16,   16,    495,       4  },
1683	{ 32,   16,    991,       4  },
1684	{ 64,   16,    991,       8  },
1685	{128,   16,    991,       16 },
1686	{ 0 }
1687};
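
/*
 * Quick arithmetic for the 64 MB entry above: 8 heads * 991 cylinders *
 * 16 sectors * 512 bytes/sector = 64946176 bytes, i.e. roughly 62 MiB of
 * addressable CHS space for a card whose raw size is 64 MB.
 */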
1688
1689/* Load information about the card */
1690static int msb_init_card(struct memstick_dev *card)
1691{
1692	struct msb_data *msb = memstick_get_drvdata(card);
1693	struct memstick_host *host = card->host;
1694	struct ms_boot_page *boot_block;
1695	int error = 0, i, raw_size_in_megs;
1696
1697	msb->caps = 0;
1698
1699	if (card->id.class >= MEMSTICK_CLASS_ROM &&
1700				card->id.class <= MEMSTICK_CLASS_ROM)
1701		msb->read_only = true;
1702
1703	msb->state = -1;
1704	error = msb_reset(msb, false);
1705	if (error)
1706		return error;
1707
1708	/* Due to a bug in the JMicron driver written by Alex Dubov,
1709	 * its serial mode barely works,
1710	 * so we switch to parallel mode right away */
1711	if (host->caps & MEMSTICK_CAP_PAR4)
1712		msb_switch_to_parallel(msb);
1713
1714	msb->page_size = sizeof(struct ms_boot_page);
1715
1716	/* Read the boot page */
1717	error = msb_read_boot_blocks(msb);
1718	if (error)
1719		return -EIO;
1720
1721	boot_block = &msb->boot_page[0];
1722
1723	/* Save interesting attributes from the boot page */
1724	msb->block_count = boot_block->attr.number_of_blocks;
1725	msb->page_size = boot_block->attr.page_size;
1726
1727	msb->pages_in_block = boot_block->attr.block_size * 2;
1728	msb->block_size = msb->page_size * msb->pages_in_block;
1729
1730	if (msb->page_size > PAGE_SIZE) {
1731		/* this isn't supported by Linux at all, anyway */
1732		dbg("device page size %d isn't supported", msb->page_size);
1733		return -EINVAL;
1734	}
1735
1736	msb->block_buffer = kzalloc(msb->block_size, GFP_KERNEL);
1737	if (!msb->block_buffer)
1738		return -ENOMEM;
1739
1740	raw_size_in_megs = (msb->block_size * msb->block_count) >> 20;
1741
1742	for (i = 0; chs_table[i].size; i++) {
1743
1744		if (chs_table[i].size != raw_size_in_megs)
1745			continue;
1746
1747		msb->geometry.cylinders = chs_table[i].cyl;
1748		msb->geometry.heads = chs_table[i].head;
1749		msb->geometry.sectors = chs_table[i].sec;
1750		break;
1751	}
1752
1753	if (boot_block->attr.transfer_supporting == 1)
1754		msb->caps |= MEMSTICK_CAP_PAR4;
1755
1756	if (boot_block->attr.device_type & 0x03)
1757		msb->read_only = true;
1758
1759	dbg("Total block count = %d", msb->block_count);
1760	dbg("Each block consists of %d pages", msb->pages_in_block);
1761	dbg("Page size = %d bytes", msb->page_size);
1762	dbg("Parallel mode supported: %d", !!(msb->caps & MEMSTICK_CAP_PAR4));
1763	dbg("Read only: %d", msb->read_only);
1764
1765#if 0
1766	/* Now we can switch the interface */
1767	if (host->caps & msb->caps & MEMSTICK_CAP_PAR4)
1768		msb_switch_to_parallel(msb);
1769#endif
1770
1771	error = msb_cache_init(msb);
1772	if (error)
1773		return error;
1774
1775	error = msb_ftl_initialize(msb);
1776	if (error)
1777		return error;
1778
1779
1780	/* Read the bad block table */
1781	error = msb_read_bad_block_table(msb, 0);
1782
1783	if (error && error != -ENOMEM) {
1784		dbg("failed to read bad block table from primary boot block, trying from backup");
1785		error = msb_read_bad_block_table(msb, 1);
1786	}
1787
1788	if (error)
1789		return error;
1790
1791	/* *drum roll* Scan the media */
1792	error = msb_ftl_scan(msb);
1793	if (error) {
1794		pr_err("Scan of media failed");
1795		return error;
1796	}
1797
1798	return 0;
1799
1800}
1801
1802static int msb_do_write_request(struct msb_data *msb, int lba,
1803	int page, struct scatterlist *sg, size_t len, int *successfully_written)
1804{
1805	int error = 0;
1806	off_t offset = 0;
1807	*successfully_written = 0;
1808
1809	while (offset < len) {
1810		if (page == 0 && len - offset >= msb->block_size) {
1811
1812			if (msb->cache_block_lba == lba)
1813				msb_cache_discard(msb);
1814
1815			dbg_verbose("Writing whole lba %d", lba);
1816			error = msb_update_block(msb, lba, sg, offset);
1817			if (error)
1818				return error;
1819
1820			offset += msb->block_size;
1821			*successfully_written += msb->block_size;
1822			lba++;
1823			continue;
1824		}
1825
1826		error = msb_cache_write(msb, lba, page, false, sg, offset);
1827		if (error)
1828			return error;
1829
1830		offset += msb->page_size;
1831		*successfully_written += msb->page_size;
1832
1833		page++;
1834		if (page == msb->pages_in_block) {
1835			page = 0;
1836			lba++;
1837		}
1838	}
1839	return 0;
1840}
1841
1842static int msb_do_read_request(struct msb_data *msb, int lba,
1843		int page, struct scatterlist *sg, int len, int *successfully_read)
1844{
1845	int error = 0;
1846	int offset = 0;
1847	*successfully_read = 0;
1848
1849	while (offset < len) {
1850
1851		error = msb_cache_read(msb, lba, page, sg, offset);
1852		if (error)
1853			return error;
1854
1855		offset += msb->page_size;
1856		*successfully_read += msb->page_size;
1857
1858		page++;
1859		if (page == msb->pages_in_block) {
1860			page = 0;
1861			lba++;
1862		}
1863	}
1864	return 0;
1865}
1866
1867static void msb_io_work(struct work_struct *work)
1868{
1869	struct msb_data *msb = container_of(work, struct msb_data, io_work);
1870	int page, error, len;
1871	sector_t lba;
1872	struct scatterlist *sg = msb->prealloc_sg;
1873	struct request *req;
1874
1875	dbg_verbose("IO: work started");
1876
1877	while (1) {
1878		spin_lock_irq(&msb->q_lock);
1879
1880		if (msb->need_flush_cache) {
1881			msb->need_flush_cache = false;
1882			spin_unlock_irq(&msb->q_lock);
1883			msb_cache_flush(msb);
1884			continue;
1885		}
1886
1887		req = msb->req;
1888		if (!req) {
1889			dbg_verbose("IO: no more requests, exiting");
1890			spin_unlock_irq(&msb->q_lock);
1891			return;
1892		}
1893
1894		spin_unlock_irq(&msb->q_lock);
1895
1896		/* process the request */
1897		dbg_verbose("IO: processing new request");
1898		blk_rq_map_sg(msb->queue, req, sg);
1899
1900		lba = blk_rq_pos(req);
1901
1902		sector_div(lba, msb->page_size / 512);
1903		page = sector_div(lba, msb->pages_in_block);
1904
1905		if (rq_data_dir(msb->req) == READ)
1906			error = msb_do_read_request(msb, lba, page, sg,
1907				blk_rq_bytes(req), &len);
1908		else
1909			error = msb_do_write_request(msb, lba, page, sg,
1910				blk_rq_bytes(req), &len);
1911
1912		if (len && !blk_update_request(req, BLK_STS_OK, len)) {
1913			__blk_mq_end_request(req, BLK_STS_OK);
1914			spin_lock_irq(&msb->q_lock);
1915			msb->req = NULL;
1916			spin_unlock_irq(&msb->q_lock);
1917		}
1918
1919		if (error && msb->req) {
1920			blk_status_t ret = errno_to_blk_status(error);
1921
1922			dbg_verbose("IO: ending one sector of the request with error");
1923			blk_mq_end_request(req, ret);
1924			spin_lock_irq(&msb->q_lock);
1925			msb->req = NULL;
1926			spin_unlock_irq(&msb->q_lock);
1927		}
1928
1929		if (msb->req)
1930			dbg_verbose("IO: request still pending");
1931	}
1932}
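
/*
 * Worked example of the position math above, assuming a card with
 * 2048-byte pages and 16 pages per block: a request starting at 512-byte
 * sector 100 is first scaled by page_size / 512 == 4, giving page index
 * 25 from the start of the media; the second sector_div() then splits
 * that into lba 1, page 9 (25 == 1 * 16 + 9).
 */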
1933
1934static DEFINE_IDR(msb_disk_idr); /* set of used disk numbers */
1935static DEFINE_MUTEX(msb_disk_lock); /* protects against races in open/release */
1936
1937static int msb_bd_open(struct block_device *bdev, fmode_t mode)
1938{
1939	struct gendisk *disk = bdev->bd_disk;
1940	struct msb_data *msb = disk->private_data;
1941
1942	dbg_verbose("block device open");
1943
1944	mutex_lock(&msb_disk_lock);
1945
1946	if (msb && msb->card)
1947		msb->usage_count++;
1948
1949	mutex_unlock(&msb_disk_lock);
1950	return 0;
1951}
1952
1953static void msb_data_clear(struct msb_data *msb)
1954{
1955	kfree(msb->boot_page);
1956	kfree(msb->used_blocks_bitmap);
1957	kfree(msb->lba_to_pba_table);
1958	kfree(msb->cache);
1959	msb->card = NULL;
1960}
1961
1962static int msb_disk_release(struct gendisk *disk)
1963{
1964	struct msb_data *msb = disk->private_data;
1965
1966	dbg_verbose("block device release");
1967	mutex_lock(&msb_disk_lock);
1968
1969	if (msb) {
1970		if (msb->usage_count)
1971			msb->usage_count--;
1972
1973		if (!msb->usage_count) {
1974			disk->private_data = NULL;
1975			idr_remove(&msb_disk_idr, msb->disk_id);
1976			put_disk(disk);
1977			kfree(msb);
1978		}
1979	}
1980	mutex_unlock(&msb_disk_lock);
1981	return 0;
1982}
1983
1984static void msb_bd_release(struct gendisk *disk, fmode_t mode)
1985{
1986	msb_disk_release(disk);
1987}
1988
1989static int msb_bd_getgeo(struct block_device *bdev,
1990				 struct hd_geometry *geo)
1991{
1992	struct msb_data *msb = bdev->bd_disk->private_data;
1993	*geo = msb->geometry;
1994	return 0;
1995}
1996
1997static blk_status_t msb_queue_rq(struct blk_mq_hw_ctx *hctx,
1998				 const struct blk_mq_queue_data *bd)
1999{
2000	struct memstick_dev *card = hctx->queue->queuedata;
2001	struct msb_data *msb = memstick_get_drvdata(card);
2002	struct request *req = bd->rq;
2003
2004	dbg_verbose("Submit request");
2005
2006	spin_lock_irq(&msb->q_lock);
2007
2008	if (msb->card_dead) {
2009		dbg("Refusing requests on removed card");
2010
2011		WARN_ON(!msb->io_queue_stopped);
2012
2013		spin_unlock_irq(&msb->q_lock);
2014		blk_mq_start_request(req);
2015		return BLK_STS_IOERR;
2016	}
2017
2018	if (msb->req) {
2019		spin_unlock_irq(&msb->q_lock);
2020		return BLK_STS_DEV_RESOURCE;
2021	}
2022
2023	blk_mq_start_request(req);
2024	msb->req = req;
2025
2026	if (!msb->io_queue_stopped)
2027		queue_work(msb->io_queue, &msb->io_work);
2028
2029	spin_unlock_irq(&msb->q_lock);
2030	return BLK_STS_OK;
2031}
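/*
 * Note on the queueing model: the driver keeps at most one request in
 * flight (msb->req). Returning BLK_STS_DEV_RESOURCE above tells blk-mq
 * to retry the request later, and the actual transfer is done by
 * msb_io_work() on the ordered workqueue.
 */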
2032
2033static int msb_check_card(struct memstick_dev *card)
2034{
2035	struct msb_data *msb = memstick_get_drvdata(card);
2036	return (msb->card_dead == 0);
2037}
2038
2039static void msb_stop(struct memstick_dev *card)
2040{
2041	struct msb_data *msb = memstick_get_drvdata(card);
2042	unsigned long flags;
2043
2044	dbg("Stopping all msblock IO");
2045
2046	blk_mq_stop_hw_queues(msb->queue);
2047	spin_lock_irqsave(&msb->q_lock, flags);
2048	msb->io_queue_stopped = true;
2049	spin_unlock_irqrestore(&msb->q_lock, flags);
2050
2051	del_timer_sync(&msb->cache_flush_timer);
2052	flush_workqueue(msb->io_queue);
2053
2054	spin_lock_irqsave(&msb->q_lock, flags);
2055	if (msb->req) {
2056		blk_mq_requeue_request(msb->req, false);
2057		msb->req = NULL;
2058	}
2059	spin_unlock_irqrestore(&msb->q_lock, flags);
2060}
2061
2062static void msb_start(struct memstick_dev *card)
2063{
2064	struct msb_data *msb = memstick_get_drvdata(card);
2065	unsigned long flags;
2066
2067	dbg("Resuming IO from msblock");
2068
2069	msb_invalidate_reg_window(msb);
2070
2071	spin_lock_irqsave(&msb->q_lock, flags);
2072	if (!msb->io_queue_stopped || msb->card_dead) {
2073		spin_unlock_irqrestore(&msb->q_lock, flags);
2074		return;
2075	}
2076	spin_unlock_irqrestore(&msb->q_lock, flags);
2077
2078	/* Kick a cache flush anyway, it's harmless */
2079	msb->need_flush_cache = true;
2080	msb->io_queue_stopped = false;
2081
2082	blk_mq_start_hw_queues(msb->queue);
2083
2084	queue_work(msb->io_queue, &msb->io_work);
2085
2086}
2087
2088static const struct block_device_operations msb_bdops = {
2089	.open    = msb_bd_open,
2090	.release = msb_bd_release,
2091	.getgeo  = msb_bd_getgeo,
2092	.owner   = THIS_MODULE
2093};
2094
2095static const struct blk_mq_ops msb_mq_ops = {
2096	.queue_rq	= msb_queue_rq,
2097};
2098
2099/* Registers the block device */
2100static int msb_init_disk(struct memstick_dev *card)
2101{
2102	struct msb_data *msb = memstick_get_drvdata(card);
2103	int rc;
2104	unsigned long capacity;
2105
2106	mutex_lock(&msb_disk_lock);
2107	msb->disk_id = idr_alloc(&msb_disk_idr, card, 0, 256, GFP_KERNEL);
2108	mutex_unlock(&msb_disk_lock);
2109
2110	if (msb->disk_id  < 0)
2111		return msb->disk_id;
2112
2113	msb->disk = alloc_disk(0);
2114	if (!msb->disk) {
2115		rc = -ENOMEM;
2116		goto out_release_id;
2117	}
2118
2119	msb->queue = blk_mq_init_sq_queue(&msb->tag_set, &msb_mq_ops, 2,
2120						BLK_MQ_F_SHOULD_MERGE);
2121	if (IS_ERR(msb->queue)) {
2122		rc = PTR_ERR(msb->queue);
2123		msb->queue = NULL;
2124		goto out_put_disk;
2125	}
2126
2127	msb->queue->queuedata = card;
2128
2129	blk_queue_max_hw_sectors(msb->queue, MS_BLOCK_MAX_PAGES);
2130	blk_queue_max_segments(msb->queue, MS_BLOCK_MAX_SEGS);
2131	blk_queue_max_segment_size(msb->queue,
2132				   MS_BLOCK_MAX_PAGES * msb->page_size);
2133	blk_queue_logical_block_size(msb->queue, msb->page_size);
2134
2135	sprintf(msb->disk->disk_name, "msblk%d", msb->disk_id);
2136	msb->disk->fops = &msb_bdops;
2137	msb->disk->private_data = msb;
2138	msb->disk->queue = msb->queue;
2139	msb->disk->flags |= GENHD_FL_EXT_DEVT;
2140
2141	capacity = msb->pages_in_block * msb->logical_block_count;
2142	capacity *= (msb->page_size / 512);
2143	set_capacity(msb->disk, capacity);
2144	dbg("Set total disk size to %lu sectors", capacity);
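	/*
	 * Example of the capacity math (illustrative values only): with
	 * 512-byte pages, pages_in_block = 16 and logical_block_count =
	 * 7934, capacity = 16 * 7934 * 1 = 126944 sectors, roughly 62 MiB.
	 */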
2145
2146	msb->usage_count = 1;
2147	msb->io_queue = alloc_ordered_workqueue("ms_block", WQ_MEM_RECLAIM);
2148	INIT_WORK(&msb->io_work, msb_io_work);
2149	sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);
2150
2151	if (msb->read_only)
2152		set_disk_ro(msb->disk, 1);
2153
2154	msb_start(card);
2155	device_add_disk(&card->dev, msb->disk, NULL);
2156	dbg("Disk added");
2157	return 0;
2158
2159out_put_disk:
2160	put_disk(msb->disk);
2161out_release_id:
2162	mutex_lock(&msb_disk_lock);
2163	idr_remove(&msb_disk_idr, msb->disk_id);
2164	mutex_unlock(&msb_disk_lock);
2165	return rc;
2166}
2167
2168static int msb_probe(struct memstick_dev *card)
2169{
2170	struct msb_data *msb;
2171	int rc = 0;
2172
2173	msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
2174	if (!msb)
2175		return -ENOMEM;
2176	memstick_set_drvdata(card, msb);
2177	msb->card = card;
2178	spin_lock_init(&msb->q_lock);
2179
2180	rc = msb_init_card(card);
2181	if (rc)
2182		goto out_free;
2183
2184	rc = msb_init_disk(card);
2185	if (!rc) {
2186		card->check = msb_check_card;
2187		card->stop = msb_stop;
2188		card->start = msb_start;
2189		return 0;
2190	}
2191out_free:
2192	memstick_set_drvdata(card, NULL);
2193	msb_data_clear(msb);
2194	kfree(msb);
2195	return rc;
2196}
2197
2198static void msb_remove(struct memstick_dev *card)
2199{
2200	struct msb_data *msb = memstick_get_drvdata(card);
2201	unsigned long flags;
2202
2203	if (!msb->io_queue_stopped)
2204		msb_stop(card);
2205
2206	dbg("Removing the disk device");
2207
2208	/* Take care of unhandled + new requests from now on */
2209	spin_lock_irqsave(&msb->q_lock, flags);
2210	msb->card_dead = true;
2211	spin_unlock_irqrestore(&msb->q_lock, flags);
2212	blk_mq_start_hw_queues(msb->queue);
2213
2214	/* Remove the disk */
2215	del_gendisk(msb->disk);
2216	blk_cleanup_queue(msb->queue);
2217	blk_mq_free_tag_set(&msb->tag_set);
2218	msb->queue = NULL;
2219
2220	mutex_lock(&msb_disk_lock);
2221	msb_data_clear(msb);
2222	mutex_unlock(&msb_disk_lock);
2223
2224	msb_disk_release(msb->disk);
2225	memstick_set_drvdata(card, NULL);
2226}
2227
2228#ifdef CONFIG_PM
2229
2230static int msb_suspend(struct memstick_dev *card, pm_message_t state)
2231{
2232	msb_stop(card);
2233	return 0;
2234}
2235
2236static int msb_resume(struct memstick_dev *card)
2237{
2238	struct msb_data *msb = memstick_get_drvdata(card);
2239	struct msb_data *new_msb = NULL;
2240	bool card_dead = true;
2241
2242#ifndef CONFIG_MEMSTICK_UNSAFE_RESUME
2243	msb->card_dead = true;
2244	return 0;
2245#endif
2246	mutex_lock(&card->host->lock);
2247
2248	new_msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
2249	if (!new_msb)
2250		goto out;
2251
2252	new_msb->card = card;
2253	memstick_set_drvdata(card, new_msb);
2254	spin_lock_init(&new_msb->q_lock);
2255	sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);
2256
2257	if (msb_init_card(card))
2258		goto out;
2259
2260	if (msb->block_size != new_msb->block_size)
2261		goto out;
2262
2263	if (memcmp(msb->boot_page, new_msb->boot_page,
2264					sizeof(struct ms_boot_page)))
2265		goto out;
2266
2267	if (msb->logical_block_count != new_msb->logical_block_count ||
2268		memcmp(msb->lba_to_pba_table, new_msb->lba_to_pba_table,
2269						msb->logical_block_count))
2270		goto out;
2271
2272	if (msb->block_count != new_msb->block_count ||
2273		memcmp(msb->used_blocks_bitmap, new_msb->used_blocks_bitmap,
2274							msb->block_count / 8))
2275		goto out;
2276
2277	card_dead = false;
2278out:
2279	if (card_dead)
2280		dbg("Card was removed/replaced during suspend");
2281
2282	msb->card_dead = card_dead;
2283	memstick_set_drvdata(card, msb);
2284
2285	if (new_msb) {
2286		msb_data_clear(new_msb);
2287		kfree(new_msb);
2288	}
2289
2290	msb_start(card);
2291	mutex_unlock(&card->host->lock);
2292	return 0;
2293}
2294#else
2295
2296#define msb_suspend NULL
2297#define msb_resume NULL
2298
2299#endif /* CONFIG_PM */
2300
2301static struct memstick_device_id msb_id_tbl[] = {
2302	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
2303	 MEMSTICK_CLASS_FLASH},
2304
2305	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
2306	 MEMSTICK_CLASS_ROM},
2307
2308	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
2309	 MEMSTICK_CLASS_RO},
2310
2311	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
2312	 MEMSTICK_CLASS_WP},
2313
2314	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_DUO, MEMSTICK_CATEGORY_STORAGE_DUO,
2315	 MEMSTICK_CLASS_DUO},
2316	{}
2317};
2318MODULE_DEVICE_TABLE(memstick, msb_id_tbl);
2319
2320
2321static struct memstick_driver msb_driver = {
2322	.driver = {
2323		.name  = DRIVER_NAME,
2324		.owner = THIS_MODULE
2325	},
2326	.id_table = msb_id_tbl,
2327	.probe    = msb_probe,
2328	.remove   = msb_remove,
2329	.suspend  = msb_suspend,
2330	.resume   = msb_resume
2331};
2332
2333static int __init msb_init(void)
2334{
2335	int rc = memstick_register_driver(&msb_driver);
2336	if (rc)
2337		pr_err("failed to register memstick driver (error %d)\n", rc);
2338
2339	return rc;
2340}
2341
2342static void __exit msb_exit(void)
2343{
2344	memstick_unregister_driver(&msb_driver);
2345	idr_destroy(&msb_disk_idr);
2346}
2347
2348module_init(msb_init);
2349module_exit(msb_exit);
2350
2351module_param(cache_flush_timeout, int, S_IRUGO);
2352MODULE_PARM_DESC(cache_flush_timeout,
2353				"Cache flush timeout in msec (1000 default)");
2354module_param(debug, int, S_IRUGO | S_IWUSR);
2355MODULE_PARM_DESC(debug, "Debug level (0-2)");
2356
2357module_param(verify_writes, bool, S_IRUGO);
2358MODULE_PARM_DESC(verify_writes, "Read back and check all data that is written");
2359
2360MODULE_LICENSE("GPL");
2361MODULE_AUTHOR("Maxim Levitsky");
2362MODULE_DESCRIPTION("Sony MemoryStick block device driver");
v5.9
 127/* Get zone at which block with logical address 'lba' lives
 128 * Flash is broken into zones.
 129 * Each zone consists of 512 eraseblocks; the first zone maps
 130 * 494 logical blocks and each following zone maps 496.
 131 * Therefore zone #0 hosts blocks 0-493, zone #1 blocks 494-989, etc...
 132*/
 133static int msb_get_zone_from_lba(int lba)
 134{
 135	if (lba < 494)
 136		return 0;
 137	return ((lba - 494) / 496) + 1;
 138}
 139
 140/* Get zone of physical block. Trivial */
 141static int msb_get_zone_from_pba(int pba)
 142{
 143	return pba / MS_BLOCKS_IN_ZONE;
 144}
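/*
 * Examples of the two mappings above: msb_get_zone_from_lba(100) = 0,
 * msb_get_zone_from_lba(494) = 1 and msb_get_zone_from_lba(990) = 2,
 * while physical blocks map uniformly; taking MS_BLOCKS_IN_ZONE as 512
 * (per the comment above), msb_get_zone_from_pba(1000) = 1000 / 512 = 1.
 */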
 145
 146/* Debug test to validate free block counts */
 147static int msb_validate_used_block_bitmap(struct msb_data *msb)
 148{
 149	int total_free_blocks = 0;
 150	int i;
 151
 152	if (!debug)
 153		return 0;
 154
 155	for (i = 0; i < msb->zone_count; i++)
 156		total_free_blocks += msb->free_block_count[i];
 157
 158	if (msb->block_count - bitmap_weight(msb->used_blocks_bitmap,
 159					msb->block_count) == total_free_blocks)
 160		return 0;
 161
 162	pr_err("BUG: free block counts don't match the bitmap");
 163	msb->read_only = true;
 164	return -EINVAL;
 165}
 166
 167/* Mark physical block as used */
 168static void msb_mark_block_used(struct msb_data *msb, int pba)
 169{
 170	int zone = msb_get_zone_from_pba(pba);
 171
 172	if (test_bit(pba, msb->used_blocks_bitmap)) {
 173		pr_err(
 174		"BUG: attempt to mark already used pba %d as used", pba);
 175		msb->read_only = true;
 176		return;
 177	}
 178
 179	if (msb_validate_used_block_bitmap(msb))
 180		return;
 181
 182	/* No races because all IO is single threaded */
 183	__set_bit(pba, msb->used_blocks_bitmap);
 184	msb->free_block_count[zone]--;
 185}
 186
 187/* Mark physical block as free */
 188static void msb_mark_block_unused(struct msb_data *msb, int pba)
 189{
 190	int zone = msb_get_zone_from_pba(pba);
 191
 192	if (!test_bit(pba, msb->used_blocks_bitmap)) {
 193		pr_err("BUG: attempt to mark already unused pba %d as unused", pba);
 194		msb->read_only = true;
 195		return;
 196	}
 197
 198	if (msb_validate_used_block_bitmap(msb))
 199		return;
 200
 201	/* No races because all IO is single threaded */
 202	__clear_bit(pba, msb->used_blocks_bitmap);
 203	msb->free_block_count[zone]++;
 204}
 205
 206/* Invalidate current register window */
 207static void msb_invalidate_reg_window(struct msb_data *msb)
 208{
 209	msb->reg_addr.w_offset = offsetof(struct ms_register, id);
 210	msb->reg_addr.w_length = sizeof(struct ms_id_register);
 211	msb->reg_addr.r_offset = offsetof(struct ms_register, id);
 212	msb->reg_addr.r_length = sizeof(struct ms_id_register);
 213	msb->addr_valid = false;
 214}
 215
 216/* Start a state machine */
 217static int msb_run_state_machine(struct msb_data *msb, int   (*state_func)
 218		(struct memstick_dev *card, struct memstick_request **req))
 219{
 220	struct memstick_dev *card = msb->card;
 221
 222	WARN_ON(msb->state != -1);
 223	msb->int_polling = false;
 224	msb->state = 0;
 225	msb->exit_error = 0;
 226
 227	memset(&card->current_mrq, 0, sizeof(card->current_mrq));
 228
 229	card->next_request = state_func;
 230	memstick_new_req(card->host);
 231	wait_for_completion(&card->mrq_complete);
 232
 233	WARN_ON(msb->state != -1);
 234	return msb->exit_error;
 235}
 236
 237/* State machine handlers call this to exit */
 238static int msb_exit_state_machine(struct msb_data *msb, int error)
 239{
 240	WARN_ON(msb->state == -1);
 241
 242	msb->state = -1;
 243	msb->exit_error = error;
 244	msb->card->next_request = h_msb_default_bad;
 245
 246	/* Invalidate reg window on errors */
 247	if (error)
 248		msb_invalidate_reg_window(msb);
 249
 250	complete(&msb->card->mrq_complete);
 251	return -ENXIO;
 252}
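/*
 * Minimal sketch of a handler's shape (the states here are made up; the
 * real handlers follow below): on each call it either queues the next
 * TPC and returns 0, or finishes via msb_exit_state_machine():
 *
 *	switch (msb->state) {
 *	case MY_STATE_SEND:
 *		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
 *		msb->state = MY_STATE_CONFIRM;
 *		return 0;
 *	case MY_STATE_CONFIRM:
 *		return msb_exit_state_machine(msb, 0);
 *	}
 */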
 253
 254/* read INT register */
 255static int msb_read_int_reg(struct msb_data *msb, long timeout)
 256{
 257	struct memstick_request *mrq = &msb->card->current_mrq;
 258
 259	WARN_ON(msb->state == -1);
 260
 261	if (!msb->int_polling) {
 262		msb->int_timeout = jiffies +
 263			msecs_to_jiffies(timeout == -1 ? 500 : timeout);
 264		msb->int_polling = true;
 265	} else if (time_after(jiffies, msb->int_timeout)) {
 266		mrq->data[0] = MEMSTICK_INT_CMDNAK;
 267		return 0;
 268	}
 269
 270	if ((msb->caps & MEMSTICK_CAP_AUTO_GET_INT) &&
 271				mrq->need_card_int && !mrq->error) {
 272		mrq->data[0] = mrq->int_reg;
 273		mrq->need_card_int = false;
 274		return 0;
 275	} else {
 276		memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
 277		return 1;
 278	}
 279}
 280
 281/* Read a register */
 282static int msb_read_regs(struct msb_data *msb, int offset, int len)
 283{
 284	struct memstick_request *req = &msb->card->current_mrq;
 285
 286	if (msb->reg_addr.r_offset != offset ||
 287	    msb->reg_addr.r_length != len || !msb->addr_valid) {
 288
 289		msb->reg_addr.r_offset = offset;
 290		msb->reg_addr.r_length = len;
 291		msb->addr_valid = true;
 292
 293		memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
 294			&msb->reg_addr, sizeof(msb->reg_addr));
 295		return 0;
 296	}
 297
 298	memstick_init_req(req, MS_TPC_READ_REG, NULL, len);
 299	return 1;
 300}
 301
 302/* Write a card register */
 303static int msb_write_regs(struct msb_data *msb, int offset, int len, void *buf)
 304{
 305	struct memstick_request *req = &msb->card->current_mrq;
 306
 307	if (msb->reg_addr.w_offset != offset ||
 308		msb->reg_addr.w_length != len  || !msb->addr_valid) {
 309
 310		msb->reg_addr.w_offset = offset;
 311		msb->reg_addr.w_length = len;
 312		msb->addr_valid = true;
 313
 314		memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
 315			&msb->reg_addr, sizeof(msb->reg_addr));
 316		return 0;
 317	}
 318
 319	memstick_init_req(req, MS_TPC_WRITE_REG, buf, len);
 320	return 1;
 321}
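/*
 * Both register helpers above share a convention: they return 0 when a
 * MS_TPC_SET_RW_REG_ADRS request had to be queued first to (re)program
 * the register window, and 1 when the actual MS_TPC_READ_REG or
 * MS_TPC_WRITE_REG request was queued. State handlers therefore repeat
 * the same state when they get 0.
 */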
 322
 323/* Handler for absence of IO */
 324static int h_msb_default_bad(struct memstick_dev *card,
 325						struct memstick_request **mrq)
 326{
 327	return -ENXIO;
 328}
 329
 330/*
 331 * This function is a handler for reads of one page from the device.
 332 * Writes output to msb->current_sg, takes the sector address from
 333 * msb->regs.param. Can also be used to read extra data only; set params accordingly.
 334 */
 335static int h_msb_read_page(struct memstick_dev *card,
 336					struct memstick_request **out_mrq)
 337{
 338	struct msb_data *msb = memstick_get_drvdata(card);
 339	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
 340	struct scatterlist sg[2];
 341	u8 command, intreg;
 342
 343	if (mrq->error) {
 344		dbg("read_page, unknown error");
 345		return msb_exit_state_machine(msb, mrq->error);
 346	}
 347again:
 348	switch (msb->state) {
 349	case MSB_RP_SEND_BLOCK_ADDRESS:
 350		/* msb_write_regs sometimes "fails" because it first needs to
 351			update the reg window, and thus queues a request for that.
 352			In that case we stay in this state and retry */
 353		if (!msb_write_regs(msb,
 354			offsetof(struct ms_register, param),
 355			sizeof(struct ms_param_register),
 356			(unsigned char *)&msb->regs.param))
 357			return 0;
 358
 359		msb->state = MSB_RP_SEND_READ_COMMAND;
 360		return 0;
 361
 362	case MSB_RP_SEND_READ_COMMAND:
 363		command = MS_CMD_BLOCK_READ;
 364		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
 365		msb->state = MSB_RP_SEND_INT_REQ;
 366		return 0;
 367
 368	case MSB_RP_SEND_INT_REQ:
 369		msb->state = MSB_RP_RECEIVE_INT_REQ_RESULT;
 370		/* If we don't actually need to send the INT read request (only
 371			needed in serial mode), then just fall through */
 372		if (msb_read_int_reg(msb, -1))
 373			return 0;
 374		fallthrough;
 375
 376	case MSB_RP_RECEIVE_INT_REQ_RESULT:
 377		intreg = mrq->data[0];
 378		msb->regs.status.interrupt = intreg;
 379
 380		if (intreg & MEMSTICK_INT_CMDNAK)
 381			return msb_exit_state_machine(msb, -EIO);
 382
 383		if (!(intreg & MEMSTICK_INT_CED)) {
 384			msb->state = MSB_RP_SEND_INT_REQ;
 385			goto again;
 386		}
 387
 388		msb->int_polling = false;
 389		msb->state = (intreg & MEMSTICK_INT_ERR) ?
 390			MSB_RP_SEND_READ_STATUS_REG : MSB_RP_SEND_OOB_READ;
 391		goto again;
 392
 393	case MSB_RP_SEND_READ_STATUS_REG:
 394		 /* read the status register to find the source of the INT_ERR */
 395		if (!msb_read_regs(msb,
 396			offsetof(struct ms_register, status),
 397			sizeof(struct ms_status_register)))
 398			return 0;
 399
 400		msb->state = MSB_RP_RECEIVE_STATUS_REG;
 401		return 0;
 402
 403	case MSB_RP_RECEIVE_STATUS_REG:
 404		msb->regs.status = *(struct ms_status_register *)mrq->data;
 405		msb->state = MSB_RP_SEND_OOB_READ;
 406		fallthrough;
 407
 408	case MSB_RP_SEND_OOB_READ:
 409		if (!msb_read_regs(msb,
 410			offsetof(struct ms_register, extra_data),
 411			sizeof(struct ms_extra_data_register)))
 412			return 0;
 413
 414		msb->state = MSB_RP_RECEIVE_OOB_READ;
 415		return 0;
 416
 417	case MSB_RP_RECEIVE_OOB_READ:
 418		msb->regs.extra_data =
 419			*(struct ms_extra_data_register *) mrq->data;
 420		msb->state = MSB_RP_SEND_READ_DATA;
 421		fallthrough;
 422
 423	case MSB_RP_SEND_READ_DATA:
 424		/* Skip that state if we only read the oob */
 425		if (msb->regs.param.cp == MEMSTICK_CP_EXTRA) {
 426			msb->state = MSB_RP_RECEIVE_READ_DATA;
 427			goto again;
 428		}
 429
 430		sg_init_table(sg, ARRAY_SIZE(sg));
 431		msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
 432			msb->current_sg_offset,
 433			msb->page_size);
 434
 435		memstick_init_req_sg(mrq, MS_TPC_READ_LONG_DATA, sg);
 436		msb->state = MSB_RP_RECEIVE_READ_DATA;
 437		return 0;
 438
 439	case MSB_RP_RECEIVE_READ_DATA:
 440		if (!(msb->regs.status.interrupt & MEMSTICK_INT_ERR)) {
 441			msb->current_sg_offset += msb->page_size;
 442			return msb_exit_state_machine(msb, 0);
 443		}
 444
 445		if (msb->regs.status.status1 & MEMSTICK_UNCORR_ERROR) {
 446			dbg("read_page: uncorrectable error");
 447			return msb_exit_state_machine(msb, -EBADMSG);
 448		}
 449
 450		if (msb->regs.status.status1 & MEMSTICK_CORR_ERROR) {
 451			dbg("read_page: correctable error");
 452			msb->current_sg_offset += msb->page_size;
 453			return msb_exit_state_machine(msb, -EUCLEAN);
 454		} else {
 455			dbg("read_page: INT error, but no status error bits");
 456			return msb_exit_state_machine(msb, -EIO);
 457		}
 458	}
 459
 460	BUG();
 461}
 462
 463/*
 464 * Handler of writes of exactly one block.
 465 * Takes address from msb->regs.param.
 466 * Writes same extra data to blocks, also taken
 467 * from msb->regs.extra
 468 * Returns -EBADMSG if write fails due to uncorrectable error, or -EIO if
 469 * device refuses to take the command or something else
 470 */
 471static int h_msb_write_block(struct memstick_dev *card,
 472					struct memstick_request **out_mrq)
 473{
 474	struct msb_data *msb = memstick_get_drvdata(card);
 475	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
 476	struct scatterlist sg[2];
 477	u8 intreg, command;
 478
 479	if (mrq->error)
 480		return msb_exit_state_machine(msb, mrq->error);
 481
 482again:
 483	switch (msb->state) {
 484
 485	/* HACK: JMicron handling of TPCs between 8 and
 486	 *	sizeof(memstick_request.data) is broken due to a hardware
 487	 *	bug in the PIO mode that is used for these TPCs.
 488	 *	Therefore split the write.
 489	 */
 490
 491	case MSB_WB_SEND_WRITE_PARAMS:
 492		if (!msb_write_regs(msb,
 493			offsetof(struct ms_register, param),
 494			sizeof(struct ms_param_register),
 495			&msb->regs.param))
 496			return 0;
 497
 498		msb->state = MSB_WB_SEND_WRITE_OOB;
 499		return 0;
 500
 501	case MSB_WB_SEND_WRITE_OOB:
 502		if (!msb_write_regs(msb,
 503			offsetof(struct ms_register, extra_data),
 504			sizeof(struct ms_extra_data_register),
 505			&msb->regs.extra_data))
 506			return 0;
 507		msb->state = MSB_WB_SEND_WRITE_COMMAND;
 508		return 0;
 509
 510
 511	case MSB_WB_SEND_WRITE_COMMAND:
 512		command = MS_CMD_BLOCK_WRITE;
 513		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
 514		msb->state = MSB_WB_SEND_INT_REQ;
 515		return 0;
 516
 517	case MSB_WB_SEND_INT_REQ:
 518		msb->state = MSB_WB_RECEIVE_INT_REQ;
 519		if (msb_read_int_reg(msb, -1))
 520			return 0;
 521		fallthrough;
 522
 523	case MSB_WB_RECEIVE_INT_REQ:
 524		intreg = mrq->data[0];
 525		msb->regs.status.interrupt = intreg;
 526
 527		/* errors mean out of here, and fast... */
 528		if (intreg & (MEMSTICK_INT_CMDNAK))
 529			return msb_exit_state_machine(msb, -EIO);
 530
 531		if (intreg & MEMSTICK_INT_ERR)
 532			return msb_exit_state_machine(msb, -EBADMSG);
 533
 534
 535		/* for last page we need to poll CED */
 536		if (msb->current_page == msb->pages_in_block) {
 537			if (intreg & MEMSTICK_INT_CED)
 538				return msb_exit_state_machine(msb, 0);
 539			msb->state = MSB_WB_SEND_INT_REQ;
 540			goto again;
 541
 542		}
 543
 544		/* for non-last page we need BREQ before writing next chunk */
 545		if (!(intreg & MEMSTICK_INT_BREQ)) {
 546			msb->state = MSB_WB_SEND_INT_REQ;
 547			goto again;
 548		}
 549
 550		msb->int_polling = false;
 551		msb->state = MSB_WB_SEND_WRITE_DATA;
 552		fallthrough;
 553
 554	case MSB_WB_SEND_WRITE_DATA:
 555		sg_init_table(sg, ARRAY_SIZE(sg));
 556
 557		if (msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
 558			msb->current_sg_offset,
 559			msb->page_size) < msb->page_size)
 560			return msb_exit_state_machine(msb, -EIO);
 561
 562		memstick_init_req_sg(mrq, MS_TPC_WRITE_LONG_DATA, sg);
 563		mrq->need_card_int = 1;
 564		msb->state = MSB_WB_RECEIVE_WRITE_CONFIRMATION;
 565		return 0;
 566
 567	case MSB_WB_RECEIVE_WRITE_CONFIRMATION:
 568		msb->current_page++;
 569		msb->current_sg_offset += msb->page_size;
 570		msb->state = MSB_WB_SEND_INT_REQ;
 571		goto again;
 572	default:
 573		BUG();
 574	}
 575
 576	return 0;
 577}
 578
 579/*
 580 * This function is used to send simple IO requests to the device that
 581 * consist of a register write plus a command
 582 */
 583static int h_msb_send_command(struct memstick_dev *card,
 584					struct memstick_request **out_mrq)
 585{
 586	struct msb_data *msb = memstick_get_drvdata(card);
 587	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
 588	u8 intreg;
 589
 590	if (mrq->error) {
 591		dbg("send_command: unknown error");
 592		return msb_exit_state_machine(msb, mrq->error);
 593	}
 594again:
 595	switch (msb->state) {
 596
 597	/* HACK: see h_msb_write_block */
 598	case MSB_SC_SEND_WRITE_PARAMS: /* write param register*/
 599		if (!msb_write_regs(msb,
 600			offsetof(struct ms_register, param),
 601			sizeof(struct ms_param_register),
 602			&msb->regs.param))
 603			return 0;
 604		msb->state = MSB_SC_SEND_WRITE_OOB;
 605		return 0;
 606
 607	case MSB_SC_SEND_WRITE_OOB:
 608		if (!msb->command_need_oob) {
 609			msb->state = MSB_SC_SEND_COMMAND;
 610			goto again;
 611		}
 612
 613		if (!msb_write_regs(msb,
 614			offsetof(struct ms_register, extra_data),
 615			sizeof(struct ms_extra_data_register),
 616			&msb->regs.extra_data))
 617			return 0;
 618
 619		msb->state = MSB_SC_SEND_COMMAND;
 620		return 0;
 621
 622	case MSB_SC_SEND_COMMAND:
 623		memstick_init_req(mrq, MS_TPC_SET_CMD, &msb->command_value, 1);
 624		msb->state = MSB_SC_SEND_INT_REQ;
 625		return 0;
 626
 627	case MSB_SC_SEND_INT_REQ:
 628		msb->state = MSB_SC_RECEIVE_INT_REQ;
 629		if (msb_read_int_reg(msb, -1))
 630			return 0;
 631		fallthrough;
 632
 633	case MSB_SC_RECEIVE_INT_REQ:
 634		intreg = mrq->data[0];
 635
 636		if (intreg & MEMSTICK_INT_CMDNAK)
 637			return msb_exit_state_machine(msb, -EIO);
 638		if (intreg & MEMSTICK_INT_ERR)
 639			return msb_exit_state_machine(msb, -EBADMSG);
 640
 641		if (!(intreg & MEMSTICK_INT_CED)) {
 642			msb->state = MSB_SC_SEND_INT_REQ;
 643			goto again;
 644		}
 645
 646		return msb_exit_state_machine(msb, 0);
 647	}
 648
 649	BUG();
 650}
 651
 652/* Small handler for card reset */
 653static int h_msb_reset(struct memstick_dev *card,
 654					struct memstick_request **out_mrq)
 655{
 656	u8 command = MS_CMD_RESET;
 657	struct msb_data *msb = memstick_get_drvdata(card);
 658	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
 659
 660	if (mrq->error)
 661		return msb_exit_state_machine(msb, mrq->error);
 662
 663	switch (msb->state) {
 664	case MSB_RS_SEND:
 665		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
 666		mrq->need_card_int = 0;
 667		msb->state = MSB_RS_CONFIRM;
 668		return 0;
 669	case MSB_RS_CONFIRM:
 670		return msb_exit_state_machine(msb, 0);
 671	}
 672	BUG();
 673}
 674
 675/* This handler is used to do the serial->parallel switch */
 676static int h_msb_parallel_switch(struct memstick_dev *card,
 677					struct memstick_request **out_mrq)
 678{
 679	struct msb_data *msb = memstick_get_drvdata(card);
 680	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
 681	struct memstick_host *host = card->host;
 682
 683	if (mrq->error) {
 684		dbg("parallel_switch: error");
 685		msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
 686		return msb_exit_state_machine(msb, mrq->error);
 687	}
 688
 689	switch (msb->state) {
 690	case MSB_PS_SEND_SWITCH_COMMAND:
 691		/* Set the parallel interface on memstick side */
 692		msb->regs.param.system |= MEMSTICK_SYS_PAM;
 693
 694		if (!msb_write_regs(msb,
 695			offsetof(struct ms_register, param),
 696			1,
 697			(unsigned char *)&msb->regs.param))
 698			return 0;
 699
 700		msb->state = MSB_PS_SWICH_HOST;
 701		return 0;
 702
 703	case MSB_PS_SWICH_HOST:
 704		 /* Set the parallel interface on our side and send a dummy
 705			request to see if the card responds */
 706		host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_PAR4);
 707		memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
 708		msb->state = MSB_PS_CONFIRM;
 709		return 0;
 710
 711	case MSB_PS_CONFIRM:
 712		return msb_exit_state_machine(msb, 0);
 713	}
 714
 715	BUG();
 716}
 717
 718static int msb_switch_to_parallel(struct msb_data *msb);
 719
 720/* Reset the card, to guard against hw errors being treated as bad blocks */
 721static int msb_reset(struct msb_data *msb, bool full)
 722{
 723
 724	bool was_parallel = msb->regs.param.system & MEMSTICK_SYS_PAM;
 725	struct memstick_dev *card = msb->card;
 726	struct memstick_host *host = card->host;
 727	int error;
 728
 729	/* Reset the card */
 730	msb->regs.param.system = MEMSTICK_SYS_BAMD;
 731
 732	if (full) {
 733		error =  host->set_param(host,
 734					MEMSTICK_POWER, MEMSTICK_POWER_OFF);
 735		if (error)
 736			goto out_error;
 737
 738		msb_invalidate_reg_window(msb);
 739
 740		error = host->set_param(host,
 741					MEMSTICK_POWER, MEMSTICK_POWER_ON);
 742		if (error)
 743			goto out_error;
 744
 745		error = host->set_param(host,
 746					MEMSTICK_INTERFACE, MEMSTICK_SERIAL);
 747		if (error) {
 748out_error:
 749			dbg("Failed to reset the host controller");
 750			msb->read_only = true;
 751			return -EFAULT;
 752		}
 753	}
 754
 755	error = msb_run_state_machine(msb, h_msb_reset);
 756	if (error) {
 757		dbg("Failed to reset the card");
 758		msb->read_only = true;
 759		return -ENODEV;
 760	}
 761
 762	/* Set parallel mode */
 763	if (was_parallel)
 764		msb_switch_to_parallel(msb);
 765	return 0;
 766}
 767
 768/* Attempts to switch interface to parallel mode */
 769static int msb_switch_to_parallel(struct msb_data *msb)
 770{
 771	int error;
 772
 773	error = msb_run_state_machine(msb, h_msb_parallel_switch);
 774	if (error) {
 775		pr_err("Switch to parallel failed");
 776		msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
 777		msb_reset(msb, true);
 778		return -EFAULT;
 779	}
 780
 781	msb->caps |= MEMSTICK_CAP_AUTO_GET_INT;
 782	return 0;
 783}
 784
 785/* Changes overwrite flag on a page */
 786static int msb_set_overwrite_flag(struct msb_data *msb,
 787						u16 pba, u8 page, u8 flag)
 788{
 789	if (msb->read_only)
 790		return -EROFS;
 791
 792	msb->regs.param.block_address = cpu_to_be16(pba);
 793	msb->regs.param.page_address = page;
 794	msb->regs.param.cp = MEMSTICK_CP_OVERWRITE;
 795	msb->regs.extra_data.overwrite_flag = flag;
 796	msb->command_value = MS_CMD_BLOCK_WRITE;
 797	msb->command_need_oob = true;
 798
 799	dbg_verbose("changing overwrite flag to %02x for sector %d, page %d",
 800							flag, pba, page);
 801	return msb_run_state_machine(msb, h_msb_send_command);
 802}
 803
 804static int msb_mark_bad(struct msb_data *msb, int pba)
 805{
 806	pr_notice("marking pba %d as bad", pba);
 807	msb_reset(msb, true);
 808	return msb_set_overwrite_flag(
 809			msb, pba, 0, 0xFF & ~MEMSTICK_OVERWRITE_BKST);
 810}
 811
 812static int msb_mark_page_bad(struct msb_data *msb, int pba, int page)
 813{
 814	dbg("marking page %d of pba %d as bad", page, pba);
 815	msb_reset(msb, true);
 816	return msb_set_overwrite_flag(msb,
 817		pba, page, ~MEMSTICK_OVERWRITE_PGST0);
 818}
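/*
 * Note on the flag encoding above (a property of NAND flash rather than
 * of this driver): programming can only clear bits, so conditions are
 * recorded by clearing bits that read 0xFF after erase. Clearing
 * MEMSTICK_OVERWRITE_BKST marks the whole block bad, while clearing
 * MEMSTICK_OVERWRITE_PGST0 marks a single page bad.
 */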
 819
 820/* Erases one physical block */
 821static int msb_erase_block(struct msb_data *msb, u16 pba)
 822{
 823	int error, try;
 824	if (msb->read_only)
 825		return -EROFS;
 826
 827	dbg_verbose("erasing pba %d", pba);
 828
 829	for (try = 1; try < 3; try++) {
 830		msb->regs.param.block_address = cpu_to_be16(pba);
 831		msb->regs.param.page_address = 0;
 832		msb->regs.param.cp = MEMSTICK_CP_BLOCK;
 833		msb->command_value = MS_CMD_BLOCK_ERASE;
 834		msb->command_need_oob = false;
 835
 836
 837		error = msb_run_state_machine(msb, h_msb_send_command);
 838		if (!error || msb_reset(msb, true))
 839			break;
 840	}
 841
 842	if (error) {
 843		pr_err("erase failed, marking pba %d as bad", pba);
 844		msb_mark_bad(msb, pba);
 845	}
 846
 847	dbg_verbose("erase success, marking pba %d as unused", pba);
 848	msb_mark_block_unused(msb, pba);
 849	__set_bit(pba, msb->erased_blocks_bitmap);
 850	return error;
 851}
 852
 853/* Reads one page from device */
 854static int msb_read_page(struct msb_data *msb,
 855	u16 pba, u8 page, struct ms_extra_data_register *extra,
 856					struct scatterlist *sg,  int offset)
 857{
 858	int try, error;
 859
 860	if (pba == MS_BLOCK_INVALID) {
 861		unsigned long flags;
 862		struct sg_mapping_iter miter;
 863		size_t len = msb->page_size;
 864
 865		dbg_verbose("read unmapped sector. returning 0xFF");
 866
 867		local_irq_save(flags);
 868		sg_miter_start(&miter, sg, sg_nents(sg),
 869				SG_MITER_ATOMIC | SG_MITER_TO_SG);
 870
 871		while (sg_miter_next(&miter) && len > 0) {
 872
 873			int chunklen;
 874
 875			if (offset && offset >= miter.length) {
 876				offset -= miter.length;
 877				continue;
 878			}
 879
 880			chunklen = min(miter.length - offset, len);
 881			memset(miter.addr + offset, 0xFF, chunklen);
 882			len -= chunklen;
 883			offset = 0;
 884		}
 885
 886		sg_miter_stop(&miter);
 887		local_irq_restore(flags);
 888
 889		if (offset)
 890			return -EFAULT;
 891
 892		if (extra)
 893			memset(extra, 0xFF, sizeof(*extra));
 894		return 0;
 895	}
 896
 897	if (pba >= msb->block_count) {
 898		pr_err("BUG: attempt to read beyond the end of the card at pba %d", pba);
 899		return -EINVAL;
 900	}
 901
 902	for (try = 1; try < 3; try++) {
 903		msb->regs.param.block_address = cpu_to_be16(pba);
 904		msb->regs.param.page_address = page;
 905		msb->regs.param.cp = MEMSTICK_CP_PAGE;
 906
 907		msb->current_sg = sg;
 908		msb->current_sg_offset = offset;
 909		error = msb_run_state_machine(msb, h_msb_read_page);
 910
 911
 912		if (error == -EUCLEAN) {
 913			pr_notice("correctable error on pba %d, page %d",
 914				pba, page);
 915			error = 0;
 916		}
 917
 918		if (!error && extra)
 919			*extra = msb->regs.extra_data;
 920
 921		if (!error || msb_reset(msb, true))
 922			break;
 923
 924	}
 925
 926	/* Mark bad pages */
 927	if (error == -EBADMSG) {
 928		pr_err("uncorrectable error on read of pba %d, page %d",
 929			pba, page);
 930
 931		if (msb->regs.extra_data.overwrite_flag &
 932					MEMSTICK_OVERWRITE_PGST0)
 933			msb_mark_page_bad(msb, pba, page);
 934		return -EBADMSG;
 935	}
 936
 937	if (error)
 938		pr_err("read of pba %d, page %d failed with error %d",
 939			pba, page, error);
 940	return error;
 941}
 942
 943/* Reads oob of page only */
 944static int msb_read_oob(struct msb_data *msb, u16 pba, u16 page,
 945	struct ms_extra_data_register *extra)
 946{
 947	int error;
 948
 949	BUG_ON(!extra);
 950	msb->regs.param.block_address = cpu_to_be16(pba);
 951	msb->regs.param.page_address = page;
 952	msb->regs.param.cp = MEMSTICK_CP_EXTRA;
 953
 954	if (pba > msb->block_count) {
 955		pr_err("BUG: attempt to read beyond the end of card at pba %d", pba);
 956		return -EINVAL;
 957	}
 958
 959	error = msb_run_state_machine(msb, h_msb_read_page);
 960	*extra = msb->regs.extra_data;
 961
 962	if (error == -EUCLEAN) {
 963		pr_notice("correctable error on pba %d, page %d",
 964			pba, page);
 965		return 0;
 966	}
 967
 968	return error;
 969}
 970
 971/* Reads a block and compares it with data contained in scatterlist orig_sg */
 972static int msb_verify_block(struct msb_data *msb, u16 pba,
 973				struct scatterlist *orig_sg,  int offset)
 974{
 975	struct scatterlist sg;
 976	int page = 0, error;
 977
 978	sg_init_one(&sg, msb->block_buffer, msb->block_size);
 979
 980	while (page < msb->pages_in_block) {
 981
 982		error = msb_read_page(msb, pba, page,
 983				NULL, &sg, page * msb->page_size);
 984		if (error)
 985			return error;
 986		page++;
 987	}
 988
 989	if (msb_sg_compare_to_buffer(orig_sg, offset,
 990				msb->block_buffer, msb->block_size))
 991		return -EIO;
 992	return 0;
 993}
 994
 995/* Writes exactly one block + oob */
 996static int msb_write_block(struct msb_data *msb,
 997			u16 pba, u32 lba, struct scatterlist *sg, int offset)
 998{
 999	int error, current_try = 1;
1000	BUG_ON(sg->length < msb->page_size);
1001
1002	if (msb->read_only)
1003		return -EROFS;
1004
1005	if (pba == MS_BLOCK_INVALID) {
1006		pr_err(
1007			"BUG: write: attempt to write MS_BLOCK_INVALID block");
1008		return -EINVAL;
1009	}
1010
1011	if (pba >= msb->block_count || lba >= msb->logical_block_count) {
1012		pr_err(
1013		"BUG: write: attempt to write beyond the end of device");
1014		return -EINVAL;
1015	}
1016
1017	if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
1018		pr_err("BUG: write: lba zone mismatch");
1019		return -EINVAL;
1020	}
1021
1022	if (pba == msb->boot_block_locations[0] ||
1023		pba == msb->boot_block_locations[1]) {
1024		pr_err("BUG: write: attempt to write to boot blocks!");
1025		return -EINVAL;
1026	}
1027
1028	while (1) {
1029
1030		if (msb->read_only)
1031			return -EROFS;
1032
1033		msb->regs.param.cp = MEMSTICK_CP_BLOCK;
1034		msb->regs.param.page_address = 0;
1035		msb->regs.param.block_address = cpu_to_be16(pba);
1036
1037		msb->regs.extra_data.management_flag = 0xFF;
1038		msb->regs.extra_data.overwrite_flag = 0xF8;
1039		msb->regs.extra_data.logical_address = cpu_to_be16(lba);
1040
1041		msb->current_sg = sg;
1042		msb->current_sg_offset = offset;
1043		msb->current_page = 0;
1044
1045		error = msb_run_state_machine(msb, h_msb_write_block);
1046
1047		/* The sector we just wrote to is assumed erased, since its pba
1048			was erased. If it wasn't actually erased, the write will
1049			succeed and will merely clear bits that were already set
1050			in the block, so verify that what we have written
1051			matches what we expect.
1052			We do trust the blocks that we erased ourselves */
1053		if (!error && (verify_writes ||
1054				!test_bit(pba, msb->erased_blocks_bitmap)))
1055			error = msb_verify_block(msb, pba, sg, offset);
1056
1057		if (!error)
1058			break;
1059
1060		if (current_try > 1 || msb_reset(msb, true))
1061			break;
1062
1063		pr_err("write failed, trying to erase the pba %d", pba);
1064		error = msb_erase_block(msb, pba);
1065		if (error)
1066			break;
1067
1068		current_try++;
1069	}
1070	return error;
1071}
1072
1073/* Finds a free block for write replacement */
1074static u16 msb_get_free_block(struct msb_data *msb, int zone)
1075{
1076	u16 pos;
1077	int pba = zone * MS_BLOCKS_IN_ZONE;
1078	int i;
1079
1080	get_random_bytes(&pos, sizeof(pos));
1081
1082	if (!msb->free_block_count[zone]) {
1083		pr_err("NO free blocks in zone %d to use for a write (media is WORN out), switching to RO mode", zone);
1084		msb->read_only = true;
1085		return MS_BLOCK_INVALID;
1086	}
1087
1088	pos %= msb->free_block_count[zone];
1089
1090	dbg_verbose("have %d choices for a free block, selected randomly: %d",
1091		msb->free_block_count[zone], pos);
1092
1093	pba = find_next_zero_bit(msb->used_blocks_bitmap,
1094							msb->block_count, pba);
1095	for (i = 0; i < pos; ++i)
1096		pba = find_next_zero_bit(msb->used_blocks_bitmap,
1097						msb->block_count, pba + 1);
1098
1099	dbg_verbose("result of the free blocks scan: pba %d", pba);
1100
1101	if (pba == msb->block_count || (msb_get_zone_from_pba(pba)) != zone) {
1102		pr_err("BUG: can't get a free block");
1103		msb->read_only = true;
1104		return MS_BLOCK_INVALID;
1105	}
1106
1107	msb_mark_block_used(msb, pba);
1108	return pba;
1109}
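/*
 * Illustrative run (made-up numbers): if zone 1 has free_block_count =
 * 10 and the random u16 happens to be 12345, then pos = 12345 % 10 = 5,
 * and the scan above returns the 6th zero bit of used_blocks_bitmap at
 * or after pba 512 (again taking MS_BLOCKS_IN_ZONE as 512).
 */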
1110
1111static int msb_update_block(struct msb_data *msb, u16 lba,
1112	struct scatterlist *sg, int offset)
1113{
1114	u16 pba, new_pba;
1115	int error, try;
1116
1117	pba = msb->lba_to_pba_table[lba];
1118	dbg_verbose("start of a block update at lba %d, pba %d", lba, pba);
1119
1120	if (pba != MS_BLOCK_INVALID) {
1121		dbg_verbose("setting the update flag on the block");
1122		msb_set_overwrite_flag(msb, pba, 0,
1123				0xFF & ~MEMSTICK_OVERWRITE_UDST);
1124	}
1125
1126	for (try = 0; try < 3; try++) {
1127		new_pba = msb_get_free_block(msb,
1128			msb_get_zone_from_lba(lba));
1129
1130		if (new_pba == MS_BLOCK_INVALID) {
1131			error = -EIO;
1132			goto out;
1133		}
1134
1135		dbg_verbose("block update: writing updated block to the pba %d",
1136								new_pba);
1137		error = msb_write_block(msb, new_pba, lba, sg, offset);
1138		if (error == -EBADMSG) {
1139			msb_mark_bad(msb, new_pba);
1140			continue;
1141		}
1142
1143		if (error)
1144			goto out;
1145
1146		dbg_verbose("block update: erasing the old block");
1147		msb_erase_block(msb, pba);
1148		msb->lba_to_pba_table[lba] = new_pba;
1149		return 0;
1150	}
1151out:
1152	if (error) {
1153		pr_err("block update error after %d tries, switching to r/o mode", try);
1154		msb->read_only = true;
1155	}
1156	return error;
1157}
1158
1159/* Converts endianness in the boot block for easy use */
1160static void msb_fix_boot_page_endianness(struct ms_boot_page *p)
1161{
1162	p->header.block_id = be16_to_cpu(p->header.block_id);
1163	p->header.format_reserved = be16_to_cpu(p->header.format_reserved);
1164	p->entry.disabled_block.start_addr
1165		= be32_to_cpu(p->entry.disabled_block.start_addr);
1166	p->entry.disabled_block.data_size
1167		= be32_to_cpu(p->entry.disabled_block.data_size);
1168	p->entry.cis_idi.start_addr
1169		= be32_to_cpu(p->entry.cis_idi.start_addr);
1170	p->entry.cis_idi.data_size
1171		= be32_to_cpu(p->entry.cis_idi.data_size);
1172	p->attr.block_size = be16_to_cpu(p->attr.block_size);
1173	p->attr.number_of_blocks = be16_to_cpu(p->attr.number_of_blocks);
1174	p->attr.number_of_effective_blocks
1175		= be16_to_cpu(p->attr.number_of_effective_blocks);
1176	p->attr.page_size = be16_to_cpu(p->attr.page_size);
1177	p->attr.memory_manufacturer_code
1178		= be16_to_cpu(p->attr.memory_manufacturer_code);
1179	p->attr.memory_device_code = be16_to_cpu(p->attr.memory_device_code);
1180	p->attr.implemented_capacity
1181		= be16_to_cpu(p->attr.implemented_capacity);
1182	p->attr.controller_number = be16_to_cpu(p->attr.controller_number);
1183	p->attr.controller_function = be16_to_cpu(p->attr.controller_function);
1184}
1185
1186static int msb_read_boot_blocks(struct msb_data *msb)
1187{
1188	int pba = 0;
1189	struct scatterlist sg;
1190	struct ms_extra_data_register extra;
1191	struct ms_boot_page *page;
1192
1193	msb->boot_block_locations[0] = MS_BLOCK_INVALID;
1194	msb->boot_block_locations[1] = MS_BLOCK_INVALID;
1195	msb->boot_block_count = 0;
1196
1197	dbg_verbose("Start of a scan for the boot blocks");
1198
1199	if (!msb->boot_page) {
1200		page = kmalloc_array(2, sizeof(struct ms_boot_page),
1201				     GFP_KERNEL);
1202		if (!page)
1203			return -ENOMEM;
1204
1205		msb->boot_page = page;
1206	} else
1207		page = msb->boot_page;
1208
1209	msb->block_count = MS_BLOCK_MAX_BOOT_ADDR;
1210
1211	for (pba = 0; pba < MS_BLOCK_MAX_BOOT_ADDR; pba++) {
1212
1213		sg_init_one(&sg, page, sizeof(*page));
1214		if (msb_read_page(msb, pba, 0, &extra, &sg, 0)) {
1215			dbg("boot scan: can't read pba %d", pba);
1216			continue;
1217		}
1218
1219		if (extra.management_flag & MEMSTICK_MANAGEMENT_SYSFLG) {
1220			dbg("management flag doesn't indicate boot block %d",
1221									pba);
1222			continue;
1223		}
1224
1225		if (be16_to_cpu(page->header.block_id) != MS_BLOCK_BOOT_ID) {
1226			dbg("the pba at %d doesn't contain the boot block ID", pba);
1227			continue;
1228		}
1229
1230		msb_fix_boot_page_endianness(page);
1231		msb->boot_block_locations[msb->boot_block_count] = pba;
1232
1233		page++;
1234		msb->boot_block_count++;
1235
1236		if (msb->boot_block_count == 2)
1237			break;
1238	}
1239
1240	if (!msb->boot_block_count) {
1241		pr_err("media doesn't contain master page, aborting");
1242		return -EIO;
1243	}
1244
1245	dbg_verbose("End of scan for boot blocks");
1246	return 0;
1247}
1248
1249static int msb_read_bad_block_table(struct msb_data *msb, int block_nr)
1250{
1251	struct ms_boot_page *boot_block;
1252	struct scatterlist sg;
1253	u16 *buffer = NULL;
1254	int offset = 0;
1255	int i, error = 0;
1256	int data_size, data_offset, page, page_offset, size_to_read;
1257	u16 pba;
1258
1259	BUG_ON(block_nr > 1);
1260	boot_block = &msb->boot_page[block_nr];
1261	pba = msb->boot_block_locations[block_nr];
1262
1263	if (msb->boot_block_locations[block_nr] == MS_BLOCK_INVALID)
1264		return -EINVAL;
1265
1266	data_size = boot_block->entry.disabled_block.data_size;
1267	data_offset = sizeof(struct ms_boot_page) +
1268			boot_block->entry.disabled_block.start_addr;
1269	if (!data_size)
1270		return 0;
1271
1272	page = data_offset / msb->page_size;
1273	page_offset = data_offset % msb->page_size;
1274	size_to_read =
1275		DIV_ROUND_UP(data_size + page_offset, msb->page_size) *
1276			msb->page_size;
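	/*
	 * Example of the math above (illustrative numbers): with
	 * page_size = 2048, data_offset = 3000 and data_size = 100,
	 * page = 1, page_offset = 952 and size_to_read =
	 * DIV_ROUND_UP(1052, 2048) * 2048 = 2048 bytes.
	 */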
1277
1278	dbg("reading bad block of boot block at pba %d, offset %d len %d",
1279		pba, data_offset, data_size);
1280
1281	buffer = kzalloc(size_to_read, GFP_KERNEL);
1282	if (!buffer)
1283		return -ENOMEM;
1284
1285	/* Read the buffer */
1286	sg_init_one(&sg, buffer, size_to_read);
1287
1288	while (offset < size_to_read) {
1289		error = msb_read_page(msb, pba, page, NULL, &sg, offset);
1290		if (error)
1291			goto out;
1292
1293		page++;
1294		offset += msb->page_size;
1295
1296		if (page == msb->pages_in_block) {
1297			pr_err(
1298			"bad block table extends beyond the boot block");
1299			break;
1300		}
1301	}
1302
1303	/* Process the bad block table */
1304	for (i = page_offset; i < data_size / sizeof(u16); i++) {
1305
1306		u16 bad_block = be16_to_cpu(buffer[i]);
1307
1308		if (bad_block >= msb->block_count) {
1309			dbg("bad block table contains invalid block %d",
1310								bad_block);
1311			continue;
1312		}
1313
1314		if (test_bit(bad_block, msb->used_blocks_bitmap))  {
1315			dbg("duplicate bad block %d in the table",
1316				bad_block);
1317			continue;
1318		}
1319
1320		dbg("block %d is marked as factory bad", bad_block);
1321		msb_mark_block_used(msb, bad_block);
1322	}
1323out:
1324	kfree(buffer);
1325	return error;
1326}
1327
1328static int msb_ftl_initialize(struct msb_data *msb)
1329{
1330	int i;
1331
1332	if (msb->ftl_initialized)
1333		return 0;
1334
1335	msb->zone_count = msb->block_count / MS_BLOCKS_IN_ZONE;
1336	msb->logical_block_count = msb->zone_count * 496 - 2;
1337
1338	msb->used_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
1339	msb->erased_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
1340	msb->lba_to_pba_table =
1341		kmalloc_array(msb->logical_block_count, sizeof(u16),
1342			      GFP_KERNEL);
1343
1344	if (!msb->used_blocks_bitmap || !msb->lba_to_pba_table ||
1345						!msb->erased_blocks_bitmap) {
1346		kfree(msb->used_blocks_bitmap);
1347		kfree(msb->lba_to_pba_table);
1348		kfree(msb->erased_blocks_bitmap);
1349		return -ENOMEM;
1350	}
1351
1352	for (i = 0; i < msb->zone_count; i++)
1353		msb->free_block_count[i] = MS_BLOCKS_IN_ZONE;
1354
1355	memset(msb->lba_to_pba_table, MS_BLOCK_INVALID,
1356			msb->logical_block_count * sizeof(u16));
1357
1358	dbg("initial FTL tables created. Zone count = %d, Logical block count = %d",
1359		msb->zone_count, msb->logical_block_count);
1360
1361	msb->ftl_initialized = true;
1362	return 0;
1363}
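/*
 * Sizing example (illustrative): a card with block_count = 8192 gets
 * zone_count = 8192 / 512 = 16 and logical_block_count =
 * 16 * 496 - 2 = 7934, while each of the two block bitmaps occupies
 * 8192 / 8 = 1024 bytes.
 */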
1364
1365static int msb_ftl_scan(struct msb_data *msb)
1366{
1367	u16 pba, lba, other_block;
1368	u8 overwrite_flag, management_flag, other_overwrite_flag;
1369	int error;
1370	struct ms_extra_data_register extra;
1371	u8 *overwrite_flags = kzalloc(msb->block_count, GFP_KERNEL);
1372
1373	if (!overwrite_flags)
1374		return -ENOMEM;
1375
1376	dbg("Start of media scanning");
1377	for (pba = 0; pba < msb->block_count; pba++) {
1378
1379		if (pba == msb->boot_block_locations[0] ||
1380			pba == msb->boot_block_locations[1]) {
1381			dbg_verbose("pba %05d -> [boot block]", pba);
1382			msb_mark_block_used(msb, pba);
1383			continue;
1384		}
1385
1386		if (test_bit(pba, msb->used_blocks_bitmap)) {
1387			dbg_verbose("pba %05d -> [factory bad]", pba);
1388			continue;
1389		}
1390
1391		memset(&extra, 0, sizeof(extra));
1392		error = msb_read_oob(msb, pba, 0, &extra);
1393
1394		/* can't trust the page if we can't read the oob */
1395		if (error == -EBADMSG) {
1396			pr_notice(
1397			"oob of pba %d damaged, will try to erase it", pba);
1398			msb_mark_block_used(msb, pba);
1399			msb_erase_block(msb, pba);
1400			continue;
1401		} else if (error) {
1402			pr_err("unknown error %d on read of oob of pba %d - aborting",
1403				error, pba);
1404
1405			kfree(overwrite_flags);
1406			return error;
1407		}
1408
1409		lba = be16_to_cpu(extra.logical_address);
1410		management_flag = extra.management_flag;
1411		overwrite_flag = extra.overwrite_flag;
1412		overwrite_flags[pba] = overwrite_flag;
1413
1414		/* Skip bad blocks */
1415		if (!(overwrite_flag & MEMSTICK_OVERWRITE_BKST)) {
1416			dbg("pba %05d -> [BAD]", pba);
1417			msb_mark_block_used(msb, pba);
1418			continue;
1419		}
1420
1421		/* Skip system/drm blocks */
1422		if ((management_flag & MEMSTICK_MANAGEMENT_FLAG_NORMAL) !=
1423			MEMSTICK_MANAGEMENT_FLAG_NORMAL) {
1424			dbg("pba %05d -> [reserved management flag %02x]",
1425							pba, management_flag);
1426			msb_mark_block_used(msb, pba);
1427			continue;
1428		}
1429
1430		/* Erase temporary tables */
1431		if (!(management_flag & MEMSTICK_MANAGEMENT_ATFLG)) {
1432			dbg("pba %05d -> [temp table] - will erase", pba);
1433
1434			msb_mark_block_used(msb, pba);
1435			msb_erase_block(msb, pba);
1436			continue;
1437		}
1438
1439		if (lba == MS_BLOCK_INVALID) {
1440			dbg_verbose("pba %05d -> [free]", pba);
1441			continue;
1442		}
1443
1444		msb_mark_block_used(msb, pba);
1445
1446		/* Block has an LBA that violates the zoning */
1447		if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
1448			pr_notice("pba %05d -> [bad lba %05d] - will erase",
1449								pba, lba);
1450			msb_erase_block(msb, pba);
1451			continue;
1452		}
1453
1454		/* No collisions - great */
1455		if (msb->lba_to_pba_table[lba] == MS_BLOCK_INVALID) {
1456			dbg_verbose("pba %05d -> [lba %05d]", pba, lba);
1457			msb->lba_to_pba_table[lba] = pba;
1458			continue;
1459		}
1460
1461		other_block = msb->lba_to_pba_table[lba];
1462		other_overwrite_flag = overwrite_flags[other_block];
1463
1464		pr_notice("Collision between pba %d and pba %d",
1465			pba, other_block);
1466
1467		if (!(overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
1468			pr_notice("pba %d is marked as stable, use it", pba);
1469			msb_erase_block(msb, other_block);
1470			msb->lba_to_pba_table[lba] = pba;
1471			continue;
1472		}
1473
1474		if (!(other_overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
1475			pr_notice("pba %d is marked as stable, use it",
1476								other_block);
1477			msb_erase_block(msb, pba);
1478			continue;
1479		}
1480
1481		pr_notice("collision between pba %d and pba %d, neither is marked stable, erasing pba %d",
1482				pba, other_block, other_block);
1483
1484		msb_erase_block(msb, other_block);
1485		msb->lba_to_pba_table[lba] = pba;
1486	}
1487
1488	dbg("End of media scanning");
1489	kfree(overwrite_flags);
1490	return 0;
1491}
1492
1493static void msb_cache_flush_timer(struct timer_list *t)
1494{
1495	struct msb_data *msb = from_timer(msb, t, cache_flush_timer);
1496	msb->need_flush_cache = true;
1497	queue_work(msb->io_queue, &msb->io_work);
1498}
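/*
 * How the timer fits in: msb_cache_write() arms it for
 * cache_flush_timeout msec when a block first enters the cache. When it
 * fires, the callback above only sets need_flush_cache and kicks
 * io_work; msb_io_work() then calls msb_cache_flush() in process
 * context, where it is safe to talk to the card.
 */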
1499
1500
1501static void msb_cache_discard(struct msb_data *msb)
1502{
1503	if (msb->cache_block_lba == MS_BLOCK_INVALID)
1504		return;
1505
1506	del_timer_sync(&msb->cache_flush_timer);
1507
1508	dbg_verbose("Discarding the write cache");
1509	msb->cache_block_lba = MS_BLOCK_INVALID;
1510	bitmap_zero(&msb->valid_cache_bitmap, msb->pages_in_block);
1511}
1512
1513static int msb_cache_init(struct msb_data *msb)
1514{
1515	timer_setup(&msb->cache_flush_timer, msb_cache_flush_timer, 0);
1516
1517	if (!msb->cache)
1518		msb->cache = kzalloc(msb->block_size, GFP_KERNEL);
1519	if (!msb->cache)
1520		return -ENOMEM;
1521
1522	msb_cache_discard(msb);
1523	return 0;
1524}
1525
1526static int msb_cache_flush(struct msb_data *msb)
1527{
1528	struct scatterlist sg;
1529	struct ms_extra_data_register extra;
1530	int page, offset, error;
1531	u16 pba, lba;
1532
1533	if (msb->read_only)
1534		return -EROFS;
1535
1536	if (msb->cache_block_lba == MS_BLOCK_INVALID)
1537		return 0;
1538
1539	lba = msb->cache_block_lba;
1540	pba = msb->lba_to_pba_table[lba];
1541
1542	dbg_verbose("Flushing the write cache of pba %d (LBA %d)",
1543						pba, msb->cache_block_lba);
1544
1545	sg_init_one(&sg, msb->cache, msb->block_size);
1546
1547	/* Read all missing pages in cache */
1548	for (page = 0; page < msb->pages_in_block; page++) {
1549
1550		if (test_bit(page, &msb->valid_cache_bitmap))
1551			continue;
1552
1553		offset = page * msb->page_size;
1554
1555		dbg_verbose("reading non-present sector %d of cache block %d",
1556			page, lba);
1557		error = msb_read_page(msb, pba, page, &extra, &sg, offset);
1558
1559		/* Bad pages are copied with 00 page status */
1560		if (error == -EBADMSG) {
1561			pr_err("read error on sector %d, contents probably damaged", page);
1562			continue;
1563		}
1564
1565		if (error)
1566			return error;
1567
1568		if ((extra.overwrite_flag & MEMSTICK_OV_PG_NORMAL) !=
1569							MEMSTICK_OV_PG_NORMAL) {
1570			dbg("page %d is marked as bad", page);
1571			continue;
1572		}
1573
1574		set_bit(page, &msb->valid_cache_bitmap);
1575	}
1576
1577	/* Write the cache now */
1578	error = msb_update_block(msb, msb->cache_block_lba, &sg, 0);
1579	pba = msb->lba_to_pba_table[msb->cache_block_lba];
1580
1581	/* Mark invalid pages */
1582	if (!error) {
1583		for (page = 0; page < msb->pages_in_block; page++) {
1584
1585			if (test_bit(page, &msb->valid_cache_bitmap))
1586				continue;
1587
1588			dbg("marking page %d as containing damaged data",
1589				page);
1590			msb_set_overwrite_flag(msb,
1591				pba, page, 0xFF & ~MEMSTICK_OV_PG_NORMAL);
1592		}
1593	}
1594
1595	msb_cache_discard(msb);
1596	return error;
1597}
1598
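/*
 * Merge one page into the write cache. With add_to_cache_only set
 * (used by the read path to opportunistically populate the cache) the
 * page is only merged when its block is already cached; a regular
 * write to a different block first flushes the currently cached one.
 */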
1599static int msb_cache_write(struct msb_data *msb, int lba,
1600	int page, bool add_to_cache_only, struct scatterlist *sg, int offset)
1601{
1602	int error;
1603	struct scatterlist sg_tmp[10];
1604
1605	if (msb->read_only)
1606		return -EROFS;
1607
	if (add_to_cache_only &&
			(msb->cache_block_lba == MS_BLOCK_INVALID ||
					lba != msb->cache_block_lba))
		return 0;
1612
	/* If we need to write to a different block, flush the cache first */
1614	if (msb->cache_block_lba != MS_BLOCK_INVALID &&
1615						lba != msb->cache_block_lba) {
1616		dbg_verbose("first flush the cache");
1617		error = msb_cache_flush(msb);
1618		if (error)
1619			return error;
1620	}
1621
	if (msb->cache_block_lba == MS_BLOCK_INVALID) {
		msb->cache_block_lba = lba;
1624		mod_timer(&msb->cache_flush_timer,
1625			jiffies + msecs_to_jiffies(cache_flush_timeout));
1626	}
1627
	dbg_verbose("Write of LBA %d page %d to cache", lba, page);
1629
1630	sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
1631	msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp), offset, msb->page_size);
1632
1633	sg_copy_to_buffer(sg_tmp, sg_nents(sg_tmp),
1634		msb->cache + page * msb->page_size, msb->page_size);
1635
1636	set_bit(page, &msb->valid_cache_bitmap);
1637	return 0;
1638}
1639
1640static int msb_cache_read(struct msb_data *msb, int lba,
1641				int page, struct scatterlist *sg, int offset)
1642{
1643	int pba = msb->lba_to_pba_table[lba];
1644	struct scatterlist sg_tmp[10];
1645	int error = 0;
1646
1647	if (lba == msb->cache_block_lba &&
1648			test_bit(page, &msb->valid_cache_bitmap)) {
1649
1650		dbg_verbose("Read of LBA %d (pba %d) sector %d from cache",
1651							lba, pba, page);
1652
1653		sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
1654		msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp),
1655			offset, msb->page_size);
1656		sg_copy_from_buffer(sg_tmp, sg_nents(sg_tmp),
1657			msb->cache + msb->page_size * page,
1658							msb->page_size);
1659	} else {
1660		dbg_verbose("Read of LBA %d (pba %d) sector %d from device",
1661							lba, pba, page);
1662
1663		error = msb_read_page(msb, pba, page, NULL, sg, offset);
1664		if (error)
1665			return error;
1666
1667		msb_cache_write(msb, lba, page, true, sg, offset);
1668	}
1669	return error;
1670}
1671
/* Emulated geometry table
 * The exact content of this table isn't that important,
 * one could put different values here, provided that they still
 * cover the whole disk.
 * The 64 MB entry is what Windows reports for my 64M memstick */
1677
1678static const struct chs_entry chs_table[] = {
/*        size sectors cylinders  heads */
1680	{ 4,    16,    247,       2  },
1681	{ 8,    16,    495,       2  },
1682	{ 16,   16,    495,       4  },
1683	{ 32,   16,    991,       4  },
1684	{ 64,   16,    991,       8  },
1685	{128,   16,    991,       16 },
1686	{ 0 }
1687};
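
/*
 * For example, the 64 MB entry above works out to
 * 16 sectors/track * 991 cylinders * 8 heads = 126848 sectors,
 * i.e. 126848 * 512 = 64946176 bytes - roughly 64 decimal megabytes.
 */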
1688
1689/* Load information about the card */
1690static int msb_init_card(struct memstick_dev *card)
1691{
1692	struct msb_data *msb = memstick_get_drvdata(card);
1693	struct memstick_host *host = card->host;
1694	struct ms_boot_page *boot_block;
1695	int error = 0, i, raw_size_in_megs;
1696
1697	msb->caps = 0;
1698
	/* ROM, RO and WP cards are read-only */
	if (card->id.class >= MEMSTICK_CLASS_ROM &&
				card->id.class <= MEMSTICK_CLASS_WP)
1701		msb->read_only = true;
1702
1703	msb->state = -1;
1704	error = msb_reset(msb, false);
1705	if (error)
1706		return error;
1707
	/* Due to a bug in the JMicron driver written by Alex Dubov,
	 * its serial mode barely works,
	 * so we switch to parallel mode right away */
1711	if (host->caps & MEMSTICK_CAP_PAR4)
1712		msb_switch_to_parallel(msb);
1713
1714	msb->page_size = sizeof(struct ms_boot_page);
1715
1716	/* Read the boot page */
1717	error = msb_read_boot_blocks(msb);
1718	if (error)
1719		return -EIO;
1720
1721	boot_block = &msb->boot_page[0];
1722
	/* Save interesting attributes from the boot page */
1724	msb->block_count = boot_block->attr.number_of_blocks;
1725	msb->page_size = boot_block->attr.page_size;
1726
1727	msb->pages_in_block = boot_block->attr.block_size * 2;
1728	msb->block_size = msb->page_size * msb->pages_in_block;
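
	/*
	 * attr.block_size appears to be given in KiB: assuming the usual
	 * 512-byte pages of legacy MemoryStick media, a block of
	 * attr.block_size KiB holds attr.block_size * 1024 / 512 =
	 * attr.block_size * 2 pages, hence the factor of two above.
	 */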
1729
1730	if (msb->page_size > PAGE_SIZE) {
		/* this isn't supported by Linux at all, anyway */
		dbg("device page size %d isn't supported", msb->page_size);
1733		return -EINVAL;
1734	}
1735
1736	msb->block_buffer = kzalloc(msb->block_size, GFP_KERNEL);
1737	if (!msb->block_buffer)
1738		return -ENOMEM;
1739
1740	raw_size_in_megs = (msb->block_size * msb->block_count) >> 20;
1741
1742	for (i = 0; chs_table[i].size; i++) {
1743
1744		if (chs_table[i].size != raw_size_in_megs)
1745			continue;
1746
1747		msb->geometry.cylinders = chs_table[i].cyl;
1748		msb->geometry.heads = chs_table[i].head;
1749		msb->geometry.sectors = chs_table[i].sec;
1750		break;
1751	}
1752
1753	if (boot_block->attr.transfer_supporting == 1)
1754		msb->caps |= MEMSTICK_CAP_PAR4;
1755
1756	if (boot_block->attr.device_type & 0x03)
1757		msb->read_only = true;
1758
1759	dbg("Total block count = %d", msb->block_count);
1760	dbg("Each block consists of %d pages", msb->pages_in_block);
1761	dbg("Page size = %d bytes", msb->page_size);
1762	dbg("Parallel mode supported: %d", !!(msb->caps & MEMSTICK_CAP_PAR4));
1763	dbg("Read only: %d", msb->read_only);
1764
1765#if 0
1766	/* Now we can switch the interface */
1767	if (host->caps & msb->caps & MEMSTICK_CAP_PAR4)
1768		msb_switch_to_parallel(msb);
1769#endif
1770
1771	error = msb_cache_init(msb);
1772	if (error)
1773		return error;
1774
1775	error = msb_ftl_initialize(msb);
1776	if (error)
1777		return error;
1778
1779
1780	/* Read the bad block table */
1781	error = msb_read_bad_block_table(msb, 0);
1782
1783	if (error && error != -ENOMEM) {
1784		dbg("failed to read bad block table from primary boot block, trying from backup");
1785		error = msb_read_bad_block_table(msb, 1);
1786	}
1787
1788	if (error)
1789		return error;
1790
1791	/* *drum roll* Scan the media */
1792	error = msb_ftl_scan(msb);
1793	if (error) {
1794		pr_err("Scan of media failed");
1795		return error;
1796	}
1797
	return 0;
}
1801
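/*
 * Write 'len' bytes of 'sg' starting at logical position (lba, page).
 * Whole, block-aligned chunks bypass the cache and are written with
 * msb_update_block() directly; everything else is staged page by page
 * through the write cache.
 */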
static int msb_do_write_request(struct msb_data *msb, int lba,
	int page, struct scatterlist *sg, size_t len, int *successfully_written)
{
	int error = 0;
	off_t offset = 0;
	*successfully_written = 0;
1808
1809	while (offset < len) {
1810		if (page == 0 && len - offset >= msb->block_size) {
1811
1812			if (msb->cache_block_lba == lba)
1813				msb_cache_discard(msb);
1814
1815			dbg_verbose("Writing whole lba %d", lba);
1816			error = msb_update_block(msb, lba, sg, offset);
1817			if (error)
1818				return error;
1819
1820			offset += msb->block_size;
			*successfully_written += msb->block_size;
1822			lba++;
1823			continue;
1824		}
1825
1826		error = msb_cache_write(msb, lba, page, false, sg, offset);
1827		if (error)
1828			return error;
1829
1830		offset += msb->page_size;
		*successfully_written += msb->page_size;
1832
1833		page++;
1834		if (page == msb->pages_in_block) {
1835			page = 0;
1836			lba++;
1837		}
1838	}
1839	return 0;
1840}
1841
static int msb_do_read_request(struct msb_data *msb, int lba,
		int page, struct scatterlist *sg, int len, int *successfully_read)
{
	int error = 0;
	int offset = 0;
	*successfully_read = 0;
1848
1849	while (offset < len) {
1850
1851		error = msb_cache_read(msb, lba, page, sg, offset);
1852		if (error)
1853			return error;
1854
1855		offset += msb->page_size;
		*successfully_read += msb->page_size;
1857
1858		page++;
1859		if (page == msb->pages_in_block) {
1860			page = 0;
1861			lba++;
1862		}
1863	}
1864	return 0;
1865}
1866
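/*
 * All medium access funnels through this work item: pending cache
 * flushes are handled first, then the current request's sector range
 * is converted into (lba, page) coordinates and dispatched to the
 * read/write helpers above, one request at a time.
 */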
1867static void msb_io_work(struct work_struct *work)
1868{
1869	struct msb_data *msb = container_of(work, struct msb_data, io_work);
1870	int page, error, len;
1871	sector_t lba;
1872	struct scatterlist *sg = msb->prealloc_sg;
1873	struct request *req;
1874
1875	dbg_verbose("IO: work started");
1876
1877	while (1) {
1878		spin_lock_irq(&msb->q_lock);
1879
1880		if (msb->need_flush_cache) {
1881			msb->need_flush_cache = false;
1882			spin_unlock_irq(&msb->q_lock);
1883			msb_cache_flush(msb);
1884			continue;
1885		}
1886
1887		req = msb->req;
1888		if (!req) {
1889			dbg_verbose("IO: no more requests exiting");
1890			spin_unlock_irq(&msb->q_lock);
1891			return;
1892		}
1893
1894		spin_unlock_irq(&msb->q_lock);
1895
1896		/* process the request */
1897		dbg_verbose("IO: processing new request");
1898		blk_rq_map_sg(msb->queue, req, sg);
1899
1900		lba = blk_rq_pos(req);
1901
1902		sector_div(lba, msb->page_size / 512);
1903		page = sector_div(lba, msb->pages_in_block);
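
		/*
		 * Worked example, assuming 512-byte pages and 32 pages per
		 * block: for request sector 1000, the first division by
		 * page_size / 512 == 1 leaves lba == 1000 pages, and the
		 * second leaves lba == 31 (the logical block) and returns
		 * page == 8 within it (31 * 32 + 8 == 1000).
		 */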
1904
1905		if (rq_data_dir(msb->req) == READ)
1906			error = msb_do_read_request(msb, lba, page, sg,
1907				blk_rq_bytes(req), &len);
1908		else
1909			error = msb_do_write_request(msb, lba, page, sg,
1910				blk_rq_bytes(req), &len);
1911
1912		if (len && !blk_update_request(req, BLK_STS_OK, len)) {
1913			__blk_mq_end_request(req, BLK_STS_OK);
1914			spin_lock_irq(&msb->q_lock);
1915			msb->req = NULL;
1916			spin_unlock_irq(&msb->q_lock);
1917		}
1918
1919		if (error && msb->req) {
1920			blk_status_t ret = errno_to_blk_status(error);
1921
			dbg_verbose("IO: ending the remainder of the request with error");
1923			blk_mq_end_request(req, ret);
1924			spin_lock_irq(&msb->q_lock);
1925			msb->req = NULL;
1926			spin_unlock_irq(&msb->q_lock);
1927		}
1928
1929		if (msb->req)
1930			dbg_verbose("IO: request still pending");
1931	}
1932}
1933
static DEFINE_IDR(msb_disk_idr); /* set of used disk numbers */
1935static DEFINE_MUTEX(msb_disk_lock); /* protects against races in open/release */
1936
1937static int msb_bd_open(struct block_device *bdev, fmode_t mode)
1938{
1939	struct gendisk *disk = bdev->bd_disk;
1940	struct msb_data *msb = disk->private_data;
1941
1942	dbg_verbose("block device open");
1943
1944	mutex_lock(&msb_disk_lock);
1945
1946	if (msb && msb->card)
1947		msb->usage_count++;
1948
1949	mutex_unlock(&msb_disk_lock);
1950	return 0;
1951}
1952
1953static void msb_data_clear(struct msb_data *msb)
1954{
1955	kfree(msb->boot_page);
1956	kfree(msb->used_blocks_bitmap);
1957	kfree(msb->lba_to_pba_table);
1958	kfree(msb->cache);
1959	msb->card = NULL;
1960}
1961
1962static int msb_disk_release(struct gendisk *disk)
1963{
1964	struct msb_data *msb = disk->private_data;
1965
1966	dbg_verbose("block device release");
1967	mutex_lock(&msb_disk_lock);
1968
1969	if (msb) {
1970		if (msb->usage_count)
1971			msb->usage_count--;
1972
1973		if (!msb->usage_count) {
1974			disk->private_data = NULL;
1975			idr_remove(&msb_disk_idr, msb->disk_id);
1976			put_disk(disk);
1977			kfree(msb);
1978		}
1979	}
1980	mutex_unlock(&msb_disk_lock);
1981	return 0;
1982}
1983
1984static void msb_bd_release(struct gendisk *disk, fmode_t mode)
1985{
1986	msb_disk_release(disk);
1987}
1988
1989static int msb_bd_getgeo(struct block_device *bdev,
1990				 struct hd_geometry *geo)
1991{
1992	struct msb_data *msb = bdev->bd_disk->private_data;
1993	*geo = msb->geometry;
1994	return 0;
1995}
1996
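/*
 * blk-mq dispatch. The driver services a single request at a time:
 * while msb->req is still being processed, new requests are bounced
 * with BLK_STS_DEV_RESOURCE so that the block layer retries them later.
 */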
1997static blk_status_t msb_queue_rq(struct blk_mq_hw_ctx *hctx,
1998				 const struct blk_mq_queue_data *bd)
1999{
2000	struct memstick_dev *card = hctx->queue->queuedata;
2001	struct msb_data *msb = memstick_get_drvdata(card);
2002	struct request *req = bd->rq;
2003
2004	dbg_verbose("Submit request");
2005
2006	spin_lock_irq(&msb->q_lock);
2007
2008	if (msb->card_dead) {
2009		dbg("Refusing requests on removed card");
2010
2011		WARN_ON(!msb->io_queue_stopped);
2012
2013		spin_unlock_irq(&msb->q_lock);
2014		blk_mq_start_request(req);
2015		return BLK_STS_IOERR;
2016	}
2017
2018	if (msb->req) {
2019		spin_unlock_irq(&msb->q_lock);
2020		return BLK_STS_DEV_RESOURCE;
2021	}
2022
2023	blk_mq_start_request(req);
2024	msb->req = req;
2025
2026	if (!msb->io_queue_stopped)
2027		queue_work(msb->io_queue, &msb->io_work);
2028
2029	spin_unlock_irq(&msb->q_lock);
2030	return BLK_STS_OK;
2031}
2032
2033static int msb_check_card(struct memstick_dev *card)
2034{
2035	struct msb_data *msb = memstick_get_drvdata(card);
2036	return (msb->card_dead == 0);
2037}
2038
2039static void msb_stop(struct memstick_dev *card)
2040{
2041	struct msb_data *msb = memstick_get_drvdata(card);
2042	unsigned long flags;
2043
2044	dbg("Stopping all msblock IO");
2045
2046	blk_mq_stop_hw_queues(msb->queue);
2047	spin_lock_irqsave(&msb->q_lock, flags);
2048	msb->io_queue_stopped = true;
2049	spin_unlock_irqrestore(&msb->q_lock, flags);
2050
2051	del_timer_sync(&msb->cache_flush_timer);
2052	flush_workqueue(msb->io_queue);
2053
2054	spin_lock_irqsave(&msb->q_lock, flags);
2055	if (msb->req) {
2056		blk_mq_requeue_request(msb->req, false);
2057		msb->req = NULL;
2058	}
2059	spin_unlock_irqrestore(&msb->q_lock, flags);
2060}
2061
2062static void msb_start(struct memstick_dev *card)
2063{
2064	struct msb_data *msb = memstick_get_drvdata(card);
2065	unsigned long flags;
2066
2067	dbg("Resuming IO from msblock");
2068
2069	msb_invalidate_reg_window(msb);
2070
2071	spin_lock_irqsave(&msb->q_lock, flags);
2072	if (!msb->io_queue_stopped || msb->card_dead) {
2073		spin_unlock_irqrestore(&msb->q_lock, flags);
2074		return;
2075	}
2076	spin_unlock_irqrestore(&msb->q_lock, flags);
2077
	/* Kick a cache flush anyway, it's harmless */
2079	msb->need_flush_cache = true;
2080	msb->io_queue_stopped = false;
2081
2082	blk_mq_start_hw_queues(msb->queue);
2083
	queue_work(msb->io_queue, &msb->io_work);
}
2087
2088static const struct block_device_operations msb_bdops = {
2089	.open    = msb_bd_open,
2090	.release = msb_bd_release,
2091	.getgeo  = msb_bd_getgeo,
2092	.owner   = THIS_MODULE
2093};
2094
2095static const struct blk_mq_ops msb_mq_ops = {
2096	.queue_rq	= msb_queue_rq,
2097};
2098
2099/* Registers the block device */
2100static int msb_init_disk(struct memstick_dev *card)
2101{
2102	struct msb_data *msb = memstick_get_drvdata(card);
2103	int rc;
2104	unsigned long capacity;
2105
2106	mutex_lock(&msb_disk_lock);
2107	msb->disk_id = idr_alloc(&msb_disk_idr, card, 0, 256, GFP_KERNEL);
2108	mutex_unlock(&msb_disk_lock);
2109
2110	if (msb->disk_id  < 0)
2111		return msb->disk_id;
2112
2113	msb->disk = alloc_disk(0);
2114	if (!msb->disk) {
2115		rc = -ENOMEM;
2116		goto out_release_id;
2117	}
2118
2119	msb->queue = blk_mq_init_sq_queue(&msb->tag_set, &msb_mq_ops, 2,
2120						BLK_MQ_F_SHOULD_MERGE);
2121	if (IS_ERR(msb->queue)) {
2122		rc = PTR_ERR(msb->queue);
2123		msb->queue = NULL;
2124		goto out_put_disk;
2125	}
2126
2127	msb->queue->queuedata = card;
2128
2129	blk_queue_max_hw_sectors(msb->queue, MS_BLOCK_MAX_PAGES);
2130	blk_queue_max_segments(msb->queue, MS_BLOCK_MAX_SEGS);
2131	blk_queue_max_segment_size(msb->queue,
2132				   MS_BLOCK_MAX_PAGES * msb->page_size);
2133	blk_queue_logical_block_size(msb->queue, msb->page_size);
2134
2135	sprintf(msb->disk->disk_name, "msblk%d", msb->disk_id);
2136	msb->disk->fops = &msb_bdops;
2137	msb->disk->private_data = msb;
2138	msb->disk->queue = msb->queue;
2139	msb->disk->flags |= GENHD_FL_EXT_DEVT;
2140
2141	capacity = msb->pages_in_block * msb->logical_block_count;
2142	capacity *= (msb->page_size / 512);
2143	set_capacity(msb->disk, capacity);
2144	dbg("Set total disk size to %lu sectors", capacity);
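
	/*
	 * Illustrative numbers only (not read from a real card): with
	 * 512-byte pages, 32 pages per block and 3966 logical blocks,
	 * this gives 32 * 3966 * 1 = 126912 sectors, in line with the
	 * 16 * 991 * 8 = 126848 sectors of the 64 MB CHS entry above.
	 */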
2145
	msb->usage_count = 1;
	msb->io_queue = alloc_ordered_workqueue("ms_block", WQ_MEM_RECLAIM);
	if (!msb->io_queue) {
		rc = -ENOMEM;
		goto out_cleanup_queue;
	}
2148	INIT_WORK(&msb->io_work, msb_io_work);
2149	sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);
2150
2151	if (msb->read_only)
2152		set_disk_ro(msb->disk, 1);
2153
2154	msb_start(card);
2155	device_add_disk(&card->dev, msb->disk, NULL);
2156	dbg("Disk added");
2157	return 0;
2158
out_cleanup_queue:
	blk_cleanup_queue(msb->queue);
	blk_mq_free_tag_set(&msb->tag_set);
out_put_disk:
	put_disk(msb->disk);
2161out_release_id:
2162	mutex_lock(&msb_disk_lock);
2163	idr_remove(&msb_disk_idr, msb->disk_id);
2164	mutex_unlock(&msb_disk_lock);
2165	return rc;
2166}
2167
2168static int msb_probe(struct memstick_dev *card)
2169{
2170	struct msb_data *msb;
2171	int rc = 0;
2172
2173	msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
2174	if (!msb)
2175		return -ENOMEM;
2176	memstick_set_drvdata(card, msb);
2177	msb->card = card;
2178	spin_lock_init(&msb->q_lock);
2179
2180	rc = msb_init_card(card);
2181	if (rc)
2182		goto out_free;
2183
2184	rc = msb_init_disk(card);
2185	if (!rc) {
2186		card->check = msb_check_card;
2187		card->stop = msb_stop;
2188		card->start = msb_start;
2189		return 0;
2190	}
2191out_free:
2192	memstick_set_drvdata(card, NULL);
2193	msb_data_clear(msb);
2194	kfree(msb);
2195	return rc;
2196}
2197
2198static void msb_remove(struct memstick_dev *card)
2199{
2200	struct msb_data *msb = memstick_get_drvdata(card);
2201	unsigned long flags;
2202
2203	if (!msb->io_queue_stopped)
2204		msb_stop(card);
2205
2206	dbg("Removing the disk device");
2207
2208	/* Take care of unhandled + new requests from now on */
2209	spin_lock_irqsave(&msb->q_lock, flags);
2210	msb->card_dead = true;
2211	spin_unlock_irqrestore(&msb->q_lock, flags);
2212	blk_mq_start_hw_queues(msb->queue);
2213
2214	/* Remove the disk */
2215	del_gendisk(msb->disk);
2216	blk_cleanup_queue(msb->queue);
2217	blk_mq_free_tag_set(&msb->tag_set);
2218	msb->queue = NULL;
2219
2220	mutex_lock(&msb_disk_lock);
2221	msb_data_clear(msb);
2222	mutex_unlock(&msb_disk_lock);
2223
2224	msb_disk_release(msb->disk);
2225	memstick_set_drvdata(card, NULL);
2226}
2227
2228#ifdef CONFIG_PM
2229
2230static int msb_suspend(struct memstick_dev *card, pm_message_t state)
2231{
2232	msb_stop(card);
2233	return 0;
2234}
2235
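/*
 * The card may have been removed or swapped while the system slept.
 * Re-read it into a scratch msb_data and compare the boot page, the
 * LBA -> PBA table and the used-blocks bitmap against the pre-suspend
 * state; any mismatch marks the old card as dead.
 */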
2236static int msb_resume(struct memstick_dev *card)
2237{
2238	struct msb_data *msb = memstick_get_drvdata(card);
2239	struct msb_data *new_msb = NULL;
2240	bool card_dead = true;
2241
2242#ifndef CONFIG_MEMSTICK_UNSAFE_RESUME
2243	msb->card_dead = true;
2244	return 0;
2245#endif
2246	mutex_lock(&card->host->lock);
2247
2248	new_msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
2249	if (!new_msb)
2250		goto out;
2251
2252	new_msb->card = card;
2253	memstick_set_drvdata(card, new_msb);
2254	spin_lock_init(&new_msb->q_lock);
2255	sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);
2256
2257	if (msb_init_card(card))
2258		goto out;
2259
2260	if (msb->block_size != new_msb->block_size)
2261		goto out;
2262
2263	if (memcmp(msb->boot_page, new_msb->boot_page,
2264					sizeof(struct ms_boot_page)))
2265		goto out;
2266
2267	if (msb->logical_block_count != new_msb->logical_block_count ||
2268		memcmp(msb->lba_to_pba_table, new_msb->lba_to_pba_table,
2269						msb->logical_block_count))
2270		goto out;
2271
2272	if (msb->block_count != new_msb->block_count ||
2273		memcmp(msb->used_blocks_bitmap, new_msb->used_blocks_bitmap,
2274							msb->block_count / 8))
2275		goto out;
2276
2277	card_dead = false;
2278out:
2279	if (card_dead)
2280		dbg("Card was removed/replaced during suspend");
2281
2282	msb->card_dead = card_dead;
2283	memstick_set_drvdata(card, msb);
2284
2285	if (new_msb) {
2286		msb_data_clear(new_msb);
2287		kfree(new_msb);
2288	}
2289
2290	msb_start(card);
2291	mutex_unlock(&card->host->lock);
2292	return 0;
2293}
2294#else
2295
2296#define msb_suspend NULL
2297#define msb_resume NULL
2298
2299#endif /* CONFIG_PM */
2300
2301static struct memstick_device_id msb_id_tbl[] = {
2302	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
2303	 MEMSTICK_CLASS_FLASH},
2304
2305	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
2306	 MEMSTICK_CLASS_ROM},
2307
2308	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
2309	 MEMSTICK_CLASS_RO},
2310
2311	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
2312	 MEMSTICK_CLASS_WP},
2313
2314	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_DUO, MEMSTICK_CATEGORY_STORAGE_DUO,
2315	 MEMSTICK_CLASS_DUO},
2316	{}
2317};
2318MODULE_DEVICE_TABLE(memstick, msb_id_tbl);
2319
2320
2321static struct memstick_driver msb_driver = {
2322	.driver = {
2323		.name  = DRIVER_NAME,
2324		.owner = THIS_MODULE
2325	},
2326	.id_table = msb_id_tbl,
2327	.probe    = msb_probe,
2328	.remove   = msb_remove,
2329	.suspend  = msb_suspend,
2330	.resume   = msb_resume
2331};
2332
2333static int __init msb_init(void)
2334{
2335	int rc = memstick_register_driver(&msb_driver);
2336	if (rc)
2337		pr_err("failed to register memstick driver (error %d)\n", rc);
2338
2339	return rc;
2340}
2341
2342static void __exit msb_exit(void)
2343{
2344	memstick_unregister_driver(&msb_driver);
2345	idr_destroy(&msb_disk_idr);
2346}
2347
2348module_init(msb_init);
2349module_exit(msb_exit);
2350
2351module_param(cache_flush_timeout, int, S_IRUGO);
MODULE_PARM_DESC(cache_flush_timeout,
				"Cache flush timeout in msec (default 1000)");
2354module_param(debug, int, S_IRUGO | S_IWUSR);
2355MODULE_PARM_DESC(debug, "Debug level (0-2)");
2356
2357module_param(verify_writes, bool, S_IRUGO);
2358MODULE_PARM_DESC(verify_writes, "Read back and check all data that is written");
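
/*
 * The parameters above can be set at module load time; a hypothetical
 * invocation (values for illustration only):
 *
 *   modprobe ms_block cache_flush_timeout=2000 debug=1 verify_writes=1
 */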
2359
2360MODULE_LICENSE("GPL");
2361MODULE_AUTHOR("Maxim Levitsky");
2362MODULE_DESCRIPTION("Sony MemoryStick block device driver");