   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  ms_block.c - Sony MemoryStick (legacy) storage support
   4 *
   5 *  Copyright (C) 2013 Maxim Levitsky <maximlevitsky@gmail.com>
   6 *
   7 * Minor portions of the driver were copied from mspro_block.c which is
   8 * Copyright (C) 2007 Alex Dubov <oakad@yahoo.com>
   9 */
  10#define DRIVER_NAME "ms_block"
  11#define pr_fmt(fmt) DRIVER_NAME ": " fmt
  12
  13#include <linux/module.h>
  14#include <linux/blk-mq.h>
  15#include <linux/memstick.h>
  16#include <linux/idr.h>
  17#include <linux/hdreg.h>
  18#include <linux/delay.h>
  19#include <linux/slab.h>
  20#include <linux/random.h>
  21#include <linux/bitmap.h>
  22#include <linux/scatterlist.h>
  23#include <linux/jiffies.h>
  24#include <linux/workqueue.h>
  25#include <linux/mutex.h>
  26#include "ms_block.h"
  27
  28static int debug;
  29static int cache_flush_timeout = 1000;
  30static bool verify_writes;
  31
  32/*
  33 * Copies a section of 'sg_from', starting at offset 'offset' and of length
  34 * 'len', to another scatterlist of 'to_nents' entries
  35 */
  36static size_t msb_sg_copy(struct scatterlist *sg_from,
  37	struct scatterlist *sg_to, int to_nents, size_t offset, size_t len)
  38{
  39	size_t copied = 0;
  40
  41	while (offset > 0) {
  42		if (offset >= sg_from->length) {
  43			if (sg_is_last(sg_from))
  44				return 0;
  45
  46			offset -= sg_from->length;
  47			sg_from = sg_next(sg_from);
  48			continue;
  49		}
  50
  51		copied = min(len, sg_from->length - offset);
  52		sg_set_page(sg_to, sg_page(sg_from),
  53			copied, sg_from->offset + offset);
  54
  55		len -= copied;
  56		offset = 0;
  57
  58		if (sg_is_last(sg_from) || !len)
  59			goto out;
  60
  61		sg_to = sg_next(sg_to);
  62		to_nents--;
  63		sg_from = sg_next(sg_from);
  64	}
  65
  66	while (len > sg_from->length && to_nents--) {
  67		len -= sg_from->length;
  68		copied += sg_from->length;
  69
  70		sg_set_page(sg_to, sg_page(sg_from),
  71				sg_from->length, sg_from->offset);
  72
  73		if (sg_is_last(sg_from) || !len)
  74			goto out;
  75
  76		sg_from = sg_next(sg_from);
  77		sg_to = sg_next(sg_to);
  78	}
  79
  80	if (len && to_nents) {
  81		sg_set_page(sg_to, sg_page(sg_from), len, sg_from->offset);
  82		copied += len;
  83	}
  84out:
  85	sg_mark_end(sg_to);
  86	return copied;
  87}
  88
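/*
 * Note: msb_sg_copy() returns the number of bytes covered by the resulting
 * scatterlist, which can be less than 'len' when 'sg_from' runs out of data
 * or the 'to_nents' entries are exhausted.
 */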
  89/*
  90 * Compares section of 'sg' starting from offset 'offset' and with length 'len'
  91 * to linear buffer of length 'len' at address 'buffer'
  92 * Returns 0 if equal and -1 otherwise
  93 */
  94static int msb_sg_compare_to_buffer(struct scatterlist *sg,
  95					size_t offset, u8 *buffer, size_t len)
  96{
  97	int retval = 0, cmplen;
  98	struct sg_mapping_iter miter;
  99
 100	sg_miter_start(&miter, sg, sg_nents(sg),
 101					SG_MITER_ATOMIC | SG_MITER_FROM_SG);
 102
 103	while (sg_miter_next(&miter) && len > 0) {
 104		if (offset >= miter.length) {
 105			offset -= miter.length;
 106			continue;
 107		}
 108
 109		cmplen = min(miter.length - offset, len);
 110		retval = memcmp(miter.addr + offset, buffer, cmplen) ? -1 : 0;
 111		if (retval)
 112			break;
 113
 114		buffer += cmplen;
 115		len -= cmplen;
 116		offset = 0;
 117	}
 118
 119	if (!retval && len)
 120		retval = -1;
 121
 122	sg_miter_stop(&miter);
 123	return retval;
 124}
 125
 126
 127/* Get the zone in which the block with logical address 'lba' lives.
 128 * Flash is broken into zones.
 129 * Each zone consists of 512 eraseblocks; the first zone
 130 * uses 494 of them and every following zone uses 496.
 131 * Therefore zone #0 hosts blocks 0-493, zone #1 blocks 494-989, etc...
 132 */
 133static int msb_get_zone_from_lba(int lba)
 134{
 135	if (lba < 494)
 136		return 0;
 137	return ((lba - 494) / 496) + 1;
 138}
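/*
 * For example: lba 493 -> zone 0; lba 494 -> (494 - 494)/496 + 1 = zone 1;
 * lba 990 -> (990 - 494)/496 + 1 = zone 2.
 */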
 139
 140/* Get zone of physical block. Trivial */
 141static int msb_get_zone_from_pba(int pba)
 142{
 143	return pba / MS_BLOCKS_IN_ZONE;
 144}
 145
 146/* Debug test to validate free block counts */
 147static int msb_validate_used_block_bitmap(struct msb_data *msb)
 148{
 149	int total_free_blocks = 0;
 150	int i;
 151
 152	if (!debug)
 153		return 0;
 154
 155	for (i = 0; i < msb->zone_count; i++)
 156		total_free_blocks += msb->free_block_count[i];
 157
 158	if (msb->block_count - bitmap_weight(msb->used_blocks_bitmap,
 159					msb->block_count) == total_free_blocks)
 160		return 0;
 161
 162	pr_err("BUG: free block counts don't match the bitmap");
 163	msb->read_only = true;
 164	return -EINVAL;
 165}
 166
 167/* Mark physical block as used */
 168static void msb_mark_block_used(struct msb_data *msb, int pba)
 169{
 170	int zone = msb_get_zone_from_pba(pba);
 171
 172	if (test_bit(pba, msb->used_blocks_bitmap)) {
 173		pr_err(
 174		"BUG: attempt to mark already used pba %d as used", pba);
 175		msb->read_only = true;
 176		return;
 177	}
 178
 179	if (msb_validate_used_block_bitmap(msb))
 180		return;
 181
 182	/* No races because all IO is single threaded */
 183	__set_bit(pba, msb->used_blocks_bitmap);
 184	msb->free_block_count[zone]--;
 185}
 186
 187/* Mark physical block as free */
 188static void msb_mark_block_unused(struct msb_data *msb, int pba)
 189{
 190	int zone = msb_get_zone_from_pba(pba);
 191
 192	if (!test_bit(pba, msb->used_blocks_bitmap)) {
 193		pr_err("BUG: attempt to mark already unused pba %d as unused", pba);
 194		msb->read_only = true;
 195		return;
 196	}
 197
 198	if (msb_validate_used_block_bitmap(msb))
 199		return;
 200
 201	/* No races because all IO is single threaded */
 202	__clear_bit(pba, msb->used_blocks_bitmap);
 203	msb->free_block_count[zone]++;
 204}
 205
 206/* Invalidate current register window */
 207static void msb_invalidate_reg_window(struct msb_data *msb)
 208{
 209	msb->reg_addr.w_offset = offsetof(struct ms_register, id);
 210	msb->reg_addr.w_length = sizeof(struct ms_id_register);
 211	msb->reg_addr.r_offset = offsetof(struct ms_register, id);
 212	msb->reg_addr.r_length = sizeof(struct ms_id_register);
 213	msb->addr_valid = false;
 214}
 215
 216/* Start a state machine */
 217static int msb_run_state_machine(struct msb_data *msb, int   (*state_func)
 218		(struct memstick_dev *card, struct memstick_request **req))
 219{
 220	struct memstick_dev *card = msb->card;
 221
 222	WARN_ON(msb->state != -1);
 223	msb->int_polling = false;
 224	msb->state = 0;
 225	msb->exit_error = 0;
 226
 227	memset(&card->current_mrq, 0, sizeof(card->current_mrq));
 228
 229	card->next_request = state_func;
 230	memstick_new_req(card->host);
 231	wait_for_completion(&card->mrq_complete);
 232
 233	WARN_ON(msb->state != -1);
 234	return msb->exit_error;
 235}
 236
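/*
 * Note on the state machine plumbing: each h_msb_* handler below is called
 * by the memstick core with the just-completed request. The handler either
 * prepares the next request in card->current_mrq and returns 0, or ends the
 * whole operation with msb_exit_state_machine(), which records the exit
 * error and completes card->mrq_complete, waking msb_run_state_machine().
 */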
 237/* State machines call this to exit */
 238static int msb_exit_state_machine(struct msb_data *msb, int error)
 239{
 240	WARN_ON(msb->state == -1);
 241
 242	msb->state = -1;
 243	msb->exit_error = error;
 244	msb->card->next_request = h_msb_default_bad;
 245
 246	/* Invalidate reg window on errors */
 247	if (error)
 248		msb_invalidate_reg_window(msb);
 249
 250	complete(&msb->card->mrq_complete);
 251	return -ENXIO;
 252}
 253
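/*
 * Convention for the register helpers below: msb_read_regs() and
 * msb_write_regs() return 0 when they had to queue a MS_TPC_SET_RW_REG_ADRS
 * request to move the register window first (the caller keeps its state and
 * retries), and 1 when the actual register transfer was queued.
 * msb_read_int_reg() returns 1 when a MS_TPC_GET_INT request was queued and
 * 0 when the INT value is already available in mrq->data[0].
 */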
 254/* read INT register */
 255static int msb_read_int_reg(struct msb_data *msb, long timeout)
 256{
 257	struct memstick_request *mrq = &msb->card->current_mrq;
 258
 259	WARN_ON(msb->state == -1);
 260
 261	if (!msb->int_polling) {
 262		msb->int_timeout = jiffies +
 263			msecs_to_jiffies(timeout == -1 ? 500 : timeout);
 264		msb->int_polling = true;
 265	} else if (time_after(jiffies, msb->int_timeout)) {
 266		mrq->data[0] = MEMSTICK_INT_CMDNAK;
 267		return 0;
 268	}
 269
 270	if ((msb->caps & MEMSTICK_CAP_AUTO_GET_INT) &&
 271				mrq->need_card_int && !mrq->error) {
 272		mrq->data[0] = mrq->int_reg;
 273		mrq->need_card_int = false;
 274		return 0;
 275	} else {
 276		memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
 277		return 1;
 278	}
 279}
 280
 281/* Read a register */
 282static int msb_read_regs(struct msb_data *msb, int offset, int len)
 283{
 284	struct memstick_request *req = &msb->card->current_mrq;
 285
 286	if (msb->reg_addr.r_offset != offset ||
 287	    msb->reg_addr.r_length != len || !msb->addr_valid) {
 288
 289		msb->reg_addr.r_offset = offset;
 290		msb->reg_addr.r_length = len;
 291		msb->addr_valid = true;
 292
 293		memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
 294			&msb->reg_addr, sizeof(msb->reg_addr));
 295		return 0;
 296	}
 297
 298	memstick_init_req(req, MS_TPC_READ_REG, NULL, len);
 299	return 1;
 300}
 301
 302/* Write a card register */
 303static int msb_write_regs(struct msb_data *msb, int offset, int len, void *buf)
 304{
 305	struct memstick_request *req = &msb->card->current_mrq;
 306
 307	if (msb->reg_addr.w_offset != offset ||
 308		msb->reg_addr.w_length != len  || !msb->addr_valid) {
 309
 310		msb->reg_addr.w_offset = offset;
 311		msb->reg_addr.w_length = len;
 312		msb->addr_valid = true;
 313
 314		memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
 315			&msb->reg_addr, sizeof(msb->reg_addr));
 316		return 0;
 317	}
 318
 319	memstick_init_req(req, MS_TPC_WRITE_REG, buf, len);
 320	return 1;
 321}
 322
 323/* Handler for absence of IO */
 324static int h_msb_default_bad(struct memstick_dev *card,
 325						struct memstick_request **mrq)
 326{
 327	return -ENXIO;
 328}
 329
 330/*
 331 * This function is a handler for reads of one page from device.
  332 * Writes output to msb->current_sg, takes sector address from msb->regs.param
  333 * Can also be used to read extra data only. Set params accordingly.
 334 */
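/*
 * Normal state flow: SEND_BLOCK_ADDRESS -> SEND_READ_COMMAND ->
 * SEND_INT_REQ -> RECEIVE_INT_REQ_RESULT -> [on INT_ERR:
 * SEND_READ_STATUS_REG -> RECEIVE_STATUS_REG ->] SEND_OOB_READ ->
 * RECEIVE_OOB_READ -> SEND_READ_DATA -> RECEIVE_READ_DATA.
 */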
 335static int h_msb_read_page(struct memstick_dev *card,
 336					struct memstick_request **out_mrq)
 337{
 338	struct msb_data *msb = memstick_get_drvdata(card);
 339	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
 340	struct scatterlist sg[2];
 341	u8 command, intreg;
 342
 343	if (mrq->error) {
 344		dbg("read_page, unknown error");
 345		return msb_exit_state_machine(msb, mrq->error);
 346	}
 347again:
 348	switch (msb->state) {
 349	case MSB_RP_SEND_BLOCK_ADDRESS:
  350		/* msb_write_regs sometimes "fails" because it needs to update
  351		 * the reg window first, and thus queues a request to do that.
  352		 * In that case we stay in this state and retry.
  353		 */
 354		if (!msb_write_regs(msb,
 355			offsetof(struct ms_register, param),
 356			sizeof(struct ms_param_register),
 357			(unsigned char *)&msb->regs.param))
 358			return 0;
 359
 360		msb->state = MSB_RP_SEND_READ_COMMAND;
 361		return 0;
 362
 363	case MSB_RP_SEND_READ_COMMAND:
 364		command = MS_CMD_BLOCK_READ;
 365		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
 366		msb->state = MSB_RP_SEND_INT_REQ;
 367		return 0;
 368
 369	case MSB_RP_SEND_INT_REQ:
 370		msb->state = MSB_RP_RECEIVE_INT_REQ_RESULT;
  371		/* If we don't actually need to send the INT read request (only
  372		 * needed in serial mode), then just fall through
  373		 */
 374		if (msb_read_int_reg(msb, -1))
 375			return 0;
 376		fallthrough;
 377
 378	case MSB_RP_RECEIVE_INT_REQ_RESULT:
 379		intreg = mrq->data[0];
 380		msb->regs.status.interrupt = intreg;
 381
 382		if (intreg & MEMSTICK_INT_CMDNAK)
 383			return msb_exit_state_machine(msb, -EIO);
 384
 385		if (!(intreg & MEMSTICK_INT_CED)) {
 386			msb->state = MSB_RP_SEND_INT_REQ;
 387			goto again;
 388		}
 389
 390		msb->int_polling = false;
 391		msb->state = (intreg & MEMSTICK_INT_ERR) ?
 392			MSB_RP_SEND_READ_STATUS_REG : MSB_RP_SEND_OOB_READ;
 393		goto again;
 394
 395	case MSB_RP_SEND_READ_STATUS_REG:
 396		 /* read the status register to understand source of the INT_ERR */
 397		if (!msb_read_regs(msb,
 398			offsetof(struct ms_register, status),
 399			sizeof(struct ms_status_register)))
 400			return 0;
 401
 402		msb->state = MSB_RP_RECEIVE_STATUS_REG;
 403		return 0;
 404
 405	case MSB_RP_RECEIVE_STATUS_REG:
 406		msb->regs.status = *(struct ms_status_register *)mrq->data;
 407		msb->state = MSB_RP_SEND_OOB_READ;
 408		fallthrough;
 409
 410	case MSB_RP_SEND_OOB_READ:
 411		if (!msb_read_regs(msb,
 412			offsetof(struct ms_register, extra_data),
 413			sizeof(struct ms_extra_data_register)))
 414			return 0;
 415
 416		msb->state = MSB_RP_RECEIVE_OOB_READ;
 417		return 0;
 418
 419	case MSB_RP_RECEIVE_OOB_READ:
 420		msb->regs.extra_data =
 421			*(struct ms_extra_data_register *) mrq->data;
 422		msb->state = MSB_RP_SEND_READ_DATA;
 423		fallthrough;
 424
 425	case MSB_RP_SEND_READ_DATA:
  426		/* Skip this state if we only read the OOB */
 427		if (msb->regs.param.cp == MEMSTICK_CP_EXTRA) {
 428			msb->state = MSB_RP_RECEIVE_READ_DATA;
 429			goto again;
 430		}
 431
 432		sg_init_table(sg, ARRAY_SIZE(sg));
 433		msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
 434			msb->current_sg_offset,
 435			msb->page_size);
 436
 437		memstick_init_req_sg(mrq, MS_TPC_READ_LONG_DATA, sg);
 438		msb->state = MSB_RP_RECEIVE_READ_DATA;
 439		return 0;
 440
 441	case MSB_RP_RECEIVE_READ_DATA:
 442		if (!(msb->regs.status.interrupt & MEMSTICK_INT_ERR)) {
 443			msb->current_sg_offset += msb->page_size;
 444			return msb_exit_state_machine(msb, 0);
 445		}
 446
 447		if (msb->regs.status.status1 & MEMSTICK_UNCORR_ERROR) {
 448			dbg("read_page: uncorrectable error");
 449			return msb_exit_state_machine(msb, -EBADMSG);
 450		}
 451
 452		if (msb->regs.status.status1 & MEMSTICK_CORR_ERROR) {
 453			dbg("read_page: correctable error");
 454			msb->current_sg_offset += msb->page_size;
 455			return msb_exit_state_machine(msb, -EUCLEAN);
 456		} else {
 457			dbg("read_page: INT error, but no status error bits");
 458			return msb_exit_state_machine(msb, -EIO);
 459		}
 460	}
 461
 462	BUG();
 463}
 464
 465/*
  466 * Handler for writes of exactly one block.
  467 * Takes address from msb->regs.param.
  468 * Writes the same extra data to every page, also taken
  469 * from msb->regs.extra_data.
  470 * Returns -EBADMSG if the write fails due to an uncorrectable error, or
  471 * -EIO if the device refuses to take the command or something else fails
 472 */
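/*
 * State flow: SEND_WRITE_PARAMS -> SEND_WRITE_OOB -> SEND_WRITE_COMMAND,
 * then SEND_INT_REQ -> RECEIVE_INT_REQ -> SEND_WRITE_DATA ->
 * RECEIVE_WRITE_CONFIRMATION repeats once per page until current_page
 * reaches pages_in_block, after which CED is polled to confirm the block.
 */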
 473static int h_msb_write_block(struct memstick_dev *card,
 474					struct memstick_request **out_mrq)
 475{
 476	struct msb_data *msb = memstick_get_drvdata(card);
 477	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
 478	struct scatterlist sg[2];
 479	u8 intreg, command;
 480
 481	if (mrq->error)
 482		return msb_exit_state_machine(msb, mrq->error);
 483
 484again:
 485	switch (msb->state) {
 486
  487	/* HACK: JMicron handling of TPCs between 8 and
  488	 *	sizeof(memstick_request.data) is broken due to a hardware
  489	 *	bug in the PIO mode that is used for these TPCs.
  490	 *	Therefore we split the write
  491	 */
 492
 493	case MSB_WB_SEND_WRITE_PARAMS:
 494		if (!msb_write_regs(msb,
 495			offsetof(struct ms_register, param),
 496			sizeof(struct ms_param_register),
 497			&msb->regs.param))
 498			return 0;
 499
 500		msb->state = MSB_WB_SEND_WRITE_OOB;
 501		return 0;
 502
 503	case MSB_WB_SEND_WRITE_OOB:
 504		if (!msb_write_regs(msb,
 505			offsetof(struct ms_register, extra_data),
 506			sizeof(struct ms_extra_data_register),
 507			&msb->regs.extra_data))
 508			return 0;
 509		msb->state = MSB_WB_SEND_WRITE_COMMAND;
 510		return 0;
 511
 512
 513	case MSB_WB_SEND_WRITE_COMMAND:
 514		command = MS_CMD_BLOCK_WRITE;
 515		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
 516		msb->state = MSB_WB_SEND_INT_REQ;
 517		return 0;
 518
 519	case MSB_WB_SEND_INT_REQ:
 520		msb->state = MSB_WB_RECEIVE_INT_REQ;
 521		if (msb_read_int_reg(msb, -1))
 522			return 0;
 523		fallthrough;
 524
 525	case MSB_WB_RECEIVE_INT_REQ:
 526		intreg = mrq->data[0];
 527		msb->regs.status.interrupt = intreg;
 528
 529		/* errors mean out of here, and fast... */
 530		if (intreg & (MEMSTICK_INT_CMDNAK))
 531			return msb_exit_state_machine(msb, -EIO);
 532
 533		if (intreg & MEMSTICK_INT_ERR)
 534			return msb_exit_state_machine(msb, -EBADMSG);
 535
 536
 537		/* for last page we need to poll CED */
 538		if (msb->current_page == msb->pages_in_block) {
 539			if (intreg & MEMSTICK_INT_CED)
 540				return msb_exit_state_machine(msb, 0);
 541			msb->state = MSB_WB_SEND_INT_REQ;
 542			goto again;
 543
 544		}
 545
 546		/* for non-last page we need BREQ before writing next chunk */
 547		if (!(intreg & MEMSTICK_INT_BREQ)) {
 548			msb->state = MSB_WB_SEND_INT_REQ;
 549			goto again;
 550		}
 551
 552		msb->int_polling = false;
 553		msb->state = MSB_WB_SEND_WRITE_DATA;
 554		fallthrough;
 555
 556	case MSB_WB_SEND_WRITE_DATA:
 557		sg_init_table(sg, ARRAY_SIZE(sg));
 558
 559		if (msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
 560			msb->current_sg_offset,
 561			msb->page_size) < msb->page_size)
 562			return msb_exit_state_machine(msb, -EIO);
 563
 564		memstick_init_req_sg(mrq, MS_TPC_WRITE_LONG_DATA, sg);
 565		mrq->need_card_int = 1;
 566		msb->state = MSB_WB_RECEIVE_WRITE_CONFIRMATION;
 567		return 0;
 568
 569	case MSB_WB_RECEIVE_WRITE_CONFIRMATION:
 570		msb->current_page++;
 571		msb->current_sg_offset += msb->page_size;
 572		msb->state = MSB_WB_SEND_INT_REQ;
 573		goto again;
 574	default:
 575		BUG();
 576	}
 577
 578	return 0;
 579}
 580
 581/*
  582 * This function is used to send simple IO requests to the device that
  583 * consist of a register write + command
 584 */
 585static int h_msb_send_command(struct memstick_dev *card,
 586					struct memstick_request **out_mrq)
 587{
 588	struct msb_data *msb = memstick_get_drvdata(card);
 589	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
 590	u8 intreg;
 591
 592	if (mrq->error) {
 593		dbg("send_command: unknown error");
 594		return msb_exit_state_machine(msb, mrq->error);
 595	}
 596again:
 597	switch (msb->state) {
 598
 599	/* HACK: see h_msb_write_block */
 600	case MSB_SC_SEND_WRITE_PARAMS: /* write param register*/
 601		if (!msb_write_regs(msb,
 602			offsetof(struct ms_register, param),
 603			sizeof(struct ms_param_register),
 604			&msb->regs.param))
 605			return 0;
 606		msb->state = MSB_SC_SEND_WRITE_OOB;
 607		return 0;
 608
 609	case MSB_SC_SEND_WRITE_OOB:
 610		if (!msb->command_need_oob) {
 611			msb->state = MSB_SC_SEND_COMMAND;
 612			goto again;
 613		}
 614
 615		if (!msb_write_regs(msb,
 616			offsetof(struct ms_register, extra_data),
 617			sizeof(struct ms_extra_data_register),
 618			&msb->regs.extra_data))
 619			return 0;
 620
 621		msb->state = MSB_SC_SEND_COMMAND;
 622		return 0;
 623
 624	case MSB_SC_SEND_COMMAND:
 625		memstick_init_req(mrq, MS_TPC_SET_CMD, &msb->command_value, 1);
 626		msb->state = MSB_SC_SEND_INT_REQ;
 627		return 0;
 628
 629	case MSB_SC_SEND_INT_REQ:
 630		msb->state = MSB_SC_RECEIVE_INT_REQ;
 631		if (msb_read_int_reg(msb, -1))
 632			return 0;
 633		fallthrough;
 634
 635	case MSB_SC_RECEIVE_INT_REQ:
 636		intreg = mrq->data[0];
 637
 638		if (intreg & MEMSTICK_INT_CMDNAK)
 639			return msb_exit_state_machine(msb, -EIO);
 640		if (intreg & MEMSTICK_INT_ERR)
 641			return msb_exit_state_machine(msb, -EBADMSG);
 642
 643		if (!(intreg & MEMSTICK_INT_CED)) {
 644			msb->state = MSB_SC_SEND_INT_REQ;
 645			goto again;
 646		}
 647
 648		return msb_exit_state_machine(msb, 0);
 649	}
 650
 651	BUG();
 652}
 653
 654/* Small handler for card reset */
 655static int h_msb_reset(struct memstick_dev *card,
 656					struct memstick_request **out_mrq)
 657{
 658	u8 command = MS_CMD_RESET;
 659	struct msb_data *msb = memstick_get_drvdata(card);
 660	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
 661
 662	if (mrq->error)
 663		return msb_exit_state_machine(msb, mrq->error);
 664
 665	switch (msb->state) {
 666	case MSB_RS_SEND:
 667		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
 668		mrq->need_card_int = 0;
 669		msb->state = MSB_RS_CONFIRM;
 670		return 0;
 671	case MSB_RS_CONFIRM:
 672		return msb_exit_state_machine(msb, 0);
 673	}
 674	BUG();
 675}
 676
 677/* This handler is used to do the serial->parallel switch */
 678static int h_msb_parallel_switch(struct memstick_dev *card,
 679					struct memstick_request **out_mrq)
 680{
 681	struct msb_data *msb = memstick_get_drvdata(card);
 682	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
 683	struct memstick_host *host = card->host;
 684
 685	if (mrq->error) {
 686		dbg("parallel_switch: error");
 687		msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
 688		return msb_exit_state_machine(msb, mrq->error);
 689	}
 690
 691	switch (msb->state) {
 692	case MSB_PS_SEND_SWITCH_COMMAND:
 693		/* Set the parallel interface on memstick side */
 694		msb->regs.param.system |= MEMSTICK_SYS_PAM;
 695
 696		if (!msb_write_regs(msb,
 697			offsetof(struct ms_register, param),
 698			1,
 699			(unsigned char *)&msb->regs.param))
 700			return 0;
 701
 702		msb->state = MSB_PS_SWICH_HOST;
 703		return 0;
 704
 705	case MSB_PS_SWICH_HOST:
 706		 /* Set parallel interface on our side + send a dummy request
 707		  * to see if card responds
 708		  */
 709		host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_PAR4);
 710		memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
 711		msb->state = MSB_PS_CONFIRM;
 712		return 0;
 713
 714	case MSB_PS_CONFIRM:
 715		return msb_exit_state_machine(msb, 0);
 716	}
 717
 718	BUG();
 719}
 720
 721static int msb_switch_to_parallel(struct msb_data *msb);
 722
 723/* Reset the card, to guard against hw errors being treated as bad blocks */
 724static int msb_reset(struct msb_data *msb, bool full)
 725{
 726
 727	bool was_parallel = msb->regs.param.system & MEMSTICK_SYS_PAM;
 728	struct memstick_dev *card = msb->card;
 729	struct memstick_host *host = card->host;
 730	int error;
 731
 732	/* Reset the card */
 733	msb->regs.param.system = MEMSTICK_SYS_BAMD;
 734
 735	if (full) {
 736		error =  host->set_param(host,
 737					MEMSTICK_POWER, MEMSTICK_POWER_OFF);
 738		if (error)
 739			goto out_error;
 740
 741		msb_invalidate_reg_window(msb);
 742
 743		error = host->set_param(host,
 744					MEMSTICK_POWER, MEMSTICK_POWER_ON);
 745		if (error)
 746			goto out_error;
 747
 748		error = host->set_param(host,
 749					MEMSTICK_INTERFACE, MEMSTICK_SERIAL);
 750		if (error) {
 751out_error:
 752			dbg("Failed to reset the host controller");
 753			msb->read_only = true;
 754			return -EFAULT;
 755		}
 756	}
 757
 758	error = msb_run_state_machine(msb, h_msb_reset);
 759	if (error) {
 760		dbg("Failed to reset the card");
 761		msb->read_only = true;
 762		return -ENODEV;
 763	}
 764
  765	/* Restore parallel mode if it was enabled before the reset */
 766	if (was_parallel)
 767		msb_switch_to_parallel(msb);
 768	return 0;
 769}
 770
 771/* Attempts to switch interface to parallel mode */
 772static int msb_switch_to_parallel(struct msb_data *msb)
 773{
 774	int error;
 775
 776	error = msb_run_state_machine(msb, h_msb_parallel_switch);
 777	if (error) {
 778		pr_err("Switch to parallel failed");
 779		msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
 780		msb_reset(msb, true);
 781		return -EFAULT;
 782	}
 783
 784	msb->caps |= MEMSTICK_CAP_AUTO_GET_INT;
 785	return 0;
 786}
 787
 788/* Changes overwrite flag on a page */
 789static int msb_set_overwrite_flag(struct msb_data *msb,
 790						u16 pba, u8 page, u8 flag)
 791{
 792	if (msb->read_only)
 793		return -EROFS;
 794
 795	msb->regs.param.block_address = cpu_to_be16(pba);
 796	msb->regs.param.page_address = page;
 797	msb->regs.param.cp = MEMSTICK_CP_OVERWRITE;
 798	msb->regs.extra_data.overwrite_flag = flag;
 799	msb->command_value = MS_CMD_BLOCK_WRITE;
 800	msb->command_need_oob = true;
 801
  802	dbg_verbose("changing overwrite flag to %02x for pba %d, page %d",
 803							flag, pba, page);
 804	return msb_run_state_machine(msb, h_msb_send_command);
 805}
 806
 807static int msb_mark_bad(struct msb_data *msb, int pba)
 808{
 809	pr_notice("marking pba %d as bad", pba);
 810	msb_reset(msb, true);
 811	return msb_set_overwrite_flag(
 812			msb, pba, 0, 0xFF & ~MEMSTICK_OVERWRITE_BKST);
 813}
 814
 815static int msb_mark_page_bad(struct msb_data *msb, int pba, int page)
 816{
 817	dbg("marking page %d of pba %d as bad", page, pba);
 818	msb_reset(msb, true);
 819	return msb_set_overwrite_flag(msb,
 820		pba, page, ~MEMSTICK_OVERWRITE_PGST0);
 821}
 822
 823/* Erases one physical block */
 824static int msb_erase_block(struct msb_data *msb, u16 pba)
 825{
 826	int error, try;
 827
 828	if (msb->read_only)
 829		return -EROFS;
 830
 831	dbg_verbose("erasing pba %d", pba);
 832
 833	for (try = 1; try < 3; try++) {
 834		msb->regs.param.block_address = cpu_to_be16(pba);
 835		msb->regs.param.page_address = 0;
 836		msb->regs.param.cp = MEMSTICK_CP_BLOCK;
 837		msb->command_value = MS_CMD_BLOCK_ERASE;
 838		msb->command_need_oob = false;
 839
 840
 841		error = msb_run_state_machine(msb, h_msb_send_command);
 842		if (!error || msb_reset(msb, true))
 843			break;
 844	}
 845
  846	if (error) {
  847		pr_err("erase failed, marking pba %d as bad", pba);
  848		msb_mark_bad(msb, pba);
  849	} else {
  850		dbg_verbose("erase success, marking pba %d as unused", pba);
  851	}
 852	msb_mark_block_unused(msb, pba);
 853	__set_bit(pba, msb->erased_blocks_bitmap);
 854	return error;
 855}
 856
 857/* Reads one page from device */
 858static int msb_read_page(struct msb_data *msb,
 859	u16 pba, u8 page, struct ms_extra_data_register *extra,
 860					struct scatterlist *sg,  int offset)
 861{
 862	int try, error;
 863
 864	if (pba == MS_BLOCK_INVALID) {
 865		unsigned long flags;
 866		struct sg_mapping_iter miter;
 867		size_t len = msb->page_size;
 868
 869		dbg_verbose("read unmapped sector. returning 0xFF");
 870
 871		local_irq_save(flags);
 872		sg_miter_start(&miter, sg, sg_nents(sg),
 873				SG_MITER_ATOMIC | SG_MITER_TO_SG);
 874
 875		while (sg_miter_next(&miter) && len > 0) {
 876
 877			int chunklen;
 878
 879			if (offset && offset >= miter.length) {
 880				offset -= miter.length;
 881				continue;
 882			}
 883
 884			chunklen = min(miter.length - offset, len);
 885			memset(miter.addr + offset, 0xFF, chunklen);
 886			len -= chunklen;
 887			offset = 0;
 888		}
 889
 890		sg_miter_stop(&miter);
 891		local_irq_restore(flags);
 892
 893		if (offset)
 894			return -EFAULT;
 895
 896		if (extra)
 897			memset(extra, 0xFF, sizeof(*extra));
 898		return 0;
 899	}
 900
 901	if (pba >= msb->block_count) {
 902		pr_err("BUG: attempt to read beyond the end of the card at pba %d", pba);
 903		return -EINVAL;
 904	}
 905
 906	for (try = 1; try < 3; try++) {
 907		msb->regs.param.block_address = cpu_to_be16(pba);
 908		msb->regs.param.page_address = page;
 909		msb->regs.param.cp = MEMSTICK_CP_PAGE;
 910
 911		msb->current_sg = sg;
 912		msb->current_sg_offset = offset;
 913		error = msb_run_state_machine(msb, h_msb_read_page);
 914
 915
 916		if (error == -EUCLEAN) {
 917			pr_notice("correctable error on pba %d, page %d",
 918				pba, page);
 919			error = 0;
 920		}
 921
 922		if (!error && extra)
 923			*extra = msb->regs.extra_data;
 924
 925		if (!error || msb_reset(msb, true))
 926			break;
 927
 928	}
 929
 930	/* Mark bad pages */
 931	if (error == -EBADMSG) {
 932		pr_err("uncorrectable error on read of pba %d, page %d",
 933			pba, page);
 934
 935		if (msb->regs.extra_data.overwrite_flag &
 936					MEMSTICK_OVERWRITE_PGST0)
 937			msb_mark_page_bad(msb, pba, page);
 938		return -EBADMSG;
 939	}
 940
 941	if (error)
 942		pr_err("read of pba %d, page %d failed with error %d",
 943			pba, page, error);
 944	return error;
 945}
 946
 947/* Reads oob of page only */
 948static int msb_read_oob(struct msb_data *msb, u16 pba, u16 page,
 949	struct ms_extra_data_register *extra)
 950{
 951	int error;
 952
 953	BUG_ON(!extra);
 954	msb->regs.param.block_address = cpu_to_be16(pba);
 955	msb->regs.param.page_address = page;
 956	msb->regs.param.cp = MEMSTICK_CP_EXTRA;
 957
  958	if (pba >= msb->block_count) {
 959		pr_err("BUG: attempt to read beyond the end of card at pba %d", pba);
 960		return -EINVAL;
 961	}
 962
 963	error = msb_run_state_machine(msb, h_msb_read_page);
 964	*extra = msb->regs.extra_data;
 965
 966	if (error == -EUCLEAN) {
 967		pr_notice("correctable error on pba %d, page %d",
 968			pba, page);
 969		return 0;
 970	}
 971
 972	return error;
 973}
 974
 975/* Reads a block and compares it with data contained in scatterlist orig_sg */
 976static int msb_verify_block(struct msb_data *msb, u16 pba,
 977				struct scatterlist *orig_sg,  int offset)
 978{
 979	struct scatterlist sg;
 980	int page = 0, error;
 981
 982	sg_init_one(&sg, msb->block_buffer, msb->block_size);
 983
 984	while (page < msb->pages_in_block) {
 985
 986		error = msb_read_page(msb, pba, page,
 987				NULL, &sg, page * msb->page_size);
 988		if (error)
 989			return error;
 990		page++;
 991	}
 992
 993	if (msb_sg_compare_to_buffer(orig_sg, offset,
 994				msb->block_buffer, msb->block_size))
 995		return -EIO;
 996	return 0;
 997}
 998
  999/* Writes exactly one block + oob */
1000static int msb_write_block(struct msb_data *msb,
1001			u16 pba, u32 lba, struct scatterlist *sg, int offset)
1002{
1003	int error, current_try = 1;
1004
1005	BUG_ON(sg->length < msb->page_size);
1006
1007	if (msb->read_only)
1008		return -EROFS;
1009
1010	if (pba == MS_BLOCK_INVALID) {
1011		pr_err(
1012			"BUG: write: attempt to write MS_BLOCK_INVALID block");
1013		return -EINVAL;
1014	}
1015
1016	if (pba >= msb->block_count || lba >= msb->logical_block_count) {
1017		pr_err(
1018		"BUG: write: attempt to write beyond the end of device");
1019		return -EINVAL;
1020	}
1021
1022	if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
1023		pr_err("BUG: write: lba zone mismatch");
1024		return -EINVAL;
1025	}
1026
1027	if (pba == msb->boot_block_locations[0] ||
1028		pba == msb->boot_block_locations[1]) {
1029		pr_err("BUG: write: attempt to write to boot blocks!");
1030		return -EINVAL;
1031	}
1032
1033	while (1) {
1034
1035		if (msb->read_only)
1036			return -EROFS;
1037
1038		msb->regs.param.cp = MEMSTICK_CP_BLOCK;
1039		msb->regs.param.page_address = 0;
1040		msb->regs.param.block_address = cpu_to_be16(pba);
1041
1042		msb->regs.extra_data.management_flag = 0xFF;
1043		msb->regs.extra_data.overwrite_flag = 0xF8;
1044		msb->regs.extra_data.logical_address = cpu_to_be16(lba);
1045
1046		msb->current_sg = sg;
1047		msb->current_sg_offset = offset;
1048		msb->current_page = 0;
1049
1050		error = msb_run_state_machine(msb, h_msb_write_block);
1051
 1052		/* The pba we just wrote to is assumed to be erased, since it
 1053		 * came from the erased pool. If it wasn't actually erased,
 1054		 * the write will still succeed and will merely clear bits
 1055		 * that were already set in the block, so verify that what we
 1056		 * have written matches what we expect.
 1057		 * We do trust the blocks that we erased ourselves
 1058		 */
1059		if (!error && (verify_writes ||
1060				!test_bit(pba, msb->erased_blocks_bitmap)))
1061			error = msb_verify_block(msb, pba, sg, offset);
1062
1063		if (!error)
1064			break;
1065
1066		if (current_try > 1 || msb_reset(msb, true))
1067			break;
1068
1069		pr_err("write failed, trying to erase the pba %d", pba);
1070		error = msb_erase_block(msb, pba);
1071		if (error)
1072			break;
1073
1074		current_try++;
1075	}
1076	return error;
1077}
1078
1079/* Finds a free block for write replacement */
1080static u16 msb_get_free_block(struct msb_data *msb, int zone)
1081{
1082	u16 pos;
1083	int pba = zone * MS_BLOCKS_IN_ZONE;
1084	int i;
1085
1086	get_random_bytes(&pos, sizeof(pos));
1087
1088	if (!msb->free_block_count[zone]) {
 1089		pr_err("no free blocks left in zone %d to use for a write (media is worn out), switching to RO mode", zone);
1090		msb->read_only = true;
1091		return MS_BLOCK_INVALID;
1092	}
1093
1094	pos %= msb->free_block_count[zone];
1095
1096	dbg_verbose("have %d choices for a free block, selected randomly: %d",
1097		msb->free_block_count[zone], pos);
1098
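	/*
	 * Scan for the pos'th free (zero) bit within the zone: start at the
	 * zone's first pba and step over pos further free blocks, which
	 * spreads writes randomly over the zone (simple wear leveling).
	 */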
1099	pba = find_next_zero_bit(msb->used_blocks_bitmap,
1100							msb->block_count, pba);
1101	for (i = 0; i < pos; ++i)
1102		pba = find_next_zero_bit(msb->used_blocks_bitmap,
1103						msb->block_count, pba + 1);
1104
1105	dbg_verbose("result of the free blocks scan: pba %d", pba);
1106
1107	if (pba == msb->block_count || (msb_get_zone_from_pba(pba)) != zone) {
1108		pr_err("BUG: can't get a free block");
1109		msb->read_only = true;
1110		return MS_BLOCK_INVALID;
1111	}
1112
1113	msb_mark_block_used(msb, pba);
1114	return pba;
1115}
1116
1117static int msb_update_block(struct msb_data *msb, u16 lba,
1118	struct scatterlist *sg, int offset)
1119{
1120	u16 pba, new_pba;
1121	int error, try;
1122
1123	pba = msb->lba_to_pba_table[lba];
 1124	dbg_verbose("start of a block update at lba %d, pba %d", lba, pba);
1125
1126	if (pba != MS_BLOCK_INVALID) {
1127		dbg_verbose("setting the update flag on the block");
1128		msb_set_overwrite_flag(msb, pba, 0,
1129				0xFF & ~MEMSTICK_OVERWRITE_UDST);
1130	}
1131
1132	for (try = 0; try < 3; try++) {
1133		new_pba = msb_get_free_block(msb,
1134			msb_get_zone_from_lba(lba));
1135
1136		if (new_pba == MS_BLOCK_INVALID) {
1137			error = -EIO;
1138			goto out;
1139		}
1140
1141		dbg_verbose("block update: writing updated block to the pba %d",
1142								new_pba);
1143		error = msb_write_block(msb, new_pba, lba, sg, offset);
1144		if (error == -EBADMSG) {
1145			msb_mark_bad(msb, new_pba);
1146			continue;
1147		}
1148
1149		if (error)
1150			goto out;
1151
1152		dbg_verbose("block update: erasing the old block");
1153		msb_erase_block(msb, pba);
1154		msb->lba_to_pba_table[lba] = new_pba;
1155		return 0;
1156	}
1157out:
1158	if (error) {
 1159		pr_err("block update error after %d tries, switching to r/o mode", try);
1160		msb->read_only = true;
1161	}
1162	return error;
1163}
1164
 1165/* Converts endianness in the boot block for easy use */
1166static void msb_fix_boot_page_endianness(struct ms_boot_page *p)
1167{
1168	p->header.block_id = be16_to_cpu(p->header.block_id);
1169	p->header.format_reserved = be16_to_cpu(p->header.format_reserved);
1170	p->entry.disabled_block.start_addr
1171		= be32_to_cpu(p->entry.disabled_block.start_addr);
1172	p->entry.disabled_block.data_size
1173		= be32_to_cpu(p->entry.disabled_block.data_size);
1174	p->entry.cis_idi.start_addr
1175		= be32_to_cpu(p->entry.cis_idi.start_addr);
1176	p->entry.cis_idi.data_size
1177		= be32_to_cpu(p->entry.cis_idi.data_size);
1178	p->attr.block_size = be16_to_cpu(p->attr.block_size);
1179	p->attr.number_of_blocks = be16_to_cpu(p->attr.number_of_blocks);
1180	p->attr.number_of_effective_blocks
1181		= be16_to_cpu(p->attr.number_of_effective_blocks);
1182	p->attr.page_size = be16_to_cpu(p->attr.page_size);
1183	p->attr.memory_manufacturer_code
1184		= be16_to_cpu(p->attr.memory_manufacturer_code);
1185	p->attr.memory_device_code = be16_to_cpu(p->attr.memory_device_code);
1186	p->attr.implemented_capacity
1187		= be16_to_cpu(p->attr.implemented_capacity);
1188	p->attr.controller_number = be16_to_cpu(p->attr.controller_number);
1189	p->attr.controller_function = be16_to_cpu(p->attr.controller_function);
1190}
1191
1192static int msb_read_boot_blocks(struct msb_data *msb)
1193{
1194	int pba = 0;
1195	struct scatterlist sg;
1196	struct ms_extra_data_register extra;
1197	struct ms_boot_page *page;
1198
1199	msb->boot_block_locations[0] = MS_BLOCK_INVALID;
1200	msb->boot_block_locations[1] = MS_BLOCK_INVALID;
1201	msb->boot_block_count = 0;
1202
1203	dbg_verbose("Start of a scan for the boot blocks");
1204
1205	if (!msb->boot_page) {
1206		page = kmalloc_array(2, sizeof(struct ms_boot_page),
1207				     GFP_KERNEL);
1208		if (!page)
1209			return -ENOMEM;
1210
1211		msb->boot_page = page;
1212	} else
1213		page = msb->boot_page;
1214
1215	msb->block_count = MS_BLOCK_MAX_BOOT_ADDR;
1216
1217	for (pba = 0; pba < MS_BLOCK_MAX_BOOT_ADDR; pba++) {
1218
1219		sg_init_one(&sg, page, sizeof(*page));
1220		if (msb_read_page(msb, pba, 0, &extra, &sg, 0)) {
1221			dbg("boot scan: can't read pba %d", pba);
1222			continue;
1223		}
1224
1225		if (extra.management_flag & MEMSTICK_MANAGEMENT_SYSFLG) {
1226			dbg("management flag doesn't indicate boot block %d",
1227									pba);
1228			continue;
1229		}
1230
1231		if (be16_to_cpu(page->header.block_id) != MS_BLOCK_BOOT_ID) {
1232			dbg("the pba at %d doesn't contain boot block ID", pba);
1233			continue;
1234		}
1235
1236		msb_fix_boot_page_endianness(page);
1237		msb->boot_block_locations[msb->boot_block_count] = pba;
1238
1239		page++;
1240		msb->boot_block_count++;
1241
1242		if (msb->boot_block_count == 2)
1243			break;
1244	}
1245
1246	if (!msb->boot_block_count) {
1247		pr_err("media doesn't contain master page, aborting");
1248		return -EIO;
1249	}
1250
1251	dbg_verbose("End of scan for boot blocks");
1252	return 0;
1253}
1254
1255static int msb_read_bad_block_table(struct msb_data *msb, int block_nr)
1256{
1257	struct ms_boot_page *boot_block;
1258	struct scatterlist sg;
1259	u16 *buffer = NULL;
1260	int offset = 0;
1261	int i, error = 0;
1262	int data_size, data_offset, page, page_offset, size_to_read;
1263	u16 pba;
1264
1265	BUG_ON(block_nr > 1);
1266	boot_block = &msb->boot_page[block_nr];
1267	pba = msb->boot_block_locations[block_nr];
1268
1269	if (msb->boot_block_locations[block_nr] == MS_BLOCK_INVALID)
1270		return -EINVAL;
1271
1272	data_size = boot_block->entry.disabled_block.data_size;
1273	data_offset = sizeof(struct ms_boot_page) +
1274			boot_block->entry.disabled_block.start_addr;
1275	if (!data_size)
1276		return 0;
1277
1278	page = data_offset / msb->page_size;
1279	page_offset = data_offset % msb->page_size;
1280	size_to_read =
1281		DIV_ROUND_UP(data_size + page_offset, msb->page_size) *
1282			msb->page_size;
1283
1284	dbg("reading bad block of boot block at pba %d, offset %d len %d",
1285		pba, data_offset, data_size);
1286
1287	buffer = kzalloc(size_to_read, GFP_KERNEL);
1288	if (!buffer)
1289		return -ENOMEM;
1290
1291	/* Read the buffer */
1292	sg_init_one(&sg, buffer, size_to_read);
1293
1294	while (offset < size_to_read) {
1295		error = msb_read_page(msb, pba, page, NULL, &sg, offset);
1296		if (error)
1297			goto out;
1298
1299		page++;
1300		offset += msb->page_size;
1301
1302		if (page == msb->pages_in_block) {
1303			pr_err(
1304			"bad block table extends beyond the boot block");
1305			break;
1306		}
1307	}
1308
1309	/* Process the bad block table */
1310	for (i = page_offset; i < data_size / sizeof(u16); i++) {
1311
1312		u16 bad_block = be16_to_cpu(buffer[i]);
1313
1314		if (bad_block >= msb->block_count) {
1315			dbg("bad block table contains invalid block %d",
1316								bad_block);
1317			continue;
1318		}
1319
1320		if (test_bit(bad_block, msb->used_blocks_bitmap))  {
1321			dbg("duplicate bad block %d in the table",
1322				bad_block);
1323			continue;
1324		}
1325
1326		dbg("block %d is marked as factory bad", bad_block);
1327		msb_mark_block_used(msb, bad_block);
1328	}
1329out:
1330	kfree(buffer);
1331	return error;
1332}
1333
1334static int msb_ftl_initialize(struct msb_data *msb)
1335{
1336	int i;
1337
1338	if (msb->ftl_initialized)
1339		return 0;
1340
1341	msb->zone_count = msb->block_count / MS_BLOCKS_IN_ZONE;
1342	msb->logical_block_count = msb->zone_count * 496 - 2;
1343
1344	msb->used_blocks_bitmap = bitmap_zalloc(msb->block_count, GFP_KERNEL);
1345	msb->erased_blocks_bitmap = bitmap_zalloc(msb->block_count, GFP_KERNEL);
1346	msb->lba_to_pba_table =
1347		kmalloc_array(msb->logical_block_count, sizeof(u16),
1348			      GFP_KERNEL);
1349
1350	if (!msb->used_blocks_bitmap || !msb->lba_to_pba_table ||
1351						!msb->erased_blocks_bitmap) {
1352		bitmap_free(msb->used_blocks_bitmap);
1353		bitmap_free(msb->erased_blocks_bitmap);
 1354		kfree(msb->lba_to_pba_table);
 1355		return -ENOMEM;
1356	}
1357
1358	for (i = 0; i < msb->zone_count; i++)
1359		msb->free_block_count[i] = MS_BLOCKS_IN_ZONE;
1360
1361	memset(msb->lba_to_pba_table, MS_BLOCK_INVALID,
1362			msb->logical_block_count * sizeof(u16));
1363
1364	dbg("initial FTL tables created. Zone count = %d, Logical block count = %d",
1365		msb->zone_count, msb->logical_block_count);
1366
1367	msb->ftl_initialized = true;
1368	return 0;
1369}
1370
1371static int msb_ftl_scan(struct msb_data *msb)
1372{
1373	u16 pba, lba, other_block;
1374	u8 overwrite_flag, management_flag, other_overwrite_flag;
1375	int error;
1376	struct ms_extra_data_register extra;
1377	u8 *overwrite_flags = kzalloc(msb->block_count, GFP_KERNEL);
1378
1379	if (!overwrite_flags)
1380		return -ENOMEM;
1381
1382	dbg("Start of media scanning");
1383	for (pba = 0; pba < msb->block_count; pba++) {
1384
1385		if (pba == msb->boot_block_locations[0] ||
1386			pba == msb->boot_block_locations[1]) {
1387			dbg_verbose("pba %05d -> [boot block]", pba);
1388			msb_mark_block_used(msb, pba);
1389			continue;
1390		}
1391
1392		if (test_bit(pba, msb->used_blocks_bitmap)) {
1393			dbg_verbose("pba %05d -> [factory bad]", pba);
1394			continue;
1395		}
1396
1397		memset(&extra, 0, sizeof(extra));
1398		error = msb_read_oob(msb, pba, 0, &extra);
1399
1400		/* can't trust the page if we can't read the oob */
1401		if (error == -EBADMSG) {
1402			pr_notice(
1403			"oob of pba %d damaged, will try to erase it", pba);
1404			msb_mark_block_used(msb, pba);
1405			msb_erase_block(msb, pba);
1406			continue;
1407		} else if (error) {
1408			pr_err("unknown error %d on read of oob of pba %d - aborting",
1409				error, pba);
1410
1411			kfree(overwrite_flags);
1412			return error;
1413		}
1414
1415		lba = be16_to_cpu(extra.logical_address);
1416		management_flag = extra.management_flag;
1417		overwrite_flag = extra.overwrite_flag;
1418		overwrite_flags[pba] = overwrite_flag;
1419
1420		/* Skip bad blocks */
1421		if (!(overwrite_flag & MEMSTICK_OVERWRITE_BKST)) {
1422			dbg("pba %05d -> [BAD]", pba);
1423			msb_mark_block_used(msb, pba);
1424			continue;
1425		}
1426
1427		/* Skip system/drm blocks */
1428		if ((management_flag & MEMSTICK_MANAGEMENT_FLAG_NORMAL) !=
1429			MEMSTICK_MANAGEMENT_FLAG_NORMAL) {
1430			dbg("pba %05d -> [reserved management flag %02x]",
1431							pba, management_flag);
1432			msb_mark_block_used(msb, pba);
1433			continue;
1434		}
1435
1436		/* Erase temporary tables */
1437		if (!(management_flag & MEMSTICK_MANAGEMENT_ATFLG)) {
1438			dbg("pba %05d -> [temp table] - will erase", pba);
1439
1440			msb_mark_block_used(msb, pba);
1441			msb_erase_block(msb, pba);
1442			continue;
1443		}
1444
1445		if (lba == MS_BLOCK_INVALID) {
1446			dbg_verbose("pba %05d -> [free]", pba);
1447			continue;
1448		}
1449
1450		msb_mark_block_used(msb, pba);
1451
 1452		/* Block has an LBA not matching its zone */
1453		if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
1454			pr_notice("pba %05d -> [bad lba %05d] - will erase",
1455								pba, lba);
1456			msb_erase_block(msb, pba);
1457			continue;
1458		}
1459
1460		/* No collisions - great */
1461		if (msb->lba_to_pba_table[lba] == MS_BLOCK_INVALID) {
1462			dbg_verbose("pba %05d -> [lba %05d]", pba, lba);
1463			msb->lba_to_pba_table[lba] = pba;
1464			continue;
1465		}
1466
1467		other_block = msb->lba_to_pba_table[lba];
1468		other_overwrite_flag = overwrite_flags[other_block];
1469
1470		pr_notice("Collision between pba %d and pba %d",
1471			pba, other_block);
1472
1473		if (!(overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
1474			pr_notice("pba %d is marked as stable, use it", pba);
1475			msb_erase_block(msb, other_block);
1476			msb->lba_to_pba_table[lba] = pba;
1477			continue;
1478		}
1479
1480		if (!(other_overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
1481			pr_notice("pba %d is marked as stable, use it",
1482								other_block);
1483			msb_erase_block(msb, pba);
1484			continue;
1485		}
1486
1487		pr_notice("collision between blocks %d and %d, without stable flag set on both, erasing pba %d",
1488				pba, other_block, other_block);
1489
1490		msb_erase_block(msb, other_block);
1491		msb->lba_to_pba_table[lba] = pba;
1492	}
1493
1494	dbg("End of media scanning");
1495	kfree(overwrite_flags);
1496	return 0;
1497}
1498
1499static void msb_cache_flush_timer(struct timer_list *t)
1500{
1501	struct msb_data *msb = from_timer(msb, t, cache_flush_timer);
1502
1503	msb->need_flush_cache = true;
1504	queue_work(msb->io_queue, &msb->io_work);
1505}
1506
1507
1508static void msb_cache_discard(struct msb_data *msb)
1509{
1510	if (msb->cache_block_lba == MS_BLOCK_INVALID)
1511		return;
1512
1513	del_timer_sync(&msb->cache_flush_timer);
1514
1515	dbg_verbose("Discarding the write cache");
1516	msb->cache_block_lba = MS_BLOCK_INVALID;
1517	bitmap_zero(&msb->valid_cache_bitmap, msb->pages_in_block);
1518}
1519
1520static int msb_cache_init(struct msb_data *msb)
1521{
1522	timer_setup(&msb->cache_flush_timer, msb_cache_flush_timer, 0);
1523
1524	if (!msb->cache)
1525		msb->cache = kzalloc(msb->block_size, GFP_KERNEL);
1526	if (!msb->cache)
1527		return -ENOMEM;
1528
1529	msb_cache_discard(msb);
1530	return 0;
1531}
1532
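/*
 * Write cache design (as implemented below): msb->cache buffers exactly one
 * block, identified by msb->cache_block_lba, and valid_cache_bitmap tracks
 * which of its pages were actually written. msb_cache_flush() fills in the
 * missing pages from the media and rewrites the whole block via
 * msb_update_block(); cache_flush_timer forces a flush cache_flush_timeout
 * milliseconds after a block first enters the cache.
 */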
1533static int msb_cache_flush(struct msb_data *msb)
1534{
1535	struct scatterlist sg;
1536	struct ms_extra_data_register extra;
1537	int page, offset, error;
1538	u16 pba, lba;
1539
1540	if (msb->read_only)
1541		return -EROFS;
1542
1543	if (msb->cache_block_lba == MS_BLOCK_INVALID)
1544		return 0;
1545
1546	lba = msb->cache_block_lba;
1547	pba = msb->lba_to_pba_table[lba];
1548
1549	dbg_verbose("Flushing the write cache of pba %d (LBA %d)",
1550						pba, msb->cache_block_lba);
1551
 1552	sg_init_one(&sg, msb->cache, msb->block_size);
1553
1554	/* Read all missing pages in cache */
1555	for (page = 0; page < msb->pages_in_block; page++) {
1556
1557		if (test_bit(page, &msb->valid_cache_bitmap))
1558			continue;
1559
1560		offset = page * msb->page_size;
1561
1562		dbg_verbose("reading non-present sector %d of cache block %d",
1563			page, lba);
1564		error = msb_read_page(msb, pba, page, &extra, &sg, offset);
1565
1566		/* Bad pages are copied with 00 page status */
1567		if (error == -EBADMSG) {
1568			pr_err("read error on sector %d, contents probably damaged", page);
1569			continue;
1570		}
1571
1572		if (error)
1573			return error;
1574
1575		if ((extra.overwrite_flag & MEMSTICK_OV_PG_NORMAL) !=
1576							MEMSTICK_OV_PG_NORMAL) {
1577			dbg("page %d is marked as bad", page);
1578			continue;
1579		}
1580
1581		set_bit(page, &msb->valid_cache_bitmap);
1582	}
1583
1584	/* Write the cache now */
1585	error = msb_update_block(msb, msb->cache_block_lba, &sg, 0);
1586	pba = msb->lba_to_pba_table[msb->cache_block_lba];
1587
1588	/* Mark invalid pages */
1589	if (!error) {
1590		for (page = 0; page < msb->pages_in_block; page++) {
1591
1592			if (test_bit(page, &msb->valid_cache_bitmap))
1593				continue;
1594
1595			dbg("marking page %d as containing damaged data",
1596				page);
1597			msb_set_overwrite_flag(msb,
 1598				pba, page, 0xFF & ~MEMSTICK_OV_PG_NORMAL);
1599		}
1600	}
1601
1602	msb_cache_discard(msb);
1603	return error;
1604}
1605
1606static int msb_cache_write(struct msb_data *msb, int lba,
1607	int page, bool add_to_cache_only, struct scatterlist *sg, int offset)
1608{
1609	int error;
1610	struct scatterlist sg_tmp[10];
1611
1612	if (msb->read_only)
1613		return -EROFS;
1614
1615	if (msb->cache_block_lba == MS_BLOCK_INVALID ||
1616						lba != msb->cache_block_lba)
1617		if (add_to_cache_only)
1618			return 0;
1619
1620	/* If we need to write different block */
1621	if (msb->cache_block_lba != MS_BLOCK_INVALID &&
1622						lba != msb->cache_block_lba) {
1623		dbg_verbose("first flush the cache");
1624		error = msb_cache_flush(msb);
1625		if (error)
1626			return error;
1627	}
1628
 1629	if (msb->cache_block_lba == MS_BLOCK_INVALID) {
 1630		msb->cache_block_lba = lba;
1631		mod_timer(&msb->cache_flush_timer,
1632			jiffies + msecs_to_jiffies(cache_flush_timeout));
1633	}
1634
 1635	dbg_verbose("Write of LBA %d page %d to cache", lba, page);
1636
1637	sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
1638	msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp), offset, msb->page_size);
1639
1640	sg_copy_to_buffer(sg_tmp, sg_nents(sg_tmp),
1641		msb->cache + page * msb->page_size, msb->page_size);
1642
1643	set_bit(page, &msb->valid_cache_bitmap);
1644	return 0;
1645}
1646
1647static int msb_cache_read(struct msb_data *msb, int lba,
1648				int page, struct scatterlist *sg, int offset)
1649{
1650	int pba = msb->lba_to_pba_table[lba];
1651	struct scatterlist sg_tmp[10];
1652	int error = 0;
1653
1654	if (lba == msb->cache_block_lba &&
1655			test_bit(page, &msb->valid_cache_bitmap)) {
1656
1657		dbg_verbose("Read of LBA %d (pba %d) sector %d from cache",
1658							lba, pba, page);
1659
1660		sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
1661		msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp),
1662			offset, msb->page_size);
1663		sg_copy_from_buffer(sg_tmp, sg_nents(sg_tmp),
1664			msb->cache + msb->page_size * page,
1665							msb->page_size);
1666	} else {
1667		dbg_verbose("Read of LBA %d (pba %d) sector %d from device",
1668							lba, pba, page);
1669
1670		error = msb_read_page(msb, pba, page, NULL, sg, offset);
1671		if (error)
1672			return error;
1673
1674		msb_cache_write(msb, lba, page, true, sg, offset);
1675	}
1676	return error;
1677}
1678
1679/* Emulated geometry table
 1680 * This table's content isn't that important;
 1681 * one could put different values here, provided that they still
 1682 * cover the whole disk.
 1683 * The 64 MB entry is what Windows reports for my 64M memstick
1684 */
1685
1686static const struct chs_entry chs_table[] = {
 1687/*        size sectors cylinders  heads */
1688	{ 4,    16,    247,       2  },
1689	{ 8,    16,    495,       2  },
1690	{ 16,   16,    495,       4  },
1691	{ 32,   16,    991,       4  },
1692	{ 64,   16,    991,       8  },
1693	{128,   16,    991,       16 },
1694	{ 0 }
1695};
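/*
 * Sanity check of the 64 MB row above: 991 cylinders * 8 heads * 16 sectors
 * = 126848 sectors of 512 bytes each, i.e. roughly 64.9 * 10^6 bytes.
 */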
1696
1697/* Load information about the card */
1698static int msb_init_card(struct memstick_dev *card)
1699{
1700	struct msb_data *msb = memstick_get_drvdata(card);
1701	struct memstick_host *host = card->host;
1702	struct ms_boot_page *boot_block;
1703	int error = 0, i, raw_size_in_megs;
1704
1705	msb->caps = 0;
1706
1707	if (card->id.class >= MEMSTICK_CLASS_ROM &&
1708				card->id.class <= MEMSTICK_CLASS_ROM)
1709		msb->read_only = true;
1710
1711	msb->state = -1;
1712	error = msb_reset(msb, false);
1713	if (error)
1714		return error;
1715
 1716	/* Due to a bug in the JMicron driver written by Alex Dubov,
 1717	 * its serial mode barely works,
1718	 * so we switch to parallel mode right away
1719	 */
1720	if (host->caps & MEMSTICK_CAP_PAR4)
1721		msb_switch_to_parallel(msb);
1722
1723	msb->page_size = sizeof(struct ms_boot_page);
1724
1725	/* Read the boot page */
1726	error = msb_read_boot_blocks(msb);
1727	if (error)
1728		return -EIO;
1729
1730	boot_block = &msb->boot_page[0];
1731
 1732	/* Save interesting attributes from boot page */
1733	msb->block_count = boot_block->attr.number_of_blocks;
1734	msb->page_size = boot_block->attr.page_size;
1735
1736	msb->pages_in_block = boot_block->attr.block_size * 2;
1737	msb->block_size = msb->page_size * msb->pages_in_block;
1738
1739	if ((size_t)msb->page_size > PAGE_SIZE) {
 1740		/* this isn't supported by Linux at all, anyway */
1741		dbg("device page %d size isn't supported", msb->page_size);
1742		return -EINVAL;
1743	}
1744
1745	msb->block_buffer = kzalloc(msb->block_size, GFP_KERNEL);
1746	if (!msb->block_buffer)
1747		return -ENOMEM;
1748
1749	raw_size_in_megs = (msb->block_size * msb->block_count) >> 20;
1750
1751	for (i = 0; chs_table[i].size; i++) {
1752
1753		if (chs_table[i].size != raw_size_in_megs)
1754			continue;
1755
1756		msb->geometry.cylinders = chs_table[i].cyl;
1757		msb->geometry.heads = chs_table[i].head;
1758		msb->geometry.sectors = chs_table[i].sec;
1759		break;
1760	}
1761
1762	if (boot_block->attr.transfer_supporting == 1)
1763		msb->caps |= MEMSTICK_CAP_PAR4;
1764
1765	if (boot_block->attr.device_type & 0x03)
1766		msb->read_only = true;
1767
1768	dbg("Total block count = %d", msb->block_count);
1769	dbg("Each block consists of %d pages", msb->pages_in_block);
1770	dbg("Page size = %d bytes", msb->page_size);
1771	dbg("Parallel mode supported: %d", !!(msb->caps & MEMSTICK_CAP_PAR4));
1772	dbg("Read only: %d", msb->read_only);
1773
1774#if 0
1775	/* Now we can switch the interface */
1776	if (host->caps & msb->caps & MEMSTICK_CAP_PAR4)
1777		msb_switch_to_parallel(msb);
1778#endif
1779
1780	error = msb_cache_init(msb);
1781	if (error)
1782		return error;
1783
1784	error = msb_ftl_initialize(msb);
1785	if (error)
1786		return error;
1787
1788
1789	/* Read the bad block table */
1790	error = msb_read_bad_block_table(msb, 0);
1791
1792	if (error && error != -ENOMEM) {
1793		dbg("failed to read bad block table from primary boot block, trying from backup");
1794		error = msb_read_bad_block_table(msb, 1);
1795	}
1796
1797	if (error)
1798		return error;
1799
1800	/* *drum roll* Scan the media */
1801	error = msb_ftl_scan(msb);
1802	if (error) {
1803		pr_err("Scan of media failed");
1804		return error;
1805	}
1806
1807	return 0;
1808
1809}
1810
1811static int msb_do_write_request(struct msb_data *msb, int lba,
 1812	int page, struct scatterlist *sg, size_t len, int *successfully_written)
1813{
1814	int error = 0;
1815	off_t offset = 0;
 1816	*successfully_written = 0;
1817
1818	while (offset < len) {
1819		if (page == 0 && len - offset >= msb->block_size) {
1820
1821			if (msb->cache_block_lba == lba)
1822				msb_cache_discard(msb);
1823
1824			dbg_verbose("Writing whole lba %d", lba);
1825			error = msb_update_block(msb, lba, sg, offset);
1826			if (error)
1827				return error;
1828
1829			offset += msb->block_size;
 1830			*successfully_written += msb->block_size;
1831			lba++;
1832			continue;
1833		}
1834
1835		error = msb_cache_write(msb, lba, page, false, sg, offset);
1836		if (error)
1837			return error;
1838
1839		offset += msb->page_size;
 1840		*successfully_written += msb->page_size;
1841
1842		page++;
1843		if (page == msb->pages_in_block) {
1844			page = 0;
1845			lba++;
1846		}
1847	}
1848	return 0;
1849}
1850
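/*
 * The write path above takes two routes: block-aligned, block-sized chunks
 * are written directly with msb_update_block(), while everything else goes
 * through the single-block write cache via msb_cache_write().
 */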
1851static int msb_do_read_request(struct msb_data *msb, int lba,
 1852		int page, struct scatterlist *sg, int len, int *successfully_read)
1853{
1854	int error = 0;
1855	int offset = 0;
 1856	*successfully_read = 0;
1857
1858	while (offset < len) {
1859
1860		error = msb_cache_read(msb, lba, page, sg, offset);
1861		if (error)
1862			return error;
1863
1864		offset += msb->page_size;
 1865		*successfully_read += msb->page_size;
1866
1867		page++;
1868		if (page == msb->pages_in_block) {
1869			page = 0;
1870			lba++;
1871		}
1872	}
1873	return 0;
1874}
1875
1876static void msb_io_work(struct work_struct *work)
1877{
1878	struct msb_data *msb = container_of(work, struct msb_data, io_work);
1879	int page, error, len;
1880	sector_t lba;
1881	struct scatterlist *sg = msb->prealloc_sg;
1882	struct request *req;
1883
1884	dbg_verbose("IO: work started");
1885
1886	while (1) {
1887		spin_lock_irq(&msb->q_lock);
1888
1889		if (msb->need_flush_cache) {
1890			msb->need_flush_cache = false;
1891			spin_unlock_irq(&msb->q_lock);
1892			msb_cache_flush(msb);
1893			continue;
1894		}
1895
1896		req = msb->req;
1897		if (!req) {
1898			dbg_verbose("IO: no more requests exiting");
1899			spin_unlock_irq(&msb->q_lock);
1900			return;
1901		}
1902
1903		spin_unlock_irq(&msb->q_lock);
1904
1905		/* process the request */
1906		dbg_verbose("IO: processing new request");
1907		blk_rq_map_sg(msb->queue, req, sg);
1908
1909		lba = blk_rq_pos(req);
1910
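		/*
		 * Translate the 512-byte sector number into (lba, page):
		 * divide by sectors-per-page, then split off the page index
		 * within the block. With hypothetical 2048-byte pages and 32
		 * pages per block, sector 300 -> page number 75 -> lba 2,
		 * page 11.
		 */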
1911		sector_div(lba, msb->page_size / 512);
1912		page = sector_div(lba, msb->pages_in_block);
1913
1914		if (rq_data_dir(msb->req) == READ)
1915			error = msb_do_read_request(msb, lba, page, sg,
1916				blk_rq_bytes(req), &len);
1917		else
1918			error = msb_do_write_request(msb, lba, page, sg,
1919				blk_rq_bytes(req), &len);
1920
1921		if (len && !blk_update_request(req, BLK_STS_OK, len)) {
1922			__blk_mq_end_request(req, BLK_STS_OK);
1923			spin_lock_irq(&msb->q_lock);
1924			msb->req = NULL;
1925			spin_unlock_irq(&msb->q_lock);
1926		}
1927
1928		if (error && msb->req) {
1929			blk_status_t ret = errno_to_blk_status(error);
1930
1931			dbg_verbose("IO: ending one sector of the request with error");
1932			blk_mq_end_request(req, ret);
1933			spin_lock_irq(&msb->q_lock);
1934			msb->req = NULL;
1935			spin_unlock_irq(&msb->q_lock);
1936		}
1937
1938		if (msb->req)
1939			dbg_verbose("IO: request still pending");
1940	}
1941}
1942
1943static DEFINE_IDR(msb_disk_idr); /* set of used disk numbers */
1944static DEFINE_MUTEX(msb_disk_lock); /* protects against races in open/release */
1945
1946static void msb_data_clear(struct msb_data *msb)
1947{
1948	kfree(msb->boot_page);
1949	bitmap_free(msb->used_blocks_bitmap);
1950	bitmap_free(msb->erased_blocks_bitmap);
1951	kfree(msb->lba_to_pba_table);
1952	kfree(msb->cache);
1953	msb->card = NULL;
1954}
1955
1956static int msb_bd_getgeo(struct block_device *bdev,
1957				 struct hd_geometry *geo)
1958{
1959	struct msb_data *msb = bdev->bd_disk->private_data;
1960	*geo = msb->geometry;
1961	return 0;
1962}
1963
1964static void msb_bd_free_disk(struct gendisk *disk)
1965{
1966	struct msb_data *msb = disk->private_data;
1967
1968	mutex_lock(&msb_disk_lock);
1969	idr_remove(&msb_disk_idr, msb->disk_id);
1970	mutex_unlock(&msb_disk_lock);
1971
1972	kfree(msb);
1973}
1974
1975static blk_status_t msb_queue_rq(struct blk_mq_hw_ctx *hctx,
1976				 const struct blk_mq_queue_data *bd)
1977{
1978	struct memstick_dev *card = hctx->queue->queuedata;
1979	struct msb_data *msb = memstick_get_drvdata(card);
1980	struct request *req = bd->rq;
1981
1982	dbg_verbose("Submit request");
1983
1984	spin_lock_irq(&msb->q_lock);
1985
1986	if (msb->card_dead) {
1987		dbg("Refusing requests on removed card");
1988
1989		WARN_ON(!msb->io_queue_stopped);
1990
1991		spin_unlock_irq(&msb->q_lock);
1992		blk_mq_start_request(req);
1993		return BLK_STS_IOERR;
1994	}
1995
1996	if (msb->req) {
1997		spin_unlock_irq(&msb->q_lock);
1998		return BLK_STS_DEV_RESOURCE;
1999	}
2000
2001	blk_mq_start_request(req);
2002	msb->req = req;
2003
2004	if (!msb->io_queue_stopped)
2005		queue_work(msb->io_queue, &msb->io_work);
2006
2007	spin_unlock_irq(&msb->q_lock);
2008	return BLK_STS_OK;
2009}
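msb_queue_rq accepts at most one request at a time: if msb->req is still set, the new request is bounced with BLK_STS_DEV_RESOURCE and blk-mq will redrive it once the driver frees the slot. A toy single-slot model of that contract, in ordinary userspace C with an invented status enum standing in for blk_status_t:

#include <stdio.h>
#include <stddef.h>

enum status { STS_OK, STS_DEV_RESOURCE };

static const char *slot;	/* plays the role of msb->req */

static enum status queue_rq(const char *req)
{
	if (slot)		/* busy: caller should retry later */
		return STS_DEV_RESOURCE;
	slot = req;		/* accepted: a worker will process it */
	return STS_OK;
}

int main(void)
{
	printf("submit A: %d\n", queue_rq("A"));	/* 0, accepted */
	printf("submit B: %d\n", queue_rq("B"));	/* 1, device busy */
	slot = NULL;					/* worker completed A */
	printf("retry  B: %d\n", queue_rq("B"));	/* 0, accepted */
	return 0;
}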
2010
2011static int msb_check_card(struct memstick_dev *card)
2012{
2013	struct msb_data *msb = memstick_get_drvdata(card);
2014
2015	return (msb->card_dead == 0);
2016}
2017
2018static void msb_stop(struct memstick_dev *card)
2019{
2020	struct msb_data *msb = memstick_get_drvdata(card);
2021	unsigned long flags;
2022
2023	dbg("Stopping all msblock IO");
2024
2025	blk_mq_stop_hw_queues(msb->queue);
2026	spin_lock_irqsave(&msb->q_lock, flags);
2027	msb->io_queue_stopped = true;
2028	spin_unlock_irqrestore(&msb->q_lock, flags);
2029
2030	del_timer_sync(&msb->cache_flush_timer);
2031	flush_workqueue(msb->io_queue);
2032
2033	spin_lock_irqsave(&msb->q_lock, flags);
2034	if (msb->req) {
2035		blk_mq_requeue_request(msb->req, false);
2036		msb->req = NULL;
2037	}
2038	spin_unlock_irqrestore(&msb->q_lock, flags);
2039}
2040
2041static void msb_start(struct memstick_dev *card)
2042{
2043	struct msb_data *msb = memstick_get_drvdata(card);
2044	unsigned long flags;
2045
2046	dbg("Resuming IO from msblock");
2047
2048	msb_invalidate_reg_window(msb);
2049
2050	spin_lock_irqsave(&msb->q_lock, flags);
2051	if (!msb->io_queue_stopped || msb->card_dead) {
2052		spin_unlock_irqrestore(&msb->q_lock, flags);
2053		return;
2054	}
2055	spin_unlock_irqrestore(&msb->q_lock, flags);
2056
2057	/* Kick a cache flush anyway, it's harmless */
2058	msb->need_flush_cache = true;
2059	msb->io_queue_stopped = false;
2060
2061	blk_mq_start_hw_queues(msb->queue);
2062
2063	queue_work(msb->io_queue, &msb->io_work);
2064
2065}
2066
2067static const struct block_device_operations msb_bdops = {
2068	.owner		= THIS_MODULE,
2069	.getgeo		= msb_bd_getgeo,
2070	.free_disk	= msb_bd_free_disk,
2071};
2072
2073static const struct blk_mq_ops msb_mq_ops = {
2074	.queue_rq	= msb_queue_rq,
2075};
2076
2077/* Registers the block device */
2078static int msb_init_disk(struct memstick_dev *card)
2079{
2080	struct msb_data *msb = memstick_get_drvdata(card);
2081	int rc;
2082	unsigned long capacity;
2083
2084	mutex_lock(&msb_disk_lock);
2085	msb->disk_id = idr_alloc(&msb_disk_idr, card, 0, 256, GFP_KERNEL);
2086	mutex_unlock(&msb_disk_lock);
2087
2088	if (msb->disk_id < 0)
2089		return msb->disk_id;
2090
2091	rc = blk_mq_alloc_sq_tag_set(&msb->tag_set, &msb_mq_ops, 2,
2092				     BLK_MQ_F_SHOULD_MERGE);
2093	if (rc)
2094		goto out_release_id;
2095
2096	msb->disk = blk_mq_alloc_disk(&msb->tag_set, card);
2097	if (IS_ERR(msb->disk)) {
2098		rc = PTR_ERR(msb->disk);
2099		goto out_free_tag_set;
2100	}
2101	msb->queue = msb->disk->queue;
2102
2103	blk_queue_max_hw_sectors(msb->queue, MS_BLOCK_MAX_PAGES);
2104	blk_queue_max_segments(msb->queue, MS_BLOCK_MAX_SEGS);
2105	blk_queue_max_segment_size(msb->queue,
2106				   MS_BLOCK_MAX_PAGES * msb->page_size);
2107	blk_queue_logical_block_size(msb->queue, msb->page_size);
2108
2109	sprintf(msb->disk->disk_name, "msblk%d", msb->disk_id);
2110	msb->disk->fops = &msb_bdops;
2111	msb->disk->private_data = msb;
2112
2113	capacity = msb->pages_in_block * msb->logical_block_count;
2114	capacity *= (msb->page_size / 512);
2115	set_capacity(msb->disk, capacity);
2116	dbg("Set total disk size to %lu sectors", capacity);
2117
2118	msb->io_queue = alloc_ordered_workqueue("ms_block", WQ_MEM_RECLAIM);
2119	if (!msb->io_queue) {
2120		rc = -ENOMEM;
2121		goto out_cleanup_disk;
2122	}
2123
2124	INIT_WORK(&msb->io_work, msb_io_work);
2125	sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);
2126
2127	if (msb->read_only)
2128		set_disk_ro(msb->disk, 1);
2129
2130	msb_start(card);
2131	rc = device_add_disk(&card->dev, msb->disk, NULL);
2132	if (rc)
2133		goto out_destroy_workqueue;
2134	dbg("Disk added");
2135	return 0;
2136
2137out_destroy_workqueue:
2138	destroy_workqueue(msb->io_queue);
2139out_cleanup_disk:
2140	put_disk(msb->disk);
2141out_free_tag_set:
2142	blk_mq_free_tag_set(&msb->tag_set);
2143out_release_id:
2144	mutex_lock(&msb_disk_lock);
2145	idr_remove(&msb_disk_idr, msb->disk_id);
2146	mutex_unlock(&msb_disk_lock);
2147	return rc;
2148}
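The capacity computed in msb_init_disk is simply pages_in_block * logical_block_count, scaled by the number of 512-byte sectors per page. Worked through with plausible values for a 64 MB stick (32-page blocks, 8 zones, hence 8 * 496 - 2 = 3966 logical blocks; all assumed, not taken from a real card):

#include <stdio.h>

int main(void)
{
	int pages_in_block = 32, page_size = 512;	/* assumed geometry */
	int logical_block_count = 8 * 496 - 2;		/* 8 zones -> 3966 */

	unsigned long capacity = (unsigned long)pages_in_block *
				 logical_block_count * (page_size / 512);

	/* 32 * 3966 = 126912 sectors, about 62 MiB of logical space */
	printf("%lu sectors (%.2f MiB)\n", capacity, capacity / 2048.0);
	return 0;
}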
2149
2150static int msb_probe(struct memstick_dev *card)
2151{
2152	struct msb_data *msb;
2153	int rc = 0;
2154
2155	msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
2156	if (!msb)
2157		return -ENOMEM;
2158	memstick_set_drvdata(card, msb);
2159	msb->card = card;
2160	spin_lock_init(&msb->q_lock);
2161
2162	rc = msb_init_card(card);
2163	if (rc)
2164		goto out_free;
2165
2166	rc = msb_init_disk(card);
2167	if (!rc) {
2168		card->check = msb_check_card;
2169		card->stop = msb_stop;
2170		card->start = msb_start;
2171		return 0;
2172	}
2173out_free:
2174	memstick_set_drvdata(card, NULL);
2175	msb_data_clear(msb);
2176	kfree(msb);
2177	return rc;
2178}
2179
2180static void msb_remove(struct memstick_dev *card)
2181{
2182	struct msb_data *msb = memstick_get_drvdata(card);
2183	unsigned long flags;
2184
2185	if (!msb->io_queue_stopped)
2186		msb_stop(card);
2187
2188	dbg("Removing the disk device");
2189
2190	/* Take care of unhandled + new requests from now on */
2191	spin_lock_irqsave(&msb->q_lock, flags);
2192	msb->card_dead = true;
2193	spin_unlock_irqrestore(&msb->q_lock, flags);
2194	blk_mq_start_hw_queues(msb->queue);
2195
2196	/* Remove the disk */
2197	del_gendisk(msb->disk);
2198	blk_mq_free_tag_set(&msb->tag_set);
2199	msb->queue = NULL;
2200
2201	mutex_lock(&msb_disk_lock);
2202	msb_data_clear(msb);
2203	mutex_unlock(&msb_disk_lock);
2204
2205	put_disk(msb->disk);
2206	memstick_set_drvdata(card, NULL);
2207}
2208
2209#ifdef CONFIG_PM
2210
2211static int msb_suspend(struct memstick_dev *card, pm_message_t state)
2212{
2213	msb_stop(card);
2214	return 0;
2215}
2216
2217static int msb_resume(struct memstick_dev *card)
2218{
2219	struct msb_data *msb = memstick_get_drvdata(card);
2220	struct msb_data *new_msb = NULL;
2221	bool card_dead = true;
2222
2223#ifndef CONFIG_MEMSTICK_UNSAFE_RESUME
2224	msb->card_dead = true;
2225	return 0;
2226#endif
2227	mutex_lock(&card->host->lock);
2228
2229	new_msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
2230	if (!new_msb)
2231		goto out;
2232
2233	new_msb->card = card;
2234	memstick_set_drvdata(card, new_msb);
2235	spin_lock_init(&new_msb->q_lock);
2236	sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);
2237
2238	if (msb_init_card(card))
2239		goto out;
2240
2241	if (msb->block_size != new_msb->block_size)
2242		goto out;
2243
2244	if (memcmp(msb->boot_page, new_msb->boot_page,
2245					sizeof(struct ms_boot_page)))
2246		goto out;
2247
2248	if (msb->logical_block_count != new_msb->logical_block_count ||
2249		memcmp(msb->lba_to_pba_table, new_msb->lba_to_pba_table,
2250						msb->logical_block_count))
2251		goto out;
2252
2253	if (msb->block_count != new_msb->block_count ||
2254	    !bitmap_equal(msb->used_blocks_bitmap, new_msb->used_blocks_bitmap,
2255							msb->block_count))
2256		goto out;
2257
2258	card_dead = false;
2259out:
2260	if (card_dead)
2261		dbg("Card was removed/replaced during suspend");
2262
2263	msb->card_dead = card_dead;
2264	memstick_set_drvdata(card, msb);
2265
2266	if (new_msb) {
2267		msb_data_clear(new_msb);
2268		kfree(new_msb);
2269	}
2270
2271	msb_start(card);
2272	mutex_unlock(&card->host->lock);
2273	return 0;
2274}
2275#else
2276
2277#define msb_suspend NULL
2278#define msb_resume NULL
2279
2280#endif /* CONFIG_PM */
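The resume path only keeps the old state if a freshly probed copy of the card's metadata matches what was saved before suspend: same block size, identical boot page, identical lba-to-pba table and used-block bitmap; any mismatch flags the card as dead. A reduced sketch of that fingerprint comparison, using a hypothetical fingerprint struct instead of the real struct msb_data:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct fingerprint {			/* hypothetical stand-in */
	int block_size;
	unsigned char boot_page[16];
	unsigned short lba_to_pba[4];
};

static bool same_card(const struct fingerprint *old,
		      const struct fingerprint *fresh)
{
	return old->block_size == fresh->block_size &&
	       !memcmp(old->boot_page, fresh->boot_page,
		       sizeof(old->boot_page)) &&
	       !memcmp(old->lba_to_pba, fresh->lba_to_pba,
		       sizeof(old->lba_to_pba));
}

int main(void)
{
	struct fingerprint before = { 16384, "boot", {5, 9, 2, 7} };
	struct fingerprint after = before;

	printf("unchanged: %d\n", same_card(&before, &after));	/* 1 */
	after.lba_to_pba[1] = 42;	/* a different card in the slot */
	printf("swapped:   %d\n", same_card(&before, &after));	/* 0 */
	return 0;
}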
2281
2282static struct memstick_device_id msb_id_tbl[] = {
2283	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
2284	 MEMSTICK_CLASS_FLASH},
2285
2286	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
2287	 MEMSTICK_CLASS_ROM},
2288
2289	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
2290	 MEMSTICK_CLASS_RO},
2291
2292	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
2293	 MEMSTICK_CLASS_WP},
2294
2295	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_DUO, MEMSTICK_CATEGORY_STORAGE_DUO,
2296	 MEMSTICK_CLASS_DUO},
2297	{}
2298};
2299MODULE_DEVICE_TABLE(memstick, msb_id_tbl);
2300
2301
2302static struct memstick_driver msb_driver = {
2303	.driver = {
2304		.name  = DRIVER_NAME,
2305		.owner = THIS_MODULE
2306	},
2307	.id_table = msb_id_tbl,
2308	.probe    = msb_probe,
2309	.remove   = msb_remove,
2310	.suspend  = msb_suspend,
2311	.resume   = msb_resume
2312};
2313
2314static int __init msb_init(void)
2315{
2316	int rc = memstick_register_driver(&msb_driver);
2317
2318	if (rc)
2319		pr_err("failed to register memstick driver (error %d)\n", rc);
2320
2321	return rc;
2322}
2323
2324static void __exit msb_exit(void)
2325{
2326	memstick_unregister_driver(&msb_driver);
2327	idr_destroy(&msb_disk_idr);
2328}
2329
2330module_init(msb_init);
2331module_exit(msb_exit);
2332
2333module_param(cache_flush_timeout, int, S_IRUGO);
2334MODULE_PARM_DESC(cache_flush_timeout,
2335				"Cache flush timeout in msec (1000 default)");
2336module_param(debug, int, S_IRUGO | S_IWUSR);
2337MODULE_PARM_DESC(debug, "Debug level (0-2)");
2338
2339module_param(verify_writes, bool, S_IRUGO);
2340MODULE_PARM_DESC(verify_writes, "Read back and check all data that is written");
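All three knobs are ordinary module parameters, so with the driver built as a module a line such as

	modprobe ms_block verify_writes=1 cache_flush_timeout=2000

would turn on write verification and stretch the flush timer to two seconds; debug is additionally writable at runtime via /sys/module/ms_block/parameters/debug.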
2341
2342MODULE_LICENSE("GPL");
2343MODULE_AUTHOR("Maxim Levitsky");
2344MODULE_DESCRIPTION("Sony MemoryStick block device driver");
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  ms_block.c - Sony MemoryStick (legacy) storage support
   4
   5 *  Copyright (C) 2013 Maxim Levitsky <maximlevitsky@gmail.com>
   6 *
   7 * Minor portions of the driver were copied from mspro_block.c which is
   8 * Copyright (C) 2007 Alex Dubov <oakad@yahoo.com>
   9 */
  10#define DRIVER_NAME "ms_block"
  11#define pr_fmt(fmt) DRIVER_NAME ": " fmt
  12
  13#include <linux/module.h>
  14#include <linux/blk-mq.h>
  15#include <linux/memstick.h>
  16#include <linux/idr.h>
  17#include <linux/hdreg.h>
  18#include <linux/delay.h>
  19#include <linux/slab.h>
  20#include <linux/random.h>
  21#include <linux/bitmap.h>
  22#include <linux/scatterlist.h>
  23#include <linux/jiffies.h>
  24#include <linux/workqueue.h>
  25#include <linux/mutex.h>
  26#include "ms_block.h"
  27
  28static int debug;
  29static int cache_flush_timeout = 1000;
  30static bool verify_writes;
  31
  32/*
  33 * Copies section of 'sg_from' starting from offset 'offset' and with length
  34 * 'len' To another scatterlist of to_nents enties
  35 */
  36static size_t msb_sg_copy(struct scatterlist *sg_from,
  37	struct scatterlist *sg_to, int to_nents, size_t offset, size_t len)
  38{
  39	size_t copied = 0;
  40
  41	while (offset > 0) {
  42		if (offset >= sg_from->length) {
  43			if (sg_is_last(sg_from))
  44				return 0;
  45
  46			offset -= sg_from->length;
  47			sg_from = sg_next(sg_from);
  48			continue;
  49		}
  50
  51		copied = min(len, sg_from->length - offset);
  52		sg_set_page(sg_to, sg_page(sg_from),
  53			copied, sg_from->offset + offset);
  54
  55		len -= copied;
  56		offset = 0;
  57
  58		if (sg_is_last(sg_from) || !len)
  59			goto out;
  60
  61		sg_to = sg_next(sg_to);
  62		to_nents--;
  63		sg_from = sg_next(sg_from);
  64	}
  65
  66	while (len > sg_from->length && to_nents--) {
  67		len -= sg_from->length;
  68		copied += sg_from->length;
  69
  70		sg_set_page(sg_to, sg_page(sg_from),
  71				sg_from->length, sg_from->offset);
  72
  73		if (sg_is_last(sg_from) || !len)
  74			goto out;
  75
  76		sg_from = sg_next(sg_from);
  77		sg_to = sg_next(sg_to);
  78	}
  79
  80	if (len && to_nents) {
  81		sg_set_page(sg_to, sg_page(sg_from), len, sg_from->offset);
  82		copied += len;
  83	}
  84out:
  85	sg_mark_end(sg_to);
  86	return copied;
  87}
  88
  89/*
  90 * Compares section of 'sg' starting from offset 'offset' and with length 'len'
  91 * to linear buffer of length 'len' at address 'buffer'
  92 * Returns 0 if equal and  -1 otherwice
  93 */
  94static int msb_sg_compare_to_buffer(struct scatterlist *sg,
  95					size_t offset, u8 *buffer, size_t len)
  96{
  97	int retval = 0, cmplen;
  98	struct sg_mapping_iter miter;
  99
 100	sg_miter_start(&miter, sg, sg_nents(sg),
 101					SG_MITER_ATOMIC | SG_MITER_FROM_SG);
 102
 103	while (sg_miter_next(&miter) && len > 0) {
 104		if (offset >= miter.length) {
 105			offset -= miter.length;
 106			continue;
 107		}
 108
 109		cmplen = min(miter.length - offset, len);
 110		retval = memcmp(miter.addr + offset, buffer, cmplen) ? -1 : 0;
 111		if (retval)
 112			break;
 113
 114		buffer += cmplen;
 115		len -= cmplen;
 116		offset = 0;
 117	}
 118
 119	if (!retval && len)
 120		retval = -1;
 121
 122	sg_miter_stop(&miter);
 123	return retval;
 124}
 125
 126
 127/* Get zone at which block with logical address 'lba' lives
 128 * Flash is broken into zones.
 129 * Each zone consists of 512 eraseblocks, out of which in first
 130 * zone 494 are used and 496 are for all following zones.
 131 * Therefore zone #0 hosts blocks 0-493, zone #1 blocks 494-988, etc...
 132 */
 133static int msb_get_zone_from_lba(int lba)
 134{
 135	if (lba < 494)
 136		return 0;
 137	return ((lba - 494) / 496) + 1;
 138}
 139
 140/* Get zone of physical block. Trivial */
 141static int msb_get_zone_from_pba(int pba)
 142{
 143	return pba / MS_BLOCKS_IN_ZONE;
 144}
 145
 146/* Debug test to validate free block counts */
 147static int msb_validate_used_block_bitmap(struct msb_data *msb)
 148{
 149	int total_free_blocks = 0;
 150	int i;
 151
 152	if (!debug)
 153		return 0;
 154
 155	for (i = 0; i < msb->zone_count; i++)
 156		total_free_blocks += msb->free_block_count[i];
 157
 158	if (msb->block_count - bitmap_weight(msb->used_blocks_bitmap,
 159					msb->block_count) == total_free_blocks)
 160		return 0;
 161
 162	pr_err("BUG: free block counts don't match the bitmap");
 163	msb->read_only = true;
 164	return -EINVAL;
 165}
 166
 167/* Mark physical block as used */
 168static void msb_mark_block_used(struct msb_data *msb, int pba)
 169{
 170	int zone = msb_get_zone_from_pba(pba);
 171
 172	if (test_bit(pba, msb->used_blocks_bitmap)) {
 173		pr_err(
 174		"BUG: attempt to mark already used pba %d as used", pba);
 175		msb->read_only = true;
 176		return;
 177	}
 178
 179	if (msb_validate_used_block_bitmap(msb))
 180		return;
 181
 182	/* No races because all IO is single threaded */
 183	__set_bit(pba, msb->used_blocks_bitmap);
 184	msb->free_block_count[zone]--;
 185}
 186
 187/* Mark physical block as free */
 188static void msb_mark_block_unused(struct msb_data *msb, int pba)
 189{
 190	int zone = msb_get_zone_from_pba(pba);
 191
 192	if (!test_bit(pba, msb->used_blocks_bitmap)) {
 193		pr_err("BUG: attempt to mark already unused pba %d as unused" , pba);
 194		msb->read_only = true;
 195		return;
 196	}
 197
 198	if (msb_validate_used_block_bitmap(msb))
 199		return;
 200
 201	/* No races because all IO is single threaded */
 202	__clear_bit(pba, msb->used_blocks_bitmap);
 203	msb->free_block_count[zone]++;
 204}
 205
 206/* Invalidate current register window */
 207static void msb_invalidate_reg_window(struct msb_data *msb)
 208{
 209	msb->reg_addr.w_offset = offsetof(struct ms_register, id);
 210	msb->reg_addr.w_length = sizeof(struct ms_id_register);
 211	msb->reg_addr.r_offset = offsetof(struct ms_register, id);
 212	msb->reg_addr.r_length = sizeof(struct ms_id_register);
 213	msb->addr_valid = false;
 214}
 215
 216/* Start a state machine */
 217static int msb_run_state_machine(struct msb_data *msb, int   (*state_func)
 218		(struct memstick_dev *card, struct memstick_request **req))
 219{
 220	struct memstick_dev *card = msb->card;
 221
 222	WARN_ON(msb->state != -1);
 223	msb->int_polling = false;
 224	msb->state = 0;
 225	msb->exit_error = 0;
 226
 227	memset(&card->current_mrq, 0, sizeof(card->current_mrq));
 228
 229	card->next_request = state_func;
 230	memstick_new_req(card->host);
 231	wait_for_completion(&card->mrq_complete);
 232
 233	WARN_ON(msb->state != -1);
 234	return msb->exit_error;
 235}
 236
 237/* State machines call that to exit */
 238static int msb_exit_state_machine(struct msb_data *msb, int error)
 239{
 240	WARN_ON(msb->state == -1);
 241
 242	msb->state = -1;
 243	msb->exit_error = error;
 244	msb->card->next_request = h_msb_default_bad;
 245
 246	/* Invalidate reg window on errors */
 247	if (error)
 248		msb_invalidate_reg_window(msb);
 249
 250	complete(&msb->card->mrq_complete);
 251	return -ENXIO;
 252}
 253
 254/* read INT register */
 255static int msb_read_int_reg(struct msb_data *msb, long timeout)
 256{
 257	struct memstick_request *mrq = &msb->card->current_mrq;
 258
 259	WARN_ON(msb->state == -1);
 260
 261	if (!msb->int_polling) {
 262		msb->int_timeout = jiffies +
 263			msecs_to_jiffies(timeout == -1 ? 500 : timeout);
 264		msb->int_polling = true;
 265	} else if (time_after(jiffies, msb->int_timeout)) {
 266		mrq->data[0] = MEMSTICK_INT_CMDNAK;
 267		return 0;
 268	}
 269
 270	if ((msb->caps & MEMSTICK_CAP_AUTO_GET_INT) &&
 271				mrq->need_card_int && !mrq->error) {
 272		mrq->data[0] = mrq->int_reg;
 273		mrq->need_card_int = false;
 274		return 0;
 275	} else {
 276		memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
 277		return 1;
 278	}
 279}
 280
 281/* Read a register */
 282static int msb_read_regs(struct msb_data *msb, int offset, int len)
 283{
 284	struct memstick_request *req = &msb->card->current_mrq;
 285
 286	if (msb->reg_addr.r_offset != offset ||
 287	    msb->reg_addr.r_length != len || !msb->addr_valid) {
 288
 289		msb->reg_addr.r_offset = offset;
 290		msb->reg_addr.r_length = len;
 291		msb->addr_valid = true;
 292
 293		memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
 294			&msb->reg_addr, sizeof(msb->reg_addr));
 295		return 0;
 296	}
 297
 298	memstick_init_req(req, MS_TPC_READ_REG, NULL, len);
 299	return 1;
 300}
 301
 302/* Write a card register */
 303static int msb_write_regs(struct msb_data *msb, int offset, int len, void *buf)
 304{
 305	struct memstick_request *req = &msb->card->current_mrq;
 306
 307	if (msb->reg_addr.w_offset != offset ||
 308		msb->reg_addr.w_length != len  || !msb->addr_valid) {
 309
 310		msb->reg_addr.w_offset = offset;
 311		msb->reg_addr.w_length = len;
 312		msb->addr_valid = true;
 313
 314		memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
 315			&msb->reg_addr, sizeof(msb->reg_addr));
 316		return 0;
 317	}
 318
 319	memstick_init_req(req, MS_TPC_WRITE_REG, buf, len);
 320	return 1;
 321}
 322
 323/* Handler for absence of IO */
 324static int h_msb_default_bad(struct memstick_dev *card,
 325						struct memstick_request **mrq)
 326{
 327	return -ENXIO;
 328}
 329
 330/*
 331 * This function is a handler for reads of one page from device.
 332 * Writes output to msb->current_sg, takes sector address from msb->reg.param
 333 * Can also be used to read extra data only. Set params accordintly.
 334 */
 335static int h_msb_read_page(struct memstick_dev *card,
 336					struct memstick_request **out_mrq)
 337{
 338	struct msb_data *msb = memstick_get_drvdata(card);
 339	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
 340	struct scatterlist sg[2];
 341	u8 command, intreg;
 342
 343	if (mrq->error) {
 344		dbg("read_page, unknown error");
 345		return msb_exit_state_machine(msb, mrq->error);
 346	}
 347again:
 348	switch (msb->state) {
 349	case MSB_RP_SEND_BLOCK_ADDRESS:
 350		/* msb_write_regs sometimes "fails" because it needs to update
 351		 * the reg window, and thus it returns request for that.
 352		 * Then we stay in this state and retry
 353		 */
 354		if (!msb_write_regs(msb,
 355			offsetof(struct ms_register, param),
 356			sizeof(struct ms_param_register),
 357			(unsigned char *)&msb->regs.param))
 358			return 0;
 359
 360		msb->state = MSB_RP_SEND_READ_COMMAND;
 361		return 0;
 362
 363	case MSB_RP_SEND_READ_COMMAND:
 364		command = MS_CMD_BLOCK_READ;
 365		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
 366		msb->state = MSB_RP_SEND_INT_REQ;
 367		return 0;
 368
 369	case MSB_RP_SEND_INT_REQ:
 370		msb->state = MSB_RP_RECEIVE_INT_REQ_RESULT;
 371		/* If dont actually need to send the int read request (only in
 372		 * serial mode), then just fall through
 373		 */
 374		if (msb_read_int_reg(msb, -1))
 375			return 0;
 376		fallthrough;
 377
 378	case MSB_RP_RECEIVE_INT_REQ_RESULT:
 379		intreg = mrq->data[0];
 380		msb->regs.status.interrupt = intreg;
 381
 382		if (intreg & MEMSTICK_INT_CMDNAK)
 383			return msb_exit_state_machine(msb, -EIO);
 384
 385		if (!(intreg & MEMSTICK_INT_CED)) {
 386			msb->state = MSB_RP_SEND_INT_REQ;
 387			goto again;
 388		}
 389
 390		msb->int_polling = false;
 391		msb->state = (intreg & MEMSTICK_INT_ERR) ?
 392			MSB_RP_SEND_READ_STATUS_REG : MSB_RP_SEND_OOB_READ;
 393		goto again;
 394
 395	case MSB_RP_SEND_READ_STATUS_REG:
 396		 /* read the status register to understand source of the INT_ERR */
 397		if (!msb_read_regs(msb,
 398			offsetof(struct ms_register, status),
 399			sizeof(struct ms_status_register)))
 400			return 0;
 401
 402		msb->state = MSB_RP_RECEIVE_STATUS_REG;
 403		return 0;
 404
 405	case MSB_RP_RECEIVE_STATUS_REG:
 406		msb->regs.status = *(struct ms_status_register *)mrq->data;
 407		msb->state = MSB_RP_SEND_OOB_READ;
 408		fallthrough;
 409
 410	case MSB_RP_SEND_OOB_READ:
 411		if (!msb_read_regs(msb,
 412			offsetof(struct ms_register, extra_data),
 413			sizeof(struct ms_extra_data_register)))
 414			return 0;
 415
 416		msb->state = MSB_RP_RECEIVE_OOB_READ;
 417		return 0;
 418
 419	case MSB_RP_RECEIVE_OOB_READ:
 420		msb->regs.extra_data =
 421			*(struct ms_extra_data_register *) mrq->data;
 422		msb->state = MSB_RP_SEND_READ_DATA;
 423		fallthrough;
 424
 425	case MSB_RP_SEND_READ_DATA:
 426		/* Skip that state if we only read the oob */
 427		if (msb->regs.param.cp == MEMSTICK_CP_EXTRA) {
 428			msb->state = MSB_RP_RECEIVE_READ_DATA;
 429			goto again;
 430		}
 431
 432		sg_init_table(sg, ARRAY_SIZE(sg));
 433		msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
 434			msb->current_sg_offset,
 435			msb->page_size);
 436
 437		memstick_init_req_sg(mrq, MS_TPC_READ_LONG_DATA, sg);
 438		msb->state = MSB_RP_RECEIVE_READ_DATA;
 439		return 0;
 440
 441	case MSB_RP_RECEIVE_READ_DATA:
 442		if (!(msb->regs.status.interrupt & MEMSTICK_INT_ERR)) {
 443			msb->current_sg_offset += msb->page_size;
 444			return msb_exit_state_machine(msb, 0);
 445		}
 446
 447		if (msb->regs.status.status1 & MEMSTICK_UNCORR_ERROR) {
 448			dbg("read_page: uncorrectable error");
 449			return msb_exit_state_machine(msb, -EBADMSG);
 450		}
 451
 452		if (msb->regs.status.status1 & MEMSTICK_CORR_ERROR) {
 453			dbg("read_page: correctable error");
 454			msb->current_sg_offset += msb->page_size;
 455			return msb_exit_state_machine(msb, -EUCLEAN);
 456		} else {
 457			dbg("read_page: INT error, but no status error bits");
 458			return msb_exit_state_machine(msb, -EIO);
 459		}
 460	}
 461
 462	BUG();
 463}
 464
 465/*
 466 * Handler of writes of exactly one block.
 467 * Takes address from msb->regs.param.
 468 * Writes same extra data to blocks, also taken
 469 * from msb->regs.extra
 470 * Returns -EBADMSG if write fails due to uncorrectable error, or -EIO if
 471 * device refuses to take the command or something else
 472 */
 473static int h_msb_write_block(struct memstick_dev *card,
 474					struct memstick_request **out_mrq)
 475{
 476	struct msb_data *msb = memstick_get_drvdata(card);
 477	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
 478	struct scatterlist sg[2];
 479	u8 intreg, command;
 480
 481	if (mrq->error)
 482		return msb_exit_state_machine(msb, mrq->error);
 483
 484again:
 485	switch (msb->state) {
 486
 487	/* HACK: Jmicon handling of TPCs between 8 and
 488	 *	sizeof(memstick_request.data) is broken due to hardware
 489	 *	bug in PIO mode that is used for these TPCs
 490	 *	Therefore split the write
 491	 */
 492
 493	case MSB_WB_SEND_WRITE_PARAMS:
 494		if (!msb_write_regs(msb,
 495			offsetof(struct ms_register, param),
 496			sizeof(struct ms_param_register),
 497			&msb->regs.param))
 498			return 0;
 499
 500		msb->state = MSB_WB_SEND_WRITE_OOB;
 501		return 0;
 502
 503	case MSB_WB_SEND_WRITE_OOB:
 504		if (!msb_write_regs(msb,
 505			offsetof(struct ms_register, extra_data),
 506			sizeof(struct ms_extra_data_register),
 507			&msb->regs.extra_data))
 508			return 0;
 509		msb->state = MSB_WB_SEND_WRITE_COMMAND;
 510		return 0;
 511
 512
 513	case MSB_WB_SEND_WRITE_COMMAND:
 514		command = MS_CMD_BLOCK_WRITE;
 515		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
 516		msb->state = MSB_WB_SEND_INT_REQ;
 517		return 0;
 518
 519	case MSB_WB_SEND_INT_REQ:
 520		msb->state = MSB_WB_RECEIVE_INT_REQ;
 521		if (msb_read_int_reg(msb, -1))
 522			return 0;
 523		fallthrough;
 524
 525	case MSB_WB_RECEIVE_INT_REQ:
 526		intreg = mrq->data[0];
 527		msb->regs.status.interrupt = intreg;
 528
 529		/* errors mean out of here, and fast... */
 530		if (intreg & (MEMSTICK_INT_CMDNAK))
 531			return msb_exit_state_machine(msb, -EIO);
 532
 533		if (intreg & MEMSTICK_INT_ERR)
 534			return msb_exit_state_machine(msb, -EBADMSG);
 535
 536
 537		/* for last page we need to poll CED */
 538		if (msb->current_page == msb->pages_in_block) {
 539			if (intreg & MEMSTICK_INT_CED)
 540				return msb_exit_state_machine(msb, 0);
 541			msb->state = MSB_WB_SEND_INT_REQ;
 542			goto again;
 543
 544		}
 545
 546		/* for non-last page we need BREQ before writing next chunk */
 547		if (!(intreg & MEMSTICK_INT_BREQ)) {
 548			msb->state = MSB_WB_SEND_INT_REQ;
 549			goto again;
 550		}
 551
 552		msb->int_polling = false;
 553		msb->state = MSB_WB_SEND_WRITE_DATA;
 554		fallthrough;
 555
 556	case MSB_WB_SEND_WRITE_DATA:
 557		sg_init_table(sg, ARRAY_SIZE(sg));
 558
 559		if (msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
 560			msb->current_sg_offset,
 561			msb->page_size) < msb->page_size)
 562			return msb_exit_state_machine(msb, -EIO);
 563
 564		memstick_init_req_sg(mrq, MS_TPC_WRITE_LONG_DATA, sg);
 565		mrq->need_card_int = 1;
 566		msb->state = MSB_WB_RECEIVE_WRITE_CONFIRMATION;
 567		return 0;
 568
 569	case MSB_WB_RECEIVE_WRITE_CONFIRMATION:
 570		msb->current_page++;
 571		msb->current_sg_offset += msb->page_size;
 572		msb->state = MSB_WB_SEND_INT_REQ;
 573		goto again;
 574	default:
 575		BUG();
 576	}
 577
 578	return 0;
 579}
 580
 581/*
 582 * This function is used to send simple IO requests to device that consist
 583 * of register write + command
 584 */
 585static int h_msb_send_command(struct memstick_dev *card,
 586					struct memstick_request **out_mrq)
 587{
 588	struct msb_data *msb = memstick_get_drvdata(card);
 589	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
 590	u8 intreg;
 591
 592	if (mrq->error) {
 593		dbg("send_command: unknown error");
 594		return msb_exit_state_machine(msb, mrq->error);
 595	}
 596again:
 597	switch (msb->state) {
 598
 599	/* HACK: see h_msb_write_block */
 600	case MSB_SC_SEND_WRITE_PARAMS: /* write param register*/
 601		if (!msb_write_regs(msb,
 602			offsetof(struct ms_register, param),
 603			sizeof(struct ms_param_register),
 604			&msb->regs.param))
 605			return 0;
 606		msb->state = MSB_SC_SEND_WRITE_OOB;
 607		return 0;
 608
 609	case MSB_SC_SEND_WRITE_OOB:
 610		if (!msb->command_need_oob) {
 611			msb->state = MSB_SC_SEND_COMMAND;
 612			goto again;
 613		}
 614
 615		if (!msb_write_regs(msb,
 616			offsetof(struct ms_register, extra_data),
 617			sizeof(struct ms_extra_data_register),
 618			&msb->regs.extra_data))
 619			return 0;
 620
 621		msb->state = MSB_SC_SEND_COMMAND;
 622		return 0;
 623
 624	case MSB_SC_SEND_COMMAND:
 625		memstick_init_req(mrq, MS_TPC_SET_CMD, &msb->command_value, 1);
 626		msb->state = MSB_SC_SEND_INT_REQ;
 627		return 0;
 628
 629	case MSB_SC_SEND_INT_REQ:
 630		msb->state = MSB_SC_RECEIVE_INT_REQ;
 631		if (msb_read_int_reg(msb, -1))
 632			return 0;
 633		fallthrough;
 634
 635	case MSB_SC_RECEIVE_INT_REQ:
 636		intreg = mrq->data[0];
 637
 638		if (intreg & MEMSTICK_INT_CMDNAK)
 639			return msb_exit_state_machine(msb, -EIO);
 640		if (intreg & MEMSTICK_INT_ERR)
 641			return msb_exit_state_machine(msb, -EBADMSG);
 642
 643		if (!(intreg & MEMSTICK_INT_CED)) {
 644			msb->state = MSB_SC_SEND_INT_REQ;
 645			goto again;
 646		}
 647
 648		return msb_exit_state_machine(msb, 0);
 649	}
 650
 651	BUG();
 652}
 653
 654/* Small handler for card reset */
 655static int h_msb_reset(struct memstick_dev *card,
 656					struct memstick_request **out_mrq)
 657{
 658	u8 command = MS_CMD_RESET;
 659	struct msb_data *msb = memstick_get_drvdata(card);
 660	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
 661
 662	if (mrq->error)
 663		return msb_exit_state_machine(msb, mrq->error);
 664
 665	switch (msb->state) {
 666	case MSB_RS_SEND:
 667		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
 668		mrq->need_card_int = 0;
 669		msb->state = MSB_RS_CONFIRM;
 670		return 0;
 671	case MSB_RS_CONFIRM:
 672		return msb_exit_state_machine(msb, 0);
 673	}
 674	BUG();
 675}
 676
 677/* This handler is used to do serial->parallel switch */
 678static int h_msb_parallel_switch(struct memstick_dev *card,
 679					struct memstick_request **out_mrq)
 680{
 681	struct msb_data *msb = memstick_get_drvdata(card);
 682	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
 683	struct memstick_host *host = card->host;
 684
 685	if (mrq->error) {
 686		dbg("parallel_switch: error");
 687		msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
 688		return msb_exit_state_machine(msb, mrq->error);
 689	}
 690
 691	switch (msb->state) {
 692	case MSB_PS_SEND_SWITCH_COMMAND:
 693		/* Set the parallel interface on memstick side */
 694		msb->regs.param.system |= MEMSTICK_SYS_PAM;
 695
 696		if (!msb_write_regs(msb,
 697			offsetof(struct ms_register, param),
 698			1,
 699			(unsigned char *)&msb->regs.param))
 700			return 0;
 701
 702		msb->state = MSB_PS_SWICH_HOST;
 703		return 0;
 704
 705	case MSB_PS_SWICH_HOST:
 706		 /* Set parallel interface on our side + send a dummy request
 707		  * to see if card responds
 708		  */
 709		host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_PAR4);
 710		memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
 711		msb->state = MSB_PS_CONFIRM;
 712		return 0;
 713
 714	case MSB_PS_CONFIRM:
 715		return msb_exit_state_machine(msb, 0);
 716	}
 717
 718	BUG();
 719}
 720
 721static int msb_switch_to_parallel(struct msb_data *msb);
 722
 723/* Reset the card, to guard against hw errors beeing treated as bad blocks */
 724static int msb_reset(struct msb_data *msb, bool full)
 725{
 726
 727	bool was_parallel = msb->regs.param.system & MEMSTICK_SYS_PAM;
 728	struct memstick_dev *card = msb->card;
 729	struct memstick_host *host = card->host;
 730	int error;
 731
 732	/* Reset the card */
 733	msb->regs.param.system = MEMSTICK_SYS_BAMD;
 734
 735	if (full) {
 736		error =  host->set_param(host,
 737					MEMSTICK_POWER, MEMSTICK_POWER_OFF);
 738		if (error)
 739			goto out_error;
 740
 741		msb_invalidate_reg_window(msb);
 742
 743		error = host->set_param(host,
 744					MEMSTICK_POWER, MEMSTICK_POWER_ON);
 745		if (error)
 746			goto out_error;
 747
 748		error = host->set_param(host,
 749					MEMSTICK_INTERFACE, MEMSTICK_SERIAL);
 750		if (error) {
 751out_error:
 752			dbg("Failed to reset the host controller");
 753			msb->read_only = true;
 754			return -EFAULT;
 755		}
 756	}
 757
 758	error = msb_run_state_machine(msb, h_msb_reset);
 759	if (error) {
 760		dbg("Failed to reset the card");
 761		msb->read_only = true;
 762		return -ENODEV;
 763	}
 764
 765	/* Set parallel mode */
 766	if (was_parallel)
 767		msb_switch_to_parallel(msb);
 768	return 0;
 769}
 770
 771/* Attempts to switch interface to parallel mode */
 772static int msb_switch_to_parallel(struct msb_data *msb)
 773{
 774	int error;
 775
 776	error = msb_run_state_machine(msb, h_msb_parallel_switch);
 777	if (error) {
 778		pr_err("Switch to parallel failed");
 779		msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
 780		msb_reset(msb, true);
 781		return -EFAULT;
 782	}
 783
 784	msb->caps |= MEMSTICK_CAP_AUTO_GET_INT;
 785	return 0;
 786}
 787
 788/* Changes overwrite flag on a page */
 789static int msb_set_overwrite_flag(struct msb_data *msb,
 790						u16 pba, u8 page, u8 flag)
 791{
 792	if (msb->read_only)
 793		return -EROFS;
 794
 795	msb->regs.param.block_address = cpu_to_be16(pba);
 796	msb->regs.param.page_address = page;
 797	msb->regs.param.cp = MEMSTICK_CP_OVERWRITE;
 798	msb->regs.extra_data.overwrite_flag = flag;
 799	msb->command_value = MS_CMD_BLOCK_WRITE;
 800	msb->command_need_oob = true;
 801
 802	dbg_verbose("changing overwrite flag to %02x for sector %d, page %d",
 803							flag, pba, page);
 804	return msb_run_state_machine(msb, h_msb_send_command);
 805}
 806
 807static int msb_mark_bad(struct msb_data *msb, int pba)
 808{
 809	pr_notice("marking pba %d as bad", pba);
 810	msb_reset(msb, true);
 811	return msb_set_overwrite_flag(
 812			msb, pba, 0, 0xFF & ~MEMSTICK_OVERWRITE_BKST);
 813}
 814
 815static int msb_mark_page_bad(struct msb_data *msb, int pba, int page)
 816{
 817	dbg("marking page %d of pba %d as bad", page, pba);
 818	msb_reset(msb, true);
 819	return msb_set_overwrite_flag(msb,
 820		pba, page, ~MEMSTICK_OVERWRITE_PGST0);
 821}
 822
 823/* Erases one physical block */
 824static int msb_erase_block(struct msb_data *msb, u16 pba)
 825{
 826	int error, try;
 827
 828	if (msb->read_only)
 829		return -EROFS;
 830
 831	dbg_verbose("erasing pba %d", pba);
 832
 833	for (try = 1; try < 3; try++) {
 834		msb->regs.param.block_address = cpu_to_be16(pba);
 835		msb->regs.param.page_address = 0;
 836		msb->regs.param.cp = MEMSTICK_CP_BLOCK;
 837		msb->command_value = MS_CMD_BLOCK_ERASE;
 838		msb->command_need_oob = false;
 839
 840
 841		error = msb_run_state_machine(msb, h_msb_send_command);
 842		if (!error || msb_reset(msb, true))
 843			break;
 844	}
 845
 846	if (error) {
 847		pr_err("erase failed, marking pba %d as bad", pba);
 848		msb_mark_bad(msb, pba);
 849	}
 850
 851	dbg_verbose("erase success, marking pba %d as unused", pba);
 852	msb_mark_block_unused(msb, pba);
 853	__set_bit(pba, msb->erased_blocks_bitmap);
 854	return error;
 855}
 856
 857/* Reads one page from device */
 858static int msb_read_page(struct msb_data *msb,
 859	u16 pba, u8 page, struct ms_extra_data_register *extra,
 860					struct scatterlist *sg,  int offset)
 861{
 862	int try, error;
 863
 864	if (pba == MS_BLOCK_INVALID) {
 865		unsigned long flags;
 866		struct sg_mapping_iter miter;
 867		size_t len = msb->page_size;
 868
 869		dbg_verbose("read unmapped sector. returning 0xFF");
 870
 871		local_irq_save(flags);
 872		sg_miter_start(&miter, sg, sg_nents(sg),
 873				SG_MITER_ATOMIC | SG_MITER_TO_SG);
 874
 875		while (sg_miter_next(&miter) && len > 0) {
 876
 877			int chunklen;
 878
 879			if (offset && offset >= miter.length) {
 880				offset -= miter.length;
 881				continue;
 882			}
 883
 884			chunklen = min(miter.length - offset, len);
 885			memset(miter.addr + offset, 0xFF, chunklen);
 886			len -= chunklen;
 887			offset = 0;
 888		}
 889
 890		sg_miter_stop(&miter);
 891		local_irq_restore(flags);
 892
 893		if (offset)
 894			return -EFAULT;
 895
 896		if (extra)
 897			memset(extra, 0xFF, sizeof(*extra));
 898		return 0;
 899	}
 900
 901	if (pba >= msb->block_count) {
 902		pr_err("BUG: attempt to read beyond the end of the card at pba %d", pba);
 903		return -EINVAL;
 904	}
 905
 906	for (try = 1; try < 3; try++) {
 907		msb->regs.param.block_address = cpu_to_be16(pba);
 908		msb->regs.param.page_address = page;
 909		msb->regs.param.cp = MEMSTICK_CP_PAGE;
 910
 911		msb->current_sg = sg;
 912		msb->current_sg_offset = offset;
 913		error = msb_run_state_machine(msb, h_msb_read_page);
 914
 915
 916		if (error == -EUCLEAN) {
 917			pr_notice("correctable error on pba %d, page %d",
 918				pba, page);
 919			error = 0;
 920		}
 921
 922		if (!error && extra)
 923			*extra = msb->regs.extra_data;
 924
 925		if (!error || msb_reset(msb, true))
 926			break;
 927
 928	}
 929
 930	/* Mark bad pages */
 931	if (error == -EBADMSG) {
 932		pr_err("uncorrectable error on read of pba %d, page %d",
 933			pba, page);
 934
 935		if (msb->regs.extra_data.overwrite_flag &
 936					MEMSTICK_OVERWRITE_PGST0)
 937			msb_mark_page_bad(msb, pba, page);
 938		return -EBADMSG;
 939	}
 940
 941	if (error)
 942		pr_err("read of pba %d, page %d failed with error %d",
 943			pba, page, error);
 944	return error;
 945}
 946
 947/* Reads oob of page only */
 948static int msb_read_oob(struct msb_data *msb, u16 pba, u16 page,
 949	struct ms_extra_data_register *extra)
 950{
 951	int error;
 952
 953	BUG_ON(!extra);
 954	msb->regs.param.block_address = cpu_to_be16(pba);
 955	msb->regs.param.page_address = page;
 956	msb->regs.param.cp = MEMSTICK_CP_EXTRA;
 957
 958	if (pba > msb->block_count) {
 959		pr_err("BUG: attempt to read beyond the end of card at pba %d", pba);
 960		return -EINVAL;
 961	}
 962
 963	error = msb_run_state_machine(msb, h_msb_read_page);
 964	*extra = msb->regs.extra_data;
 965
 966	if (error == -EUCLEAN) {
 967		pr_notice("correctable error on pba %d, page %d",
 968			pba, page);
 969		return 0;
 970	}
 971
 972	return error;
 973}
 974
 975/* Reads a block and compares it with data contained in scatterlist orig_sg */
 976static int msb_verify_block(struct msb_data *msb, u16 pba,
 977				struct scatterlist *orig_sg,  int offset)
 978{
 979	struct scatterlist sg;
 980	int page = 0, error;
 981
 982	sg_init_one(&sg, msb->block_buffer, msb->block_size);
 983
 984	while (page < msb->pages_in_block) {
 985
 986		error = msb_read_page(msb, pba, page,
 987				NULL, &sg, page * msb->page_size);
 988		if (error)
 989			return error;
 990		page++;
 991	}
 992
 993	if (msb_sg_compare_to_buffer(orig_sg, offset,
 994				msb->block_buffer, msb->block_size))
 995		return -EIO;
 996	return 0;
 997}
 998
 999/* Writes exectly one block + oob */
1000static int msb_write_block(struct msb_data *msb,
1001			u16 pba, u32 lba, struct scatterlist *sg, int offset)
1002{
1003	int error, current_try = 1;
1004
1005	BUG_ON(sg->length < msb->page_size);
1006
1007	if (msb->read_only)
1008		return -EROFS;
1009
1010	if (pba == MS_BLOCK_INVALID) {
1011		pr_err(
1012			"BUG: write: attempt to write MS_BLOCK_INVALID block");
1013		return -EINVAL;
1014	}
1015
1016	if (pba >= msb->block_count || lba >= msb->logical_block_count) {
1017		pr_err(
1018		"BUG: write: attempt to write beyond the end of device");
1019		return -EINVAL;
1020	}
1021
1022	if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
1023		pr_err("BUG: write: lba zone mismatch");
1024		return -EINVAL;
1025	}
1026
1027	if (pba == msb->boot_block_locations[0] ||
1028		pba == msb->boot_block_locations[1]) {
1029		pr_err("BUG: write: attempt to write to boot blocks!");
1030		return -EINVAL;
1031	}
1032
1033	while (1) {
1034
1035		if (msb->read_only)
1036			return -EROFS;
1037
1038		msb->regs.param.cp = MEMSTICK_CP_BLOCK;
1039		msb->regs.param.page_address = 0;
1040		msb->regs.param.block_address = cpu_to_be16(pba);
1041
1042		msb->regs.extra_data.management_flag = 0xFF;
1043		msb->regs.extra_data.overwrite_flag = 0xF8;
1044		msb->regs.extra_data.logical_address = cpu_to_be16(lba);
1045
1046		msb->current_sg = sg;
1047		msb->current_sg_offset = offset;
1048		msb->current_page = 0;
1049
1050		error = msb_run_state_machine(msb, h_msb_write_block);
1051
1052		/* Sector we just wrote to is assumed erased since its pba
1053		 * was erased. If it wasn't erased, write will succeed
1054		 * and will just clear the bits that were set in the block
1055		 * thus test that what we have written,
1056		 * matches what we expect.
1057		 * We do trust the blocks that we erased
1058		 */
1059		if (!error && (verify_writes ||
1060				!test_bit(pba, msb->erased_blocks_bitmap)))
1061			error = msb_verify_block(msb, pba, sg, offset);
1062
1063		if (!error)
1064			break;
1065
1066		if (current_try > 1 || msb_reset(msb, true))
1067			break;
1068
1069		pr_err("write failed, trying to erase the pba %d", pba);
1070		error = msb_erase_block(msb, pba);
1071		if (error)
1072			break;
1073
1074		current_try++;
1075	}
1076	return error;
1077}
1078
1079/* Finds a free block for write replacement */
1080static u16 msb_get_free_block(struct msb_data *msb, int zone)
1081{
1082	u16 pos;
1083	int pba = zone * MS_BLOCKS_IN_ZONE;
1084	int i;
1085
1086	get_random_bytes(&pos, sizeof(pos));
1087
1088	if (!msb->free_block_count[zone]) {
1089		pr_err("NO free blocks in the zone %d, to use for a write, (media is WORN out) switching to RO mode", zone);
1090		msb->read_only = true;
1091		return MS_BLOCK_INVALID;
1092	}
1093
1094	pos %= msb->free_block_count[zone];
1095
1096	dbg_verbose("have %d choices for a free block, selected randomly: %d",
1097		msb->free_block_count[zone], pos);
1098
1099	pba = find_next_zero_bit(msb->used_blocks_bitmap,
1100							msb->block_count, pba);
1101	for (i = 0; i < pos; ++i)
1102		pba = find_next_zero_bit(msb->used_blocks_bitmap,
1103						msb->block_count, pba + 1);
1104
1105	dbg_verbose("result of the free blocks scan: pba %d", pba);
1106
1107	if (pba == msb->block_count || (msb_get_zone_from_pba(pba)) != zone) {
1108		pr_err("BUG: cant get a free block");
1109		msb->read_only = true;
1110		return MS_BLOCK_INVALID;
1111	}
1112
1113	msb_mark_block_used(msb, pba);
1114	return pba;
1115}
1116
1117static int msb_update_block(struct msb_data *msb, u16 lba,
1118	struct scatterlist *sg, int offset)
1119{
1120	u16 pba, new_pba;
1121	int error, try;
1122
1123	pba = msb->lba_to_pba_table[lba];
1124	dbg_verbose("start of a block update at lba  %d, pba %d", lba, pba);
1125
1126	if (pba != MS_BLOCK_INVALID) {
1127		dbg_verbose("setting the update flag on the block");
1128		msb_set_overwrite_flag(msb, pba, 0,
1129				0xFF & ~MEMSTICK_OVERWRITE_UDST);
1130	}
1131
1132	for (try = 0; try < 3; try++) {
1133		new_pba = msb_get_free_block(msb,
1134			msb_get_zone_from_lba(lba));
1135
1136		if (new_pba == MS_BLOCK_INVALID) {
1137			error = -EIO;
1138			goto out;
1139		}
1140
1141		dbg_verbose("block update: writing updated block to the pba %d",
1142								new_pba);
1143		error = msb_write_block(msb, new_pba, lba, sg, offset);
1144		if (error == -EBADMSG) {
1145			msb_mark_bad(msb, new_pba);
1146			continue;
1147		}
1148
1149		if (error)
1150			goto out;
1151
1152		dbg_verbose("block update: erasing the old block");
1153		msb_erase_block(msb, pba);
1154		msb->lba_to_pba_table[lba] = new_pba;
1155		return 0;
1156	}
1157out:
1158	if (error) {
1159		pr_err("block update error after %d tries,  switching to r/o mode", try);
1160		msb->read_only = true;
1161	}
1162	return error;
1163}
1164
1165/* Converts endiannes in the boot block for easy use */
1166static void msb_fix_boot_page_endianness(struct ms_boot_page *p)
1167{
1168	p->header.block_id = be16_to_cpu(p->header.block_id);
1169	p->header.format_reserved = be16_to_cpu(p->header.format_reserved);
1170	p->entry.disabled_block.start_addr
1171		= be32_to_cpu(p->entry.disabled_block.start_addr);
1172	p->entry.disabled_block.data_size
1173		= be32_to_cpu(p->entry.disabled_block.data_size);
1174	p->entry.cis_idi.start_addr
1175		= be32_to_cpu(p->entry.cis_idi.start_addr);
1176	p->entry.cis_idi.data_size
1177		= be32_to_cpu(p->entry.cis_idi.data_size);
1178	p->attr.block_size = be16_to_cpu(p->attr.block_size);
1179	p->attr.number_of_blocks = be16_to_cpu(p->attr.number_of_blocks);
1180	p->attr.number_of_effective_blocks
1181		= be16_to_cpu(p->attr.number_of_effective_blocks);
1182	p->attr.page_size = be16_to_cpu(p->attr.page_size);
1183	p->attr.memory_manufacturer_code
1184		= be16_to_cpu(p->attr.memory_manufacturer_code);
1185	p->attr.memory_device_code = be16_to_cpu(p->attr.memory_device_code);
1186	p->attr.implemented_capacity
1187		= be16_to_cpu(p->attr.implemented_capacity);
1188	p->attr.controller_number = be16_to_cpu(p->attr.controller_number);
1189	p->attr.controller_function = be16_to_cpu(p->attr.controller_function);
1190}
1191
1192static int msb_read_boot_blocks(struct msb_data *msb)
1193{
1194	int pba = 0;
1195	struct scatterlist sg;
1196	struct ms_extra_data_register extra;
1197	struct ms_boot_page *page;
1198
1199	msb->boot_block_locations[0] = MS_BLOCK_INVALID;
1200	msb->boot_block_locations[1] = MS_BLOCK_INVALID;
1201	msb->boot_block_count = 0;
1202
1203	dbg_verbose("Start of a scan for the boot blocks");
1204
1205	if (!msb->boot_page) {
1206		page = kmalloc_array(2, sizeof(struct ms_boot_page),
1207				     GFP_KERNEL);
1208		if (!page)
1209			return -ENOMEM;
1210
1211		msb->boot_page = page;
1212	} else
1213		page = msb->boot_page;
1214
1215	msb->block_count = MS_BLOCK_MAX_BOOT_ADDR;
1216
1217	for (pba = 0; pba < MS_BLOCK_MAX_BOOT_ADDR; pba++) {
1218
1219		sg_init_one(&sg, page, sizeof(*page));
1220		if (msb_read_page(msb, pba, 0, &extra, &sg, 0)) {
1221			dbg("boot scan: can't read pba %d", pba);
1222			continue;
1223		}
1224
1225		if (extra.management_flag & MEMSTICK_MANAGEMENT_SYSFLG) {
1226			dbg("management flag doesn't indicate boot block %d",
1227									pba);
1228			continue;
1229		}
1230
1231		if (be16_to_cpu(page->header.block_id) != MS_BLOCK_BOOT_ID) {
1232			dbg("the pba at %d doesn't contain boot block ID", pba);
1233			continue;
1234		}
1235
1236		msb_fix_boot_page_endianness(page);
1237		msb->boot_block_locations[msb->boot_block_count] = pba;
1238
1239		page++;
1240		msb->boot_block_count++;
1241
1242		if (msb->boot_block_count == 2)
1243			break;
1244	}
1245
1246	if (!msb->boot_block_count) {
1247		pr_err("media doesn't contain master page, aborting");
1248		return -EIO;
1249	}
1250
1251	dbg_verbose("End of scan for boot blocks");
1252	return 0;
1253}
1254
1255static int msb_read_bad_block_table(struct msb_data *msb, int block_nr)
1256{
1257	struct ms_boot_page *boot_block;
1258	struct scatterlist sg;
1259	u16 *buffer = NULL;
1260	int offset = 0;
1261	int i, error = 0;
1262	int data_size, data_offset, page, page_offset, size_to_read;
1263	u16 pba;
1264
1265	BUG_ON(block_nr > 1);
1266	boot_block = &msb->boot_page[block_nr];
1267	pba = msb->boot_block_locations[block_nr];
1268
1269	if (msb->boot_block_locations[block_nr] == MS_BLOCK_INVALID)
1270		return -EINVAL;
1271
1272	data_size = boot_block->entry.disabled_block.data_size;
1273	data_offset = sizeof(struct ms_boot_page) +
1274			boot_block->entry.disabled_block.start_addr;
1275	if (!data_size)
1276		return 0;
1277
1278	page = data_offset / msb->page_size;
1279	page_offset = data_offset % msb->page_size;
1280	size_to_read =
1281		DIV_ROUND_UP(data_size + page_offset, msb->page_size) *
1282			msb->page_size;
1283
1284	dbg("reading bad block of boot block at pba %d, offset %d len %d",
1285		pba, data_offset, data_size);
1286
1287	buffer = kzalloc(size_to_read, GFP_KERNEL);
1288	if (!buffer)
1289		return -ENOMEM;
1290
1291	/* Read the buffer */
1292	sg_init_one(&sg, buffer, size_to_read);
1293
1294	while (offset < size_to_read) {
1295		error = msb_read_page(msb, pba, page, NULL, &sg, offset);
1296		if (error)
1297			goto out;
1298
1299		page++;
1300		offset += msb->page_size;
1301
1302		if (page == msb->pages_in_block) {
1303			pr_err(
1304			"bad block table extends beyond the boot block");
1305			break;
1306		}
1307	}
1308
1309	/* Process the bad block table */
1310	for (i = page_offset; i < data_size / sizeof(u16); i++) {
1311
1312		u16 bad_block = be16_to_cpu(buffer[i]);
1313
1314		if (bad_block >= msb->block_count) {
1315			dbg("bad block table contains invalid block %d",
1316								bad_block);
1317			continue;
1318		}
1319
1320		if (test_bit(bad_block, msb->used_blocks_bitmap))  {
1321			dbg("duplicate bad block %d in the table",
1322				bad_block);
1323			continue;
1324		}
1325
1326		dbg("block %d is marked as factory bad", bad_block);
1327		msb_mark_block_used(msb, bad_block);
1328	}
1329out:
1330	kfree(buffer);
1331	return error;
1332}
1333
1334static int msb_ftl_initialize(struct msb_data *msb)
1335{
1336	int i;
1337
1338	if (msb->ftl_initialized)
1339		return 0;
1340
1341	msb->zone_count = msb->block_count / MS_BLOCKS_IN_ZONE;
1342	msb->logical_block_count = msb->zone_count * 496 - 2;
1343
1344	msb->used_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
1345	msb->erased_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
1346	msb->lba_to_pba_table =
1347		kmalloc_array(msb->logical_block_count, sizeof(u16),
1348			      GFP_KERNEL);
1349
1350	if (!msb->used_blocks_bitmap || !msb->lba_to_pba_table ||
1351						!msb->erased_blocks_bitmap) {
1352		kfree(msb->used_blocks_bitmap);
 
1353		kfree(msb->lba_to_pba_table);
1354		kfree(msb->erased_blocks_bitmap);
1355		return -ENOMEM;
1356	}
1357
1358	for (i = 0; i < msb->zone_count; i++)
1359		msb->free_block_count[i] = MS_BLOCKS_IN_ZONE;
1360
1361	memset(msb->lba_to_pba_table, MS_BLOCK_INVALID,
1362			msb->logical_block_count * sizeof(u16));
1363
1364	dbg("initial FTL tables created. Zone count = %d, Logical block count = %d",
1365		msb->zone_count, msb->logical_block_count);
1366
1367	msb->ftl_initialized = true;
1368	return 0;
1369}
1370
1371static int msb_ftl_scan(struct msb_data *msb)
1372{
1373	u16 pba, lba, other_block;
1374	u8 overwrite_flag, management_flag, other_overwrite_flag;
1375	int error;
1376	struct ms_extra_data_register extra;
1377	u8 *overwrite_flags = kzalloc(msb->block_count, GFP_KERNEL);
1378
1379	if (!overwrite_flags)
1380		return -ENOMEM;
1381
1382	dbg("Start of media scanning");
1383	for (pba = 0; pba < msb->block_count; pba++) {
1384
1385		if (pba == msb->boot_block_locations[0] ||
1386			pba == msb->boot_block_locations[1]) {
1387			dbg_verbose("pba %05d -> [boot block]", pba);
1388			msb_mark_block_used(msb, pba);
1389			continue;
1390		}
1391
1392		if (test_bit(pba, msb->used_blocks_bitmap)) {
1393			dbg_verbose("pba %05d -> [factory bad]", pba);
1394			continue;
1395		}
1396
1397		memset(&extra, 0, sizeof(extra));
1398		error = msb_read_oob(msb, pba, 0, &extra);
1399
1400		/* can't trust the page if we can't read the oob */
1401		if (error == -EBADMSG) {
1402			pr_notice(
1403			"oob of pba %d damaged, will try to erase it", pba);
1404			msb_mark_block_used(msb, pba);
1405			msb_erase_block(msb, pba);
1406			continue;
1407		} else if (error) {
1408			pr_err("unknown error %d on read of oob of pba %d - aborting",
1409				error, pba);
1410
1411			kfree(overwrite_flags);
1412			return error;
1413		}
1414
1415		lba = be16_to_cpu(extra.logical_address);
1416		management_flag = extra.management_flag;
1417		overwrite_flag = extra.overwrite_flag;
1418		overwrite_flags[pba] = overwrite_flag;
1419
1420		/* Skip bad blocks */
1421		if (!(overwrite_flag & MEMSTICK_OVERWRITE_BKST)) {
1422			dbg("pba %05d -> [BAD]", pba);
1423			msb_mark_block_used(msb, pba);
1424			continue;
1425		}
1426
1427		/* Skip system/drm blocks */
1428		if ((management_flag & MEMSTICK_MANAGEMENT_FLAG_NORMAL) !=
1429			MEMSTICK_MANAGEMENT_FLAG_NORMAL) {
1430			dbg("pba %05d -> [reserved management flag %02x]",
1431							pba, management_flag);
1432			msb_mark_block_used(msb, pba);
1433			continue;
1434		}
1435
1436		/* Erase temporary tables */
1437		if (!(management_flag & MEMSTICK_MANAGEMENT_ATFLG)) {
1438			dbg("pba %05d -> [temp table] - will erase", pba);
1439
1440			msb_mark_block_used(msb, pba);
1441			msb_erase_block(msb, pba);
1442			continue;
1443		}
1444
1445		if (lba == MS_BLOCK_INVALID) {
1446			dbg_verbose("pba %05d -> [free]", pba);
1447			continue;
1448		}
1449
1450		msb_mark_block_used(msb, pba);
1451
1452		/* Block has LBA not according to zoning*/
1453		if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
1454			pr_notice("pba %05d -> [bad lba %05d] - will erase",
1455								pba, lba);
1456			msb_erase_block(msb, pba);
1457			continue;
1458		}
1459
1460		/* No collisions - great */
1461		if (msb->lba_to_pba_table[lba] == MS_BLOCK_INVALID) {
1462			dbg_verbose("pba %05d -> [lba %05d]", pba, lba);
1463			msb->lba_to_pba_table[lba] = pba;
1464			continue;
1465		}
1466
1467		other_block = msb->lba_to_pba_table[lba];
1468		other_overwrite_flag = overwrite_flags[other_block];
1469
1470		pr_notice("Collision between pba %d and pba %d",
1471			pba, other_block);
1472
1473		if (!(overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
1474			pr_notice("pba %d is marked as stable, use it", pba);
1475			msb_erase_block(msb, other_block);
1476			msb->lba_to_pba_table[lba] = pba;
1477			continue;
1478		}
1479
1480		if (!(other_overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
1481			pr_notice("pba %d is marked as stable, use it",
1482								other_block);
1483			msb_erase_block(msb, pba);
1484			continue;
1485		}
1486
1487		pr_notice("collision between blocks %d and %d, without stable flag set on both, erasing pba %d",
1488				pba, other_block, other_block);
1489
1490		msb_erase_block(msb, other_block);
1491		msb->lba_to_pba_table[lba] = pba;
1492	}
1493
1494	dbg("End of media scanning");
1495	kfree(overwrite_flags);
1496	return 0;
1497}
1498
1499static void msb_cache_flush_timer(struct timer_list *t)
1500{
1501	struct msb_data *msb = from_timer(msb, t, cache_flush_timer);
1502
1503	msb->need_flush_cache = true;
1504	queue_work(msb->io_queue, &msb->io_work);
1505}
1506
1507
1508static void msb_cache_discard(struct msb_data *msb)
1509{
1510	if (msb->cache_block_lba == MS_BLOCK_INVALID)
1511		return;
1512
1513	del_timer_sync(&msb->cache_flush_timer);
1514
1515	dbg_verbose("Discarding the write cache");
1516	msb->cache_block_lba = MS_BLOCK_INVALID;
1517	bitmap_zero(&msb->valid_cache_bitmap, msb->pages_in_block);
1518}
1519
1520static int msb_cache_init(struct msb_data *msb)
1521{
1522	timer_setup(&msb->cache_flush_timer, msb_cache_flush_timer, 0);
1523
1524	if (!msb->cache)
1525		msb->cache = kzalloc(msb->block_size, GFP_KERNEL);
1526	if (!msb->cache)
1527		return -ENOMEM;
1528
1529	msb_cache_discard(msb);
1530	return 0;
1531}
1532
1533static int msb_cache_flush(struct msb_data *msb)
1534{
1535	struct scatterlist sg;
1536	struct ms_extra_data_register extra;
1537	int page, offset, error;
1538	u16 pba, lba;
1539
1540	if (msb->read_only)
1541		return -EROFS;
1542
1543	if (msb->cache_block_lba == MS_BLOCK_INVALID)
1544		return 0;
1545
1546	lba = msb->cache_block_lba;
1547	pba = msb->lba_to_pba_table[lba];
1548
1549	dbg_verbose("Flushing the write cache of pba %d (LBA %d)",
1550						pba, msb->cache_block_lba);
1551
1552	sg_init_one(&sg, msb->cache , msb->block_size);
1553
1554	/* Read all missing pages in cache */
1555	for (page = 0; page < msb->pages_in_block; page++) {
1556
1557		if (test_bit(page, &msb->valid_cache_bitmap))
1558			continue;
1559
1560		offset = page * msb->page_size;
1561
1562		dbg_verbose("reading non-present sector %d of cache block %d",
1563			page, lba);
1564		error = msb_read_page(msb, pba, page, &extra, &sg, offset);
1565
1566		/* Bad pages are copied with 00 page status */
1567		if (error == -EBADMSG) {
1568			pr_err("read error on sector %d, contents probably damaged", page);
1569			continue;
1570		}
1571
1572		if (error)
1573			return error;
1574
1575		if ((extra.overwrite_flag & MEMSTICK_OV_PG_NORMAL) !=
1576							MEMSTICK_OV_PG_NORMAL) {
1577			dbg("page %d is marked as bad", page);
1578			continue;
1579		}
1580
1581		set_bit(page, &msb->valid_cache_bitmap);
1582	}
1583
1584	/* Write the cache now */
1585	error = msb_update_block(msb, msb->cache_block_lba, &sg, 0);
1586	pba = msb->lba_to_pba_table[msb->cache_block_lba];
1587
1588	/* Mark invalid pages */
1589	if (!error) {
1590		for (page = 0; page < msb->pages_in_block; page++) {
1591
1592			if (test_bit(page, &msb->valid_cache_bitmap))
1593				continue;
1594
1595			dbg("marking page %d as containing damaged data",
1596				page);
1597			msb_set_overwrite_flag(msb,
1598				pba , page, 0xFF & ~MEMSTICK_OV_PG_NORMAL);
1599		}
1600	}
1601
1602	msb_cache_discard(msb);
1603	return error;
1604}
1605
1606static int msb_cache_write(struct msb_data *msb, int lba,
1607	int page, bool add_to_cache_only, struct scatterlist *sg, int offset)
1608{
1609	int error;
1610	struct scatterlist sg_tmp[10];
1611
1612	if (msb->read_only)
1613		return -EROFS;
1614
1615	if (add_to_cache_only &&
1616			(msb->cache_block_lba == MS_BLOCK_INVALID ||
1617			lba != msb->cache_block_lba))
1618		return 0;
1619
1620	/* If we need to write a different block */
1621	if (msb->cache_block_lba != MS_BLOCK_INVALID &&
1622						lba != msb->cache_block_lba) {
1623		dbg_verbose("first flush the cache");
1624		error = msb_cache_flush(msb);
1625		if (error)
1626			return error;
1627	}
1628
1629	if (msb->cache_block_lba == MS_BLOCK_INVALID) {
1630		msb->cache_block_lba = lba;
1631		mod_timer(&msb->cache_flush_timer,
1632			jiffies + msecs_to_jiffies(cache_flush_timeout));
1633	}
1634
1635	dbg_verbose("Write of LBA %d page %d to cache", lba, page);
1636
1637	sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
1638	msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp), offset, msb->page_size);
1639
1640	sg_copy_to_buffer(sg_tmp, sg_nents(sg_tmp),
1641		msb->cache + page * msb->page_size, msb->page_size);
1642
1643	set_bit(page, &msb->valid_cache_bitmap);
1644	return 0;
1645}
1646
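/*
 * Read one page, serving it from the write cache when that page of the
 * block is cached and valid, and from the card otherwise. Pages read
 * from the card are opportunistically added to the cache.
 */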
1647static int msb_cache_read(struct msb_data *msb, int lba,
1648				int page, struct scatterlist *sg, int offset)
1649{
1650	int pba = msb->lba_to_pba_table[lba];
1651	struct scatterlist sg_tmp[10];
1652	int error = 0;
1653
1654	if (lba == msb->cache_block_lba &&
1655			test_bit(page, &msb->valid_cache_bitmap)) {
1656
1657		dbg_verbose("Read of LBA %d (pba %d) sector %d from cache",
1658							lba, pba, page);
1659
1660		sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
1661		msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp),
1662			offset, msb->page_size);
1663		sg_copy_from_buffer(sg_tmp, sg_nents(sg_tmp),
1664			msb->cache + msb->page_size * page,
1665							msb->page_size);
1666	} else {
1667		dbg_verbose("Read of LBA %d (pba %d) sector %d from device",
1668							lba, pba, page);
1669
1670		error = msb_read_page(msb, pba, page, NULL, sg, offset);
1671		if (error)
1672			return error;
1673
1674		msb_cache_write(msb, lba, page, true, sg, offset);
1675	}
1676	return error;
1677}
1678
1679/* Emulated geometry table
1680 * The content of this table isn't that important;
1681 * one could put different values here, provided that they
1682 * still cover the whole disk.
1683 * The 64 MB entry is what Windows reports for my 64 MB memstick.
1684 */
1685
1686static const struct chs_entry chs_table[] = {
1687/*        size sectors cylinders  heads */
1688	{ 4,    16,    247,       2  },
1689	{ 8,    16,    495,       2  },
1690	{ 16,   16,    495,       4  },
1691	{ 32,   16,    991,       4  },
1692	{ 64,   16,    991,       8  },
1693	{128,   16,    991,       16 },
1694	{ 0 }
1695};
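/*
 * Worked example: the 64 MB row describes 8 heads * 991 cylinders *
 * 16 sectors = 126848 sectors of 512 bytes, roughly 62 MiB. The row is
 * chosen by matching its size field against the raw card size computed
 * in msb_init_card() below.
 */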
1696
1697/* Load information about the card */
1698static int msb_init_card(struct memstick_dev *card)
1699{
1700	struct msb_data *msb = memstick_get_drvdata(card);
1701	struct memstick_host *host = card->host;
1702	struct ms_boot_page *boot_block;
1703	int error = 0, i, raw_size_in_megs;
1704
1705	msb->caps = 0;
1706
1707	/* ROM, RO and WP class cards cannot be written to */
1708	if (card->id.class == MEMSTICK_CLASS_ROM ||
	    card->id.class == MEMSTICK_CLASS_RO ||
	    card->id.class == MEMSTICK_CLASS_WP)
1709		msb->read_only = true;
1710
1711	msb->state = -1;
1712	error = msb_reset(msb, false);
1713	if (error)
1714		return error;
1715
1716	/* Due to a bug in the JMicron driver written by Alex Dubov,
1717	 * its serial mode barely works,
1718	 * so we switch to parallel mode right away
1719	 */
1720	if (host->caps & MEMSTICK_CAP_PAR4)
1721		msb_switch_to_parallel(msb);
1722
1723	msb->page_size = sizeof(struct ms_boot_page);
1724
1725	/* Read the boot page */
1726	error = msb_read_boot_blocks(msb);
1727	if (error)
1728		return -EIO;
1729
1730	boot_block = &msb->boot_page[0];
1731
1732	/* Save interesting attributes from the boot page */
1733	msb->block_count = boot_block->attr.number_of_blocks;
1734	msb->page_size = boot_block->attr.page_size;
1735
1736	msb->pages_in_block = boot_block->attr.block_size * 2;
1737	msb->block_size = msb->page_size * msb->pages_in_block;
1738
1739	if (msb->page_size > PAGE_SIZE) {
1740		/* this isn't supported by Linux at all, anyway */
1741		dbg("device page size %d isn't supported", msb->page_size);
1742		return -EINVAL;
1743	}
1744
1745	msb->block_buffer = kzalloc(msb->block_size, GFP_KERNEL);
1746	if (!msb->block_buffer)
1747		return -ENOMEM;
1748
1749	raw_size_in_megs = (msb->block_size * msb->block_count) >> 20;
1750
1751	for (i = 0; chs_table[i].size; i++) {
1752
1753		if (chs_table[i].size != raw_size_in_megs)
1754			continue;
1755
1756		msb->geometry.cylinders = chs_table[i].cyl;
1757		msb->geometry.heads = chs_table[i].head;
1758		msb->geometry.sectors = chs_table[i].sec;
1759		break;
1760	}
1761
1762	if (boot_block->attr.transfer_supporting == 1)
1763		msb->caps |= MEMSTICK_CAP_PAR4;
1764
1765	if (boot_block->attr.device_type & 0x03)
1766		msb->read_only = true;
1767
1768	dbg("Total block count = %d", msb->block_count);
1769	dbg("Each block consists of %d pages", msb->pages_in_block);
1770	dbg("Page size = %d bytes", msb->page_size);
1771	dbg("Parallel mode supported: %d", !!(msb->caps & MEMSTICK_CAP_PAR4));
1772	dbg("Read only: %d", msb->read_only);
1773
1774#if 0
1775	/* Now we can switch the interface */
1776	if (host->caps & msb->caps & MEMSTICK_CAP_PAR4)
1777		msb_switch_to_parallel(msb);
1778#endif
1779
1780	error = msb_cache_init(msb);
1781	if (error)
1782		return error;
1783
1784	error = msb_ftl_initialize(msb);
1785	if (error)
1786		return error;
1787
1789	/* Read the bad block table */
1790	error = msb_read_bad_block_table(msb, 0);
1791
1792	if (error && error != -ENOMEM) {
1793		dbg("failed to read bad block table from primary boot block, trying from backup");
1794		error = msb_read_bad_block_table(msb, 1);
1795	}
1796
1797	if (error)
1798		return error;
1799
1800	/* *drum roll* Scan the media */
1801	error = msb_ftl_scan(msb);
1802	if (error) {
1803		pr_err("Scan of media failed");
1804		return error;
1805	}
1806
1807	return 0;
1809}
1810
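/*
 * Service a write request. Spans that start at page 0 and cover at
 * least one full block bypass the cache and go straight to
 * msb_update_block(); everything else trickles through the write cache
 * one page at a time.
 */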
1811static int msb_do_write_request(struct msb_data *msb, int lba,
1812	int page, struct scatterlist *sg, size_t len, int *successfully_written)
1813{
1814	int error = 0;
1815	off_t offset = 0;
1816	*successfully_written = 0;
1817
1818	while (offset < len) {
1819		if (page == 0 && len - offset >= msb->block_size) {
1820
1821			if (msb->cache_block_lba == lba)
1822				msb_cache_discard(msb);
1823
1824			dbg_verbose("Writing whole lba %d", lba);
1825			error = msb_update_block(msb, lba, sg, offset);
1826			if (error)
1827				return error;
1828
1829			offset += msb->block_size;
1830			*successfully_written += msb->block_size;
1831			lba++;
1832			continue;
1833		}
1834
1835		error = msb_cache_write(msb, lba, page, false, sg, offset);
1836		if (error)
1837			return error;
1838
1839		offset += msb->page_size;
1840		*successfully_written += msb->page_size;
1841
1842		page++;
1843		if (page == msb->pages_in_block) {
1844			page = 0;
1845			lba++;
1846		}
1847	}
1848	return 0;
1849}
1850
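/*
 * Service a read request page by page through msb_cache_read(),
 * moving to the next logical block whenever the page index wraps.
 */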
1851static int msb_do_read_request(struct msb_data *msb, int lba,
1852		int page, struct scatterlist *sg, int len, int *successfully_read)
1853{
1854	int error = 0;
1855	int offset = 0;
1856	*successfully_read = 0;
1857
1858	while (offset < len) {
1859
1860		error = msb_cache_read(msb, lba, page, sg, offset);
1861		if (error)
1862			return error;
1863
1864		offset += msb->page_size;
1865		*successfully_read += msb->page_size;
1866
1867		page++;
1868		if (page == msb->pages_in_block) {
1869			page = 0;
1870			lba++;
1871		}
1872	}
1873	return 0;
1874}
1875
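/*
 * The IO work item: performs any requested cache flush, then translates
 * the current request's start sector into a (logical block, page) pair
 * and executes it. msb->req is only accessed under q_lock; the actual
 * card IO runs with the lock dropped.
 */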
1876static void msb_io_work(struct work_struct *work)
1877{
1878	struct msb_data *msb = container_of(work, struct msb_data, io_work);
1879	int page, error, len;
1880	sector_t lba;
1881	struct scatterlist *sg = msb->prealloc_sg;
1882	struct request *req;
1883
1884	dbg_verbose("IO: work started");
1885
1886	while (1) {
1887		spin_lock_irq(&msb->q_lock);
1888
1889		if (msb->need_flush_cache) {
1890			msb->need_flush_cache = false;
1891			spin_unlock_irq(&msb->q_lock);
1892			msb_cache_flush(msb);
1893			continue;
1894		}
1895
1896		req = msb->req;
1897		if (!req) {
1898			dbg_verbose("IO: no more requests, exiting");
1899			spin_unlock_irq(&msb->q_lock);
1900			return;
1901		}
1902
1903		spin_unlock_irq(&msb->q_lock);
1904
1905		/* process the request */
1906		dbg_verbose("IO: processing new request");
1907		blk_rq_map_sg(msb->queue, req, sg);
1908
1909		lba = blk_rq_pos(req);
1910
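		/*
		 * Convert the 512-byte start sector into page-sized units,
		 * then split that into block number and page-in-block. For
		 * illustration (hypothetical geometry): with 2048-byte pages
		 * and 32 pages per block, sector 1000 is page unit 250, i.e.
		 * page 26 of logical block 7.
		 */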
1911		sector_div(lba, msb->page_size / 512);
1912		page = sector_div(lba, msb->pages_in_block);
1913
1914		if (rq_data_dir(msb->req) == READ)
1915			error = msb_do_read_request(msb, lba, page, sg,
1916				blk_rq_bytes(req), &len);
1917		else
1918			error = msb_do_write_request(msb, lba, page, sg,
1919				blk_rq_bytes(req), &len);
1920
1921		if (len && !blk_update_request(req, BLK_STS_OK, len)) {
1922			__blk_mq_end_request(req, BLK_STS_OK);
1923			spin_lock_irq(&msb->q_lock);
1924			msb->req = NULL;
1925			spin_unlock_irq(&msb->q_lock);
1926		}
1927
1928		if (error && msb->req) {
1929			blk_status_t ret = errno_to_blk_status(error);
1930
1931			dbg_verbose("IO: ending remaining part of the request with error");
1932			blk_mq_end_request(req, ret);
1933			spin_lock_irq(&msb->q_lock);
1934			msb->req = NULL;
1935			spin_unlock_irq(&msb->q_lock);
1936		}
1937
1938		if (msb->req)
1939			dbg_verbose("IO: request still pending");
1940	}
1941}
1942
1943static DEFINE_IDR(msb_disk_idr); /* set of used disk numbers */
1944static DEFINE_MUTEX(msb_disk_lock); /* protects against races in open/release */
1945
1946static int msb_bd_open(struct block_device *bdev, fmode_t mode)
1947{
1948	struct gendisk *disk = bdev->bd_disk;
1949	struct msb_data *msb = disk->private_data;
1950
1951	dbg_verbose("block device open");
1952
1953	mutex_lock(&msb_disk_lock);
1954
1955	if (msb && msb->card)
1956		msb->usage_count++;
1957
1958	mutex_unlock(&msb_disk_lock);
1959	return 0;
1960}
1961
1962static void msb_data_clear(struct msb_data *msb)
1963{
1964	kfree(msb->boot_page);
1965	kfree(msb->used_blocks_bitmap);
1966	kfree(msb->lba_to_pba_table);
1967	kfree(msb->cache);
1968	msb->card = NULL;
1969}
1970
1971static int msb_disk_release(struct gendisk *disk)
1972{
1973	struct msb_data *msb = disk->private_data;
1974
1975	dbg_verbose("block device release");
1976	mutex_lock(&msb_disk_lock);
1977
1978	if (msb) {
1979		if (msb->usage_count)
1980			msb->usage_count--;
1981
1982		if (!msb->usage_count) {
1983			disk->private_data = NULL;
1984			idr_remove(&msb_disk_idr, msb->disk_id);
1985			put_disk(disk);
1986			kfree(msb);
1987		}
1988	}
1989	mutex_unlock(&msb_disk_lock);
1990	return 0;
1991}
1992
1993static void msb_bd_release(struct gendisk *disk, fmode_t mode)
1994{
1995	msb_disk_release(disk);
1996}
1997
1998static int msb_bd_getgeo(struct block_device *bdev,
1999				 struct hd_geometry *geo)
2000{
2001	struct msb_data *msb = bdev->bd_disk->private_data;
2002	*geo = msb->geometry;
2003	return 0;
2004}
2005
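/*
 * blk-mq dispatch. The driver keeps at most one request in flight
 * (msb->req); a second submission is bounced with BLK_STS_DEV_RESOURCE
 * so the block layer will retry it, and requests arriving after card
 * removal are failed with BLK_STS_IOERR.
 */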
2006static blk_status_t msb_queue_rq(struct blk_mq_hw_ctx *hctx,
2007				 const struct blk_mq_queue_data *bd)
2008{
2009	struct memstick_dev *card = hctx->queue->queuedata;
2010	struct msb_data *msb = memstick_get_drvdata(card);
2011	struct request *req = bd->rq;
2012
2013	dbg_verbose("Submit request");
2014
2015	spin_lock_irq(&msb->q_lock);
2016
2017	if (msb->card_dead) {
2018		dbg("Refusing requests on removed card");
2019
2020		WARN_ON(!msb->io_queue_stopped);
2021
2022		spin_unlock_irq(&msb->q_lock);
2023		blk_mq_start_request(req);
2024		return BLK_STS_IOERR;
2025	}
2026
2027	if (msb->req) {
2028		spin_unlock_irq(&msb->q_lock);
2029		return BLK_STS_DEV_RESOURCE;
2030	}
2031
2032	blk_mq_start_request(req);
2033	msb->req = req;
2034
2035	if (!msb->io_queue_stopped)
2036		queue_work(msb->io_queue, &msb->io_work);
2037
2038	spin_unlock_irq(&msb->q_lock);
2039	return BLK_STS_OK;
2040}
2041
2042static int msb_check_card(struct memstick_dev *card)
2043{
2044	struct msb_data *msb = memstick_get_drvdata(card);
2045
2046	return (msb->card_dead == 0);
2047}
2048
2049static void msb_stop(struct memstick_dev *card)
2050{
2051	struct msb_data *msb = memstick_get_drvdata(card);
2052	unsigned long flags;
2053
2054	dbg("Stopping all msblock IO");
2055
2056	blk_mq_stop_hw_queues(msb->queue);
2057	spin_lock_irqsave(&msb->q_lock, flags);
2058	msb->io_queue_stopped = true;
2059	spin_unlock_irqrestore(&msb->q_lock, flags);
2060
2061	del_timer_sync(&msb->cache_flush_timer);
2062	flush_workqueue(msb->io_queue);
2063
2064	spin_lock_irqsave(&msb->q_lock, flags);
2065	if (msb->req) {
2066		blk_mq_requeue_request(msb->req, false);
2067		msb->req = NULL;
2068	}
2069	spin_unlock_irqrestore(&msb->q_lock, flags);
2070}
2071
2072static void msb_start(struct memstick_dev *card)
2073{
2074	struct msb_data *msb = memstick_get_drvdata(card);
2075	unsigned long flags;
2076
2077	dbg("Resuming IO from msblock");
2078
2079	msb_invalidate_reg_window(msb);
2080
2081	spin_lock_irqsave(&msb->q_lock, flags);
2082	if (!msb->io_queue_stopped || msb->card_dead) {
2083		spin_unlock_irqrestore(&msb->q_lock, flags);
2084		return;
2085	}
2086	spin_unlock_irqrestore(&msb->q_lock, flags);
2087
2088	/* Kick a cache flush anyway, it's harmless */
2089	msb->need_flush_cache = true;
2090	msb->io_queue_stopped = false;
2091
2092	blk_mq_start_hw_queues(msb->queue);
2093
2094	queue_work(msb->io_queue, &msb->io_work);
2096}
2097
2098static const struct block_device_operations msb_bdops = {
2099	.open    = msb_bd_open,
2100	.release = msb_bd_release,
2101	.getgeo  = msb_bd_getgeo,
2102	.owner   = THIS_MODULE
2103};
2104
2105static const struct blk_mq_ops msb_mq_ops = {
2106	.queue_rq	= msb_queue_rq,
2107};
2108
2109/* Registers the block device */
2110static int msb_init_disk(struct memstick_dev *card)
2111{
2112	struct msb_data *msb = memstick_get_drvdata(card);
2113	int rc;
2114	unsigned long capacity;
2115
2116	mutex_lock(&msb_disk_lock);
2117	msb->disk_id = idr_alloc(&msb_disk_idr, card, 0, 256, GFP_KERNEL);
2118	mutex_unlock(&msb_disk_lock);
2119
2120	if (msb->disk_id < 0)
2121		return msb->disk_id;
2122
2123	rc = blk_mq_alloc_sq_tag_set(&msb->tag_set, &msb_mq_ops, 2,
2124				     BLK_MQ_F_SHOULD_MERGE);
2125	if (rc)
2126		goto out_release_id;
2127
2128	msb->disk = blk_mq_alloc_disk(&msb->tag_set, card);
2129	if (IS_ERR(msb->disk)) {
2130		rc = PTR_ERR(msb->disk);
2131		goto out_free_tag_set;
2132	}
2133	msb->queue = msb->disk->queue;
2134
2135	blk_queue_max_hw_sectors(msb->queue, MS_BLOCK_MAX_PAGES);
2136	blk_queue_max_segments(msb->queue, MS_BLOCK_MAX_SEGS);
2137	blk_queue_max_segment_size(msb->queue,
2138				   MS_BLOCK_MAX_PAGES * msb->page_size);
2139	blk_queue_logical_block_size(msb->queue, msb->page_size);
2140
2141	sprintf(msb->disk->disk_name, "msblk%d", msb->disk_id);
2142	msb->disk->fops = &msb_bdops;
2143	msb->disk->private_data = msb;
2144
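	/*
	 * Capacity in 512-byte sectors: logical blocks * pages per block *
	 * sectors per page. With (purely illustrative) 4000 logical blocks
	 * of 32 pages, 2048 bytes each, that is 4000 * 32 * 4 = 512000
	 * sectors.
	 */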
2145	capacity = msb->pages_in_block * msb->logical_block_count;
2146	capacity *= (msb->page_size / 512);
2147	set_capacity(msb->disk, capacity);
2148	dbg("Set total disk size to %lu sectors", capacity);
2149
2150	msb->usage_count = 1;
2151	msb->io_queue = alloc_ordered_workqueue("ms_block", WQ_MEM_RECLAIM);
	if (!msb->io_queue) {
		rc = -ENOMEM;
		goto out_cleanup_disk;
	}

2152	INIT_WORK(&msb->io_work, msb_io_work);
2153	sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);
2154
2155	if (msb->read_only)
2156		set_disk_ro(msb->disk, 1);
2157
2158	msb_start(card);
2159	rc = device_add_disk(&card->dev, msb->disk, NULL);
	if (rc)
		goto out_destroy_workqueue;
2160	dbg("Disk added");
2161	return 0;
2162
out_destroy_workqueue:
	destroy_workqueue(msb->io_queue);
out_cleanup_disk:
	put_disk(msb->disk);
2163out_free_tag_set:
2164	blk_mq_free_tag_set(&msb->tag_set);
2165out_release_id:
2166	mutex_lock(&msb_disk_lock);
2167	idr_remove(&msb_disk_idr, msb->disk_id);
2168	mutex_unlock(&msb_disk_lock);
2169	return rc;
2170}
2171
2172static int msb_probe(struct memstick_dev *card)
2173{
2174	struct msb_data *msb;
2175	int rc = 0;
2176
2177	msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
2178	if (!msb)
2179		return -ENOMEM;
2180	memstick_set_drvdata(card, msb);
2181	msb->card = card;
2182	spin_lock_init(&msb->q_lock);
2183
2184	rc = msb_init_card(card);
2185	if (rc)
2186		goto out_free;
2187
2188	rc = msb_init_disk(card);
2189	if (!rc) {
2190		card->check = msb_check_card;
2191		card->stop = msb_stop;
2192		card->start = msb_start;
2193		return 0;
2194	}
2195out_free:
2196	memstick_set_drvdata(card, NULL);
2197	msb_data_clear(msb);
2198	kfree(msb);
2199	return rc;
2200}
2201
2202static void msb_remove(struct memstick_dev *card)
2203{
2204	struct msb_data *msb = memstick_get_drvdata(card);
2205	unsigned long flags;
2206
2207	if (!msb->io_queue_stopped)
2208		msb_stop(card);
2209
2210	dbg("Removing the disk device");
2211
2212	/* Take care of unhandled + new requests from now on */
2213	spin_lock_irqsave(&msb->q_lock, flags);
2214	msb->card_dead = true;
2215	spin_unlock_irqrestore(&msb->q_lock, flags);
2216	blk_mq_start_hw_queues(msb->queue);
2217
2218	/* Remove the disk */
2219	del_gendisk(msb->disk);
2220	blk_cleanup_queue(msb->queue);
2221	blk_mq_free_tag_set(&msb->tag_set);
2222	msb->queue = NULL;
2223
2224	mutex_lock(&msb_disk_lock);
2225	msb_data_clear(msb);
2226	mutex_unlock(&msb_disk_lock);
2227
2228	msb_disk_release(msb->disk);
2229	memstick_set_drvdata(card, NULL);
2230}
2231
2232#ifdef CONFIG_PM
2233
2234static int msb_suspend(struct memstick_dev *card, pm_message_t state)
2235{
2236	msb_stop(card);
2237	return 0;
2238}
2239
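/*
 * On resume the card may have been swapped while the host slept. A
 * throwaway msb_data is initialized from whatever card is present and
 * its boot page, FTL tables and geometry are compared against the saved
 * state; any mismatch marks the card dead so stale data is never
 * written back. Without CONFIG_MEMSTICK_UNSAFE_RESUME the card is
 * always treated as dead.
 */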
2240static int msb_resume(struct memstick_dev *card)
2241{
2242	struct msb_data *msb = memstick_get_drvdata(card);
2243	struct msb_data *new_msb = NULL;
2244	bool card_dead = true;
2245
2246#ifndef CONFIG_MEMSTICK_UNSAFE_RESUME
2247	msb->card_dead = true;
2248	return 0;
2249#endif
2250	mutex_lock(&card->host->lock);
2251
2252	new_msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
2253	if (!new_msb)
2254		goto out;
2255
2256	new_msb->card = card;
2257	memstick_set_drvdata(card, new_msb);
2258	spin_lock_init(&new_msb->q_lock);
2259	sg_init_table(new_msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);
2260
2261	if (msb_init_card(card))
2262		goto out;
2263
2264	if (msb->block_size != new_msb->block_size)
2265		goto out;
2266
2267	if (memcmp(msb->boot_page, new_msb->boot_page,
2268					sizeof(struct ms_boot_page)))
2269		goto out;
2270
2271	if (msb->logical_block_count != new_msb->logical_block_count ||
2272		memcmp(msb->lba_to_pba_table, new_msb->lba_to_pba_table,
2273						msb->logical_block_count))
2274		goto out;
2275
2276	if (msb->block_count != new_msb->block_count ||
2277		memcmp(msb->used_blocks_bitmap, new_msb->used_blocks_bitmap,
2278							msb->block_count / 8))
2279		goto out;
2280
2281	card_dead = false;
2282out:
2283	if (card_dead)
2284		dbg("Card was removed/replaced during suspend");
2285
2286	msb->card_dead = card_dead;
2287	memstick_set_drvdata(card, msb);
2288
2289	if (new_msb) {
2290		msb_data_clear(new_msb);
2291		kfree(new_msb);
2292	}
2293
2294	msb_start(card);
2295	mutex_unlock(&card->host->lock);
2296	return 0;
2297}
2298#else
2299
2300#define msb_suspend NULL
2301#define msb_resume NULL
2302
2303#endif /* CONFIG_PM */
2304
2305static struct memstick_device_id msb_id_tbl[] = {
2306	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
2307	 MEMSTICK_CLASS_FLASH},
2308
2309	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
2310	 MEMSTICK_CLASS_ROM},
2311
2312	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
2313	 MEMSTICK_CLASS_RO},
2314
2315	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
2316	 MEMSTICK_CLASS_WP},
2317
2318	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_DUO, MEMSTICK_CATEGORY_STORAGE_DUO,
2319	 MEMSTICK_CLASS_DUO},
2320	{}
2321};
2322MODULE_DEVICE_TABLE(memstick, msb_id_tbl);
2323
2325static struct memstick_driver msb_driver = {
2326	.driver = {
2327		.name  = DRIVER_NAME,
2328		.owner = THIS_MODULE
2329	},
2330	.id_table = msb_id_tbl,
2331	.probe    = msb_probe,
2332	.remove   = msb_remove,
2333	.suspend  = msb_suspend,
2334	.resume   = msb_resume
2335};
2336
2337static int __init msb_init(void)
2338{
2339	int rc = memstick_register_driver(&msb_driver);
2340
2341	if (rc)
2342		pr_err("failed to register memstick driver (error %d)\n", rc);
2343
2344	return rc;
2345}
2346
2347static void __exit msb_exit(void)
2348{
2349	memstick_unregister_driver(&msb_driver);
2350	idr_destroy(&msb_disk_idr);
2351}
2352
2353module_init(msb_init);
2354module_exit(msb_exit);
2355
2356module_param(cache_flush_timeout, int, S_IRUGO);
2357MODULE_PARM_DESC(cache_flush_timeout,
2358				"Cache flush timeout in msec (1000 default)");
2359module_param(debug, int, S_IRUGO | S_IWUSR);
2360MODULE_PARM_DESC(debug, "Debug level (0-2)");
2361
2362module_param(verify_writes, bool, S_IRUGO);
2363MODULE_PARM_DESC(verify_writes, "Read back and check all data that is written");
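/*
 * Example (illustrative): loading the driver with a longer cache flush
 * timeout and write verification enabled:
 *
 *   modprobe ms_block cache_flush_timeout=5000 verify_writes=1
 */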
2364
2365MODULE_LICENSE("GPL");
2366MODULE_AUTHOR("Maxim Levitsky");
2367MODULE_DESCRIPTION("Sony MemoryStick block device driver");