// SPDX-License-Identifier: GPL-2.0-only
/*
 * ms_block.c - Sony MemoryStick (legacy) storage support
 *
 * Copyright (C) 2013 Maxim Levitsky <maximlevitsky@gmail.com>
 *
 * Minor portions of the driver were copied from mspro_block.c which is
 * Copyright (C) 2007 Alex Dubov <oakad@yahoo.com>
 */
#define DRIVER_NAME "ms_block"
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/memstick.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/bitmap.h>
#include <linux/scatterlist.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include "ms_block.h"

static int debug;
static int cache_flush_timeout = 1000;
static bool verify_writes;

/*
 * Copies a section of 'sg_from' starting at offset 'offset' with length
 * 'len' to another scatterlist of 'to_nents' entries
 */
static size_t msb_sg_copy(struct scatterlist *sg_from,
	struct scatterlist *sg_to, int to_nents, size_t offset, size_t len)
{
	size_t copied = 0;

	while (offset > 0) {
		if (offset >= sg_from->length) {
			if (sg_is_last(sg_from))
				return 0;

			offset -= sg_from->length;
			sg_from = sg_next(sg_from);
			continue;
		}

		copied = min(len, sg_from->length - offset);
		sg_set_page(sg_to, sg_page(sg_from),
			copied, sg_from->offset + offset);

		len -= copied;
		offset = 0;

		if (sg_is_last(sg_from) || !len)
			goto out;

		sg_to = sg_next(sg_to);
		to_nents--;
		sg_from = sg_next(sg_from);
	}

	while (len > sg_from->length && to_nents--) {
		len -= sg_from->length;
		copied += sg_from->length;

		sg_set_page(sg_to, sg_page(sg_from),
				sg_from->length, sg_from->offset);

		if (sg_is_last(sg_from) || !len)
			goto out;

		sg_from = sg_next(sg_from);
		sg_to = sg_next(sg_to);
	}

	if (len && to_nents) {
		sg_set_page(sg_to, sg_page(sg_from), len, sg_from->offset);
		copied += len;
	}
out:
	sg_mark_end(sg_to);
	return copied;
}
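
/*
 * Illustration (not part of the driver): carving one page out of a
 * request scatterlist, the way h_msb_read_page does it:
 *
 *	struct scatterlist sg[2];
 *
 *	sg_init_table(sg, ARRAY_SIZE(sg));
 *	msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
 *		    msb->current_sg_offset, msb->page_size);
 *
 * E.g. with 3072-byte source entries, offset 4096 and len 2048, the
 * first entry is skipped entirely (4096 >= 3072), the window starts
 * 1024 bytes into the second entry, and a single destination entry of
 * min(2048, 3072 - 1024) = 2048 bytes is emitted; the return value is
 * the number of bytes actually described (2048 here).
 */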

/*
 * Compares a section of 'sg' starting at offset 'offset' with length 'len'
 * to a linear buffer of length 'len' at address 'buffer'
 * Returns 0 if equal and -1 otherwise
 */
static int msb_sg_compare_to_buffer(struct scatterlist *sg,
					size_t offset, u8 *buffer, size_t len)
{
	int retval = 0, cmplen;
	struct sg_mapping_iter miter;

	sg_miter_start(&miter, sg, sg_nents(sg),
					SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	while (sg_miter_next(&miter) && len > 0) {
		if (offset >= miter.length) {
			offset -= miter.length;
			continue;
		}

		cmplen = min(miter.length - offset, len);
		retval = memcmp(miter.addr + offset, buffer, cmplen) ? -1 : 0;
		if (retval)
			break;

		buffer += cmplen;
		len -= cmplen;
		offset = 0;
	}

	if (!retval && len)
		retval = -1;

	sg_miter_stop(&miter);
	return retval;
}


/* Get zone at which block with logical address 'lba' lives
 * Flash is broken into zones.
 * Each zone consists of 512 eraseblocks; the first zone has 494 usable
 * blocks and every following zone has 496.
 * Therefore zone #0 hosts blocks 0-493, zone #1 blocks 494-989, etc...
 */
static int msb_get_zone_from_lba(int lba)
{
	if (lba < 494)
		return 0;
	return ((lba - 494) / 496) + 1;
}
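
/*
 * Worked example (illustration only): lba 493 is still zone 0;
 * lba 494 gives ((494 - 494) / 496) + 1 = 1; lba 989 gives
 * (495 / 496) + 1 = 1, the last block of zone 1; and lba 990 gives
 * (496 / 496) + 1 = 2, the first block of zone 2.
 */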

/* Get zone of physical block. Trivial */
static int msb_get_zone_from_pba(int pba)
{
	return pba / MS_BLOCKS_IN_ZONE;
}

/* Debug test to validate free block counts */
static int msb_validate_used_block_bitmap(struct msb_data *msb)
{
	int total_free_blocks = 0;
	int i;

	if (!debug)
		return 0;

	for (i = 0; i < msb->zone_count; i++)
		total_free_blocks += msb->free_block_count[i];

	if (msb->block_count - bitmap_weight(msb->used_blocks_bitmap,
					msb->block_count) == total_free_blocks)
		return 0;

	pr_err("BUG: free block counts don't match the bitmap");
	msb->read_only = true;
	return -EINVAL;
}

/* Mark physical block as used */
static void msb_mark_block_used(struct msb_data *msb, int pba)
{
	int zone = msb_get_zone_from_pba(pba);

	if (test_bit(pba, msb->used_blocks_bitmap)) {
		pr_err("BUG: attempt to mark already used pba %d as used", pba);
		msb->read_only = true;
		return;
	}

	if (msb_validate_used_block_bitmap(msb))
		return;

	/* No races because all IO is single threaded */
	__set_bit(pba, msb->used_blocks_bitmap);
	msb->free_block_count[zone]--;
}

/* Mark physical block as free */
static void msb_mark_block_unused(struct msb_data *msb, int pba)
{
	int zone = msb_get_zone_from_pba(pba);

	if (!test_bit(pba, msb->used_blocks_bitmap)) {
		pr_err("BUG: attempt to mark already unused pba %d as unused", pba);
		msb->read_only = true;
		return;
	}

	if (msb_validate_used_block_bitmap(msb))
		return;

	/* No races because all IO is single threaded */
	__clear_bit(pba, msb->used_blocks_bitmap);
	msb->free_block_count[zone]++;
}

/* Invalidate current register window */
static void msb_invalidate_reg_window(struct msb_data *msb)
{
	msb->reg_addr.w_offset = offsetof(struct ms_register, id);
	msb->reg_addr.w_length = sizeof(struct ms_id_register);
	msb->reg_addr.r_offset = offsetof(struct ms_register, id);
	msb->reg_addr.r_length = sizeof(struct ms_id_register);
	msb->addr_valid = false;
}

/* Start a state machine */
static int msb_run_state_machine(struct msb_data *msb, int (*state_func)
		(struct memstick_dev *card, struct memstick_request **req))
{
	struct memstick_dev *card = msb->card;

	WARN_ON(msb->state != -1);
	msb->int_polling = false;
	msb->state = 0;
	msb->exit_error = 0;

	memset(&card->current_mrq, 0, sizeof(card->current_mrq));

	card->next_request = state_func;
	memstick_new_req(card->host);
	wait_for_completion(&card->mrq_complete);

	WARN_ON(msb->state != -1);
	return msb->exit_error;
}

/* State machines call this to exit */
static int msb_exit_state_machine(struct msb_data *msb, int error)
{
	WARN_ON(msb->state == -1);

	msb->state = -1;
	msb->exit_error = error;
	msb->card->next_request = h_msb_default_bad;

	/* Invalidate reg window on errors */
	if (error)
		msb_invalidate_reg_window(msb);

	complete(&msb->card->mrq_complete);
	return -ENXIO;
}
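
/*
 * Illustration (not part of the driver): the contract between
 * msb_run_state_machine() and a handler.  The handler is re-entered
 * once per completed request; it either queues the next request via
 * *out_mrq and returns 0, or finishes via msb_exit_state_machine(),
 * which wakes the waiter in msb_run_state_machine():
 *
 *	static int h_example(struct memstick_dev *card,
 *		struct memstick_request **out_mrq)
 *	{
 *		struct msb_data *msb = memstick_get_drvdata(card);
 *		struct memstick_request *mrq = *out_mrq = &card->current_mrq;
 *
 *		if (mrq->error)
 *			return msb_exit_state_machine(msb, mrq->error);
 *
 *		switch (msb->state) {
 *		case 0: // hypothetical first state
 *			memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
 *			msb->state = 1;
 *			return 0;
 *		case 1: // hypothetical final state
 *			return msb_exit_state_machine(msb, 0);
 *		}
 *		BUG();
 *	}
 */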

/* read INT register */
static int msb_read_int_reg(struct msb_data *msb, long timeout)
{
	struct memstick_request *mrq = &msb->card->current_mrq;

	WARN_ON(msb->state == -1);

	if (!msb->int_polling) {
		msb->int_timeout = jiffies +
			msecs_to_jiffies(timeout == -1 ? 500 : timeout);
		msb->int_polling = true;
	} else if (time_after(jiffies, msb->int_timeout)) {
		mrq->data[0] = MEMSTICK_INT_CMDNAK;
		return 0;
	}

	if ((msb->caps & MEMSTICK_CAP_AUTO_GET_INT) &&
				mrq->need_card_int && !mrq->error) {
		mrq->data[0] = mrq->int_reg;
		mrq->need_card_int = false;
		return 0;
	} else {
		memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
		return 1;
	}
}

/* Read a register */
static int msb_read_regs(struct msb_data *msb, int offset, int len)
{
	struct memstick_request *req = &msb->card->current_mrq;

	if (msb->reg_addr.r_offset != offset ||
	    msb->reg_addr.r_length != len || !msb->addr_valid) {

		msb->reg_addr.r_offset = offset;
		msb->reg_addr.r_length = len;
		msb->addr_valid = true;

		memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
			&msb->reg_addr, sizeof(msb->reg_addr));
		return 0;
	}

	memstick_init_req(req, MS_TPC_READ_REG, NULL, len);
	return 1;
}

/* Write a card register */
static int msb_write_regs(struct msb_data *msb, int offset, int len, void *buf)
{
	struct memstick_request *req = &msb->card->current_mrq;

	if (msb->reg_addr.w_offset != offset ||
	    msb->reg_addr.w_length != len || !msb->addr_valid) {

		msb->reg_addr.w_offset = offset;
		msb->reg_addr.w_length = len;
		msb->addr_valid = true;

		memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
			&msb->reg_addr, sizeof(msb->reg_addr));
		return 0;
	}

	memstick_init_req(req, MS_TPC_WRITE_REG, buf, len);
	return 1;
}
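
/*
 * Illustration (not part of the driver): why handlers treat a zero
 * return from msb_read_regs()/msb_write_regs() as "stay in this state".
 * When the register window doesn't match, the helper queues a
 * MS_TPC_SET_RW_REG_ADRS request instead of the actual transfer, so the
 * handler re-runs the same state on the next pass and the helper then
 * queues the real READ_REG/WRITE_REG:
 *
 *	case SOME_STATE: // hypothetical state name
 *		if (!msb_write_regs(msb, offset, len, buf))
 *			return 0; // window updated, retry this state
 *		msb->state = NEXT_STATE;
 *		return 0;
 */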

/* Handler for absence of IO */
static int h_msb_default_bad(struct memstick_dev *card,
					struct memstick_request **mrq)
{
	return -ENXIO;
}

/*
 * This function is a handler for reads of one page from device.
 * Writes output to msb->current_sg, takes the sector address from
 * msb->regs.param.
 * Can also be used to read extra data only. Set params accordingly.
 */
static int h_msb_read_page(struct memstick_dev *card,
					struct memstick_request **out_mrq)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
	struct scatterlist sg[2];
	u8 command, intreg;

	if (mrq->error) {
		dbg("read_page, unknown error");
		return msb_exit_state_machine(msb, mrq->error);
	}
again:
	switch (msb->state) {
	case MSB_RP_SEND_BLOCK_ADDRESS:
		/* msb_write_regs sometimes "fails" because it needs to update
		 * the reg window, and thus it returns a request for that.
		 * Then we stay in this state and retry
		 */
		if (!msb_write_regs(msb,
			offsetof(struct ms_register, param),
			sizeof(struct ms_param_register),
			(unsigned char *)&msb->regs.param))
			return 0;

		msb->state = MSB_RP_SEND_READ_COMMAND;
		return 0;

	case MSB_RP_SEND_READ_COMMAND:
		command = MS_CMD_BLOCK_READ;
		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
		msb->state = MSB_RP_SEND_INT_REQ;
		return 0;

	case MSB_RP_SEND_INT_REQ:
		msb->state = MSB_RP_RECEIVE_INT_REQ_RESULT;
		/* If we don't actually need to send the INT read request
		 * (only needed in serial mode), just fall through
		 */
		if (msb_read_int_reg(msb, -1))
			return 0;
		fallthrough;

	case MSB_RP_RECEIVE_INT_REQ_RESULT:
		intreg = mrq->data[0];
		msb->regs.status.interrupt = intreg;

		if (intreg & MEMSTICK_INT_CMDNAK)
			return msb_exit_state_machine(msb, -EIO);

		if (!(intreg & MEMSTICK_INT_CED)) {
			msb->state = MSB_RP_SEND_INT_REQ;
			goto again;
		}

		msb->int_polling = false;
		msb->state = (intreg & MEMSTICK_INT_ERR) ?
			MSB_RP_SEND_READ_STATUS_REG : MSB_RP_SEND_OOB_READ;
		goto again;

	case MSB_RP_SEND_READ_STATUS_REG:
		/* read the status register to understand the source of INT_ERR */
		if (!msb_read_regs(msb,
			offsetof(struct ms_register, status),
			sizeof(struct ms_status_register)))
			return 0;

		msb->state = MSB_RP_RECEIVE_STATUS_REG;
		return 0;

	case MSB_RP_RECEIVE_STATUS_REG:
		msb->regs.status = *(struct ms_status_register *)mrq->data;
		msb->state = MSB_RP_SEND_OOB_READ;
		fallthrough;

	case MSB_RP_SEND_OOB_READ:
		if (!msb_read_regs(msb,
			offsetof(struct ms_register, extra_data),
			sizeof(struct ms_extra_data_register)))
			return 0;

		msb->state = MSB_RP_RECEIVE_OOB_READ;
		return 0;

	case MSB_RP_RECEIVE_OOB_READ:
		msb->regs.extra_data =
			*(struct ms_extra_data_register *) mrq->data;
		msb->state = MSB_RP_SEND_READ_DATA;
		fallthrough;

	case MSB_RP_SEND_READ_DATA:
		/* Skip this state if we only read the OOB */
		if (msb->regs.param.cp == MEMSTICK_CP_EXTRA) {
			msb->state = MSB_RP_RECEIVE_READ_DATA;
			goto again;
		}

		sg_init_table(sg, ARRAY_SIZE(sg));
		msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
			msb->current_sg_offset,
			msb->page_size);

		memstick_init_req_sg(mrq, MS_TPC_READ_LONG_DATA, sg);
		msb->state = MSB_RP_RECEIVE_READ_DATA;
		return 0;

	case MSB_RP_RECEIVE_READ_DATA:
		if (!(msb->regs.status.interrupt & MEMSTICK_INT_ERR)) {
			msb->current_sg_offset += msb->page_size;
			return msb_exit_state_machine(msb, 0);
		}

		if (msb->regs.status.status1 & MEMSTICK_UNCORR_ERROR) {
			dbg("read_page: uncorrectable error");
			return msb_exit_state_machine(msb, -EBADMSG);
		}

		if (msb->regs.status.status1 & MEMSTICK_CORR_ERROR) {
			dbg("read_page: correctable error");
			msb->current_sg_offset += msb->page_size;
			return msb_exit_state_machine(msb, -EUCLEAN);
		} else {
			dbg("read_page: INT error, but no status error bits");
			return msb_exit_state_machine(msb, -EIO);
		}
	}

	BUG();
}

/*
 * Handler for writes of exactly one block.
 * Takes the address from msb->regs.param.
 * Writes the same extra data to all pages of the block, taken from
 * msb->regs.extra_data.
 * Returns -EBADMSG if the write fails due to an uncorrectable error,
 * or -EIO if the device refuses to take the command or some other
 * error occurs
 */
static int h_msb_write_block(struct memstick_dev *card,
					struct memstick_request **out_mrq)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
	struct scatterlist sg[2];
	u8 intreg, command;

	if (mrq->error)
		return msb_exit_state_machine(msb, mrq->error);

again:
	switch (msb->state) {

	/* HACK: JMicron handling of TPCs between 8 and
	 * sizeof(memstick_request.data) is broken due to a hardware
	 * bug in PIO mode that is used for these TPCs
	 * Therefore split the write
	 */

	case MSB_WB_SEND_WRITE_PARAMS:
		if (!msb_write_regs(msb,
			offsetof(struct ms_register, param),
			sizeof(struct ms_param_register),
			&msb->regs.param))
			return 0;

		msb->state = MSB_WB_SEND_WRITE_OOB;
		return 0;

	case MSB_WB_SEND_WRITE_OOB:
		if (!msb_write_regs(msb,
			offsetof(struct ms_register, extra_data),
			sizeof(struct ms_extra_data_register),
			&msb->regs.extra_data))
			return 0;
		msb->state = MSB_WB_SEND_WRITE_COMMAND;
		return 0;

	case MSB_WB_SEND_WRITE_COMMAND:
		command = MS_CMD_BLOCK_WRITE;
		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
		msb->state = MSB_WB_SEND_INT_REQ;
		return 0;

	case MSB_WB_SEND_INT_REQ:
		msb->state = MSB_WB_RECEIVE_INT_REQ;
		if (msb_read_int_reg(msb, -1))
			return 0;
		fallthrough;

	case MSB_WB_RECEIVE_INT_REQ:
		intreg = mrq->data[0];
		msb->regs.status.interrupt = intreg;

		/* errors mean we should get out of here, and fast... */
		if (intreg & (MEMSTICK_INT_CMDNAK))
			return msb_exit_state_machine(msb, -EIO);

		if (intreg & MEMSTICK_INT_ERR)
			return msb_exit_state_machine(msb, -EBADMSG);

		/* for the last page we need to poll CED */
		if (msb->current_page == msb->pages_in_block) {
			if (intreg & MEMSTICK_INT_CED)
				return msb_exit_state_machine(msb, 0);
			msb->state = MSB_WB_SEND_INT_REQ;
			goto again;
		}

		/* for a non-last page we need BREQ before writing the next chunk */
		if (!(intreg & MEMSTICK_INT_BREQ)) {
			msb->state = MSB_WB_SEND_INT_REQ;
			goto again;
		}

		msb->int_polling = false;
		msb->state = MSB_WB_SEND_WRITE_DATA;
		fallthrough;

	case MSB_WB_SEND_WRITE_DATA:
		sg_init_table(sg, ARRAY_SIZE(sg));

		if (msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
			msb->current_sg_offset,
			msb->page_size) < msb->page_size)
			return msb_exit_state_machine(msb, -EIO);

		memstick_init_req_sg(mrq, MS_TPC_WRITE_LONG_DATA, sg);
		mrq->need_card_int = 1;
		msb->state = MSB_WB_RECEIVE_WRITE_CONFIRMATION;
		return 0;

	case MSB_WB_RECEIVE_WRITE_CONFIRMATION:
		msb->current_page++;
		msb->current_sg_offset += msb->page_size;
		msb->state = MSB_WB_SEND_INT_REQ;
		goto again;
	default:
		BUG();
	}

	return 0;
}

/*
 * This function is used to send simple IO requests to the device that
 * consist of a register write plus a command
 */
static int h_msb_send_command(struct memstick_dev *card,
					struct memstick_request **out_mrq)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
	u8 intreg;

	if (mrq->error) {
		dbg("send_command: unknown error");
		return msb_exit_state_machine(msb, mrq->error);
	}
again:
	switch (msb->state) {

	/* HACK: see h_msb_write_block */
	case MSB_SC_SEND_WRITE_PARAMS: /* write param register */
		if (!msb_write_regs(msb,
			offsetof(struct ms_register, param),
			sizeof(struct ms_param_register),
			&msb->regs.param))
			return 0;
		msb->state = MSB_SC_SEND_WRITE_OOB;
		return 0;

	case MSB_SC_SEND_WRITE_OOB:
		if (!msb->command_need_oob) {
			msb->state = MSB_SC_SEND_COMMAND;
			goto again;
		}

		if (!msb_write_regs(msb,
			offsetof(struct ms_register, extra_data),
			sizeof(struct ms_extra_data_register),
			&msb->regs.extra_data))
			return 0;

		msb->state = MSB_SC_SEND_COMMAND;
		return 0;

	case MSB_SC_SEND_COMMAND:
		memstick_init_req(mrq, MS_TPC_SET_CMD, &msb->command_value, 1);
		msb->state = MSB_SC_SEND_INT_REQ;
		return 0;

	case MSB_SC_SEND_INT_REQ:
		msb->state = MSB_SC_RECEIVE_INT_REQ;
		if (msb_read_int_reg(msb, -1))
			return 0;
		fallthrough;

	case MSB_SC_RECEIVE_INT_REQ:
		intreg = mrq->data[0];

		if (intreg & MEMSTICK_INT_CMDNAK)
			return msb_exit_state_machine(msb, -EIO);
		if (intreg & MEMSTICK_INT_ERR)
			return msb_exit_state_machine(msb, -EBADMSG);

		if (!(intreg & MEMSTICK_INT_CED)) {
			msb->state = MSB_SC_SEND_INT_REQ;
			goto again;
		}

		return msb_exit_state_machine(msb, 0);
	}

	BUG();
}

/* Small handler for card reset */
static int h_msb_reset(struct memstick_dev *card,
					struct memstick_request **out_mrq)
{
	u8 command = MS_CMD_RESET;
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_request *mrq = *out_mrq = &card->current_mrq;

	if (mrq->error)
		return msb_exit_state_machine(msb, mrq->error);

	switch (msb->state) {
	case MSB_RS_SEND:
		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
		mrq->need_card_int = 0;
		msb->state = MSB_RS_CONFIRM;
		return 0;
	case MSB_RS_CONFIRM:
		return msb_exit_state_machine(msb, 0);
	}
	BUG();
}

/* This handler is used to perform the serial->parallel switch */
static int h_msb_parallel_switch(struct memstick_dev *card,
					struct memstick_request **out_mrq)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
	struct memstick_host *host = card->host;

	if (mrq->error) {
		dbg("parallel_switch: error");
		msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
		return msb_exit_state_machine(msb, mrq->error);
	}

	switch (msb->state) {
	case MSB_PS_SEND_SWITCH_COMMAND:
		/* Set the parallel interface on memstick side */
		msb->regs.param.system |= MEMSTICK_SYS_PAM;

		if (!msb_write_regs(msb,
			offsetof(struct ms_register, param),
			1,
			(unsigned char *)&msb->regs.param))
			return 0;

		msb->state = MSB_PS_SWICH_HOST;
		return 0;

	case MSB_PS_SWICH_HOST:
		/* Set parallel interface on our side + send a dummy request
		 * to see if card responds
		 */
		host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_PAR4);
		memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
		msb->state = MSB_PS_CONFIRM;
		return 0;

	case MSB_PS_CONFIRM:
		return msb_exit_state_machine(msb, 0);
	}

	BUG();
}

static int msb_switch_to_parallel(struct msb_data *msb);

/* Reset the card, to guard against hw errors being treated as bad blocks */
static int msb_reset(struct msb_data *msb, bool full)
{

	bool was_parallel = msb->regs.param.system & MEMSTICK_SYS_PAM;
	struct memstick_dev *card = msb->card;
	struct memstick_host *host = card->host;
	int error;

	/* Reset the card */
	msb->regs.param.system = MEMSTICK_SYS_BAMD;

	if (full) {
		error = host->set_param(host,
					MEMSTICK_POWER, MEMSTICK_POWER_OFF);
		if (error)
			goto out_error;

		msb_invalidate_reg_window(msb);

		error = host->set_param(host,
					MEMSTICK_POWER, MEMSTICK_POWER_ON);
		if (error)
			goto out_error;

		error = host->set_param(host,
					MEMSTICK_INTERFACE, MEMSTICK_SERIAL);
		if (error) {
out_error:
			dbg("Failed to reset the host controller");
			msb->read_only = true;
			return -EFAULT;
		}
	}

	error = msb_run_state_machine(msb, h_msb_reset);
	if (error) {
		dbg("Failed to reset the card");
		msb->read_only = true;
		return -ENODEV;
	}

	/* Set parallel mode */
	if (was_parallel)
		msb_switch_to_parallel(msb);
	return 0;
}

/* Attempts to switch interface to parallel mode */
static int msb_switch_to_parallel(struct msb_data *msb)
{
	int error;

	error = msb_run_state_machine(msb, h_msb_parallel_switch);
	if (error) {
		pr_err("Switch to parallel failed");
		msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
		msb_reset(msb, true);
		return -EFAULT;
	}

	msb->caps |= MEMSTICK_CAP_AUTO_GET_INT;
	return 0;
}

/* Changes overwrite flag on a page */
static int msb_set_overwrite_flag(struct msb_data *msb,
						u16 pba, u8 page, u8 flag)
{
	if (msb->read_only)
		return -EROFS;

	msb->regs.param.block_address = cpu_to_be16(pba);
	msb->regs.param.page_address = page;
	msb->regs.param.cp = MEMSTICK_CP_OVERWRITE;
	msb->regs.extra_data.overwrite_flag = flag;
	msb->command_value = MS_CMD_BLOCK_WRITE;
	msb->command_need_oob = true;

	dbg_verbose("changing overwrite flag to %02x for sector %d, page %d",
							flag, pba, page);
	return msb_run_state_machine(msb, h_msb_send_command);
}

static int msb_mark_bad(struct msb_data *msb, int pba)
{
	pr_notice("marking pba %d as bad", pba);
	msb_reset(msb, true);
	return msb_set_overwrite_flag(
			msb, pba, 0, 0xFF & ~MEMSTICK_OVERWRITE_BKST);
}

static int msb_mark_page_bad(struct msb_data *msb, int pba, int page)
{
	dbg("marking page %d of pba %d as bad", page, pba);
	msb_reset(msb, true);
	return msb_set_overwrite_flag(msb,
			pba, page, ~MEMSTICK_OVERWRITE_PGST0);
}

/* Erases one physical block */
static int msb_erase_block(struct msb_data *msb, u16 pba)
{
	int error, try;

	if (msb->read_only)
		return -EROFS;

	dbg_verbose("erasing pba %d", pba);

	for (try = 1; try < 3; try++) {
		msb->regs.param.block_address = cpu_to_be16(pba);
		msb->regs.param.page_address = 0;
		msb->regs.param.cp = MEMSTICK_CP_BLOCK;
		msb->command_value = MS_CMD_BLOCK_ERASE;
		msb->command_need_oob = false;

		error = msb_run_state_machine(msb, h_msb_send_command);
		if (!error || msb_reset(msb, true))
			break;
	}

	if (error) {
		pr_err("erase failed, marking pba %d as bad", pba);
		msb_mark_bad(msb, pba);
	}

	dbg_verbose("erase success, marking pba %d as unused", pba);
	msb_mark_block_unused(msb, pba);
	__set_bit(pba, msb->erased_blocks_bitmap);
	return error;
}

/* Reads one page from device */
static int msb_read_page(struct msb_data *msb,
	u16 pba, u8 page, struct ms_extra_data_register *extra,
					struct scatterlist *sg, int offset)
{
	int try, error;

	if (pba == MS_BLOCK_INVALID) {
		unsigned long flags;
		struct sg_mapping_iter miter;
		size_t len = msb->page_size;

		dbg_verbose("read unmapped sector. returning 0xFF");

		local_irq_save(flags);
		sg_miter_start(&miter, sg, sg_nents(sg),
				SG_MITER_ATOMIC | SG_MITER_TO_SG);

		while (sg_miter_next(&miter) && len > 0) {

			int chunklen;

			if (offset && offset >= miter.length) {
				offset -= miter.length;
				continue;
			}

			chunklen = min(miter.length - offset, len);
			memset(miter.addr + offset, 0xFF, chunklen);
			len -= chunklen;
			offset = 0;
		}

		sg_miter_stop(&miter);
		local_irq_restore(flags);

		if (offset)
			return -EFAULT;

		if (extra)
			memset(extra, 0xFF, sizeof(*extra));
		return 0;
	}

	if (pba >= msb->block_count) {
		pr_err("BUG: attempt to read beyond the end of the card at pba %d", pba);
		return -EINVAL;
	}

	for (try = 1; try < 3; try++) {
		msb->regs.param.block_address = cpu_to_be16(pba);
		msb->regs.param.page_address = page;
		msb->regs.param.cp = MEMSTICK_CP_PAGE;

		msb->current_sg = sg;
		msb->current_sg_offset = offset;
		error = msb_run_state_machine(msb, h_msb_read_page);

		if (error == -EUCLEAN) {
			pr_notice("correctable error on pba %d, page %d",
				pba, page);
			error = 0;
		}

		if (!error && extra)
			*extra = msb->regs.extra_data;

		if (!error || msb_reset(msb, true))
			break;
	}

	/* Mark bad pages */
	if (error == -EBADMSG) {
		pr_err("uncorrectable error on read of pba %d, page %d",
			pba, page);

		if (msb->regs.extra_data.overwrite_flag &
					MEMSTICK_OVERWRITE_PGST0)
			msb_mark_page_bad(msb, pba, page);
		return -EBADMSG;
	}

	if (error)
		pr_err("read of pba %d, page %d failed with error %d",
			pba, page, error);
	return error;
}

/* Reads the OOB of a page only */
static int msb_read_oob(struct msb_data *msb, u16 pba, u16 page,
	struct ms_extra_data_register *extra)
{
	int error;

	BUG_ON(!extra);
	msb->regs.param.block_address = cpu_to_be16(pba);
	msb->regs.param.page_address = page;
	msb->regs.param.cp = MEMSTICK_CP_EXTRA;

	if (pba >= msb->block_count) {
		pr_err("BUG: attempt to read beyond the end of card at pba %d", pba);
		return -EINVAL;
	}

	error = msb_run_state_machine(msb, h_msb_read_page);
	*extra = msb->regs.extra_data;

	if (error == -EUCLEAN) {
		pr_notice("correctable error on pba %d, page %d",
			pba, page);
		return 0;
	}

	return error;
}

/* Reads a block and compares it with data contained in scatterlist orig_sg */
static int msb_verify_block(struct msb_data *msb, u16 pba,
				struct scatterlist *orig_sg, int offset)
{
	struct scatterlist sg;
	int page = 0, error;

	sg_init_one(&sg, msb->block_buffer, msb->block_size);

	while (page < msb->pages_in_block) {

		error = msb_read_page(msb, pba, page,
				NULL, &sg, page * msb->page_size);
		if (error)
			return error;
		page++;
	}

	if (msb_sg_compare_to_buffer(orig_sg, offset,
				msb->block_buffer, msb->block_size))
		return -EIO;
	return 0;
}

/* Writes exactly one block + oob */
static int msb_write_block(struct msb_data *msb,
			u16 pba, u32 lba, struct scatterlist *sg, int offset)
{
	int error, current_try = 1;

	BUG_ON(sg->length < msb->page_size);

	if (msb->read_only)
		return -EROFS;

	if (pba == MS_BLOCK_INVALID) {
		pr_err("BUG: write: attempt to write MS_BLOCK_INVALID block");
		return -EINVAL;
	}

	if (pba >= msb->block_count || lba >= msb->logical_block_count) {
		pr_err("BUG: write: attempt to write beyond the end of device");
		return -EINVAL;
	}

	if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
		pr_err("BUG: write: lba zone mismatch");
		return -EINVAL;
	}

	if (pba == msb->boot_block_locations[0] ||
		pba == msb->boot_block_locations[1]) {
		pr_err("BUG: write: attempt to write to boot blocks!");
		return -EINVAL;
	}

	while (1) {

		if (msb->read_only)
			return -EROFS;

		msb->regs.param.cp = MEMSTICK_CP_BLOCK;
		msb->regs.param.page_address = 0;
		msb->regs.param.block_address = cpu_to_be16(pba);

		msb->regs.extra_data.management_flag = 0xFF;
		msb->regs.extra_data.overwrite_flag = 0xF8;
		msb->regs.extra_data.logical_address = cpu_to_be16(lba);

		msb->current_sg = sg;
		msb->current_sg_offset = offset;
		msb->current_page = 0;

		error = msb_run_state_machine(msb, h_msb_write_block);

		/* The block we just wrote to is assumed to be erased, since
		 * its pba was erased before.  If it wasn't actually erased,
		 * the write will still "succeed", merely clearing the bits
		 * that were already set in the block; so verify that what we
		 * have written matches what we expect.
		 * Blocks that we erased ourselves are trusted.
		 */
		if (!error && (verify_writes ||
				!test_bit(pba, msb->erased_blocks_bitmap)))
			error = msb_verify_block(msb, pba, sg, offset);

		if (!error)
			break;

		if (current_try > 1 || msb_reset(msb, true))
			break;

		pr_err("write failed, trying to erase the pba %d", pba);
		error = msb_erase_block(msb, pba);
		if (error)
			break;

		current_try++;
	}
	return error;
}

/* Finds a free block for write replacement */
static u16 msb_get_free_block(struct msb_data *msb, int zone)
{
	u16 pos;
	int pba = zone * MS_BLOCKS_IN_ZONE;
	int i;

	get_random_bytes(&pos, sizeof(pos));

	if (!msb->free_block_count[zone]) {
		pr_err("no free blocks left in zone %d to use for a write (media is worn out), switching to RO mode", zone);
		msb->read_only = true;
		return MS_BLOCK_INVALID;
	}

	pos %= msb->free_block_count[zone];

	dbg_verbose("have %d choices for a free block, selected randomly: %d",
		msb->free_block_count[zone], pos);

	pba = find_next_zero_bit(msb->used_blocks_bitmap,
							msb->block_count, pba);
	for (i = 0; i < pos; ++i)
		pba = find_next_zero_bit(msb->used_blocks_bitmap,
						msb->block_count, pba + 1);

	dbg_verbose("result of the free blocks scan: pba %d", pba);

	if (pba == msb->block_count || (msb_get_zone_from_pba(pba)) != zone) {
		pr_err("BUG: can't get a free block");
		msb->read_only = true;
		return MS_BLOCK_INVALID;
	}

	msb_mark_block_used(msb, pba);
	return pba;
}
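
/*
 * Illustration (not part of the driver): wear levelling by random
 * choice.  If zone 3 has free_block_count[3] == 5, pos becomes a
 * uniform value in 0..4, and the loop walks zero bits of
 * used_blocks_bitmap starting at pba 3 * MS_BLOCKS_IN_ZONE, skipping
 * pos of them; e.g. pos == 2 picks the third free block of the zone.
 * Picking uniformly among the zone's free blocks spreads erase cycles
 * instead of always reusing the lowest-numbered free block.
 */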

static int msb_update_block(struct msb_data *msb, u16 lba,
	struct scatterlist *sg, int offset)
{
	u16 pba, new_pba;
	int error, try;

	pba = msb->lba_to_pba_table[lba];
	dbg_verbose("start of a block update at lba %d, pba %d", lba, pba);

	if (pba != MS_BLOCK_INVALID) {
		dbg_verbose("setting the update flag on the block");
		msb_set_overwrite_flag(msb, pba, 0,
				0xFF & ~MEMSTICK_OVERWRITE_UDST);
	}

	for (try = 0; try < 3; try++) {
		new_pba = msb_get_free_block(msb,
			msb_get_zone_from_lba(lba));

		if (new_pba == MS_BLOCK_INVALID) {
			error = -EIO;
			goto out;
		}

		dbg_verbose("block update: writing updated block to the pba %d",
								new_pba);
		error = msb_write_block(msb, new_pba, lba, sg, offset);
		if (error == -EBADMSG) {
			msb_mark_bad(msb, new_pba);
			continue;
		}

		if (error)
			goto out;

		dbg_verbose("block update: erasing the old block");
		msb_erase_block(msb, pba);
		msb->lba_to_pba_table[lba] = new_pba;
		return 0;
	}
out:
	if (error) {
		pr_err("block update error after %d tries, switching to r/o mode", try);
		msb->read_only = true;
	}
	return error;
}

/* Converts endianness in the boot block for easy use */
static void msb_fix_boot_page_endianness(struct ms_boot_page *p)
{
	p->header.block_id = be16_to_cpu(p->header.block_id);
	p->header.format_reserved = be16_to_cpu(p->header.format_reserved);
	p->entry.disabled_block.start_addr
		= be32_to_cpu(p->entry.disabled_block.start_addr);
	p->entry.disabled_block.data_size
		= be32_to_cpu(p->entry.disabled_block.data_size);
	p->entry.cis_idi.start_addr
		= be32_to_cpu(p->entry.cis_idi.start_addr);
	p->entry.cis_idi.data_size
		= be32_to_cpu(p->entry.cis_idi.data_size);
	p->attr.block_size = be16_to_cpu(p->attr.block_size);
	p->attr.number_of_blocks = be16_to_cpu(p->attr.number_of_blocks);
	p->attr.number_of_effective_blocks
		= be16_to_cpu(p->attr.number_of_effective_blocks);
	p->attr.page_size = be16_to_cpu(p->attr.page_size);
	p->attr.memory_manufacturer_code
		= be16_to_cpu(p->attr.memory_manufacturer_code);
	p->attr.memory_device_code = be16_to_cpu(p->attr.memory_device_code);
	p->attr.implemented_capacity
		= be16_to_cpu(p->attr.implemented_capacity);
	p->attr.controller_number = be16_to_cpu(p->attr.controller_number);
	p->attr.controller_function = be16_to_cpu(p->attr.controller_function);
}

static int msb_read_boot_blocks(struct msb_data *msb)
{
	int pba = 0;
	struct scatterlist sg;
	struct ms_extra_data_register extra;
	struct ms_boot_page *page;

	msb->boot_block_locations[0] = MS_BLOCK_INVALID;
	msb->boot_block_locations[1] = MS_BLOCK_INVALID;
	msb->boot_block_count = 0;

	dbg_verbose("Start of a scan for the boot blocks");

	if (!msb->boot_page) {
		page = kmalloc_array(2, sizeof(struct ms_boot_page),
				     GFP_KERNEL);
		if (!page)
			return -ENOMEM;

		msb->boot_page = page;
	} else
		page = msb->boot_page;

	msb->block_count = MS_BLOCK_MAX_BOOT_ADDR;

	for (pba = 0; pba < MS_BLOCK_MAX_BOOT_ADDR; pba++) {

		sg_init_one(&sg, page, sizeof(*page));
		if (msb_read_page(msb, pba, 0, &extra, &sg, 0)) {
			dbg("boot scan: can't read pba %d", pba);
			continue;
		}

		if (extra.management_flag & MEMSTICK_MANAGEMENT_SYSFLG) {
			dbg("management flag doesn't indicate boot block %d",
									pba);
			continue;
		}

		if (be16_to_cpu(page->header.block_id) != MS_BLOCK_BOOT_ID) {
			dbg("the pba at %d doesn't contain boot block ID", pba);
			continue;
		}

		msb_fix_boot_page_endianness(page);
		msb->boot_block_locations[msb->boot_block_count] = pba;

		page++;
		msb->boot_block_count++;

		if (msb->boot_block_count == 2)
			break;
	}

	if (!msb->boot_block_count) {
		pr_err("media doesn't contain master page, aborting");
		return -EIO;
	}

	dbg_verbose("End of scan for boot blocks");
	return 0;
}

static int msb_read_bad_block_table(struct msb_data *msb, int block_nr)
{
	struct ms_boot_page *boot_block;
	struct scatterlist sg;
	u16 *buffer = NULL;
	int offset = 0;
	int i, error = 0;
	int data_size, data_offset, page, page_offset, size_to_read;
	u16 pba;

	BUG_ON(block_nr > 1);
	boot_block = &msb->boot_page[block_nr];
	pba = msb->boot_block_locations[block_nr];

	if (msb->boot_block_locations[block_nr] == MS_BLOCK_INVALID)
		return -EINVAL;

	data_size = boot_block->entry.disabled_block.data_size;
	data_offset = sizeof(struct ms_boot_page) +
			boot_block->entry.disabled_block.start_addr;
	if (!data_size)
		return 0;

	page = data_offset / msb->page_size;
	page_offset = data_offset % msb->page_size;
	size_to_read =
		DIV_ROUND_UP(data_size + page_offset, msb->page_size) *
			msb->page_size;

	dbg("reading bad block table of boot block at pba %d, offset %d len %d",
		pba, data_offset, data_size);

	buffer = kzalloc(size_to_read, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* Read the buffer */
	sg_init_one(&sg, buffer, size_to_read);

	while (offset < size_to_read) {
		error = msb_read_page(msb, pba, page, NULL, &sg, offset);
		if (error)
			goto out;

		page++;
		offset += msb->page_size;

		if (page == msb->pages_in_block) {
			pr_err("bad block table extends beyond the boot block");
			break;
		}
	}

	/* Process the bad block table */
	for (i = page_offset; i < data_size / sizeof(u16); i++) {

		u16 bad_block = be16_to_cpu(buffer[i]);

		if (bad_block >= msb->block_count) {
			dbg("bad block table contains invalid block %d",
								bad_block);
			continue;
		}

		if (test_bit(bad_block, msb->used_blocks_bitmap)) {
			dbg("duplicate bad block %d in the table",
				bad_block);
			continue;
		}

		dbg("block %d is marked as factory bad", bad_block);
		msb_mark_block_used(msb, bad_block);
	}
out:
	kfree(buffer);
	return error;
}

static int msb_ftl_initialize(struct msb_data *msb)
{
	int i;

	if (msb->ftl_initialized)
		return 0;

	msb->zone_count = msb->block_count / MS_BLOCKS_IN_ZONE;
	msb->logical_block_count = msb->zone_count * 496 - 2;

	msb->used_blocks_bitmap = bitmap_zalloc(msb->block_count, GFP_KERNEL);
	msb->erased_blocks_bitmap = bitmap_zalloc(msb->block_count, GFP_KERNEL);
	msb->lba_to_pba_table =
		kmalloc_array(msb->logical_block_count, sizeof(u16),
			      GFP_KERNEL);

	if (!msb->used_blocks_bitmap || !msb->lba_to_pba_table ||
						!msb->erased_blocks_bitmap) {
		bitmap_free(msb->used_blocks_bitmap);
		bitmap_free(msb->erased_blocks_bitmap);
		kfree(msb->lba_to_pba_table);
		return -ENOMEM;
	}

	for (i = 0; i < msb->zone_count; i++)
		msb->free_block_count[i] = MS_BLOCKS_IN_ZONE;

	memset(msb->lba_to_pba_table, MS_BLOCK_INVALID,
			msb->logical_block_count * sizeof(u16));

	dbg("initial FTL tables created. Zone count = %d, Logical block count = %d",
		msb->zone_count, msb->logical_block_count);

	msb->ftl_initialized = true;
	return 0;
}

static int msb_ftl_scan(struct msb_data *msb)
{
	u16 pba, lba, other_block;
	u8 overwrite_flag, management_flag, other_overwrite_flag;
	int error;
	struct ms_extra_data_register extra;
	u8 *overwrite_flags = kzalloc(msb->block_count, GFP_KERNEL);

	if (!overwrite_flags)
		return -ENOMEM;

	dbg("Start of media scanning");
	for (pba = 0; pba < msb->block_count; pba++) {

		if (pba == msb->boot_block_locations[0] ||
			pba == msb->boot_block_locations[1]) {
			dbg_verbose("pba %05d -> [boot block]", pba);
			msb_mark_block_used(msb, pba);
			continue;
		}

		if (test_bit(pba, msb->used_blocks_bitmap)) {
			dbg_verbose("pba %05d -> [factory bad]", pba);
			continue;
		}

		memset(&extra, 0, sizeof(extra));
		error = msb_read_oob(msb, pba, 0, &extra);

		/* can't trust the page if we can't read the oob */
		if (error == -EBADMSG) {
			pr_notice(
			"oob of pba %d damaged, will try to erase it", pba);
			msb_mark_block_used(msb, pba);
			msb_erase_block(msb, pba);
			continue;
		} else if (error) {
			pr_err("unknown error %d on read of oob of pba %d - aborting",
				error, pba);

			kfree(overwrite_flags);
			return error;
		}

		lba = be16_to_cpu(extra.logical_address);
		management_flag = extra.management_flag;
		overwrite_flag = extra.overwrite_flag;
		overwrite_flags[pba] = overwrite_flag;

		/* Skip bad blocks */
		if (!(overwrite_flag & MEMSTICK_OVERWRITE_BKST)) {
			dbg("pba %05d -> [BAD]", pba);
			msb_mark_block_used(msb, pba);
			continue;
		}

		/* Skip system/drm blocks */
		if ((management_flag & MEMSTICK_MANAGEMENT_FLAG_NORMAL) !=
			MEMSTICK_MANAGEMENT_FLAG_NORMAL) {
			dbg("pba %05d -> [reserved management flag %02x]",
							pba, management_flag);
			msb_mark_block_used(msb, pba);
			continue;
		}

		/* Erase temporary tables */
		if (!(management_flag & MEMSTICK_MANAGEMENT_ATFLG)) {
			dbg("pba %05d -> [temp table] - will erase", pba);

			msb_mark_block_used(msb, pba);
			msb_erase_block(msb, pba);
			continue;
		}

		if (lba == MS_BLOCK_INVALID) {
			dbg_verbose("pba %05d -> [free]", pba);
			continue;
		}

		msb_mark_block_used(msb, pba);

		/* Block has an LBA that disagrees with its zone */
		if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
			pr_notice("pba %05d -> [bad lba %05d] - will erase",
								pba, lba);
			msb_erase_block(msb, pba);
			continue;
		}

		/* No collisions - great */
		if (msb->lba_to_pba_table[lba] == MS_BLOCK_INVALID) {
			dbg_verbose("pba %05d -> [lba %05d]", pba, lba);
			msb->lba_to_pba_table[lba] = pba;
			continue;
		}

		other_block = msb->lba_to_pba_table[lba];
		other_overwrite_flag = overwrite_flags[other_block];

		pr_notice("Collision between pba %d and pba %d",
			pba, other_block);

		if (!(overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
			pr_notice("pba %d is marked as stable, use it", pba);
			msb_erase_block(msb, other_block);
			msb->lba_to_pba_table[lba] = pba;
			continue;
		}

		if (!(other_overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
			pr_notice("pba %d is marked as stable, use it",
								other_block);
			msb_erase_block(msb, pba);
			continue;
		}

		pr_notice("collision between blocks %d and %d, without stable flag set on both, erasing pba %d",
				pba, other_block, other_block);

		msb_erase_block(msb, other_block);
		msb->lba_to_pba_table[lba] = pba;
	}

	dbg("End of media scanning");
	kfree(overwrite_flags);
	return 0;
}

static void msb_cache_flush_timer(struct timer_list *t)
{
	struct msb_data *msb = from_timer(msb, t, cache_flush_timer);

	msb->need_flush_cache = true;
	queue_work(msb->io_queue, &msb->io_work);
}

static void msb_cache_discard(struct msb_data *msb)
{
	if (msb->cache_block_lba == MS_BLOCK_INVALID)
		return;

	del_timer_sync(&msb->cache_flush_timer);

	dbg_verbose("Discarding the write cache");
	msb->cache_block_lba = MS_BLOCK_INVALID;
	bitmap_zero(&msb->valid_cache_bitmap, msb->pages_in_block);
}

static int msb_cache_init(struct msb_data *msb)
{
	timer_setup(&msb->cache_flush_timer, msb_cache_flush_timer, 0);

	if (!msb->cache)
		msb->cache = kzalloc(msb->block_size, GFP_KERNEL);
	if (!msb->cache)
		return -ENOMEM;

	msb_cache_discard(msb);
	return 0;
}

static int msb_cache_flush(struct msb_data *msb)
{
	struct scatterlist sg;
	struct ms_extra_data_register extra;
	int page, offset, error;
	u16 pba, lba;

	if (msb->read_only)
		return -EROFS;

	if (msb->cache_block_lba == MS_BLOCK_INVALID)
		return 0;

	lba = msb->cache_block_lba;
	pba = msb->lba_to_pba_table[lba];

	dbg_verbose("Flushing the write cache of pba %d (LBA %d)",
						pba, msb->cache_block_lba);

	sg_init_one(&sg, msb->cache, msb->block_size);

	/* Read all missing pages in cache */
	for (page = 0; page < msb->pages_in_block; page++) {

		if (test_bit(page, &msb->valid_cache_bitmap))
			continue;

		offset = page * msb->page_size;

		dbg_verbose("reading non-present sector %d of cache block %d",
			page, lba);
		error = msb_read_page(msb, pba, page, &extra, &sg, offset);

		/* Bad pages are copied with 00 page status */
		if (error == -EBADMSG) {
			pr_err("read error on sector %d, contents probably damaged", page);
			continue;
		}

		if (error)
			return error;

		if ((extra.overwrite_flag & MEMSTICK_OV_PG_NORMAL) !=
							MEMSTICK_OV_PG_NORMAL) {
			dbg("page %d is marked as bad", page);
			continue;
		}

		set_bit(page, &msb->valid_cache_bitmap);
	}

	/* Write the cache now */
	error = msb_update_block(msb, msb->cache_block_lba, &sg, 0);
	pba = msb->lba_to_pba_table[msb->cache_block_lba];

	/* Mark invalid pages */
	if (!error) {
		for (page = 0; page < msb->pages_in_block; page++) {

			if (test_bit(page, &msb->valid_cache_bitmap))
				continue;

			dbg("marking page %d as containing damaged data",
				page);
			msb_set_overwrite_flag(msb,
				pba, page, 0xFF & ~MEMSTICK_OV_PG_NORMAL);
		}
	}

	msb_cache_discard(msb);
	return error;
}

static int msb_cache_write(struct msb_data *msb, int lba,
	int page, bool add_to_cache_only, struct scatterlist *sg, int offset)
{
	int error;
	struct scatterlist sg_tmp[10];

	if (msb->read_only)
		return -EROFS;

	if (msb->cache_block_lba == MS_BLOCK_INVALID ||
						lba != msb->cache_block_lba)
		if (add_to_cache_only)
			return 0;

	/* If we need to write a different block */
	if (msb->cache_block_lba != MS_BLOCK_INVALID &&
						lba != msb->cache_block_lba) {
		dbg_verbose("first flush the cache");
		error = msb_cache_flush(msb);
		if (error)
			return error;
	}

	if (msb->cache_block_lba == MS_BLOCK_INVALID) {
		msb->cache_block_lba = lba;
		mod_timer(&msb->cache_flush_timer,
			jiffies + msecs_to_jiffies(cache_flush_timeout));
	}

	dbg_verbose("Write of LBA %d page %d to cache", lba, page);

	sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
	msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp), offset, msb->page_size);

	sg_copy_to_buffer(sg_tmp, sg_nents(sg_tmp),
		msb->cache + page * msb->page_size, msb->page_size);

	set_bit(page, &msb->valid_cache_bitmap);
	return 0;
}

static int msb_cache_read(struct msb_data *msb, int lba,
				int page, struct scatterlist *sg, int offset)
{
	int pba = msb->lba_to_pba_table[lba];
	struct scatterlist sg_tmp[10];
	int error = 0;

	if (lba == msb->cache_block_lba &&
	    test_bit(page, &msb->valid_cache_bitmap)) {

		dbg_verbose("Read of LBA %d (pba %d) sector %d from cache",
							lba, pba, page);

		sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
		msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp),
			offset, msb->page_size);
		sg_copy_from_buffer(sg_tmp, sg_nents(sg_tmp),
			msb->cache + msb->page_size * page,
			msb->page_size);
	} else {
		dbg_verbose("Read of LBA %d (pba %d) sector %d from device",
							lba, pba, page);

		error = msb_read_page(msb, pba, page, NULL, sg, offset);
		if (error)
			return error;

		msb_cache_write(msb, lba, page, true, sg, offset);
	}
	return error;
}

/* Emulated geometry table
 * This table's content isn't that important;
 * one could put different values here, provided that they still
 * cover the whole disk.
 * The 64 MB entry is what Windows reports for my 64M memstick
 */

static const struct chs_entry chs_table[] = {
/*        size sectors cylinders  heads */
	{ 4,    16,    247,       2  },
	{ 8,    16,    495,       2  },
	{ 16,   16,    495,       4  },
	{ 32,   16,    991,       4  },
	{ 64,   16,    991,       8  },
	{128,   16,    991,       16 },
	{ 0 }
};
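
/*
 * Sanity check (illustration only): the 64 MB row describes
 * 16 sectors * 991 cylinders * 8 heads = 126848 sectors, i.e.
 * 126848 * 512 = 64946176 bytes, about 64.9 MB in decimal units,
 * which indeed covers a nominal 64 MB stick.
 */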

/* Load information about the card */
static int msb_init_card(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_host *host = card->host;
	struct ms_boot_page *boot_block;
	int error = 0, i, raw_size_in_megs;

	msb->caps = 0;

	if (card->id.class >= MEMSTICK_CLASS_ROM &&
				card->id.class <= MEMSTICK_CLASS_ROM)
		msb->read_only = true;

	msb->state = -1;
	error = msb_reset(msb, false);
	if (error)
		return error;

	/* Due to a bug in the JMicron driver written by Alex Dubov,
	 * its serial mode barely works,
	 * so we switch to parallel mode right away
	 */
	if (host->caps & MEMSTICK_CAP_PAR4)
		msb_switch_to_parallel(msb);

	msb->page_size = sizeof(struct ms_boot_page);

	/* Read the boot page */
	error = msb_read_boot_blocks(msb);
	if (error)
		return -EIO;

	boot_block = &msb->boot_page[0];

	/* Save interesting attributes from boot page */
	msb->block_count = boot_block->attr.number_of_blocks;
	msb->page_size = boot_block->attr.page_size;

	msb->pages_in_block = boot_block->attr.block_size * 2;
	msb->block_size = msb->page_size * msb->pages_in_block;

	if ((size_t)msb->page_size > PAGE_SIZE) {
		/* this isn't supported by Linux at all, anyway */
		dbg("device page %d size isn't supported", msb->page_size);
		return -EINVAL;
	}

	msb->block_buffer = kzalloc(msb->block_size, GFP_KERNEL);
	if (!msb->block_buffer)
		return -ENOMEM;

	raw_size_in_megs = (msb->block_size * msb->block_count) >> 20;

	for (i = 0; chs_table[i].size; i++) {

		if (chs_table[i].size != raw_size_in_megs)
			continue;

		msb->geometry.cylinders = chs_table[i].cyl;
		msb->geometry.heads = chs_table[i].head;
		msb->geometry.sectors = chs_table[i].sec;
		break;
	}

	if (boot_block->attr.transfer_supporting == 1)
		msb->caps |= MEMSTICK_CAP_PAR4;

	if (boot_block->attr.device_type & 0x03)
		msb->read_only = true;

	dbg("Total block count = %d", msb->block_count);
	dbg("Each block consists of %d pages", msb->pages_in_block);
	dbg("Page size = %d bytes", msb->page_size);
	dbg("Parallel mode supported: %d", !!(msb->caps & MEMSTICK_CAP_PAR4));
	dbg("Read only: %d", msb->read_only);

#if 0
	/* Now we can switch the interface */
	if (host->caps & msb->caps & MEMSTICK_CAP_PAR4)
		msb_switch_to_parallel(msb);
#endif

	error = msb_cache_init(msb);
	if (error)
		return error;

	error = msb_ftl_initialize(msb);
	if (error)
		return error;

	/* Read the bad block table */
	error = msb_read_bad_block_table(msb, 0);

	if (error && error != -ENOMEM) {
		dbg("failed to read bad block table from primary boot block, trying from backup");
		error = msb_read_bad_block_table(msb, 1);
	}

	if (error)
		return error;

	/* *drum roll* Scan the media */
	error = msb_ftl_scan(msb);
	if (error) {
		pr_err("Scan of media failed");
		return error;
	}

	return 0;
}

static int msb_do_write_request(struct msb_data *msb, int lba,
	int page, struct scatterlist *sg, size_t len, int *successfully_written)
{
	int error = 0;
	off_t offset = 0;
	*successfully_written = 0;

	while (offset < len) {
		if (page == 0 && len - offset >= msb->block_size) {

			if (msb->cache_block_lba == lba)
				msb_cache_discard(msb);

			dbg_verbose("Writing whole lba %d", lba);
			error = msb_update_block(msb, lba, sg, offset);
			if (error)
				return error;

			offset += msb->block_size;
			*successfully_written += msb->block_size;
			lba++;
			continue;
		}

		error = msb_cache_write(msb, lba, page, false, sg, offset);
		if (error)
			return error;

		offset += msb->page_size;
		*successfully_written += msb->page_size;

		page++;
		if (page == msb->pages_in_block) {
			page = 0;
			lba++;
		}
	}
	return 0;
}
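
/*
 * Illustration (not part of the driver): the whole-block fast path.
 * With a hypothetical 512-byte page and 32 pages per block
 * (block_size = 16384), a 32 KiB request aligned at page 0 takes the
 * msb_update_block() branch twice and never touches the cache, while
 * a 2 KiB request goes through msb_cache_write() four times and is
 * flushed later as one block update.
 */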

static int msb_do_read_request(struct msb_data *msb, int lba,
		int page, struct scatterlist *sg, int len, int *successfully_read)
{
	int error = 0;
	int offset = 0;
	*successfully_read = 0;

	while (offset < len) {

		error = msb_cache_read(msb, lba, page, sg, offset);
		if (error)
			return error;

		offset += msb->page_size;
		*successfully_read += msb->page_size;

		page++;
		if (page == msb->pages_in_block) {
			page = 0;
			lba++;
		}
	}
	return 0;
}

static void msb_io_work(struct work_struct *work)
{
	struct msb_data *msb = container_of(work, struct msb_data, io_work);
	int page, error, len;
	sector_t lba;
	struct scatterlist *sg = msb->prealloc_sg;
	struct request *req;

	dbg_verbose("IO: work started");

	while (1) {
		spin_lock_irq(&msb->q_lock);

		if (msb->need_flush_cache) {
			msb->need_flush_cache = false;
			spin_unlock_irq(&msb->q_lock);
			msb_cache_flush(msb);
			continue;
		}

		req = msb->req;
		if (!req) {
			dbg_verbose("IO: no more requests exiting");
			spin_unlock_irq(&msb->q_lock);
			return;
		}

		spin_unlock_irq(&msb->q_lock);

		/* process the request */
		dbg_verbose("IO: processing new request");
		blk_rq_map_sg(msb->queue, req, sg);

		lba = blk_rq_pos(req);

		sector_div(lba, msb->page_size / 512);
		page = sector_div(lba, msb->pages_in_block);

		if (rq_data_dir(msb->req) == READ)
			error = msb_do_read_request(msb, lba, page, sg,
				blk_rq_bytes(req), &len);
		else
			error = msb_do_write_request(msb, lba, page, sg,
				blk_rq_bytes(req), &len);

		if (len && !blk_update_request(req, BLK_STS_OK, len)) {
			__blk_mq_end_request(req, BLK_STS_OK);
			spin_lock_irq(&msb->q_lock);
			msb->req = NULL;
			spin_unlock_irq(&msb->q_lock);
		}

		if (error && msb->req) {
			blk_status_t ret = errno_to_blk_status(error);

			dbg_verbose("IO: ending one sector of the request with error");
			blk_mq_end_request(req, ret);
			spin_lock_irq(&msb->q_lock);
			msb->req = NULL;
			spin_unlock_irq(&msb->q_lock);
		}

		if (msb->req)
			dbg_verbose("IO: request still pending");
	}
}
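
/*
 * Illustration (not part of the driver) of the sector arithmetic above:
 * sector_div(x, d) divides x by d in place and returns the remainder.
 * With a hypothetical 512-byte page (one sector per page) and 32 pages
 * per block, a request starting at sector 100 yields lba = 100 after
 * the first division, then page = 100 % 32 = 4 and lba = 100 / 32 = 3:
 * the transfer starts at page 4 of logical block 3.
 */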

static DEFINE_IDR(msb_disk_idr); /* set of used disk numbers */
static DEFINE_MUTEX(msb_disk_lock); /* protects against races in open/release */

static void msb_data_clear(struct msb_data *msb)
{
	kfree(msb->boot_page);
	bitmap_free(msb->used_blocks_bitmap);
	bitmap_free(msb->erased_blocks_bitmap);
	kfree(msb->lba_to_pba_table);
	kfree(msb->cache);
	msb->card = NULL;
}

static int msb_bd_getgeo(struct block_device *bdev,
				struct hd_geometry *geo)
{
	struct msb_data *msb = bdev->bd_disk->private_data;
	*geo = msb->geometry;
	return 0;
}

static void msb_bd_free_disk(struct gendisk *disk)
{
	struct msb_data *msb = disk->private_data;

	mutex_lock(&msb_disk_lock);
	idr_remove(&msb_disk_idr, msb->disk_id);
	mutex_unlock(&msb_disk_lock);

	kfree(msb);
}

static blk_status_t msb_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct memstick_dev *card = hctx->queue->queuedata;
	struct msb_data *msb = memstick_get_drvdata(card);
	struct request *req = bd->rq;

	dbg_verbose("Submit request");

	spin_lock_irq(&msb->q_lock);

	if (msb->card_dead) {
		dbg("Refusing requests on removed card");

		WARN_ON(!msb->io_queue_stopped);

		spin_unlock_irq(&msb->q_lock);
		blk_mq_start_request(req);
		return BLK_STS_IOERR;
	}

	if (msb->req) {
		spin_unlock_irq(&msb->q_lock);
		return BLK_STS_DEV_RESOURCE;
	}

	blk_mq_start_request(req);
	msb->req = req;

	if (!msb->io_queue_stopped)
		queue_work(msb->io_queue, &msb->io_work);

	spin_unlock_irq(&msb->q_lock);
	return BLK_STS_OK;
}

static int msb_check_card(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);

	return (msb->card_dead == 0);
}

static void msb_stop(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	unsigned long flags;

	dbg("Stopping all msblock IO");

	blk_mq_stop_hw_queues(msb->queue);
	spin_lock_irqsave(&msb->q_lock, flags);
	msb->io_queue_stopped = true;
	spin_unlock_irqrestore(&msb->q_lock, flags);

	del_timer_sync(&msb->cache_flush_timer);
	flush_workqueue(msb->io_queue);

	spin_lock_irqsave(&msb->q_lock, flags);
	if (msb->req) {
		blk_mq_requeue_request(msb->req, false);
		msb->req = NULL;
	}
	spin_unlock_irqrestore(&msb->q_lock, flags);
}

static void msb_start(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	unsigned long flags;

	dbg("Resuming IO from msblock");

	msb_invalidate_reg_window(msb);

	spin_lock_irqsave(&msb->q_lock, flags);
	if (!msb->io_queue_stopped || msb->card_dead) {
		spin_unlock_irqrestore(&msb->q_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&msb->q_lock, flags);

	/* Kick the cache flush anyway, it's harmless */
	msb->need_flush_cache = true;
	msb->io_queue_stopped = false;

	blk_mq_start_hw_queues(msb->queue);

	queue_work(msb->io_queue, &msb->io_work);
}

static const struct block_device_operations msb_bdops = {
	.owner		= THIS_MODULE,
	.getgeo		= msb_bd_getgeo,
	.free_disk	= msb_bd_free_disk,
};

static const struct blk_mq_ops msb_mq_ops = {
	.queue_rq	= msb_queue_rq,
};
2076
2077/* Registers the block device */
2078static int msb_init_disk(struct memstick_dev *card)
2079{
2080 struct msb_data *msb = memstick_get_drvdata(card);
2081 int rc;
2082 unsigned long capacity;
2083
2084 mutex_lock(&msb_disk_lock);
2085 msb->disk_id = idr_alloc(&msb_disk_idr, card, 0, 256, GFP_KERNEL);
2086 mutex_unlock(&msb_disk_lock);
2087
2088 if (msb->disk_id < 0)
2089 return msb->disk_id;
2090
2091 rc = blk_mq_alloc_sq_tag_set(&msb->tag_set, &msb_mq_ops, 2,
2092 BLK_MQ_F_SHOULD_MERGE);
2093 if (rc)
2094 goto out_release_id;
2095
2096 msb->disk = blk_mq_alloc_disk(&msb->tag_set, card);
2097 if (IS_ERR(msb->disk)) {
2098 rc = PTR_ERR(msb->disk);
2099 goto out_free_tag_set;
2100 }
2101 msb->queue = msb->disk->queue;
2102
2103 blk_queue_max_hw_sectors(msb->queue, MS_BLOCK_MAX_PAGES);
2104 blk_queue_max_segments(msb->queue, MS_BLOCK_MAX_SEGS);
2105 blk_queue_max_segment_size(msb->queue,
2106 MS_BLOCK_MAX_PAGES * msb->page_size);
2107 blk_queue_logical_block_size(msb->queue, msb->page_size);
2108
2109 sprintf(msb->disk->disk_name, "msblk%d", msb->disk_id);
2110 msb->disk->fops = &msb_bdops;
2111 msb->disk->private_data = msb;
2112
2113 capacity = msb->pages_in_block * msb->logical_block_count;
2114 capacity *= (msb->page_size / 512);
2115 set_capacity(msb->disk, capacity);
2116 dbg("Set total disk size to %lu sectors", capacity);
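
	/*
	 * Worked example (hypothetical numbers): a card with 512-byte pages,
	 * 16 pages per block and 3966 logical blocks advertises
	 * 16 * 3966 * (512 / 512) = 63456 sectors, i.e. roughly 31 MiB.
	 */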

	msb->io_queue = alloc_ordered_workqueue("ms_block", WQ_MEM_RECLAIM);
	if (!msb->io_queue) {
		rc = -ENOMEM;
		goto out_cleanup_disk;
	}

	INIT_WORK(&msb->io_work, msb_io_work);
	sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);

	if (msb->read_only)
		set_disk_ro(msb->disk, 1);

	msb_start(card);
	rc = device_add_disk(&card->dev, msb->disk, NULL);
	if (rc)
		goto out_destroy_workqueue;
	dbg("Disk added");
	return 0;

out_destroy_workqueue:
	destroy_workqueue(msb->io_queue);
out_cleanup_disk:
	put_disk(msb->disk);
out_free_tag_set:
	blk_mq_free_tag_set(&msb->tag_set);
out_release_id:
	mutex_lock(&msb_disk_lock);
	idr_remove(&msb_disk_idr, msb->disk_id);
	mutex_unlock(&msb_disk_lock);
	return rc;
}

static int msb_probe(struct memstick_dev *card)
{
	struct msb_data *msb;
	int rc = 0;

	msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
	if (!msb)
		return -ENOMEM;
	memstick_set_drvdata(card, msb);
	msb->card = card;
	spin_lock_init(&msb->q_lock);

	rc = msb_init_card(card);
	if (rc)
		goto out_free;

	rc = msb_init_disk(card);
	if (!rc) {
		card->check = msb_check_card;
		card->stop = msb_stop;
		card->start = msb_start;
		return 0;
	}
out_free:
	memstick_set_drvdata(card, NULL);
	msb_data_clear(msb);
	kfree(msb);
	return rc;
}

static void msb_remove(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	unsigned long flags;

	if (!msb->io_queue_stopped)
		msb_stop(card);

	dbg("Removing the disk device");

	/* Take care of unhandled + new requests from now on */
	spin_lock_irqsave(&msb->q_lock, flags);
	msb->card_dead = true;
	spin_unlock_irqrestore(&msb->q_lock, flags);
	blk_mq_start_hw_queues(msb->queue);

	/* Remove the disk */
	del_gendisk(msb->disk);
	blk_mq_free_tag_set(&msb->tag_set);
	msb->queue = NULL;

	mutex_lock(&msb_disk_lock);
	msb_data_clear(msb);
	mutex_unlock(&msb_disk_lock);

	put_disk(msb->disk);
	memstick_set_drvdata(card, NULL);
}

#ifdef CONFIG_PM

static int msb_suspend(struct memstick_dev *card, pm_message_t state)
{
	msb_stop(card);
	return 0;
}

static int msb_resume(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct msb_data *new_msb = NULL;
	bool card_dead = true;

#ifndef CONFIG_MEMSTICK_UNSAFE_RESUME
	msb->card_dead = true;
	return 0;
#endif
	mutex_lock(&card->host->lock);

	new_msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
	if (!new_msb)
		goto out;

	new_msb->card = card;
	memstick_set_drvdata(card, new_msb);
	spin_lock_init(&new_msb->q_lock);
	sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);

	if (msb_init_card(card))
		goto out;
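
	/*
	 * Verify that the card that came back is the same one that was
	 * suspended: compare the block size, the boot page, the lba->pba
	 * mapping and the used-block bitmap. Any mismatch means the card
	 * was swapped and the old state must not be trusted.
	 */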
	if (msb->block_size != new_msb->block_size)
		goto out;

	if (memcmp(msb->boot_page, new_msb->boot_page,
		   sizeof(struct ms_boot_page)))
		goto out;

	/* lba_to_pba_table entries are u16, so compare the table in bytes */
	if (msb->logical_block_count != new_msb->logical_block_count ||
	    memcmp(msb->lba_to_pba_table, new_msb->lba_to_pba_table,
		   msb->logical_block_count * sizeof(u16)))
		goto out;

	if (msb->block_count != new_msb->block_count ||
	    !bitmap_equal(msb->used_blocks_bitmap, new_msb->used_blocks_bitmap,
			  msb->block_count))
		goto out;

	card_dead = false;
out:
	if (card_dead)
		dbg("Card was removed/replaced during suspend");

	msb->card_dead = card_dead;
	memstick_set_drvdata(card, msb);

	if (new_msb) {
		msb_data_clear(new_msb);
		kfree(new_msb);
	}

	msb_start(card);
	mutex_unlock(&card->host->lock);
	return 0;
}
#else

#define msb_suspend NULL
#define msb_resume NULL

#endif /* CONFIG_PM */

static struct memstick_device_id msb_id_tbl[] = {
	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
	 MEMSTICK_CLASS_FLASH},

	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
	 MEMSTICK_CLASS_ROM},

	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
	 MEMSTICK_CLASS_RO},

	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
	 MEMSTICK_CLASS_WP},

	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_DUO, MEMSTICK_CATEGORY_STORAGE_DUO,
	 MEMSTICK_CLASS_DUO},
	{}
};
MODULE_DEVICE_TABLE(memstick, msb_id_tbl);

static struct memstick_driver msb_driver = {
	.driver = {
		.name  = DRIVER_NAME,
		.owner = THIS_MODULE
	},
	.id_table = msb_id_tbl,
	.probe    = msb_probe,
	.remove   = msb_remove,
	.suspend  = msb_suspend,
	.resume   = msb_resume
};

static int __init msb_init(void)
{
	int rc = memstick_register_driver(&msb_driver);

	if (rc)
		pr_err("failed to register memstick driver (error %d)\n", rc);

	return rc;
}

static void __exit msb_exit(void)
{
	memstick_unregister_driver(&msb_driver);
	idr_destroy(&msb_disk_idr);
}

module_init(msb_init);
module_exit(msb_exit);

module_param(cache_flush_timeout, int, S_IRUGO);
MODULE_PARM_DESC(cache_flush_timeout,
		 "Cache flush timeout in msec (1000 default)");
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level (0-2)");

module_param(verify_writes, bool, S_IRUGO);
MODULE_PARM_DESC(verify_writes, "Read back and check all data that is written");
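
/*
 * Example usage (a sketch, assuming the module is built as ms_block.ko):
 *
 *   modprobe ms_block debug=2 verify_writes=1 cache_flush_timeout=500
 *
 * Only 'debug' is writable at runtime (e.g. via
 * /sys/module/ms_block/parameters/debug); the other two parameters are
 * read-only after load.
 */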

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Maxim Levitsky");
MODULE_DESCRIPTION("Sony MemoryStick block device driver");
1/*
2 * ms_block.c - Sony MemoryStick (legacy) storage support
3
4 * Copyright (C) 2013 Maxim Levitsky <maximlevitsky@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * Minor portions of the driver were copied from mspro_block.c which is
11 * Copyright (C) 2007 Alex Dubov <oakad@yahoo.com>
12 *
13 */
14#define DRIVER_NAME "ms_block"
15#define pr_fmt(fmt) DRIVER_NAME ": " fmt
16
17#include <linux/module.h>
18#include <linux/blkdev.h>
19#include <linux/memstick.h>
20#include <linux/idr.h>
21#include <linux/hdreg.h>
22#include <linux/delay.h>
23#include <linux/slab.h>
24#include <linux/random.h>
25#include <linux/bitmap.h>
26#include <linux/scatterlist.h>
27#include <linux/jiffies.h>
28#include <linux/workqueue.h>
29#include <linux/mutex.h>
30#include "ms_block.h"
31
32static int debug;
33static int cache_flush_timeout = 1000;
34static bool verify_writes;
35
36/*
37 * Copies section of 'sg_from' starting from offset 'offset' and with length
38 * 'len' To another scatterlist of to_nents enties
39 */
40static size_t msb_sg_copy(struct scatterlist *sg_from,
41 struct scatterlist *sg_to, int to_nents, size_t offset, size_t len)
42{
43 size_t copied = 0;
44
45 while (offset > 0) {
46 if (offset >= sg_from->length) {
47 if (sg_is_last(sg_from))
48 return 0;
49
50 offset -= sg_from->length;
51 sg_from = sg_next(sg_from);
52 continue;
53 }
54
55 copied = min(len, sg_from->length - offset);
56 sg_set_page(sg_to, sg_page(sg_from),
57 copied, sg_from->offset + offset);
58
59 len -= copied;
60 offset = 0;
61
62 if (sg_is_last(sg_from) || !len)
63 goto out;
64
65 sg_to = sg_next(sg_to);
66 to_nents--;
67 sg_from = sg_next(sg_from);
68 }
69
70 while (len > sg_from->length && to_nents--) {
71 len -= sg_from->length;
72 copied += sg_from->length;
73
74 sg_set_page(sg_to, sg_page(sg_from),
75 sg_from->length, sg_from->offset);
76
77 if (sg_is_last(sg_from) || !len)
78 goto out;
79
80 sg_from = sg_next(sg_from);
81 sg_to = sg_next(sg_to);
82 }
83
84 if (len && to_nents) {
85 sg_set_page(sg_to, sg_page(sg_from), len, sg_from->offset);
86 copied += len;
87 }
88out:
89 sg_mark_end(sg_to);
90 return copied;
91}
92
93/*
94 * Compares section of 'sg' starting from offset 'offset' and with length 'len'
95 * to linear buffer of length 'len' at address 'buffer'
96 * Returns 0 if equal and -1 otherwice
97 */
98static int msb_sg_compare_to_buffer(struct scatterlist *sg,
99 size_t offset, u8 *buffer, size_t len)
100{
101 int retval = 0, cmplen;
102 struct sg_mapping_iter miter;
103
104 sg_miter_start(&miter, sg, sg_nents(sg),
105 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
106
107 while (sg_miter_next(&miter) && len > 0) {
108 if (offset >= miter.length) {
109 offset -= miter.length;
110 continue;
111 }
112
113 cmplen = min(miter.length - offset, len);
114 retval = memcmp(miter.addr + offset, buffer, cmplen) ? -1 : 0;
115 if (retval)
116 break;
117
118 buffer += cmplen;
119 len -= cmplen;
120 offset = 0;
121 }
122
123 if (!retval && len)
124 retval = -1;
125
126 sg_miter_stop(&miter);
127 return retval;
128}
129
130
131/* Get zone at which block with logical address 'lba' lives
132 * Flash is broken into zones.
133 * Each zone consists of 512 eraseblocks, out of which in first
134 * zone 494 are used and 496 are for all following zones.
135 * Therefore zone #0 hosts blocks 0-493, zone #1 blocks 494-988, etc...
136*/
137static int msb_get_zone_from_lba(int lba)
138{
139 if (lba < 494)
140 return 0;
141 return ((lba - 494) / 496) + 1;
142}
143
144/* Get zone of physical block. Trivial */
145static int msb_get_zone_from_pba(int pba)
146{
147 return pba / MS_BLOCKS_IN_ZONE;
148}
149
150/* Debug test to validate free block counts */
151static int msb_validate_used_block_bitmap(struct msb_data *msb)
152{
153 int total_free_blocks = 0;
154 int i;
155
156 if (!debug)
157 return 0;
158
159 for (i = 0; i < msb->zone_count; i++)
160 total_free_blocks += msb->free_block_count[i];
161
162 if (msb->block_count - bitmap_weight(msb->used_blocks_bitmap,
163 msb->block_count) == total_free_blocks)
164 return 0;
165
166 pr_err("BUG: free block counts don't match the bitmap");
167 msb->read_only = true;
168 return -EINVAL;
169}
170
171/* Mark physical block as used */
172static void msb_mark_block_used(struct msb_data *msb, int pba)
173{
174 int zone = msb_get_zone_from_pba(pba);
175
176 if (test_bit(pba, msb->used_blocks_bitmap)) {
177 pr_err(
178 "BUG: attempt to mark already used pba %d as used", pba);
179 msb->read_only = true;
180 return;
181 }
182
183 if (msb_validate_used_block_bitmap(msb))
184 return;
185
186 /* No races because all IO is single threaded */
187 __set_bit(pba, msb->used_blocks_bitmap);
188 msb->free_block_count[zone]--;
189}
190
191/* Mark physical block as free */
192static void msb_mark_block_unused(struct msb_data *msb, int pba)
193{
194 int zone = msb_get_zone_from_pba(pba);
195
196 if (!test_bit(pba, msb->used_blocks_bitmap)) {
197 pr_err("BUG: attempt to mark already unused pba %d as unused" , pba);
198 msb->read_only = true;
199 return;
200 }
201
202 if (msb_validate_used_block_bitmap(msb))
203 return;
204
205 /* No races because all IO is single threaded */
206 __clear_bit(pba, msb->used_blocks_bitmap);
207 msb->free_block_count[zone]++;
208}
209
210/* Invalidate current register window */
211static void msb_invalidate_reg_window(struct msb_data *msb)
212{
213 msb->reg_addr.w_offset = offsetof(struct ms_register, id);
214 msb->reg_addr.w_length = sizeof(struct ms_id_register);
215 msb->reg_addr.r_offset = offsetof(struct ms_register, id);
216 msb->reg_addr.r_length = sizeof(struct ms_id_register);
217 msb->addr_valid = false;
218}
219
220/* Start a state machine */
221static int msb_run_state_machine(struct msb_data *msb, int (*state_func)
222 (struct memstick_dev *card, struct memstick_request **req))
223{
224 struct memstick_dev *card = msb->card;
225
226 WARN_ON(msb->state != -1);
227 msb->int_polling = false;
228 msb->state = 0;
229 msb->exit_error = 0;
230
231 memset(&card->current_mrq, 0, sizeof(card->current_mrq));
232
233 card->next_request = state_func;
234 memstick_new_req(card->host);
235 wait_for_completion(&card->mrq_complete);
236
237 WARN_ON(msb->state != -1);
238 return msb->exit_error;
239}
240
241/* State machines call that to exit */
242static int msb_exit_state_machine(struct msb_data *msb, int error)
243{
244 WARN_ON(msb->state == -1);
245
246 msb->state = -1;
247 msb->exit_error = error;
248 msb->card->next_request = h_msb_default_bad;
249
250 /* Invalidate reg window on errors */
251 if (error)
252 msb_invalidate_reg_window(msb);
253
254 complete(&msb->card->mrq_complete);
255 return -ENXIO;
256}
257
258/* read INT register */
259static int msb_read_int_reg(struct msb_data *msb, long timeout)
260{
261 struct memstick_request *mrq = &msb->card->current_mrq;
262
263 WARN_ON(msb->state == -1);
264
265 if (!msb->int_polling) {
266 msb->int_timeout = jiffies +
267 msecs_to_jiffies(timeout == -1 ? 500 : timeout);
268 msb->int_polling = true;
269 } else if (time_after(jiffies, msb->int_timeout)) {
270 mrq->data[0] = MEMSTICK_INT_CMDNAK;
271 return 0;
272 }
273
274 if ((msb->caps & MEMSTICK_CAP_AUTO_GET_INT) &&
275 mrq->need_card_int && !mrq->error) {
276 mrq->data[0] = mrq->int_reg;
277 mrq->need_card_int = false;
278 return 0;
279 } else {
280 memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
281 return 1;
282 }
283}
284
285/* Read a register */
286static int msb_read_regs(struct msb_data *msb, int offset, int len)
287{
288 struct memstick_request *req = &msb->card->current_mrq;
289
290 if (msb->reg_addr.r_offset != offset ||
291 msb->reg_addr.r_length != len || !msb->addr_valid) {
292
293 msb->reg_addr.r_offset = offset;
294 msb->reg_addr.r_length = len;
295 msb->addr_valid = true;
296
297 memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
298 &msb->reg_addr, sizeof(msb->reg_addr));
299 return 0;
300 }
301
302 memstick_init_req(req, MS_TPC_READ_REG, NULL, len);
303 return 1;
304}
305
306/* Write a card register */
307static int msb_write_regs(struct msb_data *msb, int offset, int len, void *buf)
308{
309 struct memstick_request *req = &msb->card->current_mrq;
310
311 if (msb->reg_addr.w_offset != offset ||
312 msb->reg_addr.w_length != len || !msb->addr_valid) {
313
314 msb->reg_addr.w_offset = offset;
315 msb->reg_addr.w_length = len;
316 msb->addr_valid = true;
317
318 memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
319 &msb->reg_addr, sizeof(msb->reg_addr));
320 return 0;
321 }
322
323 memstick_init_req(req, MS_TPC_WRITE_REG, buf, len);
324 return 1;
325}
326
327/* Handler for absence of IO */
328static int h_msb_default_bad(struct memstick_dev *card,
329 struct memstick_request **mrq)
330{
331 return -ENXIO;
332}
333
334/*
335 * This function is a handler for reads of one page from device.
336 * Writes output to msb->current_sg, takes sector address from msb->reg.param
337 * Can also be used to read extra data only. Set params accordintly.
338 */
339static int h_msb_read_page(struct memstick_dev *card,
340 struct memstick_request **out_mrq)
341{
342 struct msb_data *msb = memstick_get_drvdata(card);
343 struct memstick_request *mrq = *out_mrq = &card->current_mrq;
344 struct scatterlist sg[2];
345 u8 command, intreg;
346
347 if (mrq->error) {
348 dbg("read_page, unknown error");
349 return msb_exit_state_machine(msb, mrq->error);
350 }
351again:
352 switch (msb->state) {
353 case MSB_RP_SEND_BLOCK_ADDRESS:
354 /* msb_write_regs sometimes "fails" because it needs to update
355 the reg window, and thus it returns request for that.
356 Then we stay in this state and retry */
357 if (!msb_write_regs(msb,
358 offsetof(struct ms_register, param),
359 sizeof(struct ms_param_register),
360 (unsigned char *)&msb->regs.param))
361 return 0;
362
363 msb->state = MSB_RP_SEND_READ_COMMAND;
364 return 0;
365
366 case MSB_RP_SEND_READ_COMMAND:
367 command = MS_CMD_BLOCK_READ;
368 memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
369 msb->state = MSB_RP_SEND_INT_REQ;
370 return 0;
371
372 case MSB_RP_SEND_INT_REQ:
373 msb->state = MSB_RP_RECEIVE_INT_REQ_RESULT;
374 /* If dont actually need to send the int read request (only in
375 serial mode), then just fall through */
376 if (msb_read_int_reg(msb, -1))
377 return 0;
378 /* fallthrough */
379
380 case MSB_RP_RECEIVE_INT_REQ_RESULT:
381 intreg = mrq->data[0];
382 msb->regs.status.interrupt = intreg;
383
384 if (intreg & MEMSTICK_INT_CMDNAK)
385 return msb_exit_state_machine(msb, -EIO);
386
387 if (!(intreg & MEMSTICK_INT_CED)) {
388 msb->state = MSB_RP_SEND_INT_REQ;
389 goto again;
390 }
391
392 msb->int_polling = false;
393 msb->state = (intreg & MEMSTICK_INT_ERR) ?
394 MSB_RP_SEND_READ_STATUS_REG : MSB_RP_SEND_OOB_READ;
395 goto again;
396
397 case MSB_RP_SEND_READ_STATUS_REG:
398 /* read the status register to understand source of the INT_ERR */
399 if (!msb_read_regs(msb,
400 offsetof(struct ms_register, status),
401 sizeof(struct ms_status_register)))
402 return 0;
403
404 msb->state = MSB_RP_RECEIVE_STATUS_REG;
405 return 0;
406
407 case MSB_RP_RECEIVE_STATUS_REG:
408 msb->regs.status = *(struct ms_status_register *)mrq->data;
409 msb->state = MSB_RP_SEND_OOB_READ;
410 /* fallthrough */
411
412 case MSB_RP_SEND_OOB_READ:
413 if (!msb_read_regs(msb,
414 offsetof(struct ms_register, extra_data),
415 sizeof(struct ms_extra_data_register)))
416 return 0;
417
418 msb->state = MSB_RP_RECEIVE_OOB_READ;
419 return 0;
420
421 case MSB_RP_RECEIVE_OOB_READ:
422 msb->regs.extra_data =
423 *(struct ms_extra_data_register *) mrq->data;
424 msb->state = MSB_RP_SEND_READ_DATA;
425 /* fallthrough */
426
427 case MSB_RP_SEND_READ_DATA:
428 /* Skip that state if we only read the oob */
429 if (msb->regs.param.cp == MEMSTICK_CP_EXTRA) {
430 msb->state = MSB_RP_RECEIVE_READ_DATA;
431 goto again;
432 }
433
434 sg_init_table(sg, ARRAY_SIZE(sg));
435 msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
436 msb->current_sg_offset,
437 msb->page_size);
438
439 memstick_init_req_sg(mrq, MS_TPC_READ_LONG_DATA, sg);
440 msb->state = MSB_RP_RECEIVE_READ_DATA;
441 return 0;
442
443 case MSB_RP_RECEIVE_READ_DATA:
444 if (!(msb->regs.status.interrupt & MEMSTICK_INT_ERR)) {
445 msb->current_sg_offset += msb->page_size;
446 return msb_exit_state_machine(msb, 0);
447 }
448
449 if (msb->regs.status.status1 & MEMSTICK_UNCORR_ERROR) {
450 dbg("read_page: uncorrectable error");
451 return msb_exit_state_machine(msb, -EBADMSG);
452 }
453
454 if (msb->regs.status.status1 & MEMSTICK_CORR_ERROR) {
455 dbg("read_page: correctable error");
456 msb->current_sg_offset += msb->page_size;
457 return msb_exit_state_machine(msb, -EUCLEAN);
458 } else {
459 dbg("read_page: INT error, but no status error bits");
460 return msb_exit_state_machine(msb, -EIO);
461 }
462 }
463
464 BUG();
465}
466
467/*
468 * Handler of writes of exactly one block.
469 * Takes address from msb->regs.param.
470 * Writes same extra data to blocks, also taken
471 * from msb->regs.extra
472 * Returns -EBADMSG if write fails due to uncorrectable error, or -EIO if
473 * device refuses to take the command or something else
474 */
475static int h_msb_write_block(struct memstick_dev *card,
476 struct memstick_request **out_mrq)
477{
478 struct msb_data *msb = memstick_get_drvdata(card);
479 struct memstick_request *mrq = *out_mrq = &card->current_mrq;
480 struct scatterlist sg[2];
481 u8 intreg, command;
482
483 if (mrq->error)
484 return msb_exit_state_machine(msb, mrq->error);
485
486again:
487 switch (msb->state) {
488
489 /* HACK: Jmicon handling of TPCs between 8 and
490 * sizeof(memstick_request.data) is broken due to hardware
491 * bug in PIO mode that is used for these TPCs
492 * Therefore split the write
493 */
494
495 case MSB_WB_SEND_WRITE_PARAMS:
496 if (!msb_write_regs(msb,
497 offsetof(struct ms_register, param),
498 sizeof(struct ms_param_register),
499 &msb->regs.param))
500 return 0;
501
502 msb->state = MSB_WB_SEND_WRITE_OOB;
503 return 0;
504
505 case MSB_WB_SEND_WRITE_OOB:
506 if (!msb_write_regs(msb,
507 offsetof(struct ms_register, extra_data),
508 sizeof(struct ms_extra_data_register),
509 &msb->regs.extra_data))
510 return 0;
511 msb->state = MSB_WB_SEND_WRITE_COMMAND;
512 return 0;
513
514
515 case MSB_WB_SEND_WRITE_COMMAND:
516 command = MS_CMD_BLOCK_WRITE;
517 memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
518 msb->state = MSB_WB_SEND_INT_REQ;
519 return 0;
520
521 case MSB_WB_SEND_INT_REQ:
522 msb->state = MSB_WB_RECEIVE_INT_REQ;
523 if (msb_read_int_reg(msb, -1))
524 return 0;
525 /* fallthrough */
526
527 case MSB_WB_RECEIVE_INT_REQ:
528 intreg = mrq->data[0];
529 msb->regs.status.interrupt = intreg;
530
531 /* errors mean out of here, and fast... */
532 if (intreg & (MEMSTICK_INT_CMDNAK))
533 return msb_exit_state_machine(msb, -EIO);
534
535 if (intreg & MEMSTICK_INT_ERR)
536 return msb_exit_state_machine(msb, -EBADMSG);
537
538
539 /* for last page we need to poll CED */
540 if (msb->current_page == msb->pages_in_block) {
541 if (intreg & MEMSTICK_INT_CED)
542 return msb_exit_state_machine(msb, 0);
543 msb->state = MSB_WB_SEND_INT_REQ;
544 goto again;
545
546 }
547
548 /* for non-last page we need BREQ before writing next chunk */
549 if (!(intreg & MEMSTICK_INT_BREQ)) {
550 msb->state = MSB_WB_SEND_INT_REQ;
551 goto again;
552 }
553
554 msb->int_polling = false;
555 msb->state = MSB_WB_SEND_WRITE_DATA;
556 /* fallthrough */
557
558 case MSB_WB_SEND_WRITE_DATA:
559 sg_init_table(sg, ARRAY_SIZE(sg));
560
561 if (msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
562 msb->current_sg_offset,
563 msb->page_size) < msb->page_size)
564 return msb_exit_state_machine(msb, -EIO);
565
566 memstick_init_req_sg(mrq, MS_TPC_WRITE_LONG_DATA, sg);
567 mrq->need_card_int = 1;
568 msb->state = MSB_WB_RECEIVE_WRITE_CONFIRMATION;
569 return 0;
570
571 case MSB_WB_RECEIVE_WRITE_CONFIRMATION:
572 msb->current_page++;
573 msb->current_sg_offset += msb->page_size;
574 msb->state = MSB_WB_SEND_INT_REQ;
575 goto again;
576 default:
577 BUG();
578 }
579
580 return 0;
581}
582
583/*
584 * This function is used to send simple IO requests to device that consist
585 * of register write + command
586 */
587static int h_msb_send_command(struct memstick_dev *card,
588 struct memstick_request **out_mrq)
589{
590 struct msb_data *msb = memstick_get_drvdata(card);
591 struct memstick_request *mrq = *out_mrq = &card->current_mrq;
592 u8 intreg;
593
594 if (mrq->error) {
595 dbg("send_command: unknown error");
596 return msb_exit_state_machine(msb, mrq->error);
597 }
598again:
599 switch (msb->state) {
600
601 /* HACK: see h_msb_write_block */
602 case MSB_SC_SEND_WRITE_PARAMS: /* write param register*/
603 if (!msb_write_regs(msb,
604 offsetof(struct ms_register, param),
605 sizeof(struct ms_param_register),
606 &msb->regs.param))
607 return 0;
608 msb->state = MSB_SC_SEND_WRITE_OOB;
609 return 0;
610
611 case MSB_SC_SEND_WRITE_OOB:
612 if (!msb->command_need_oob) {
613 msb->state = MSB_SC_SEND_COMMAND;
614 goto again;
615 }
616
617 if (!msb_write_regs(msb,
618 offsetof(struct ms_register, extra_data),
619 sizeof(struct ms_extra_data_register),
620 &msb->regs.extra_data))
621 return 0;
622
623 msb->state = MSB_SC_SEND_COMMAND;
624 return 0;
625
626 case MSB_SC_SEND_COMMAND:
627 memstick_init_req(mrq, MS_TPC_SET_CMD, &msb->command_value, 1);
628 msb->state = MSB_SC_SEND_INT_REQ;
629 return 0;
630
631 case MSB_SC_SEND_INT_REQ:
632 msb->state = MSB_SC_RECEIVE_INT_REQ;
633 if (msb_read_int_reg(msb, -1))
634 return 0;
635 /* fallthrough */
636
637 case MSB_SC_RECEIVE_INT_REQ:
638 intreg = mrq->data[0];
639
640 if (intreg & MEMSTICK_INT_CMDNAK)
641 return msb_exit_state_machine(msb, -EIO);
642 if (intreg & MEMSTICK_INT_ERR)
643 return msb_exit_state_machine(msb, -EBADMSG);
644
645 if (!(intreg & MEMSTICK_INT_CED)) {
646 msb->state = MSB_SC_SEND_INT_REQ;
647 goto again;
648 }
649
650 return msb_exit_state_machine(msb, 0);
651 }
652
653 BUG();
654}
655
656/* Small handler for card reset */
657static int h_msb_reset(struct memstick_dev *card,
658 struct memstick_request **out_mrq)
659{
660 u8 command = MS_CMD_RESET;
661 struct msb_data *msb = memstick_get_drvdata(card);
662 struct memstick_request *mrq = *out_mrq = &card->current_mrq;
663
664 if (mrq->error)
665 return msb_exit_state_machine(msb, mrq->error);
666
667 switch (msb->state) {
668 case MSB_RS_SEND:
669 memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
670 mrq->need_card_int = 0;
671 msb->state = MSB_RS_CONFIRM;
672 return 0;
673 case MSB_RS_CONFIRM:
674 return msb_exit_state_machine(msb, 0);
675 }
676 BUG();
677}
678
679/* This handler is used to do serial->parallel switch */
680static int h_msb_parallel_switch(struct memstick_dev *card,
681 struct memstick_request **out_mrq)
682{
683 struct msb_data *msb = memstick_get_drvdata(card);
684 struct memstick_request *mrq = *out_mrq = &card->current_mrq;
685 struct memstick_host *host = card->host;
686
687 if (mrq->error) {
688 dbg("parallel_switch: error");
689 msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
690 return msb_exit_state_machine(msb, mrq->error);
691 }
692
693 switch (msb->state) {
694 case MSB_PS_SEND_SWITCH_COMMAND:
695 /* Set the parallel interface on memstick side */
696 msb->regs.param.system |= MEMSTICK_SYS_PAM;
697
698 if (!msb_write_regs(msb,
699 offsetof(struct ms_register, param),
700 1,
701 (unsigned char *)&msb->regs.param))
702 return 0;
703
704 msb->state = MSB_PS_SWICH_HOST;
705 return 0;
706
707 case MSB_PS_SWICH_HOST:
708 /* Set parallel interface on our side + send a dummy request
709 to see if card responds */
710 host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_PAR4);
711 memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
712 msb->state = MSB_PS_CONFIRM;
713 return 0;
714
715 case MSB_PS_CONFIRM:
716 return msb_exit_state_machine(msb, 0);
717 }
718
719 BUG();
720}
721
722static int msb_switch_to_parallel(struct msb_data *msb);
723
724/* Reset the card, to guard against hw errors beeing treated as bad blocks */
725static int msb_reset(struct msb_data *msb, bool full)
726{
727
728 bool was_parallel = msb->regs.param.system & MEMSTICK_SYS_PAM;
729 struct memstick_dev *card = msb->card;
730 struct memstick_host *host = card->host;
731 int error;
732
733 /* Reset the card */
734 msb->regs.param.system = MEMSTICK_SYS_BAMD;
735
736 if (full) {
737 error = host->set_param(host,
738 MEMSTICK_POWER, MEMSTICK_POWER_OFF);
739 if (error)
740 goto out_error;
741
742 msb_invalidate_reg_window(msb);
743
744 error = host->set_param(host,
745 MEMSTICK_POWER, MEMSTICK_POWER_ON);
746 if (error)
747 goto out_error;
748
749 error = host->set_param(host,
750 MEMSTICK_INTERFACE, MEMSTICK_SERIAL);
751 if (error) {
752out_error:
753 dbg("Failed to reset the host controller");
754 msb->read_only = true;
755 return -EFAULT;
756 }
757 }
758
759 error = msb_run_state_machine(msb, h_msb_reset);
760 if (error) {
761 dbg("Failed to reset the card");
762 msb->read_only = true;
763 return -ENODEV;
764 }
765
766 /* Set parallel mode */
767 if (was_parallel)
768 msb_switch_to_parallel(msb);
769 return 0;
770}
771
772/* Attempts to switch interface to parallel mode */
773static int msb_switch_to_parallel(struct msb_data *msb)
774{
775 int error;
776
777 error = msb_run_state_machine(msb, h_msb_parallel_switch);
778 if (error) {
779 pr_err("Switch to parallel failed");
780 msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
781 msb_reset(msb, true);
782 return -EFAULT;
783 }
784
785 msb->caps |= MEMSTICK_CAP_AUTO_GET_INT;
786 return 0;
787}
788
789/* Changes overwrite flag on a page */
790static int msb_set_overwrite_flag(struct msb_data *msb,
791 u16 pba, u8 page, u8 flag)
792{
793 if (msb->read_only)
794 return -EROFS;
795
796 msb->regs.param.block_address = cpu_to_be16(pba);
797 msb->regs.param.page_address = page;
798 msb->regs.param.cp = MEMSTICK_CP_OVERWRITE;
799 msb->regs.extra_data.overwrite_flag = flag;
800 msb->command_value = MS_CMD_BLOCK_WRITE;
801 msb->command_need_oob = true;
802
803 dbg_verbose("changing overwrite flag to %02x for sector %d, page %d",
804 flag, pba, page);
805 return msb_run_state_machine(msb, h_msb_send_command);
806}
807
808static int msb_mark_bad(struct msb_data *msb, int pba)
809{
810 pr_notice("marking pba %d as bad", pba);
811 msb_reset(msb, true);
812 return msb_set_overwrite_flag(
813 msb, pba, 0, 0xFF & ~MEMSTICK_OVERWRITE_BKST);
814}
815
816static int msb_mark_page_bad(struct msb_data *msb, int pba, int page)
817{
818 dbg("marking page %d of pba %d as bad", page, pba);
819 msb_reset(msb, true);
820 return msb_set_overwrite_flag(msb,
821 pba, page, ~MEMSTICK_OVERWRITE_PGST0);
822}
823
824/* Erases one physical block */
825static int msb_erase_block(struct msb_data *msb, u16 pba)
826{
827 int error, try;
828 if (msb->read_only)
829 return -EROFS;
830
831 dbg_verbose("erasing pba %d", pba);
832
833 for (try = 1; try < 3; try++) {
834 msb->regs.param.block_address = cpu_to_be16(pba);
835 msb->regs.param.page_address = 0;
836 msb->regs.param.cp = MEMSTICK_CP_BLOCK;
837 msb->command_value = MS_CMD_BLOCK_ERASE;
838 msb->command_need_oob = false;
839
840
841 error = msb_run_state_machine(msb, h_msb_send_command);
842 if (!error || msb_reset(msb, true))
843 break;
844 }
845
846 if (error) {
847 pr_err("erase failed, marking pba %d as bad", pba);
848 msb_mark_bad(msb, pba);
849 }
850
851 dbg_verbose("erase success, marking pba %d as unused", pba);
852 msb_mark_block_unused(msb, pba);
853 __set_bit(pba, msb->erased_blocks_bitmap);
854 return error;
855}
856
857/* Reads one page from device */
858static int msb_read_page(struct msb_data *msb,
859 u16 pba, u8 page, struct ms_extra_data_register *extra,
860 struct scatterlist *sg, int offset)
861{
862 int try, error;
863
864 if (pba == MS_BLOCK_INVALID) {
865 unsigned long flags;
866 struct sg_mapping_iter miter;
867 size_t len = msb->page_size;
868
869 dbg_verbose("read unmapped sector. returning 0xFF");
870
871 local_irq_save(flags);
872 sg_miter_start(&miter, sg, sg_nents(sg),
873 SG_MITER_ATOMIC | SG_MITER_TO_SG);
874
875 while (sg_miter_next(&miter) && len > 0) {
876
877 int chunklen;
878
879 if (offset && offset >= miter.length) {
880 offset -= miter.length;
881 continue;
882 }
883
884 chunklen = min(miter.length - offset, len);
885 memset(miter.addr + offset, 0xFF, chunklen);
886 len -= chunklen;
887 offset = 0;
888 }
889
890 sg_miter_stop(&miter);
891 local_irq_restore(flags);
892
893 if (offset)
894 return -EFAULT;
895
896 if (extra)
897 memset(extra, 0xFF, sizeof(*extra));
898 return 0;
899 }
900
901 if (pba >= msb->block_count) {
902 pr_err("BUG: attempt to read beyond the end of the card at pba %d", pba);
903 return -EINVAL;
904 }
905
906 for (try = 1; try < 3; try++) {
907 msb->regs.param.block_address = cpu_to_be16(pba);
908 msb->regs.param.page_address = page;
909 msb->regs.param.cp = MEMSTICK_CP_PAGE;
910
911 msb->current_sg = sg;
912 msb->current_sg_offset = offset;
913 error = msb_run_state_machine(msb, h_msb_read_page);
914
915
916 if (error == -EUCLEAN) {
917 pr_notice("correctable error on pba %d, page %d",
918 pba, page);
919 error = 0;
920 }
921
922 if (!error && extra)
923 *extra = msb->regs.extra_data;
924
925 if (!error || msb_reset(msb, true))
926 break;
927
928 }
929
930 /* Mark bad pages */
931 if (error == -EBADMSG) {
932 pr_err("uncorrectable error on read of pba %d, page %d",
933 pba, page);
934
935 if (msb->regs.extra_data.overwrite_flag &
936 MEMSTICK_OVERWRITE_PGST0)
937 msb_mark_page_bad(msb, pba, page);
938 return -EBADMSG;
939 }
940
941 if (error)
942 pr_err("read of pba %d, page %d failed with error %d",
943 pba, page, error);
944 return error;
945}
946
947/* Reads oob of page only */
948static int msb_read_oob(struct msb_data *msb, u16 pba, u16 page,
949 struct ms_extra_data_register *extra)
950{
951 int error;
952
953 BUG_ON(!extra);
954 msb->regs.param.block_address = cpu_to_be16(pba);
955 msb->regs.param.page_address = page;
956 msb->regs.param.cp = MEMSTICK_CP_EXTRA;
957
958 if (pba > msb->block_count) {
959 pr_err("BUG: attempt to read beyond the end of card at pba %d", pba);
960 return -EINVAL;
961 }
962
963 error = msb_run_state_machine(msb, h_msb_read_page);
964 *extra = msb->regs.extra_data;
965
966 if (error == -EUCLEAN) {
967 pr_notice("correctable error on pba %d, page %d",
968 pba, page);
969 return 0;
970 }
971
972 return error;
973}
974
975/* Reads a block and compares it with data contained in scatterlist orig_sg */
976static int msb_verify_block(struct msb_data *msb, u16 pba,
977 struct scatterlist *orig_sg, int offset)
978{
979 struct scatterlist sg;
980 int page = 0, error;
981
982 sg_init_one(&sg, msb->block_buffer, msb->block_size);
983
984 while (page < msb->pages_in_block) {
985
986 error = msb_read_page(msb, pba, page,
987 NULL, &sg, page * msb->page_size);
988 if (error)
989 return error;
990 page++;
991 }
992
993 if (msb_sg_compare_to_buffer(orig_sg, offset,
994 msb->block_buffer, msb->block_size))
995 return -EIO;
996 return 0;
997}
998
999/* Writes exectly one block + oob */
1000static int msb_write_block(struct msb_data *msb,
1001 u16 pba, u32 lba, struct scatterlist *sg, int offset)
1002{
1003 int error, current_try = 1;
1004 BUG_ON(sg->length < msb->page_size);
1005
1006 if (msb->read_only)
1007 return -EROFS;
1008
1009 if (pba == MS_BLOCK_INVALID) {
1010 pr_err(
1011 "BUG: write: attempt to write MS_BLOCK_INVALID block");
1012 return -EINVAL;
1013 }
1014
1015 if (pba >= msb->block_count || lba >= msb->logical_block_count) {
1016 pr_err(
1017 "BUG: write: attempt to write beyond the end of device");
1018 return -EINVAL;
1019 }
1020
1021 if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
1022 pr_err("BUG: write: lba zone mismatch");
1023 return -EINVAL;
1024 }
1025
1026 if (pba == msb->boot_block_locations[0] ||
1027 pba == msb->boot_block_locations[1]) {
1028 pr_err("BUG: write: attempt to write to boot blocks!");
1029 return -EINVAL;
1030 }
1031
1032 while (1) {
1033
1034 if (msb->read_only)
1035 return -EROFS;
1036
1037 msb->regs.param.cp = MEMSTICK_CP_BLOCK;
1038 msb->regs.param.page_address = 0;
1039 msb->regs.param.block_address = cpu_to_be16(pba);
1040
1041 msb->regs.extra_data.management_flag = 0xFF;
1042 msb->regs.extra_data.overwrite_flag = 0xF8;
1043 msb->regs.extra_data.logical_address = cpu_to_be16(lba);
1044
1045 msb->current_sg = sg;
1046 msb->current_sg_offset = offset;
1047 msb->current_page = 0;
1048
1049 error = msb_run_state_machine(msb, h_msb_write_block);
1050
1051 /* Sector we just wrote to is assumed erased since its pba
1052 was erased. If it wasn't erased, write will succeed
1053 and will just clear the bits that were set in the block
1054 thus test that what we have written,
1055 matches what we expect.
1056 We do trust the blocks that we erased */
1057 if (!error && (verify_writes ||
1058 !test_bit(pba, msb->erased_blocks_bitmap)))
1059 error = msb_verify_block(msb, pba, sg, offset);
1060
1061 if (!error)
1062 break;
1063
1064 if (current_try > 1 || msb_reset(msb, true))
1065 break;
1066
1067 pr_err("write failed, trying to erase the pba %d", pba);
1068 error = msb_erase_block(msb, pba);
1069 if (error)
1070 break;
1071
1072 current_try++;
1073 }
1074 return error;
1075}
1076
1077/* Finds a free block for write replacement */
1078static u16 msb_get_free_block(struct msb_data *msb, int zone)
1079{
1080 u16 pos;
1081 int pba = zone * MS_BLOCKS_IN_ZONE;
1082 int i;
1083
1084 get_random_bytes(&pos, sizeof(pos));
1085
1086 if (!msb->free_block_count[zone]) {
1087 pr_err("NO free blocks in the zone %d, to use for a write, (media is WORN out) switching to RO mode", zone);
1088 msb->read_only = true;
1089 return MS_BLOCK_INVALID;
1090 }
1091
1092 pos %= msb->free_block_count[zone];
1093
1094 dbg_verbose("have %d choices for a free block, selected randomally: %d",
1095 msb->free_block_count[zone], pos);
1096
1097 pba = find_next_zero_bit(msb->used_blocks_bitmap,
1098 msb->block_count, pba);
1099 for (i = 0; i < pos; ++i)
1100 pba = find_next_zero_bit(msb->used_blocks_bitmap,
1101 msb->block_count, pba + 1);
1102
1103 dbg_verbose("result of the free blocks scan: pba %d", pba);
1104
1105 if (pba == msb->block_count || (msb_get_zone_from_pba(pba)) != zone) {
1106 pr_err("BUG: cant get a free block");
1107 msb->read_only = true;
1108 return MS_BLOCK_INVALID;
1109 }
1110
1111 msb_mark_block_used(msb, pba);
1112 return pba;
1113}
1114
1115static int msb_update_block(struct msb_data *msb, u16 lba,
1116 struct scatterlist *sg, int offset)
1117{
1118 u16 pba, new_pba;
1119 int error, try;
1120
1121 pba = msb->lba_to_pba_table[lba];
1122 dbg_verbose("start of a block update at lba %d, pba %d", lba, pba);
1123
1124 if (pba != MS_BLOCK_INVALID) {
1125 dbg_verbose("setting the update flag on the block");
1126 msb_set_overwrite_flag(msb, pba, 0,
1127 0xFF & ~MEMSTICK_OVERWRITE_UDST);
1128 }
1129
1130 for (try = 0; try < 3; try++) {
1131 new_pba = msb_get_free_block(msb,
1132 msb_get_zone_from_lba(lba));
1133
1134 if (new_pba == MS_BLOCK_INVALID) {
1135 error = -EIO;
1136 goto out;
1137 }
1138
1139 dbg_verbose("block update: writing updated block to the pba %d",
1140 new_pba);
1141 error = msb_write_block(msb, new_pba, lba, sg, offset);
1142 if (error == -EBADMSG) {
1143 msb_mark_bad(msb, new_pba);
1144 continue;
1145 }
1146
1147 if (error)
1148 goto out;
1149
1150 dbg_verbose("block update: erasing the old block");
1151 msb_erase_block(msb, pba);
1152 msb->lba_to_pba_table[lba] = new_pba;
1153 return 0;
1154 }
1155out:
1156 if (error) {
1157 pr_err("block update error after %d tries, switching to r/o mode", try);
1158 msb->read_only = true;
1159 }
1160 return error;
1161}
1162
1163/* Converts endiannes in the boot block for easy use */
1164static void msb_fix_boot_page_endianness(struct ms_boot_page *p)
1165{
1166 p->header.block_id = be16_to_cpu(p->header.block_id);
1167 p->header.format_reserved = be16_to_cpu(p->header.format_reserved);
1168 p->entry.disabled_block.start_addr
1169 = be32_to_cpu(p->entry.disabled_block.start_addr);
1170 p->entry.disabled_block.data_size
1171 = be32_to_cpu(p->entry.disabled_block.data_size);
1172 p->entry.cis_idi.start_addr
1173 = be32_to_cpu(p->entry.cis_idi.start_addr);
1174 p->entry.cis_idi.data_size
1175 = be32_to_cpu(p->entry.cis_idi.data_size);
1176 p->attr.block_size = be16_to_cpu(p->attr.block_size);
1177 p->attr.number_of_blocks = be16_to_cpu(p->attr.number_of_blocks);
1178 p->attr.number_of_effective_blocks
1179 = be16_to_cpu(p->attr.number_of_effective_blocks);
1180 p->attr.page_size = be16_to_cpu(p->attr.page_size);
1181 p->attr.memory_manufacturer_code
1182 = be16_to_cpu(p->attr.memory_manufacturer_code);
1183 p->attr.memory_device_code = be16_to_cpu(p->attr.memory_device_code);
1184 p->attr.implemented_capacity
1185 = be16_to_cpu(p->attr.implemented_capacity);
1186 p->attr.controller_number = be16_to_cpu(p->attr.controller_number);
1187 p->attr.controller_function = be16_to_cpu(p->attr.controller_function);
1188}
1189
1190static int msb_read_boot_blocks(struct msb_data *msb)
1191{
1192 int pba = 0;
1193 struct scatterlist sg;
1194 struct ms_extra_data_register extra;
1195 struct ms_boot_page *page;
1196
1197 msb->boot_block_locations[0] = MS_BLOCK_INVALID;
1198 msb->boot_block_locations[1] = MS_BLOCK_INVALID;
1199 msb->boot_block_count = 0;
1200
1201 dbg_verbose("Start of a scan for the boot blocks");
1202
1203 if (!msb->boot_page) {
1204 page = kmalloc(sizeof(struct ms_boot_page)*2, GFP_KERNEL);
1205 if (!page)
1206 return -ENOMEM;
1207
1208 msb->boot_page = page;
1209 } else
1210 page = msb->boot_page;
1211
1212 msb->block_count = MS_BLOCK_MAX_BOOT_ADDR;
1213
1214 for (pba = 0; pba < MS_BLOCK_MAX_BOOT_ADDR; pba++) {
1215
1216 sg_init_one(&sg, page, sizeof(*page));
1217 if (msb_read_page(msb, pba, 0, &extra, &sg, 0)) {
1218 dbg("boot scan: can't read pba %d", pba);
1219 continue;
1220 }
1221
1222 if (extra.management_flag & MEMSTICK_MANAGEMENT_SYSFLG) {
1223 dbg("management flag doesn't indicate boot block %d",
1224 pba);
1225 continue;
1226 }
1227
1228 if (be16_to_cpu(page->header.block_id) != MS_BLOCK_BOOT_ID) {
1229 dbg("the pba at %d doesn' contain boot block ID", pba);
1230 continue;
1231 }
1232
1233 msb_fix_boot_page_endianness(page);
1234 msb->boot_block_locations[msb->boot_block_count] = pba;
1235
1236 page++;
1237 msb->boot_block_count++;
1238
1239 if (msb->boot_block_count == 2)
1240 break;
1241 }
1242
1243 if (!msb->boot_block_count) {
1244 pr_err("media doesn't contain master page, aborting");
1245 return -EIO;
1246 }
1247
1248 dbg_verbose("End of scan for boot blocks");
1249 return 0;
1250}
1251
1252static int msb_read_bad_block_table(struct msb_data *msb, int block_nr)
1253{
1254 struct ms_boot_page *boot_block;
1255 struct scatterlist sg;
1256 u16 *buffer = NULL;
1257 int offset = 0;
1258 int i, error = 0;
1259 int data_size, data_offset, page, page_offset, size_to_read;
1260 u16 pba;
1261
1262 BUG_ON(block_nr > 1);
1263 boot_block = &msb->boot_page[block_nr];
1264 pba = msb->boot_block_locations[block_nr];
1265
1266 if (msb->boot_block_locations[block_nr] == MS_BLOCK_INVALID)
1267 return -EINVAL;
1268
1269 data_size = boot_block->entry.disabled_block.data_size;
1270 data_offset = sizeof(struct ms_boot_page) +
1271 boot_block->entry.disabled_block.start_addr;
1272 if (!data_size)
1273 return 0;
1274
1275 page = data_offset / msb->page_size;
1276 page_offset = data_offset % msb->page_size;
1277 size_to_read =
1278 DIV_ROUND_UP(data_size + page_offset, msb->page_size) *
1279 msb->page_size;
1280
1281 dbg("reading bad block of boot block at pba %d, offset %d len %d",
1282 pba, data_offset, data_size);
1283
1284 buffer = kzalloc(size_to_read, GFP_KERNEL);
1285 if (!buffer)
1286 return -ENOMEM;
1287
1288 /* Read the buffer */
1289 sg_init_one(&sg, buffer, size_to_read);
1290
1291 while (offset < size_to_read) {
1292 error = msb_read_page(msb, pba, page, NULL, &sg, offset);
1293 if (error)
1294 goto out;
1295
1296 page++;
1297 offset += msb->page_size;
1298
1299 if (page == msb->pages_in_block) {
1300 pr_err(
1301 "bad block table extends beyond the boot block");
1302 break;
1303 }
1304 }
1305
1306 /* Process the bad block table */
1307 for (i = page_offset; i < data_size / sizeof(u16); i++) {
1308
1309 u16 bad_block = be16_to_cpu(buffer[i]);
1310
1311 if (bad_block >= msb->block_count) {
1312 dbg("bad block table contains invalid block %d",
1313 bad_block);
1314 continue;
1315 }
1316
1317 if (test_bit(bad_block, msb->used_blocks_bitmap)) {
1318 dbg("duplicate bad block %d in the table",
1319 bad_block);
1320 continue;
1321 }
1322
1323 dbg("block %d is marked as factory bad", bad_block);
1324 msb_mark_block_used(msb, bad_block);
1325 }
1326out:
1327 kfree(buffer);
1328 return error;
1329}
1330
1331static int msb_ftl_initialize(struct msb_data *msb)
1332{
1333 int i;
1334
1335 if (msb->ftl_initialized)
1336 return 0;
1337
1338 msb->zone_count = msb->block_count / MS_BLOCKS_IN_ZONE;
1339 msb->logical_block_count = msb->zone_count * 496 - 2;
1340
1341 msb->used_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
1342 msb->erased_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
1343 msb->lba_to_pba_table =
1344 kmalloc(msb->logical_block_count * sizeof(u16), GFP_KERNEL);
1345
1346 if (!msb->used_blocks_bitmap || !msb->lba_to_pba_table ||
1347 !msb->erased_blocks_bitmap) {
1348 kfree(msb->used_blocks_bitmap);
1349 kfree(msb->lba_to_pba_table);
1350 kfree(msb->erased_blocks_bitmap);
1351 return -ENOMEM;
1352 }
1353
1354 for (i = 0; i < msb->zone_count; i++)
1355 msb->free_block_count[i] = MS_BLOCKS_IN_ZONE;
1356
1357 memset(msb->lba_to_pba_table, MS_BLOCK_INVALID,
1358 msb->logical_block_count * sizeof(u16));
1359
1360 dbg("initial FTL tables created. Zone count = %d, Logical block count = %d",
1361 msb->zone_count, msb->logical_block_count);
1362
1363 msb->ftl_initialized = true;
1364 return 0;
1365}
1366
1367static int msb_ftl_scan(struct msb_data *msb)
1368{
1369 u16 pba, lba, other_block;
1370 u8 overwrite_flag, management_flag, other_overwrite_flag;
1371 int error;
1372 struct ms_extra_data_register extra;
1373 u8 *overwrite_flags = kzalloc(msb->block_count, GFP_KERNEL);
1374
1375 if (!overwrite_flags)
1376 return -ENOMEM;
1377
1378 dbg("Start of media scanning");
1379 for (pba = 0; pba < msb->block_count; pba++) {
1380
1381 if (pba == msb->boot_block_locations[0] ||
1382 pba == msb->boot_block_locations[1]) {
1383 dbg_verbose("pba %05d -> [boot block]", pba);
1384 msb_mark_block_used(msb, pba);
1385 continue;
1386 }
1387
1388 if (test_bit(pba, msb->used_blocks_bitmap)) {
1389 dbg_verbose("pba %05d -> [factory bad]", pba);
1390 continue;
1391 }
1392
1393 memset(&extra, 0, sizeof(extra));
1394 error = msb_read_oob(msb, pba, 0, &extra);
1395
1396 /* can't trust the page if we can't read the oob */
1397 if (error == -EBADMSG) {
1398 pr_notice(
1399 "oob of pba %d damaged, will try to erase it", pba);
1400 msb_mark_block_used(msb, pba);
1401 msb_erase_block(msb, pba);
1402 continue;
1403 } else if (error) {
1404 pr_err("unknown error %d on read of oob of pba %d - aborting",
1405 error, pba);
1406
1407 kfree(overwrite_flags);
1408 return error;
1409 }
1410
1411 lba = be16_to_cpu(extra.logical_address);
1412 management_flag = extra.management_flag;
1413 overwrite_flag = extra.overwrite_flag;
1414 overwrite_flags[pba] = overwrite_flag;
1415
1416 /* Skip bad blocks */
1417 if (!(overwrite_flag & MEMSTICK_OVERWRITE_BKST)) {
1418 dbg("pba %05d -> [BAD]", pba);
1419 msb_mark_block_used(msb, pba);
1420 continue;
1421 }
1422
1423 /* Skip system/drm blocks */
1424 if ((management_flag & MEMSTICK_MANAGEMENT_FLAG_NORMAL) !=
1425 MEMSTICK_MANAGEMENT_FLAG_NORMAL) {
1426 dbg("pba %05d -> [reserved management flag %02x]",
1427 pba, management_flag);
1428 msb_mark_block_used(msb, pba);
1429 continue;
1430 }
1431
1432 /* Erase temporary tables */
1433 if (!(management_flag & MEMSTICK_MANAGEMENT_ATFLG)) {
1434 dbg("pba %05d -> [temp table] - will erase", pba);
1435
1436 msb_mark_block_used(msb, pba);
1437 msb_erase_block(msb, pba);
1438 continue;
1439 }
1440
1441 if (lba == MS_BLOCK_INVALID) {
1442 dbg_verbose("pba %05d -> [free]", pba);
1443 continue;
1444 }
1445
1446 msb_mark_block_used(msb, pba);
1447
1448 /* Block has LBA not according to zoning*/
1449 if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
1450 pr_notice("pba %05d -> [bad lba %05d] - will erase",
1451 pba, lba);
1452 msb_erase_block(msb, pba);
1453 continue;
1454 }
1455
1456 /* No collisions - great */
1457 if (msb->lba_to_pba_table[lba] == MS_BLOCK_INVALID) {
1458 dbg_verbose("pba %05d -> [lba %05d]", pba, lba);
1459 msb->lba_to_pba_table[lba] = pba;
1460 continue;
1461 }
1462
1463 other_block = msb->lba_to_pba_table[lba];
1464 other_overwrite_flag = overwrite_flags[other_block];
1465
1466 pr_notice("Collision between pba %d and pba %d",
1467 pba, other_block);
1468
1469 if (!(overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
1470 pr_notice("pba %d is marked as stable, use it", pba);
1471 msb_erase_block(msb, other_block);
1472 msb->lba_to_pba_table[lba] = pba;
1473 continue;
1474 }
1475
1476 if (!(other_overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
1477 pr_notice("pba %d is marked as stable, use it",
1478 other_block);
1479 msb_erase_block(msb, pba);
1480 continue;
1481 }
1482
1483 pr_notice("collision between blocks %d and %d, without stable flag set on both, erasing pba %d",
1484 pba, other_block, other_block);
1485
1486 msb_erase_block(msb, other_block);
1487 msb->lba_to_pba_table[lba] = pba;
1488 }
1489
1490 dbg("End of media scanning");
1491 kfree(overwrite_flags);
1492 return 0;
1493}
1494
1495static void msb_cache_flush_timer(unsigned long data)
1496{
1497 struct msb_data *msb = (struct msb_data *)data;
1498 msb->need_flush_cache = true;
1499 queue_work(msb->io_queue, &msb->io_work);
1500}
1501
1502
1503static void msb_cache_discard(struct msb_data *msb)
1504{
1505 if (msb->cache_block_lba == MS_BLOCK_INVALID)
1506 return;
1507
1508 del_timer_sync(&msb->cache_flush_timer);
1509
1510 dbg_verbose("Discarding the write cache");
1511 msb->cache_block_lba = MS_BLOCK_INVALID;
1512 bitmap_zero(&msb->valid_cache_bitmap, msb->pages_in_block);
1513}
1514
1515static int msb_cache_init(struct msb_data *msb)
1516{
1517 setup_timer(&msb->cache_flush_timer, msb_cache_flush_timer,
1518 (unsigned long)msb);
1519
1520 if (!msb->cache)
1521 msb->cache = kzalloc(msb->block_size, GFP_KERNEL);
1522 if (!msb->cache)
1523 return -ENOMEM;
1524
1525 msb_cache_discard(msb);
1526 return 0;
1527}
1528
1529static int msb_cache_flush(struct msb_data *msb)
1530{
1531 struct scatterlist sg;
1532 struct ms_extra_data_register extra;
1533 int page, offset, error;
1534 u16 pba, lba;
1535
1536 if (msb->read_only)
1537 return -EROFS;
1538
1539 if (msb->cache_block_lba == MS_BLOCK_INVALID)
1540 return 0;
1541
1542 lba = msb->cache_block_lba;
1543 pba = msb->lba_to_pba_table[lba];
1544
1545 dbg_verbose("Flushing the write cache of pba %d (LBA %d)",
1546 pba, msb->cache_block_lba);
1547
1548 sg_init_one(&sg, msb->cache , msb->block_size);
1549
1550 /* Read all missing pages in cache */
1551 for (page = 0; page < msb->pages_in_block; page++) {
1552
1553 if (test_bit(page, &msb->valid_cache_bitmap))
1554 continue;
1555
1556 offset = page * msb->page_size;
1557
1558 dbg_verbose("reading non-present sector %d of cache block %d",
1559 page, lba);
1560 error = msb_read_page(msb, pba, page, &extra, &sg, offset);
1561
1562 /* Bad pages are copied with 00 page status */
1563 if (error == -EBADMSG) {
1564 pr_err("read error on sector %d, contents probably damaged", page);
1565 continue;
1566 }
1567
1568 if (error)
1569 return error;
1570
1571 if ((extra.overwrite_flag & MEMSTICK_OV_PG_NORMAL) !=
1572 MEMSTICK_OV_PG_NORMAL) {
1573 dbg("page %d is marked as bad", page);
1574 continue;
1575 }
1576
1577 set_bit(page, &msb->valid_cache_bitmap);
1578 }
1579
1580 /* Write the cache now */
1581 error = msb_update_block(msb, msb->cache_block_lba, &sg, 0);
1582 pba = msb->lba_to_pba_table[msb->cache_block_lba];
1583
1584 /* Mark invalid pages */
1585 if (!error) {
1586 for (page = 0; page < msb->pages_in_block; page++) {
1587
1588 if (test_bit(page, &msb->valid_cache_bitmap))
1589 continue;
1590
1591 dbg("marking page %d as containing damaged data",
1592 page);
1593 msb_set_overwrite_flag(msb,
1594 pba , page, 0xFF & ~MEMSTICK_OV_PG_NORMAL);
1595 }
1596 }
1597
1598 msb_cache_discard(msb);
1599 return error;
1600}
1601
1602static int msb_cache_write(struct msb_data *msb, int lba,
1603 int page, bool add_to_cache_only, struct scatterlist *sg, int offset)
1604{
1605 int error;
1606 struct scatterlist sg_tmp[10];
1607
1608 if (msb->read_only)
1609 return -EROFS;
1610
1611 if (msb->cache_block_lba == MS_BLOCK_INVALID ||
1612 lba != msb->cache_block_lba)
1613 if (add_to_cache_only)
1614 return 0;
1615
1616 /* If we need to write different block */
1617 if (msb->cache_block_lba != MS_BLOCK_INVALID &&
1618 lba != msb->cache_block_lba) {
1619 dbg_verbose("first flush the cache");
1620 error = msb_cache_flush(msb);
1621 if (error)
1622 return error;
1623 }
1624
1625 if (msb->cache_block_lba == MS_BLOCK_INVALID) {
1626 msb->cache_block_lba = lba;
1627 mod_timer(&msb->cache_flush_timer,
1628 jiffies + msecs_to_jiffies(cache_flush_timeout));
1629 }
1630
1631 dbg_verbose("Write of LBA %d page %d to cache ", lba, page);
1632
1633 sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
1634 msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp), offset, msb->page_size);
1635
1636 sg_copy_to_buffer(sg_tmp, sg_nents(sg_tmp),
1637 msb->cache + page * msb->page_size, msb->page_size);
1638
1639 set_bit(page, &msb->valid_cache_bitmap);
1640 return 0;
1641}
1642
1643static int msb_cache_read(struct msb_data *msb, int lba,
1644 int page, struct scatterlist *sg, int offset)
1645{
1646 int pba = msb->lba_to_pba_table[lba];
1647 struct scatterlist sg_tmp[10];
1648 int error = 0;
1649
1650 if (lba == msb->cache_block_lba &&
1651 test_bit(page, &msb->valid_cache_bitmap)) {
1652
1653 dbg_verbose("Read of LBA %d (pba %d) sector %d from cache",
1654 lba, pba, page);
1655
1656 sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
1657 msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp),
1658 offset, msb->page_size);
1659 sg_copy_from_buffer(sg_tmp, sg_nents(sg_tmp),
1660 msb->cache + msb->page_size * page,
1661 msb->page_size);
1662 } else {
1663 dbg_verbose("Read of LBA %d (pba %d) sector %d from device",
1664 lba, pba, page);
1665
1666 error = msb_read_page(msb, pba, page, NULL, sg, offset);
1667 if (error)
1668 return error;
1669
1670 msb_cache_write(msb, lba, page, true, sg, offset);
1671 }
1672 return error;
1673}
1674
1675/* Emulated geometry table
1676 * This table content isn't that importaint,
1677 * One could put here different values, providing that they still
1678 * cover whole disk.
1679 * 64 MB entry is what windows reports for my 64M memstick */
1680
1681static const struct chs_entry chs_table[] = {
1682/* size sectors cylynders heads */
1683 { 4, 16, 247, 2 },
1684 { 8, 16, 495, 2 },
1685 { 16, 16, 495, 4 },
1686 { 32, 16, 991, 4 },
1687 { 64, 16, 991, 8 },
1688 {128, 16, 991, 16 },
1689 { 0 }
1690};
1691
1692/* Load information about the card */
1693static int msb_init_card(struct memstick_dev *card)
1694{
1695 struct msb_data *msb = memstick_get_drvdata(card);
1696 struct memstick_host *host = card->host;
1697 struct ms_boot_page *boot_block;
1698 int error = 0, i, raw_size_in_megs;
1699
1700 msb->caps = 0;
1701
1702 if (card->id.class >= MEMSTICK_CLASS_ROM &&
1703 card->id.class <= MEMSTICK_CLASS_ROM)
1704 msb->read_only = true;
1705
1706 msb->state = -1;
1707 error = msb_reset(msb, false);
1708 if (error)
1709 return error;
1710
1711 /* Due to a bug in Jmicron driver written by Alex Dubov,
1712 its serial mode barely works,
1713 so we switch to parallel mode right away */
1714 if (host->caps & MEMSTICK_CAP_PAR4)
1715 msb_switch_to_parallel(msb);
1716
1717 msb->page_size = sizeof(struct ms_boot_page);
1718
1719 /* Read the boot page */
1720 error = msb_read_boot_blocks(msb);
1721 if (error)
1722 return -EIO;
1723
1724 boot_block = &msb->boot_page[0];
1725
1726 /* Save intersting attributes from boot page */
1727 msb->block_count = boot_block->attr.number_of_blocks;
1728 msb->page_size = boot_block->attr.page_size;
1729
1730 msb->pages_in_block = boot_block->attr.block_size * 2;
1731 msb->block_size = msb->page_size * msb->pages_in_block;
1732
1733 if (msb->page_size > PAGE_SIZE) {
1734 /* this isn't supported by linux at all, anyway*/
1735 dbg("device page %d size isn't supported", msb->page_size);
1736 return -EINVAL;
1737 }
1738
1739 msb->block_buffer = kzalloc(msb->block_size, GFP_KERNEL);
1740 if (!msb->block_buffer)
1741 return -ENOMEM;
1742
1743 raw_size_in_megs = (msb->block_size * msb->block_count) >> 20;
1744
1745 for (i = 0; chs_table[i].size; i++) {
1746
1747 if (chs_table[i].size != raw_size_in_megs)
1748 continue;
1749
1750 msb->geometry.cylinders = chs_table[i].cyl;
1751 msb->geometry.heads = chs_table[i].head;
1752 msb->geometry.sectors = chs_table[i].sec;
1753 break;
1754 }

	if (boot_block->attr.transfer_supporting == 1)
		msb->caps |= MEMSTICK_CAP_PAR4;

	if (boot_block->attr.device_type & 0x03)
		msb->read_only = true;

	dbg("Total block count = %d", msb->block_count);
	dbg("Each block consists of %d pages", msb->pages_in_block);
	dbg("Page size = %d bytes", msb->page_size);
	dbg("Parallel mode supported: %d", !!(msb->caps & MEMSTICK_CAP_PAR4));
	dbg("Read only: %d", msb->read_only);

#if 0
	/* Now we can switch the interface */
	if (host->caps & msb->caps & MEMSTICK_CAP_PAR4)
		msb_switch_to_parallel(msb);
#endif

	error = msb_cache_init(msb);
	if (error)
		return error;

	error = msb_ftl_initialize(msb);
	if (error)
		return error;

	/* Read the bad block table */
	error = msb_read_bad_block_table(msb, 0);

	if (error && error != -ENOMEM) {
		dbg("failed to read bad block table from primary boot block, trying from backup");
		error = msb_read_bad_block_table(msb, 1);
	}

	if (error)
		return error;

	/* *drum roll* Scan the media */
	error = msb_ftl_scan(msb);
	if (error) {
		pr_err("Scan of media failed\n");
		return error;
	}

	return 0;
}

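/*
 * Write 'len' bytes from 'sg' to the card, starting at (lba, page).
 * Whole-block-aligned spans bypass the cache and go through
 * msb_update_block(); anything smaller is staged in the write cache
 * one page at a time.
 */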
static int msb_do_write_request(struct msb_data *msb, int lba,
	int page, struct scatterlist *sg, size_t len, int *successfully_written)
{
	int error = 0;
	off_t offset = 0;
	*successfully_written = 0;

	while (offset < len) {
		if (page == 0 && len - offset >= msb->block_size) {

			if (msb->cache_block_lba == lba)
				msb_cache_discard(msb);

			dbg_verbose("Writing whole lba %d", lba);
			error = msb_update_block(msb, lba, sg, offset);
			if (error)
				return error;

			offset += msb->block_size;
			*successfully_written += msb->block_size;
			lba++;
			continue;
		}

		error = msb_cache_write(msb, lba, page, false, sg, offset);
		if (error)
			return error;

		offset += msb->page_size;
		*successfully_written += msb->page_size;

		page++;
		if (page == msb->pages_in_block) {
			page = 0;
			lba++;
		}
	}
	return 0;
}

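/*
 * Read 'len' bytes into 'sg', starting at (lba, page), one page at a
 * time through the read cache.
 */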
static int msb_do_read_request(struct msb_data *msb, int lba,
	int page, struct scatterlist *sg, int len, int *successfully_read)
{
	int error = 0;
	int offset = 0;
	*successfully_read = 0;

	while (offset < len) {

		error = msb_cache_read(msb, lba, page, sg, offset);
		if (error)
			return error;

		offset += msb->page_size;
		*successfully_read += msb->page_size;

		page++;
		if (page == msb->pages_in_block) {
			page = 0;
			lba++;
		}
	}
	return 0;
}

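/*
 * The IO worker: loops until the request queue is drained, flushing the
 * cache when asked to, translating each request's start sector into
 * (lba, page) and handing it to the read/write helpers above. Requests
 * are completed (fully, or one failed sector at a time) under the queue
 * lock.
 */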
static void msb_io_work(struct work_struct *work)
{
	struct msb_data *msb = container_of(work, struct msb_data, io_work);
	int page, error, len;
	sector_t lba;
	unsigned long flags;
	struct scatterlist *sg = msb->prealloc_sg;

	dbg_verbose("IO: work started");

	while (1) {
		spin_lock_irqsave(&msb->q_lock, flags);

		if (msb->need_flush_cache) {
			msb->need_flush_cache = false;
			spin_unlock_irqrestore(&msb->q_lock, flags);
			msb_cache_flush(msb);
			continue;
		}

		if (!msb->req) {
			msb->req = blk_fetch_request(msb->queue);
			if (!msb->req) {
				dbg_verbose("IO: no more requests exiting");
				spin_unlock_irqrestore(&msb->q_lock, flags);
				return;
			}
		}

		spin_unlock_irqrestore(&msb->q_lock, flags);

		/* If card was removed meanwhile */
		if (!msb->req)
			return;

		/* process the request */
		dbg_verbose("IO: processing new request");
		blk_rq_map_sg(msb->queue, msb->req, sg);

		lba = blk_rq_pos(msb->req);

		sector_div(lba, msb->page_size / 512);
		page = sector_div(lba, msb->pages_in_block);

		if (rq_data_dir(msb->req) == READ)
			error = msb_do_read_request(msb, lba, page, sg,
				blk_rq_bytes(msb->req), &len);
		else
			error = msb_do_write_request(msb, lba, page, sg,
				blk_rq_bytes(msb->req), &len);

		spin_lock_irqsave(&msb->q_lock, flags);

		if (len)
			if (!__blk_end_request(msb->req, 0, len))
				msb->req = NULL;

		if (error && msb->req) {
			dbg_verbose("IO: ending one sector of the request with error");
			if (!__blk_end_request(msb->req, error, msb->page_size))
				msb->req = NULL;
		}

		if (msb->req)
			dbg_verbose("IO: request still pending");

		spin_unlock_irqrestore(&msb->q_lock, flags);
	}
}

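/*
 * Illustrative sketch (not used by the driver; the helper name is made
 * up): the sector-to-(lba, page) translation done at the top of
 * msb_io_work(), spelled out. A request's position arrives in 512-byte
 * sectors; dividing by the sectors per flash page yields a flash page
 * index, and dividing that by pages_in_block splits it into a logical
 * block and a page within it. E.g. with page_size = 2048 and
 * pages_in_block = 32, sector 200 is page index 50, i.e. lba 1, page 18.
 */
static inline void __maybe_unused msb_sector_to_lba_page(
		const struct msb_data *msb, sector_t sector,
		sector_t *lba, int *page)
{
	sector_t pages = sector;

	sector_div(pages, msb->page_size / 512);	/* sectors -> pages */
	*page = sector_div(pages, msb->pages_in_block);	/* page in block */
	*lba = pages;					/* remaining: lba */
}
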
static DEFINE_IDR(msb_disk_idr); /* set of used disk numbers */
static DEFINE_MUTEX(msb_disk_lock); /* protects against races in open/release */

static int msb_bd_open(struct block_device *bdev, fmode_t mode)
{
	struct gendisk *disk = bdev->bd_disk;
	struct msb_data *msb = disk->private_data;

	dbg_verbose("block device open");

	mutex_lock(&msb_disk_lock);

	if (msb && msb->card)
		msb->usage_count++;

	mutex_unlock(&msb_disk_lock);
	return 0;
}

static void msb_data_clear(struct msb_data *msb)
{
	kfree(msb->boot_page);
	kfree(msb->used_blocks_bitmap);
	kfree(msb->lba_to_pba_table);
	kfree(msb->cache);
	kfree(msb->block_buffer);	/* allocated in msb_init_card() */
	msb->card = NULL;
}

static int msb_disk_release(struct gendisk *disk)
{
	struct msb_data *msb = disk->private_data;

	dbg_verbose("block device release");
	mutex_lock(&msb_disk_lock);

	if (msb) {
		if (msb->usage_count)
			msb->usage_count--;

		if (!msb->usage_count) {
			disk->private_data = NULL;
			idr_remove(&msb_disk_idr, msb->disk_id);
			put_disk(disk);
			kfree(msb);
		}
	}
	mutex_unlock(&msb_disk_lock);
	return 0;
}

static void msb_bd_release(struct gendisk *disk, fmode_t mode)
{
	msb_disk_release(disk);
}

static int msb_bd_getgeo(struct block_device *bdev,
				struct hd_geometry *geo)
{
	struct msb_data *msb = bdev->bd_disk->private_data;
	*geo = msb->geometry;
	return 0;
}

static int msb_prepare_req(struct request_queue *q, struct request *req)
{
	if (req->cmd_type != REQ_TYPE_FS) {
		blk_dump_rq_flags(req, "MS unsupported request");
		return BLKPREP_KILL;
	}
	req->rq_flags |= RQF_DONTPREP;
	return BLKPREP_OK;
}

static void msb_submit_req(struct request_queue *q)
{
	struct memstick_dev *card = q->queuedata;
	struct msb_data *msb = memstick_get_drvdata(card);
	struct request *req = NULL;

	dbg_verbose("Submit request");

	if (msb->card_dead) {
		dbg("Refusing requests on removed card");

		WARN_ON(!msb->io_queue_stopped);

		while ((req = blk_fetch_request(q)) != NULL)
			__blk_end_request_all(req, -ENODEV);
		return;
	}

	if (msb->req)
		return;

	if (!msb->io_queue_stopped)
		queue_work(msb->io_queue, &msb->io_work);
}

static int msb_check_card(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);

	return (msb->card_dead == 0);
}

static void msb_stop(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	unsigned long flags;

	dbg("Stopping all msblock IO");

	spin_lock_irqsave(&msb->q_lock, flags);
	blk_stop_queue(msb->queue);
	msb->io_queue_stopped = true;
	spin_unlock_irqrestore(&msb->q_lock, flags);

	del_timer_sync(&msb->cache_flush_timer);
	flush_workqueue(msb->io_queue);

	if (msb->req) {
		spin_lock_irqsave(&msb->q_lock, flags);
		blk_requeue_request(msb->queue, msb->req);
		msb->req = NULL;
		spin_unlock_irqrestore(&msb->q_lock, flags);
	}
}

static void msb_start(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	unsigned long flags;

	dbg("Resuming IO from msblock");

	msb_invalidate_reg_window(msb);

	spin_lock_irqsave(&msb->q_lock, flags);
	if (!msb->io_queue_stopped || msb->card_dead) {
		spin_unlock_irqrestore(&msb->q_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&msb->q_lock, flags);

	/* Kick cache flush anyway, it's harmless */
	msb->need_flush_cache = true;
	msb->io_queue_stopped = false;

	spin_lock_irqsave(&msb->q_lock, flags);
	blk_start_queue(msb->queue);
	spin_unlock_irqrestore(&msb->q_lock, flags);

	queue_work(msb->io_queue, &msb->io_work);
}

static const struct block_device_operations msb_bdops = {
	.open    = msb_bd_open,
	.release = msb_bd_release,
	.getgeo  = msb_bd_getgeo,
	.owner   = THIS_MODULE
};

/* Registers the block device */
static int msb_init_disk(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_host *host = card->host;
	int rc;
	u64 limit = BLK_BOUNCE_HIGH;
	unsigned long capacity;

	if (host->dev.dma_mask && *(host->dev.dma_mask))
		limit = *(host->dev.dma_mask);

	mutex_lock(&msb_disk_lock);
	msb->disk_id = idr_alloc(&msb_disk_idr, card, 0, 256, GFP_KERNEL);
	mutex_unlock(&msb_disk_lock);

	if (msb->disk_id < 0)
		return msb->disk_id;

	msb->disk = alloc_disk(0);
	if (!msb->disk) {
		rc = -ENOMEM;
		goto out_release_id;
	}

	msb->queue = blk_init_queue(msb_submit_req, &msb->q_lock);
	if (!msb->queue) {
		rc = -ENOMEM;
		goto out_put_disk;
	}

	msb->queue->queuedata = card;
	blk_queue_prep_rq(msb->queue, msb_prepare_req);

	blk_queue_bounce_limit(msb->queue, limit);
	blk_queue_max_hw_sectors(msb->queue, MS_BLOCK_MAX_PAGES);
	blk_queue_max_segments(msb->queue, MS_BLOCK_MAX_SEGS);
	blk_queue_max_segment_size(msb->queue,
				   MS_BLOCK_MAX_PAGES * msb->page_size);
	blk_queue_logical_block_size(msb->queue, msb->page_size);

	sprintf(msb->disk->disk_name, "msblk%d", msb->disk_id);
	msb->disk->fops = &msb_bdops;
	msb->disk->private_data = msb;
	msb->disk->queue = msb->queue;
	msb->disk->flags |= GENHD_FL_EXT_DEVT;

	capacity = msb->pages_in_block * msb->logical_block_count;
	capacity *= (msb->page_size / 512);
	set_capacity(msb->disk, capacity);
	dbg("Set total disk size to %lu sectors", capacity);

	msb->usage_count = 1;
	msb->io_queue = alloc_ordered_workqueue("ms_block", WQ_MEM_RECLAIM);
	if (!msb->io_queue) {
		rc = -ENOMEM;
		goto out_cleanup_queue;
	}

	INIT_WORK(&msb->io_work, msb_io_work);
	sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);

	if (msb->read_only)
		set_disk_ro(msb->disk, 1);

	msb_start(card);
	device_add_disk(&card->dev, msb->disk);
	dbg("Disk added");
	return 0;

out_cleanup_queue:
	blk_cleanup_queue(msb->queue);
	msb->queue = NULL;
out_put_disk:
	put_disk(msb->disk);
out_release_id:
	mutex_lock(&msb_disk_lock);
	idr_remove(&msb_disk_idr, msb->disk_id);
	mutex_unlock(&msb_disk_lock);
	return rc;
}

static int msb_probe(struct memstick_dev *card)
{
	struct msb_data *msb;
	int rc = 0;

	msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
	if (!msb)
		return -ENOMEM;
	memstick_set_drvdata(card, msb);
	msb->card = card;
	spin_lock_init(&msb->q_lock);

	rc = msb_init_card(card);
	if (rc)
		goto out_free;

	rc = msb_init_disk(card);
	if (!rc) {
		card->check = msb_check_card;
		card->stop = msb_stop;
		card->start = msb_start;
		return 0;
	}
out_free:
	memstick_set_drvdata(card, NULL);
	msb_data_clear(msb);
	kfree(msb);
	return rc;
}

static void msb_remove(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	unsigned long flags;

	if (!msb->io_queue_stopped)
		msb_stop(card);

	dbg("Removing the disk device");

	/* Take care of unhandled + new requests from now on */
	spin_lock_irqsave(&msb->q_lock, flags);
	msb->card_dead = true;
	blk_start_queue(msb->queue);
	spin_unlock_irqrestore(&msb->q_lock, flags);

	/* Remove the disk */
	del_gendisk(msb->disk);
	blk_cleanup_queue(msb->queue);
	msb->queue = NULL;

	mutex_lock(&msb_disk_lock);
	msb_data_clear(msb);
	mutex_unlock(&msb_disk_lock);

	msb_disk_release(msb->disk);
	memstick_set_drvdata(card, NULL);
}

#ifdef CONFIG_PM

static int msb_suspend(struct memstick_dev *card, pm_message_t state)
{
	msb_stop(card);
	return 0;
}

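/*
 * On resume we cannot assume the same card is still inserted. Reinit a
 * throwaway msb_data for whatever card is present now, and compare its
 * boot page, geometry, LBA->PBA table and used-block bitmap against the
 * saved state; any mismatch means the card was removed or replaced, and
 * the old device is marked dead.
 */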
static int msb_resume(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct msb_data *new_msb = NULL;
	bool card_dead = true;

#ifndef CONFIG_MEMSTICK_UNSAFE_RESUME
	msb->card_dead = true;
	return 0;
#endif
	mutex_lock(&card->host->lock);

	new_msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
	if (!new_msb)
		goto out;

	new_msb->card = card;
	memstick_set_drvdata(card, new_msb);
	spin_lock_init(&new_msb->q_lock);
	sg_init_table(new_msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);

	if (msb_init_card(card))
		goto out;

	if (msb->block_size != new_msb->block_size)
		goto out;

	if (memcmp(msb->boot_page, new_msb->boot_page,
					sizeof(struct ms_boot_page)))
		goto out;

	if (msb->logical_block_count != new_msb->logical_block_count ||
		memcmp(msb->lba_to_pba_table, new_msb->lba_to_pba_table,
						msb->logical_block_count))
		goto out;

	if (msb->block_count != new_msb->block_count ||
		memcmp(msb->used_blocks_bitmap, new_msb->used_blocks_bitmap,
							msb->block_count / 8))
		goto out;

	card_dead = false;
out:
	if (card_dead)
		dbg("Card was removed/replaced during suspend");

	msb->card_dead = card_dead;
	memstick_set_drvdata(card, msb);

	if (new_msb) {
		msb_data_clear(new_msb);
		kfree(new_msb);
	}

	msb_start(card);
	mutex_unlock(&card->host->lock);
	return 0;
}
#else

#define msb_suspend NULL
#define msb_resume NULL

#endif /* CONFIG_PM */

static struct memstick_device_id msb_id_tbl[] = {
	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
	 MEMSTICK_CLASS_FLASH},

	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
	 MEMSTICK_CLASS_ROM},

	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
	 MEMSTICK_CLASS_RO},

	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
	 MEMSTICK_CLASS_WP},

	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_DUO, MEMSTICK_CATEGORY_STORAGE_DUO,
	 MEMSTICK_CLASS_DUO},
	{}
};
MODULE_DEVICE_TABLE(memstick, msb_id_tbl);

static struct memstick_driver msb_driver = {
	.driver = {
		.name  = DRIVER_NAME,
		.owner = THIS_MODULE
	},
	.id_table = msb_id_tbl,
	.probe    = msb_probe,
	.remove   = msb_remove,
	.suspend  = msb_suspend,
	.resume   = msb_resume
};

static int __init msb_init(void)
{
	int rc = memstick_register_driver(&msb_driver);

	if (rc)
		pr_err("failed to register memstick driver (error %d)\n", rc);

	return rc;
}

static void __exit msb_exit(void)
{
	memstick_unregister_driver(&msb_driver);
	idr_destroy(&msb_disk_idr);
}

module_init(msb_init);
module_exit(msb_exit);

module_param(cache_flush_timeout, int, S_IRUGO);
MODULE_PARM_DESC(cache_flush_timeout,
		 "Cache flush timeout in msec (1000 default)");
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level (0-2)");

module_param(verify_writes, bool, S_IRUGO);
MODULE_PARM_DESC(verify_writes, "Read back and check all data that is written");
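
/*
 * Example (a hypothetical invocation, assuming the module is built as
 * ms_block.ko): load with verbose debugging, a 500 ms cache flush
 * timeout and write verification enabled:
 *
 *   modprobe ms_block debug=2 cache_flush_timeout=500 verify_writes=1
 */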

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Maxim Levitsky");
MODULE_DESCRIPTION("Sony MemoryStick block device driver");