1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * ms_block.c - Sony MemoryStick (legacy) storage support
4
5 * Copyright (C) 2013 Maxim Levitsky <maximlevitsky@gmail.com>
6 *
7 * Minor portions of the driver were copied from mspro_block.c which is
8 * Copyright (C) 2007 Alex Dubov <oakad@yahoo.com>
9 */
10#define DRIVER_NAME "ms_block"
11#define pr_fmt(fmt) DRIVER_NAME ": " fmt
12
13#include <linux/module.h>
14#include <linux/blk-mq.h>
15#include <linux/memstick.h>
16#include <linux/idr.h>
17#include <linux/hdreg.h>
18#include <linux/delay.h>
19#include <linux/slab.h>
20#include <linux/random.h>
21#include <linux/bitmap.h>
22#include <linux/scatterlist.h>
23#include <linux/jiffies.h>
24#include <linux/workqueue.h>
25#include <linux/mutex.h>
26#include "ms_block.h"
27
28static int debug;
29static int cache_flush_timeout = 1000;
30static bool verify_writes;
31
/*
 * Copies section of 'sg_from' starting from offset 'offset' and with length
 * 'len' to another scatterlist of to_nents entries.
 * Returns the number of bytes actually described by 'sg_to' (0 if 'offset'
 * lies beyond the end of 'sg_from').
 */
static size_t msb_sg_copy(struct scatterlist *sg_from,
	struct scatterlist *sg_to, int to_nents, size_t offset, size_t len)
{
	size_t copied = 0;

	/* Skip whole source entries that lie entirely before 'offset',
	 * then emit the first (possibly partial) chunk.
	 */
	while (offset > 0) {
		if (offset >= sg_from->length) {
			if (sg_is_last(sg_from))
				return 0;

			offset -= sg_from->length;
			sg_from = sg_next(sg_from);
			continue;
		}

		/* First chunk starts 'offset' bytes into this entry */
		copied = min(len, sg_from->length - offset);
		sg_set_page(sg_to, sg_page(sg_from),
			copied, sg_from->offset + offset);

		len -= copied;
		offset = 0;

		if (sg_is_last(sg_from) || !len)
			goto out;

		sg_to = sg_next(sg_to);
		to_nents--;
		sg_from = sg_next(sg_from);
	}

	/* Copy whole entries while they fit within the remaining 'len' */
	while (len > sg_from->length && to_nents--) {
		len -= sg_from->length;
		copied += sg_from->length;

		sg_set_page(sg_to, sg_page(sg_from),
			sg_from->length, sg_from->offset);

		if (sg_is_last(sg_from) || !len)
			goto out;

		sg_from = sg_next(sg_from);
		sg_to = sg_next(sg_to);
	}

	/* Trailing partial entry, if any room is left in 'sg_to' */
	if (len && to_nents) {
		sg_set_page(sg_to, sg_page(sg_from), len, sg_from->offset);
		copied += len;
	}
out:
	sg_mark_end(sg_to);
	return copied;
}
88
/*
 * Compares section of 'sg' starting from offset 'offset' and with length 'len'
 * to linear buffer of length 'len' at address 'buffer'
 * Returns 0 if equal and -1 otherwise
 */
static int msb_sg_compare_to_buffer(struct scatterlist *sg,
					size_t offset, u8 *buffer, size_t len)
{
	int retval = 0, cmplen;
	struct sg_mapping_iter miter;

	sg_miter_start(&miter, sg, sg_nents(sg),
					SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	while (sg_miter_next(&miter) && len > 0) {
		/* Skip mapped chunks that lie wholly before 'offset' */
		if (offset >= miter.length) {
			offset -= miter.length;
			continue;
		}

		cmplen = min(miter.length - offset, len);
		retval = memcmp(miter.addr + offset, buffer, cmplen) ? -1 : 0;
		if (retval)
			break;

		buffer += cmplen;
		len -= cmplen;
		offset = 0;
	}

	/* Scatterlist ran out before all 'len' bytes were compared */
	if (!retval && len)
		retval = -1;

	sg_miter_stop(&miter);
	return retval;
}
125
126
/* Get zone at which block with logical address 'lba' lives
 * Flash is broken into zones.
 * Each zone consists of 512 eraseblocks, out of which in first
 * zone 494 are used and 496 are for all following zones.
 * Therefore zone #0 hosts blocks 0-493, zone #1 blocks 494-988, etc...
 */
static int msb_get_zone_from_lba(int lba)
{
	int past_first_zone = lba - 494;

	/* zone 0 is smaller (494 blocks); later zones hold 496 each */
	return (past_first_zone < 0) ? 0 : past_first_zone / 496 + 1;
}
139
/* Get zone of physical block. Trivial: zones are a fixed
 * MS_BLOCKS_IN_ZONE physical eraseblocks each.
 */
static int msb_get_zone_from_pba(int pba)
{
	return pba / MS_BLOCKS_IN_ZONE;
}
145
146/* Debug test to validate free block counts */
147static int msb_validate_used_block_bitmap(struct msb_data *msb)
148{
149 int total_free_blocks = 0;
150 int i;
151
152 if (!debug)
153 return 0;
154
155 for (i = 0; i < msb->zone_count; i++)
156 total_free_blocks += msb->free_block_count[i];
157
158 if (msb->block_count - bitmap_weight(msb->used_blocks_bitmap,
159 msb->block_count) == total_free_blocks)
160 return 0;
161
162 pr_err("BUG: free block counts don't match the bitmap");
163 msb->read_only = true;
164 return -EINVAL;
165}
166
/* Mark physical block as used */
static void msb_mark_block_used(struct msb_data *msb, int pba)
{
	int zone = msb_get_zone_from_pba(pba);

	/* Double allocation means the FTL bookkeeping is corrupt;
	 * go read-only rather than risk data loss.
	 */
	if (test_bit(pba, msb->used_blocks_bitmap)) {
		pr_err(
		"BUG: attempt to mark already used pba %d as used", pba);
		msb->read_only = true;
		return;
	}

	if (msb_validate_used_block_bitmap(msb))
		return;

	/* No races because all IO is single threaded */
	__set_bit(pba, msb->used_blocks_bitmap);
	msb->free_block_count[zone]--;
}
186
187/* Mark physical block as free */
188static void msb_mark_block_unused(struct msb_data *msb, int pba)
189{
190 int zone = msb_get_zone_from_pba(pba);
191
192 if (!test_bit(pba, msb->used_blocks_bitmap)) {
193 pr_err("BUG: attempt to mark already unused pba %d as unused" , pba);
194 msb->read_only = true;
195 return;
196 }
197
198 if (msb_validate_used_block_bitmap(msb))
199 return;
200
201 /* No races because all IO is single threaded */
202 __clear_bit(pba, msb->used_blocks_bitmap);
203 msb->free_block_count[zone]++;
204}
205
/* Invalidate current register window
 * Resets the cached read/write window to the ID register and clears
 * addr_valid, which forces the next msb_read_regs()/msb_write_regs()
 * to re-send MS_TPC_SET_RW_REG_ADRS.
 */
static void msb_invalidate_reg_window(struct msb_data *msb)
{
	msb->reg_addr.w_offset = offsetof(struct ms_register, id);
	msb->reg_addr.w_length = sizeof(struct ms_id_register);
	msb->reg_addr.r_offset = offsetof(struct ms_register, id);
	msb->reg_addr.r_length = sizeof(struct ms_id_register);
	msb->addr_valid = false;
}
215
/* Start a state machine
 * Installs 'state_func' as the card's request handler, kicks off a new
 * memstick request and blocks until the handler calls
 * msb_exit_state_machine(). Returns the exit error of the machine.
 */
static int msb_run_state_machine(struct msb_data *msb, int (*state_func)
		(struct memstick_dev *card, struct memstick_request **req))
{
	struct memstick_dev *card = msb->card;

	/* The previous machine must have exited (state == -1) */
	WARN_ON(msb->state != -1);
	msb->int_polling = false;
	msb->state = 0;
	msb->exit_error = 0;

	memset(&card->current_mrq, 0, sizeof(card->current_mrq));

	card->next_request = state_func;
	memstick_new_req(card->host);
	/* Completed by msb_exit_state_machine() from the handler */
	wait_for_completion(&card->mrq_complete);

	WARN_ON(msb->state != -1);
	return msb->exit_error;
}
236
/* State machines call that to exit
 * Records 'error' for msb_run_state_machine() to return, re-installs the
 * catch-all bad handler and wakes the waiter. Always returns -ENXIO so
 * the memstick core stops issuing requests to this handler.
 */
static int msb_exit_state_machine(struct msb_data *msb, int error)
{
	WARN_ON(msb->state == -1);

	msb->state = -1;
	msb->exit_error = error;
	msb->card->next_request = h_msb_default_bad;

	/* Invalidate reg window on errors */
	if (error)
		msb_invalidate_reg_window(msb);

	complete(&msb->card->mrq_complete);
	return -ENXIO;
}
253
/* read INT register
 * 'timeout' is in milliseconds; -1 selects the default 500 ms.
 * Returns 1 if a real MS_TPC_GET_INT request was queued (caller must
 * return to the memstick core), or 0 if mrq->data[0] already holds the
 * INT value (host latched it, or the poll timed out and CMDNAK is faked).
 */
static int msb_read_int_reg(struct msb_data *msb, long timeout)
{
	struct memstick_request *mrq = &msb->card->current_mrq;

	WARN_ON(msb->state == -1);

	/* First call arms the poll timeout; once expired, report CMDNAK
	 * so the state machine gives up.
	 */
	if (!msb->int_polling) {
		msb->int_timeout = jiffies +
			msecs_to_jiffies(timeout == -1 ? 500 : timeout);
		msb->int_polling = true;
	} else if (time_after(jiffies, msb->int_timeout)) {
		mrq->data[0] = MEMSTICK_INT_CMDNAK;
		return 0;
	}

	/* In parallel mode the host may auto-fetch INT with the request */
	if ((msb->caps & MEMSTICK_CAP_AUTO_GET_INT) &&
				mrq->need_card_int && !mrq->error) {
		mrq->data[0] = mrq->int_reg;
		mrq->need_card_int = false;
		return 0;
	} else {
		memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
		return 1;
	}
}
280
/* Read a register
 * Returns 1 if the MS_TPC_READ_REG request was queued, or 0 if the
 * register window had to be (re)programmed first — the caller must stay
 * in the same state and retry after the window request completes.
 */
static int msb_read_regs(struct msb_data *msb, int offset, int len)
{
	struct memstick_request *req = &msb->card->current_mrq;

	if (msb->reg_addr.r_offset != offset ||
	    msb->reg_addr.r_length != len || !msb->addr_valid) {

		msb->reg_addr.r_offset = offset;
		msb->reg_addr.r_length = len;
		msb->addr_valid = true;

		memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
			&msb->reg_addr, sizeof(msb->reg_addr));
		return 0;
	}

	memstick_init_req(req, MS_TPC_READ_REG, NULL, len);
	return 1;
}
301
/* Write a card register
 * Returns 1 if the MS_TPC_WRITE_REG request was queued, or 0 if the
 * register window had to be (re)programmed first — the caller must stay
 * in the same state and retry after the window request completes.
 */
static int msb_write_regs(struct msb_data *msb, int offset, int len, void *buf)
{
	struct memstick_request *req = &msb->card->current_mrq;

	if (msb->reg_addr.w_offset != offset ||
	    msb->reg_addr.w_length != len || !msb->addr_valid) {

		msb->reg_addr.w_offset = offset;
		msb->reg_addr.w_length = len;
		msb->addr_valid = true;

		memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
			&msb->reg_addr, sizeof(msb->reg_addr));
		return 0;
	}

	memstick_init_req(req, MS_TPC_WRITE_REG, buf, len);
	return 1;
}
322
/* Handler for absence of IO
 * Installed as card->next_request whenever no state machine is running;
 * any request reaching it is unexpected.
 */
static int h_msb_default_bad(struct memstick_dev *card,
					struct memstick_request **mrq)
{
	return -ENXIO;
}
329
/*
 * This function is a handler for reads of one page from device.
 * Writes output to msb->current_sg, takes sector address from msb->reg.param
 * Can also be used to read extra data only. Set params accordingly.
 * Exits with 0, -EIO, -EBADMSG (uncorrectable) or -EUCLEAN (correctable).
 */
static int h_msb_read_page(struct memstick_dev *card,
					struct memstick_request **out_mrq)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
	struct scatterlist sg[2];
	u8 command, intreg;

	if (mrq->error) {
		dbg("read_page, unknown error");
		return msb_exit_state_machine(msb, mrq->error);
	}
again:
	switch (msb->state) {
	case MSB_RP_SEND_BLOCK_ADDRESS:
		/* msb_write_regs sometimes "fails" because it needs to update
		 * the reg window, and thus it returns request for that.
		 * Then we stay in this state and retry
		 */
		if (!msb_write_regs(msb,
			offsetof(struct ms_register, param),
			sizeof(struct ms_param_register),
			(unsigned char *)&msb->regs.param))
			return 0;

		msb->state = MSB_RP_SEND_READ_COMMAND;
		return 0;

	case MSB_RP_SEND_READ_COMMAND:
		command = MS_CMD_BLOCK_READ;
		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
		msb->state = MSB_RP_SEND_INT_REQ;
		return 0;

	case MSB_RP_SEND_INT_REQ:
		msb->state = MSB_RP_RECEIVE_INT_REQ_RESULT;
		/* If we don't actually need to send the int read request
		 * (only in serial mode), then just fall through
		 */
		if (msb_read_int_reg(msb, -1))
			return 0;
		fallthrough;

	case MSB_RP_RECEIVE_INT_REQ_RESULT:
		intreg = mrq->data[0];
		msb->regs.status.interrupt = intreg;

		if (intreg & MEMSTICK_INT_CMDNAK)
			return msb_exit_state_machine(msb, -EIO);

		/* Command not complete yet — keep polling INT */
		if (!(intreg & MEMSTICK_INT_CED)) {
			msb->state = MSB_RP_SEND_INT_REQ;
			goto again;
		}

		msb->int_polling = false;
		msb->state = (intreg & MEMSTICK_INT_ERR) ?
			MSB_RP_SEND_READ_STATUS_REG : MSB_RP_SEND_OOB_READ;
		goto again;

	case MSB_RP_SEND_READ_STATUS_REG:
		/* read the status register to understand source of the INT_ERR */
		if (!msb_read_regs(msb,
			offsetof(struct ms_register, status),
			sizeof(struct ms_status_register)))
			return 0;

		msb->state = MSB_RP_RECEIVE_STATUS_REG;
		return 0;

	case MSB_RP_RECEIVE_STATUS_REG:
		msb->regs.status = *(struct ms_status_register *)mrq->data;
		msb->state = MSB_RP_SEND_OOB_READ;
		fallthrough;

	case MSB_RP_SEND_OOB_READ:
		if (!msb_read_regs(msb,
			offsetof(struct ms_register, extra_data),
			sizeof(struct ms_extra_data_register)))
			return 0;

		msb->state = MSB_RP_RECEIVE_OOB_READ;
		return 0;

	case MSB_RP_RECEIVE_OOB_READ:
		msb->regs.extra_data =
			*(struct ms_extra_data_register *) mrq->data;
		msb->state = MSB_RP_SEND_READ_DATA;
		fallthrough;

	case MSB_RP_SEND_READ_DATA:
		/* Skip that state if we only read the oob */
		if (msb->regs.param.cp == MEMSTICK_CP_EXTRA) {
			msb->state = MSB_RP_RECEIVE_READ_DATA;
			goto again;
		}

		/* Carve one page out of the caller's scatterlist */
		sg_init_table(sg, ARRAY_SIZE(sg));
		msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
			msb->current_sg_offset,
			msb->page_size);

		memstick_init_req_sg(mrq, MS_TPC_READ_LONG_DATA, sg);
		msb->state = MSB_RP_RECEIVE_READ_DATA;
		return 0;

	case MSB_RP_RECEIVE_READ_DATA:
		if (!(msb->regs.status.interrupt & MEMSTICK_INT_ERR)) {
			msb->current_sg_offset += msb->page_size;
			return msb_exit_state_machine(msb, 0);
		}

		if (msb->regs.status.status1 & MEMSTICK_UNCORR_ERROR) {
			dbg("read_page: uncorrectable error");
			return msb_exit_state_machine(msb, -EBADMSG);
		}

		if (msb->regs.status.status1 & MEMSTICK_CORR_ERROR) {
			/* ECC-corrected data is still valid — advance sg */
			dbg("read_page: correctable error");
			msb->current_sg_offset += msb->page_size;
			return msb_exit_state_machine(msb, -EUCLEAN);
		} else {
			dbg("read_page: INT error, but no status error bits");
			return msb_exit_state_machine(msb, -EIO);
		}
	}

	BUG();
}
464
/*
 * Handler of writes of exactly one block.
 * Takes address from msb->regs.param.
 * Writes same extra data to blocks, also taken
 * from msb->regs.extra
 * Returns -EBADMSG if write fails due to uncorrectable error, or -EIO if
 * device refuses to take the command or something else
 */
static int h_msb_write_block(struct memstick_dev *card,
					struct memstick_request **out_mrq)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
	struct scatterlist sg[2];
	u8 intreg, command;

	if (mrq->error)
		return msb_exit_state_machine(msb, mrq->error);

again:
	switch (msb->state) {

	/* HACK: Jmicon handling of TPCs between 8 and
	 *	sizeof(memstick_request.data) is broken due to hardware
	 *	bug in PIO mode that is used for these TPCs
	 *	Therefore split the write
	 */

	case MSB_WB_SEND_WRITE_PARAMS:
		if (!msb_write_regs(msb,
			offsetof(struct ms_register, param),
			sizeof(struct ms_param_register),
			&msb->regs.param))
			return 0;

		msb->state = MSB_WB_SEND_WRITE_OOB;
		return 0;

	case MSB_WB_SEND_WRITE_OOB:
		if (!msb_write_regs(msb,
			offsetof(struct ms_register, extra_data),
			sizeof(struct ms_extra_data_register),
			&msb->regs.extra_data))
			return 0;
		msb->state = MSB_WB_SEND_WRITE_COMMAND;
		return 0;


	case MSB_WB_SEND_WRITE_COMMAND:
		command = MS_CMD_BLOCK_WRITE;
		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
		msb->state = MSB_WB_SEND_INT_REQ;
		return 0;

	case MSB_WB_SEND_INT_REQ:
		msb->state = MSB_WB_RECEIVE_INT_REQ;
		if (msb_read_int_reg(msb, -1))
			return 0;
		fallthrough;

	case MSB_WB_RECEIVE_INT_REQ:
		intreg = mrq->data[0];
		msb->regs.status.interrupt = intreg;

		/* errors mean out of here, and fast... */
		if (intreg & (MEMSTICK_INT_CMDNAK))
			return msb_exit_state_machine(msb, -EIO);

		if (intreg & MEMSTICK_INT_ERR)
			return msb_exit_state_machine(msb, -EBADMSG);


		/* for last page we need to poll CED */
		if (msb->current_page == msb->pages_in_block) {
			if (intreg & MEMSTICK_INT_CED)
				return msb_exit_state_machine(msb, 0);
			msb->state = MSB_WB_SEND_INT_REQ;
			goto again;

		}

		/* for non-last page we need BREQ before writing next chunk */
		if (!(intreg & MEMSTICK_INT_BREQ)) {
			msb->state = MSB_WB_SEND_INT_REQ;
			goto again;
		}

		msb->int_polling = false;
		msb->state = MSB_WB_SEND_WRITE_DATA;
		fallthrough;

	case MSB_WB_SEND_WRITE_DATA:
		sg_init_table(sg, ARRAY_SIZE(sg));

		/* A short copy means the caller's sg cannot supply a full
		 * page — abort rather than write garbage.
		 */
		if (msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
			msb->current_sg_offset,
			msb->page_size) < msb->page_size)
			return msb_exit_state_machine(msb, -EIO);

		memstick_init_req_sg(mrq, MS_TPC_WRITE_LONG_DATA, sg);
		mrq->need_card_int = 1;
		msb->state = MSB_WB_RECEIVE_WRITE_CONFIRMATION;
		return 0;

	case MSB_WB_RECEIVE_WRITE_CONFIRMATION:
		/* Page accepted — advance to the next one and poll INT */
		msb->current_page++;
		msb->current_sg_offset += msb->page_size;
		msb->state = MSB_WB_SEND_INT_REQ;
		goto again;
	default:
		BUG();
	}

	return 0;
}
580
/*
 * This function is used to send simple IO requests to device that consist
 * of register write + command
 * Command value is taken from msb->command_value; extra data (OOB) is
 * written too when msb->command_need_oob is set.
 */
static int h_msb_send_command(struct memstick_dev *card,
					struct memstick_request **out_mrq)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
	u8 intreg;

	if (mrq->error) {
		dbg("send_command: unknown error");
		return msb_exit_state_machine(msb, mrq->error);
	}
again:
	switch (msb->state) {

	/* HACK: see h_msb_write_block */
	case MSB_SC_SEND_WRITE_PARAMS: /* write param register*/
		if (!msb_write_regs(msb,
			offsetof(struct ms_register, param),
			sizeof(struct ms_param_register),
			&msb->regs.param))
			return 0;
		msb->state = MSB_SC_SEND_WRITE_OOB;
		return 0;

	case MSB_SC_SEND_WRITE_OOB:
		if (!msb->command_need_oob) {
			msb->state = MSB_SC_SEND_COMMAND;
			goto again;
		}

		if (!msb_write_regs(msb,
			offsetof(struct ms_register, extra_data),
			sizeof(struct ms_extra_data_register),
			&msb->regs.extra_data))
			return 0;

		msb->state = MSB_SC_SEND_COMMAND;
		return 0;

	case MSB_SC_SEND_COMMAND:
		memstick_init_req(mrq, MS_TPC_SET_CMD, &msb->command_value, 1);
		msb->state = MSB_SC_SEND_INT_REQ;
		return 0;

	case MSB_SC_SEND_INT_REQ:
		msb->state = MSB_SC_RECEIVE_INT_REQ;
		if (msb_read_int_reg(msb, -1))
			return 0;
		fallthrough;

	case MSB_SC_RECEIVE_INT_REQ:
		intreg = mrq->data[0];

		if (intreg & MEMSTICK_INT_CMDNAK)
			return msb_exit_state_machine(msb, -EIO);
		if (intreg & MEMSTICK_INT_ERR)
			return msb_exit_state_machine(msb, -EBADMSG);

		/* Keep polling until the command-end flag is set */
		if (!(intreg & MEMSTICK_INT_CED)) {
			msb->state = MSB_SC_SEND_INT_REQ;
			goto again;
		}

		return msb_exit_state_machine(msb, 0);
	}

	BUG();
}
653
/* Small handler for card reset
 * Two states: send MS_CMD_RESET, then confirm the request completed.
 */
static int h_msb_reset(struct memstick_dev *card,
					struct memstick_request **out_mrq)
{
	u8 command = MS_CMD_RESET;
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_request *mrq = *out_mrq = &card->current_mrq;

	if (mrq->error)
		return msb_exit_state_machine(msb, mrq->error);

	switch (msb->state) {
	case MSB_RS_SEND:
		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
		/* RESET is not followed by an INT read */
		mrq->need_card_int = 0;
		msb->state = MSB_RS_CONFIRM;
		return 0;
	case MSB_RS_CONFIRM:
		return msb_exit_state_machine(msb, 0);
	}
	BUG();
}
676
/* This handler is used to do serial->parallel switch
 * First tells the card to use the parallel interface, then switches the
 * host side and issues a dummy GET_INT to verify the card still answers.
 */
static int h_msb_parallel_switch(struct memstick_dev *card,
					struct memstick_request **out_mrq)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
	struct memstick_host *host = card->host;

	if (mrq->error) {
		dbg("parallel_switch: error");
		/* Roll back the cached mode bit on failure */
		msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
		return msb_exit_state_machine(msb, mrq->error);
	}

	switch (msb->state) {
	case MSB_PS_SEND_SWITCH_COMMAND:
		/* Set the parallel interface on memstick side */
		msb->regs.param.system |= MEMSTICK_SYS_PAM;

		if (!msb_write_regs(msb,
			offsetof(struct ms_register, param),
			1,
			(unsigned char *)&msb->regs.param))
			return 0;

		msb->state = MSB_PS_SWICH_HOST;
		return 0;

	case MSB_PS_SWICH_HOST:
		/* Set parallel interface on our side + send a dummy request
		 * to see if card responds
		 */
		host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_PAR4);
		memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
		msb->state = MSB_PS_CONFIRM;
		return 0;

	case MSB_PS_CONFIRM:
		return msb_exit_state_machine(msb, 0);
	}

	BUG();
}
720
721static int msb_switch_to_parallel(struct msb_data *msb);
722
/* Reset the card, to guard against hw errors being treated as bad blocks
 * When 'full' is set, the host is also power-cycled and dropped back to
 * serial mode before the card reset. Parallel mode is restored afterwards
 * if it was active. On failure the card is left read-only.
 */
static int msb_reset(struct msb_data *msb, bool full)
{

	bool was_parallel = msb->regs.param.system & MEMSTICK_SYS_PAM;
	struct memstick_dev *card = msb->card;
	struct memstick_host *host = card->host;
	int error;

	/* Reset the card */
	msb->regs.param.system = MEMSTICK_SYS_BAMD;

	if (full) {
		error =  host->set_param(host,
					MEMSTICK_POWER, MEMSTICK_POWER_OFF);
		if (error)
			goto out_error;

		msb_invalidate_reg_window(msb);

		error = host->set_param(host,
					MEMSTICK_POWER, MEMSTICK_POWER_ON);
		if (error)
			goto out_error;

		error = host->set_param(host,
					MEMSTICK_INTERFACE, MEMSTICK_SERIAL);
		if (error) {
out_error:
			dbg("Failed to reset the host controller");
			msb->read_only = true;
			return -EFAULT;
		}
	}

	error = msb_run_state_machine(msb, h_msb_reset);
	if (error) {
		dbg("Failed to reset the card");
		msb->read_only = true;
		return -ENODEV;
	}

	/* Set parallel mode */
	if (was_parallel)
		msb_switch_to_parallel(msb);
	return 0;
}
770
/* Attempts to switch interface to parallel mode
 * On failure the mode bit is rolled back and the card is fully reset
 * (which leaves it in serial mode). On success the driver can rely on
 * the host auto-fetching the INT register.
 */
static int msb_switch_to_parallel(struct msb_data *msb)
{
	int error;

	error = msb_run_state_machine(msb, h_msb_parallel_switch);
	if (error) {
		pr_err("Switch to parallel failed");
		msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
		msb_reset(msb, true);
		return -EFAULT;
	}

	msb->caps |= MEMSTICK_CAP_AUTO_GET_INT;
	return 0;
}
787
/* Changes overwrite flag on a page
 * Writes 'flag' into the overwrite byte of the OOB area of ('pba', 'page')
 * via a BLOCK_WRITE in MEMSTICK_CP_OVERWRITE mode. Returns the state
 * machine's exit error, or -EROFS when the card is read-only.
 */
static int msb_set_overwrite_flag(struct msb_data *msb,
						u16 pba, u8 page, u8 flag)
{
	if (msb->read_only)
		return -EROFS;

	msb->regs.param.block_address = cpu_to_be16(pba);
	msb->regs.param.page_address = page;
	msb->regs.param.cp = MEMSTICK_CP_OVERWRITE;
	msb->regs.extra_data.overwrite_flag = flag;
	msb->command_value = MS_CMD_BLOCK_WRITE;
	msb->command_need_oob = true;

	dbg_verbose("changing overwrite flag to %02x for sector %d, page %d",
							flag, pba, page);
	return msb_run_state_machine(msb, h_msb_send_command);
}
806
/* Mark a whole physical block as bad by clearing the BKST bit in its
 * overwrite flag. The card is reset first to clear any stuck error state.
 */
static int msb_mark_bad(struct msb_data *msb, int pba)
{
	pr_notice("marking pba %d as bad", pba);
	msb_reset(msb, true);
	return msb_set_overwrite_flag(
			msb, pba, 0, 0xFF & ~MEMSTICK_OVERWRITE_BKST);
}
814
/* Mark a single page as bad by clearing the PGST0 bit in its overwrite
 * flag. The card is reset first to clear any stuck error state.
 */
static int msb_mark_page_bad(struct msb_data *msb, int pba, int page)
{
	dbg("marking page %d of pba %d as bad", page, pba);
	msb_reset(msb, true);
	return msb_set_overwrite_flag(msb,
		pba, page, ~MEMSTICK_OVERWRITE_PGST0);
}
822
/* Erases one physical block
 * Retries once (two attempts total), resetting the card between attempts.
 * On persistent failure the block is marked bad.
 */
static int msb_erase_block(struct msb_data *msb, u16 pba)
{
	int error, try;

	if (msb->read_only)
		return -EROFS;

	dbg_verbose("erasing pba %d", pba);

	for (try = 1; try < 3; try++) {
		msb->regs.param.block_address = cpu_to_be16(pba);
		msb->regs.param.page_address = 0;
		msb->regs.param.cp = MEMSTICK_CP_BLOCK;
		msb->command_value = MS_CMD_BLOCK_ERASE;
		msb->command_need_oob = false;


		error = msb_run_state_machine(msb, h_msb_send_command);
		if (!error || msb_reset(msb, true))
			break;
	}

	if (error) {
		pr_err("erase failed, marking pba %d as bad", pba);
		msb_mark_bad(msb, pba);
	}

	/* NOTE(review): the block is marked unused and flagged as erased
	 * even when the erase failed above — confirm this is intended
	 * (the pba was already marked bad in that case).
	 */
	dbg_verbose("erase success, marking pba %d as unused", pba);
	msb_mark_block_unused(msb, pba);
	__set_bit(pba, msb->erased_blocks_bitmap);
	return error;
}
856
/* Reads one page from device
 * Fills 'sg' (at byte 'offset') with the page data and, if 'extra' is
 * non-NULL, copies the OOB area there too. An unmapped pba
 * (MS_BLOCK_INVALID) reads back as all 0xFF. Retries once with a card
 * reset in between; marks the page bad on an uncorrectable ECC error.
 */
static int msb_read_page(struct msb_data *msb,
	u16 pba, u8 page, struct ms_extra_data_register *extra,
					struct scatterlist *sg,  int offset)
{
	int try, error;

	if (pba == MS_BLOCK_INVALID) {
		unsigned long flags;
		struct sg_mapping_iter miter;
		size_t len = msb->page_size;

		dbg_verbose("read unmapped sector. returning 0xFF");

		local_irq_save(flags);
		sg_miter_start(&miter, sg, sg_nents(sg),
				SG_MITER_ATOMIC | SG_MITER_TO_SG);

		while (sg_miter_next(&miter) && len > 0) {

			int chunklen;

			/* Skip mapped chunks before 'offset' */
			if (offset && offset >= miter.length) {
				offset -= miter.length;
				continue;
			}

			chunklen = min(miter.length - offset, len);
			memset(miter.addr + offset, 0xFF, chunklen);
			len -= chunklen;
			offset = 0;
		}

		sg_miter_stop(&miter);
		local_irq_restore(flags);

		/* Leftover offset means sg was shorter than requested */
		if (offset)
			return -EFAULT;

		if (extra)
			memset(extra, 0xFF, sizeof(*extra));
		return 0;
	}

	if (pba >= msb->block_count) {
		pr_err("BUG: attempt to read beyond the end of the card at pba %d", pba);
		return -EINVAL;
	}

	for (try = 1; try < 3; try++) {
		msb->regs.param.block_address = cpu_to_be16(pba);
		msb->regs.param.page_address = page;
		msb->regs.param.cp = MEMSTICK_CP_PAGE;

		msb->current_sg = sg;
		msb->current_sg_offset = offset;
		error = msb_run_state_machine(msb, h_msb_read_page);


		/* Correctable ECC error: data is good, just log it */
		if (error == -EUCLEAN) {
			pr_notice("correctable error on pba %d, page %d",
				pba, page);
			error = 0;
		}

		if (!error && extra)
			*extra = msb->regs.extra_data;

		if (!error || msb_reset(msb, true))
			break;

	}

	/* Mark bad pages */
	if (error == -EBADMSG) {
		pr_err("uncorrectable error on read of pba %d, page %d",
			pba, page);

		if (msb->regs.extra_data.overwrite_flag &
					MEMSTICK_OVERWRITE_PGST0)
			msb_mark_page_bad(msb, pba, page);
		return -EBADMSG;
	}

	if (error)
		pr_err("read of pba %d, page %d failed with error %d",
			pba, page, error);
	return error;
}
946
947/* Reads oob of page only */
948static int msb_read_oob(struct msb_data *msb, u16 pba, u16 page,
949 struct ms_extra_data_register *extra)
950{
951 int error;
952
953 BUG_ON(!extra);
954 msb->regs.param.block_address = cpu_to_be16(pba);
955 msb->regs.param.page_address = page;
956 msb->regs.param.cp = MEMSTICK_CP_EXTRA;
957
958 if (pba > msb->block_count) {
959 pr_err("BUG: attempt to read beyond the end of card at pba %d", pba);
960 return -EINVAL;
961 }
962
963 error = msb_run_state_machine(msb, h_msb_read_page);
964 *extra = msb->regs.extra_data;
965
966 if (error == -EUCLEAN) {
967 pr_notice("correctable error on pba %d, page %d",
968 pba, page);
969 return 0;
970 }
971
972 return error;
973}
974
/* Reads a block and compares it with data contained in scatterlist orig_sg
 * The whole physical block is read page by page into msb->block_buffer,
 * then compared against 'orig_sg' starting at byte 'offset'.
 * Returns 0 on match, a read error, or -EIO on mismatch.
 */
static int msb_verify_block(struct msb_data *msb, u16 pba,
				struct scatterlist *orig_sg,  int offset)
{
	struct scatterlist sg;
	int page = 0, error;

	sg_init_one(&sg, msb->block_buffer, msb->block_size);

	while (page < msb->pages_in_block) {

		error = msb_read_page(msb, pba, page,
				NULL, &sg, page * msb->page_size);
		if (error)
			return error;
		page++;
	}

	if (msb_sg_compare_to_buffer(orig_sg, offset,
				msb->block_buffer, msb->block_size))
		return -EIO;
	return 0;
}
998
/* Writes exactly one block + oob
 * Writes the block at 'pba', tagging its OOB with logical address 'lba';
 * data comes from 'sg' at byte 'offset'. Optionally verifies the write,
 * and on failure erases the block and retries once.
 */
static int msb_write_block(struct msb_data *msb,
			u16 pba, u32 lba, struct scatterlist *sg, int offset)
{
	int error, current_try = 1;

	BUG_ON(sg->length < msb->page_size);

	if (msb->read_only)
		return -EROFS;

	if (pba == MS_BLOCK_INVALID) {
		pr_err(
			"BUG: write: attempt to write MS_BLOCK_INVALID block");
		return -EINVAL;
	}

	if (pba >= msb->block_count || lba >= msb->logical_block_count) {
		pr_err(
		"BUG: write: attempt to write beyond the end of device");
		return -EINVAL;
	}

	/* A logical block must stay within its zone */
	if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
		pr_err("BUG: write: lba zone mismatch");
		return -EINVAL;
	}

	if (pba == msb->boot_block_locations[0] ||
		pba == msb->boot_block_locations[1]) {
		pr_err("BUG: write: attempt to write to boot blocks!");
		return -EINVAL;
	}

	while (1) {

		if (msb->read_only)
			return -EROFS;

		msb->regs.param.cp = MEMSTICK_CP_BLOCK;
		msb->regs.param.page_address = 0;
		msb->regs.param.block_address = cpu_to_be16(pba);

		msb->regs.extra_data.management_flag = 0xFF;
		msb->regs.extra_data.overwrite_flag = 0xF8;
		msb->regs.extra_data.logical_address = cpu_to_be16(lba);

		msb->current_sg = sg;
		msb->current_sg_offset = offset;
		msb->current_page = 0;

		error = msb_run_state_machine(msb, h_msb_write_block);

		/* Sector we just wrote to is assumed erased since its pba
		 * was erased. If it wasn't erased, write will succeed
		 * and will just clear the bits that were set in the block
		 * thus test that what we have written,
		 * matches what we expect.
		 * We do trust the blocks that we erased
		 */
		if (!error && (verify_writes ||
				!test_bit(pba, msb->erased_blocks_bitmap)))
			error = msb_verify_block(msb, pba, sg, offset);

		if (!error)
			break;

		/* Only one retry; also stop if the recovery reset fails */
		if (current_try > 1 || msb_reset(msb, true))
			break;

		pr_err("write failed, trying to erase the pba %d", pba);
		error = msb_erase_block(msb, pba);
		if (error)
			break;

		current_try++;
	}
	return error;
}
1078
/* Finds a free block for write replacement
 * Picks a random free block within 'zone' (for crude wear leveling),
 * marks it used and returns its pba, or MS_BLOCK_INVALID when the zone
 * has no free blocks left (the card then goes read-only).
 */
static u16 msb_get_free_block(struct msb_data *msb, int zone)
{
	u16 pos;
	int pba = zone * MS_BLOCKS_IN_ZONE;
	int i;

	get_random_bytes(&pos, sizeof(pos));

	if (!msb->free_block_count[zone]) {
		pr_err("NO free blocks in the zone %d, to use for a write, (media is WORN out) switching to RO mode", zone);
		msb->read_only = true;
		return MS_BLOCK_INVALID;
	}

	pos %= msb->free_block_count[zone];

	dbg_verbose("have %d choices for a free block, selected randomly: %d",
		msb->free_block_count[zone], pos);

	/* Walk to the (pos+1)-th clear bit starting at the zone's base */
	pba = find_next_zero_bit(msb->used_blocks_bitmap,
							msb->block_count, pba);
	for (i = 0; i < pos; ++i)
		pba = find_next_zero_bit(msb->used_blocks_bitmap,
						msb->block_count, pba + 1);

	dbg_verbose("result of the free blocks scan: pba %d", pba);

	/* Scan overran the zone — counters and bitmap disagree */
	if (pba == msb->block_count || (msb_get_zone_from_pba(pba)) != zone) {
		pr_err("BUG: can't get a free block");
		msb->read_only = true;
		return MS_BLOCK_INVALID;
	}

	msb_mark_block_used(msb, pba);
	return pba;
}
1116
/* Rewrite logical block 'lba' with the data in 'sg' (at byte 'offset'):
 * allocate a fresh pba in the lba's zone, write there, then erase the old
 * pba and update the mapping table. Up to 3 attempts; a write that fails
 * with an uncorrectable error marks the new pba bad and retries with
 * another one. On final failure the card goes read-only.
 */
static int msb_update_block(struct msb_data *msb, u16 lba,
	struct scatterlist *sg, int offset)
{
	u16 pba, new_pba;
	int error, try;

	pba = msb->lba_to_pba_table[lba];
	dbg_verbose("start of a block update at lba  %d, pba %d", lba, pba);

	if (pba != MS_BLOCK_INVALID) {
		/* Flag the old copy as being updated, so an interrupted
		 * update can be detected later.
		 */
		dbg_verbose("setting the update flag on the block");
		msb_set_overwrite_flag(msb, pba, 0,
				0xFF & ~MEMSTICK_OVERWRITE_UDST);
	}

	for (try = 0; try < 3; try++) {
		new_pba = msb_get_free_block(msb,
			msb_get_zone_from_lba(lba));

		if (new_pba == MS_BLOCK_INVALID) {
			error = -EIO;
			goto out;
		}

		dbg_verbose("block update: writing updated block to the pba %d",
								new_pba);
		error = msb_write_block(msb, new_pba, lba, sg, offset);
		if (error == -EBADMSG) {
			msb_mark_bad(msb, new_pba);
			continue;
		}

		if (error)
			goto out;

		dbg_verbose("block update: erasing the old block");
		msb_erase_block(msb, pba);
		msb->lba_to_pba_table[lba] = new_pba;
		return 0;
	}
out:
	if (error) {
		pr_err("block update error after %d tries,  switching to r/o mode", try);
		msb->read_only = true;
	}
	return error;
}
1164
/* Converts endianness in the boot block for easy use
 * Converts the multi-byte fields of a just-read boot page from the
 * on-media big-endian layout to CPU byte order, in place.
 */
static void msb_fix_boot_page_endianness(struct ms_boot_page *p)
{
	p->header.block_id = be16_to_cpu(p->header.block_id);
	p->header.format_reserved = be16_to_cpu(p->header.format_reserved);
	p->entry.disabled_block.start_addr
		= be32_to_cpu(p->entry.disabled_block.start_addr);
	p->entry.disabled_block.data_size
		= be32_to_cpu(p->entry.disabled_block.data_size);
	p->entry.cis_idi.start_addr
		= be32_to_cpu(p->entry.cis_idi.start_addr);
	p->entry.cis_idi.data_size
		= be32_to_cpu(p->entry.cis_idi.data_size);
	p->attr.block_size = be16_to_cpu(p->attr.block_size);
	p->attr.number_of_blocks = be16_to_cpu(p->attr.number_of_blocks);
	p->attr.number_of_effective_blocks
		= be16_to_cpu(p->attr.number_of_effective_blocks);
	p->attr.page_size = be16_to_cpu(p->attr.page_size);
	p->attr.memory_manufacturer_code
		= be16_to_cpu(p->attr.memory_manufacturer_code);
	p->attr.memory_device_code = be16_to_cpu(p->attr.memory_device_code);
	p->attr.implemented_capacity
		= be16_to_cpu(p->attr.implemented_capacity);
	p->attr.controller_number = be16_to_cpu(p->attr.controller_number);
	p->attr.controller_function = be16_to_cpu(p->attr.controller_function);
}
1191
/*
 * Scan the first MS_BLOCK_MAX_BOOT_ADDR physical blocks for the (up to
 * two) boot blocks and cache their contents and locations in @msb.
 * Returns 0 if at least one boot block was found, -EIO otherwise.
 */
static int msb_read_boot_blocks(struct msb_data *msb)
{
	int pba = 0;
	struct scatterlist sg;
	struct ms_extra_data_register extra;
	struct ms_boot_page *page;

	msb->boot_block_locations[0] = MS_BLOCK_INVALID;
	msb->boot_block_locations[1] = MS_BLOCK_INVALID;
	msb->boot_block_count = 0;

	dbg_verbose("Start of a scan for the boot blocks");

	/* boot_page holds two entries; it survives re-init of the card */
	if (!msb->boot_page) {
		page = kmalloc_array(2, sizeof(struct ms_boot_page),
				     GFP_KERNEL);
		if (!page)
			return -ENOMEM;

		msb->boot_page = page;
	} else
		page = msb->boot_page;

	msb->block_count = MS_BLOCK_MAX_BOOT_ADDR;

	for (pba = 0; pba < MS_BLOCK_MAX_BOOT_ADDR; pba++) {

		sg_init_one(&sg, page, sizeof(*page));
		if (msb_read_page(msb, pba, 0, &extra, &sg, 0)) {
			dbg("boot scan: can't read pba %d", pba);
			continue;
		}

		/* SYSFLG set means "normal" (non-boot) block */
		if (extra.management_flag & MEMSTICK_MANAGEMENT_SYSFLG) {
			dbg("management flag doesn't indicate boot block %d",
									pba);
			continue;
		}

		/* block_id is still big-endian here: the endianness fixup
		 * below runs only after the ID check passes */
		if (be16_to_cpu(page->header.block_id) != MS_BLOCK_BOOT_ID) {
			dbg("the pba at %d doesn't contain boot block ID", pba);
			continue;
		}

		msb_fix_boot_page_endianness(page);
		msb->boot_block_locations[msb->boot_block_count] = pba;

		/* advance into the second element of the 2-entry array */
		page++;
		msb->boot_block_count++;

		if (msb->boot_block_count == 2)
			break;
	}

	if (!msb->boot_block_count) {
		pr_err("media doesn't contain master page, aborting");
		return -EIO;
	}

	dbg_verbose("End of scan for boot blocks");
	return 0;
}
1254
/*
 * Read the factory bad block table referenced by boot block @block_nr
 * (0 = primary, 1 = backup) and mark each listed block as used so the
 * FTL never allocates it.  Returns 0 on success or a negative errno.
 */
static int msb_read_bad_block_table(struct msb_data *msb, int block_nr)
{
	struct ms_boot_page *boot_block;
	struct scatterlist sg;
	u16 *buffer = NULL;
	int offset = 0;
	int i, error = 0;
	int data_size, data_offset, page, page_offset, size_to_read;
	u16 pba;

	BUG_ON(block_nr > 1);
	boot_block = &msb->boot_page[block_nr];
	pba = msb->boot_block_locations[block_nr];

	if (msb->boot_block_locations[block_nr] == MS_BLOCK_INVALID)
		return -EINVAL;

	/* Table location is given relative to the end of the boot page */
	data_size = boot_block->entry.disabled_block.data_size;
	data_offset = sizeof(struct ms_boot_page) +
			boot_block->entry.disabled_block.start_addr;
	if (!data_size)
		return 0;

	/* Round the read up to whole pages covering the table */
	page = data_offset / msb->page_size;
	page_offset = data_offset % msb->page_size;
	size_to_read =
		DIV_ROUND_UP(data_size + page_offset, msb->page_size) *
			msb->page_size;

	dbg("reading bad block of boot block at pba %d, offset %d len %d",
		pba, data_offset, data_size);

	buffer = kzalloc(size_to_read, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* Read the buffer */
	sg_init_one(&sg, buffer, size_to_read);

	while (offset < size_to_read) {
		error = msb_read_page(msb, pba, page, NULL, &sg, offset);
		if (error)
			goto out;

		page++;
		offset += msb->page_size;

		if (page == msb->pages_in_block) {
			pr_err(
			"bad block table extends beyond the boot block");
			break;
		}
	}

	/* Process the bad block table */
	/* NOTE(review): page_offset is a *byte* offset into the buffer but
	 * is used here as a u16 element index, and the upper bound ignores
	 * page_offset entirely.  This is only correct when the table starts
	 * page-aligned (page_offset == 0) - TODO confirm against the media
	 * layout before changing.
	 */
	for (i = page_offset; i < data_size / sizeof(u16); i++) {

		u16 bad_block = be16_to_cpu(buffer[i]);

		if (bad_block >= msb->block_count) {
			dbg("bad block table contains invalid block %d",
								bad_block);
			continue;
		}

		if (test_bit(bad_block, msb->used_blocks_bitmap)) {
			dbg("duplicate bad block %d in the table",
				bad_block);
			continue;
		}

		dbg("block %d is marked as factory bad", bad_block);
		msb_mark_block_used(msb, bad_block);
	}
out:
	kfree(buffer);
	return error;
}
1333
1334static int msb_ftl_initialize(struct msb_data *msb)
1335{
1336 int i;
1337
1338 if (msb->ftl_initialized)
1339 return 0;
1340
1341 msb->zone_count = msb->block_count / MS_BLOCKS_IN_ZONE;
1342 msb->logical_block_count = msb->zone_count * 496 - 2;
1343
1344 msb->used_blocks_bitmap = bitmap_zalloc(msb->block_count, GFP_KERNEL);
1345 msb->erased_blocks_bitmap = bitmap_zalloc(msb->block_count, GFP_KERNEL);
1346 msb->lba_to_pba_table =
1347 kmalloc_array(msb->logical_block_count, sizeof(u16),
1348 GFP_KERNEL);
1349
1350 if (!msb->used_blocks_bitmap || !msb->lba_to_pba_table ||
1351 !msb->erased_blocks_bitmap) {
1352 bitmap_free(msb->used_blocks_bitmap);
1353 bitmap_free(msb->erased_blocks_bitmap);
1354 kfree(msb->lba_to_pba_table);
1355 return -ENOMEM;
1356 }
1357
1358 for (i = 0; i < msb->zone_count; i++)
1359 msb->free_block_count[i] = MS_BLOCKS_IN_ZONE;
1360
1361 memset(msb->lba_to_pba_table, MS_BLOCK_INVALID,
1362 msb->logical_block_count * sizeof(u16));
1363
1364 dbg("initial FTL tables created. Zone count = %d, Logical block count = %d",
1365 msb->zone_count, msb->logical_block_count);
1366
1367 msb->ftl_initialized = true;
1368 return 0;
1369}
1370
/*
 * Full media scan that builds the LBA->PBA mapping.  Each physical
 * block's OOB area is examined: bad/system/boot blocks are marked used,
 * temporary tables are erased, and LBA collisions (two physical blocks
 * claiming the same logical address, e.g. after an interrupted update)
 * are resolved using the "update in progress" (UDST) flag.
 */
static int msb_ftl_scan(struct msb_data *msb)
{
	u16 pba, lba, other_block;
	u8 overwrite_flag, management_flag, other_overwrite_flag;
	int error;
	struct ms_extra_data_register extra;
	/* remember each block's overwrite flag for collision resolution */
	u8 *overwrite_flags = kzalloc(msb->block_count, GFP_KERNEL);

	if (!overwrite_flags)
		return -ENOMEM;

	dbg("Start of media scanning");
	for (pba = 0; pba < msb->block_count; pba++) {

		if (pba == msb->boot_block_locations[0] ||
			pba == msb->boot_block_locations[1]) {
			dbg_verbose("pba %05d -> [boot block]", pba);
			msb_mark_block_used(msb, pba);
			continue;
		}

		/* already marked used = factory bad (from the bad block table) */
		if (test_bit(pba, msb->used_blocks_bitmap)) {
			dbg_verbose("pba %05d -> [factory bad]", pba);
			continue;
		}

		memset(&extra, 0, sizeof(extra));
		error = msb_read_oob(msb, pba, 0, &extra);

		/* can't trust the page if we can't read the oob */
		if (error == -EBADMSG) {
			pr_notice(
			"oob of pba %d damaged, will try to erase it", pba);
			msb_mark_block_used(msb, pba);
			msb_erase_block(msb, pba);
			continue;
		} else if (error) {
			pr_err("unknown error %d on read of oob of pba %d - aborting",
				error, pba);

			kfree(overwrite_flags);
			return error;
		}

		lba = be16_to_cpu(extra.logical_address);
		management_flag = extra.management_flag;
		overwrite_flag = extra.overwrite_flag;
		overwrite_flags[pba] = overwrite_flag;

		/* Skip bad blocks */
		if (!(overwrite_flag & MEMSTICK_OVERWRITE_BKST)) {
			dbg("pba %05d -> [BAD]", pba);
			msb_mark_block_used(msb, pba);
			continue;
		}

		/* Skip system/drm blocks */
		if ((management_flag & MEMSTICK_MANAGEMENT_FLAG_NORMAL) !=
			MEMSTICK_MANAGEMENT_FLAG_NORMAL) {
			dbg("pba %05d -> [reserved management flag %02x]",
							pba, management_flag);
			msb_mark_block_used(msb, pba);
			continue;
		}

		/* Erase temporary tables */
		if (!(management_flag & MEMSTICK_MANAGEMENT_ATFLG)) {
			dbg("pba %05d -> [temp table] - will erase", pba);

			msb_mark_block_used(msb, pba);
			msb_erase_block(msb, pba);
			continue;
		}

		/* no logical address stored: the block is free */
		if (lba == MS_BLOCK_INVALID) {
			dbg_verbose("pba %05d -> [free]", pba);
			continue;
		}

		msb_mark_block_used(msb, pba);

		/* Block has LBA not according to zoning*/
		if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
			pr_notice("pba %05d -> [bad lba %05d] - will erase",
								pba, lba);
			msb_erase_block(msb, pba);
			continue;
		}

		/* No collisions - great */
		if (msb->lba_to_pba_table[lba] == MS_BLOCK_INVALID) {
			dbg_verbose("pba %05d -> [lba %05d]", pba, lba);
			msb->lba_to_pba_table[lba] = pba;
			continue;
		}

		/* Two blocks claim the same lba: keep the one whose UDST
		 * flag is clear (stable, i.e. its update completed) */
		other_block = msb->lba_to_pba_table[lba];
		other_overwrite_flag = overwrite_flags[other_block];

		pr_notice("Collision between pba %d and pba %d",
			pba, other_block);

		if (!(overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
			pr_notice("pba %d is marked as stable, use it", pba);
			msb_erase_block(msb, other_block);
			msb->lba_to_pba_table[lba] = pba;
			continue;
		}

		if (!(other_overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
			pr_notice("pba %d is marked as stable, use it",
								other_block);
			msb_erase_block(msb, pba);
			continue;
		}

		/* neither copy is stable: arbitrarily keep the newer pba */
		pr_notice("collision between blocks %d and %d, without stable flag set on both, erasing pba %d",
				pba, other_block, other_block);

		msb_erase_block(msb, other_block);
		msb->lba_to_pba_table[lba] = pba;
	}

	dbg("End of media scanning");
	kfree(overwrite_flags);
	return 0;
}
1498
/*
 * Timer callback armed when the first write lands in the cache.  Runs
 * in timer (atomic) context, so it only sets a flag and defers the
 * actual flush to the IO work item.
 */
static void msb_cache_flush_timer(struct timer_list *t)
{
	struct msb_data *msb = from_timer(msb, t, cache_flush_timer);

	msb->need_flush_cache = true;
	queue_work(msb->io_queue, &msb->io_work);
}
1506
1507
1508static void msb_cache_discard(struct msb_data *msb)
1509{
1510 if (msb->cache_block_lba == MS_BLOCK_INVALID)
1511 return;
1512
1513 del_timer_sync(&msb->cache_flush_timer);
1514
1515 dbg_verbose("Discarding the write cache");
1516 msb->cache_block_lba = MS_BLOCK_INVALID;
1517 bitmap_zero(&msb->valid_cache_bitmap, msb->pages_in_block);
1518}
1519
1520static int msb_cache_init(struct msb_data *msb)
1521{
1522 timer_setup(&msb->cache_flush_timer, msb_cache_flush_timer, 0);
1523
1524 if (!msb->cache)
1525 msb->cache = kzalloc(msb->block_size, GFP_KERNEL);
1526 if (!msb->cache)
1527 return -ENOMEM;
1528
1529 msb_cache_discard(msb);
1530 return 0;
1531}
1532
/*
 * Write the cached block back to the media.  Pages the caller never
 * wrote are first read from the old copy of the block; pages that can't
 * be read (or are marked bad) are written anyway and then flagged as
 * damaged in their overwrite flag.  Returns 0 or a negative errno.
 */
static int msb_cache_flush(struct msb_data *msb)
{
	struct scatterlist sg;
	struct ms_extra_data_register extra;
	int page, offset, error;
	u16 pba, lba;

	if (msb->read_only)
		return -EROFS;

	/* nothing cached */
	if (msb->cache_block_lba == MS_BLOCK_INVALID)
		return 0;

	lba = msb->cache_block_lba;
	pba = msb->lba_to_pba_table[lba];

	dbg_verbose("Flushing the write cache of pba %d (LBA %d)",
						pba, msb->cache_block_lba);

	sg_init_one(&sg, msb->cache , msb->block_size);

	/* Read all missing pages in cache */
	for (page = 0; page < msb->pages_in_block; page++) {

		if (test_bit(page, &msb->valid_cache_bitmap))
			continue;

		offset = page * msb->page_size;

		dbg_verbose("reading non-present sector %d of cache block %d",
			page, lba);
		error = msb_read_page(msb, pba, page, &extra, &sg, offset);

		/* Bad pages are copied with 00 page status */
		if (error == -EBADMSG) {
			pr_err("read error on sector %d, contents probably damaged", page);
			continue;
		}

		if (error)
			return error;

		/* page marked bad on media: leave its cache bit clear so it
		 * gets flagged as damaged after the write below */
		if ((extra.overwrite_flag & MEMSTICK_OV_PG_NORMAL) !=
							MEMSTICK_OV_PG_NORMAL) {
			dbg("page %d is marked as bad", page);
			continue;
		}

		set_bit(page, &msb->valid_cache_bitmap);
	}

	/* Write the cache now */
	error = msb_update_block(msb, msb->cache_block_lba, &sg, 0);
	/* the update moved the block; refetch its new physical address */
	pba = msb->lba_to_pba_table[msb->cache_block_lba];

	/* Mark invalid pages */
	if (!error) {
		for (page = 0; page < msb->pages_in_block; page++) {

			if (test_bit(page, &msb->valid_cache_bitmap))
				continue;

			dbg("marking page %d as containing damaged data",
				page);
			msb_set_overwrite_flag(msb,
				pba , page, 0xFF & ~MEMSTICK_OV_PG_NORMAL);
		}
	}

	msb_cache_discard(msb);
	return error;
}
1605
/*
 * Copy one page from @sg (at byte @offset) into the write cache.
 * If @add_to_cache_only is true the write is purely opportunistic:
 * it happens only when the cache already holds this lba.  Otherwise a
 * cache holding a different block is flushed first and the cache is
 * (re)claimed for @lba, arming the delayed flush timer.
 */
static int msb_cache_write(struct msb_data *msb, int lba,
	int page, bool add_to_cache_only, struct scatterlist *sg, int offset)
{
	int error;
	struct scatterlist sg_tmp[10];

	if (msb->read_only)
		return -EROFS;

	/* opportunistic writes never steal the cache from another block */
	if (msb->cache_block_lba == MS_BLOCK_INVALID ||
						lba != msb->cache_block_lba)
		if (add_to_cache_only)
			return 0;

	/* If we need to write different block */
	if (msb->cache_block_lba != MS_BLOCK_INVALID &&
						lba != msb->cache_block_lba) {
		dbg_verbose("first flush the cache");
		error = msb_cache_flush(msb);
		if (error)
			return error;
	}

	/* claim the (now empty) cache and start the flush countdown */
	if (msb->cache_block_lba == MS_BLOCK_INVALID) {
		msb->cache_block_lba = lba;
		mod_timer(&msb->cache_flush_timer,
			jiffies + msecs_to_jiffies(cache_flush_timeout));
	}

	dbg_verbose("Write of LBA %d page %d to cache ", lba, page);

	/* carve exactly one page out of the caller's scatterlist */
	sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
	msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp), offset, msb->page_size);

	sg_copy_to_buffer(sg_tmp, sg_nents(sg_tmp),
		msb->cache + page * msb->page_size, msb->page_size);

	set_bit(page, &msb->valid_cache_bitmap);
	return 0;
}
1646
/*
 * Read one page of @lba into @sg at byte @offset, serving it from the
 * write cache when the page is cached, otherwise from the media (and
 * opportunistically populating the cache with the data just read).
 */
static int msb_cache_read(struct msb_data *msb, int lba,
				int page, struct scatterlist *sg, int offset)
{
	int pba = msb->lba_to_pba_table[lba];
	struct scatterlist sg_tmp[10];
	int error = 0;

	if (lba == msb->cache_block_lba &&
			test_bit(page, &msb->valid_cache_bitmap)) {

		dbg_verbose("Read of LBA %d (pba %d) sector %d from cache",
							lba, pba, page);

		/* carve one page of the caller's sg and fill it from cache */
		sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
		msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp),
			offset, msb->page_size);
		sg_copy_from_buffer(sg_tmp, sg_nents(sg_tmp),
			msb->cache + msb->page_size * page,
							msb->page_size);
	} else {
		dbg_verbose("Read of LBA %d (pba %d) sector %d from device",
							lba, pba, page);

		error = msb_read_page(msb, pba, page, NULL, sg, offset);
		if (error)
			return error;

		/* best-effort: cache what we just read (add_to_cache_only) */
		msb_cache_write(msb, lba, page, true, sg, offset);
	}
	return error;
}
1678
1679/* Emulated geometry table
 * This table content isn't that important,
1681 * One could put here different values, providing that they still
1682 * cover whole disk.
1683 * 64 MB entry is what windows reports for my 64M memstick
1684 */
1685
/* Emulated CHS geometry, selected by raw card size in megabytes */
static const struct chs_entry chs_table[] = {
/* size sectors cylinders heads */
	{ 4, 16, 247, 2 },
	{ 8, 16, 495, 2 },
	{ 16, 16, 495, 4 },
	{ 32, 16, 991, 4 },
	{ 64, 16, 991, 8 },
	{128, 16, 991, 16 },
	{ 0 }
};
1696
1697/* Load information about the card */
1698static int msb_init_card(struct memstick_dev *card)
1699{
1700 struct msb_data *msb = memstick_get_drvdata(card);
1701 struct memstick_host *host = card->host;
1702 struct ms_boot_page *boot_block;
1703 int error = 0, i, raw_size_in_megs;
1704
1705 msb->caps = 0;
1706
1707 if (card->id.class >= MEMSTICK_CLASS_ROM &&
1708 card->id.class <= MEMSTICK_CLASS_ROM)
1709 msb->read_only = true;
1710
1711 msb->state = -1;
1712 error = msb_reset(msb, false);
1713 if (error)
1714 return error;
1715
1716 /* Due to a bug in Jmicron driver written by Alex Dubov,
1717 * its serial mode barely works,
1718 * so we switch to parallel mode right away
1719 */
1720 if (host->caps & MEMSTICK_CAP_PAR4)
1721 msb_switch_to_parallel(msb);
1722
1723 msb->page_size = sizeof(struct ms_boot_page);
1724
1725 /* Read the boot page */
1726 error = msb_read_boot_blocks(msb);
1727 if (error)
1728 return -EIO;
1729
1730 boot_block = &msb->boot_page[0];
1731
1732 /* Save intersting attributes from boot page */
1733 msb->block_count = boot_block->attr.number_of_blocks;
1734 msb->page_size = boot_block->attr.page_size;
1735
1736 msb->pages_in_block = boot_block->attr.block_size * 2;
1737 msb->block_size = msb->page_size * msb->pages_in_block;
1738
1739 if ((size_t)msb->page_size > PAGE_SIZE) {
1740 /* this isn't supported by linux at all, anyway*/
1741 dbg("device page %d size isn't supported", msb->page_size);
1742 return -EINVAL;
1743 }
1744
1745 msb->block_buffer = kzalloc(msb->block_size, GFP_KERNEL);
1746 if (!msb->block_buffer)
1747 return -ENOMEM;
1748
1749 raw_size_in_megs = (msb->block_size * msb->block_count) >> 20;
1750
1751 for (i = 0; chs_table[i].size; i++) {
1752
1753 if (chs_table[i].size != raw_size_in_megs)
1754 continue;
1755
1756 msb->geometry.cylinders = chs_table[i].cyl;
1757 msb->geometry.heads = chs_table[i].head;
1758 msb->geometry.sectors = chs_table[i].sec;
1759 break;
1760 }
1761
1762 if (boot_block->attr.transfer_supporting == 1)
1763 msb->caps |= MEMSTICK_CAP_PAR4;
1764
1765 if (boot_block->attr.device_type & 0x03)
1766 msb->read_only = true;
1767
1768 dbg("Total block count = %d", msb->block_count);
1769 dbg("Each block consists of %d pages", msb->pages_in_block);
1770 dbg("Page size = %d bytes", msb->page_size);
1771 dbg("Parallel mode supported: %d", !!(msb->caps & MEMSTICK_CAP_PAR4));
1772 dbg("Read only: %d", msb->read_only);
1773
1774#if 0
1775 /* Now we can switch the interface */
1776 if (host->caps & msb->caps & MEMSTICK_CAP_PAR4)
1777 msb_switch_to_parallel(msb);
1778#endif
1779
1780 error = msb_cache_init(msb);
1781 if (error)
1782 return error;
1783
1784 error = msb_ftl_initialize(msb);
1785 if (error)
1786 return error;
1787
1788
1789 /* Read the bad block table */
1790 error = msb_read_bad_block_table(msb, 0);
1791
1792 if (error && error != -ENOMEM) {
1793 dbg("failed to read bad block table from primary boot block, trying from backup");
1794 error = msb_read_bad_block_table(msb, 1);
1795 }
1796
1797 if (error)
1798 return error;
1799
1800 /* *drum roll* Scan the media */
1801 error = msb_ftl_scan(msb);
1802 if (error) {
1803 pr_err("Scan of media failed");
1804 return error;
1805 }
1806
1807 return 0;
1808
1809}
1810
1811static int msb_do_write_request(struct msb_data *msb, int lba,
1812 int page, struct scatterlist *sg, size_t len, int *sucessfuly_written)
1813{
1814 int error = 0;
1815 off_t offset = 0;
1816 *sucessfuly_written = 0;
1817
1818 while (offset < len) {
1819 if (page == 0 && len - offset >= msb->block_size) {
1820
1821 if (msb->cache_block_lba == lba)
1822 msb_cache_discard(msb);
1823
1824 dbg_verbose("Writing whole lba %d", lba);
1825 error = msb_update_block(msb, lba, sg, offset);
1826 if (error)
1827 return error;
1828
1829 offset += msb->block_size;
1830 *sucessfuly_written += msb->block_size;
1831 lba++;
1832 continue;
1833 }
1834
1835 error = msb_cache_write(msb, lba, page, false, sg, offset);
1836 if (error)
1837 return error;
1838
1839 offset += msb->page_size;
1840 *sucessfuly_written += msb->page_size;
1841
1842 page++;
1843 if (page == msb->pages_in_block) {
1844 page = 0;
1845 lba++;
1846 }
1847 }
1848 return 0;
1849}
1850
1851static int msb_do_read_request(struct msb_data *msb, int lba,
1852 int page, struct scatterlist *sg, int len, int *sucessfuly_read)
1853{
1854 int error = 0;
1855 int offset = 0;
1856 *sucessfuly_read = 0;
1857
1858 while (offset < len) {
1859
1860 error = msb_cache_read(msb, lba, page, sg, offset);
1861 if (error)
1862 return error;
1863
1864 offset += msb->page_size;
1865 *sucessfuly_read += msb->page_size;
1866
1867 page++;
1868 if (page == msb->pages_in_block) {
1869 page = 0;
1870 lba++;
1871 }
1872 }
1873 return 0;
1874}
1875
/*
 * The single IO worker.  Loops until there is neither a pending cache
 * flush nor a pending block request.  msb->req and need_flush_cache are
 * shared with the blk-mq submit path and the flush timer, hence the
 * q_lock dance around every access.
 */
static void msb_io_work(struct work_struct *work)
{
	struct msb_data *msb = container_of(work, struct msb_data, io_work);
	int page, error, len;
	sector_t lba;
	struct scatterlist *sg = msb->prealloc_sg;
	struct request *req;

	dbg_verbose("IO: work started");

	while (1) {
		spin_lock_irq(&msb->q_lock);

		/* flush requested by the cache timer takes priority */
		if (msb->need_flush_cache) {
			msb->need_flush_cache = false;
			spin_unlock_irq(&msb->q_lock);
			msb_cache_flush(msb);
			continue;
		}

		req = msb->req;
		if (!req) {
			dbg_verbose("IO: no more requests exiting");
			spin_unlock_irq(&msb->q_lock);
			return;
		}

		spin_unlock_irq(&msb->q_lock);

		/* process the request */
		dbg_verbose("IO: processing new request");
		blk_rq_map_sg(msb->queue, req, sg);

		lba = blk_rq_pos(req);

		/* convert the 512-byte sector number into (lba, page) */
		sector_div(lba, msb->page_size / 512);
		page = sector_div(lba, msb->pages_in_block);

		if (rq_data_dir(msb->req) == READ)
			error = msb_do_read_request(msb, lba, page, sg,
				blk_rq_bytes(req), &len);
		else
			error = msb_do_write_request(msb, lba, page, sg,
				blk_rq_bytes(req), &len);

		/* complete the bytes that succeeded; if that was all of the
		 * request, finish it and drop our reference */
		if (len && !blk_update_request(req, BLK_STS_OK, len)) {
			__blk_mq_end_request(req, BLK_STS_OK);
			spin_lock_irq(&msb->q_lock);
			msb->req = NULL;
			spin_unlock_irq(&msb->q_lock);
		}

		/* fail whatever remains of a partially-completed request */
		if (error && msb->req) {
			blk_status_t ret = errno_to_blk_status(error);

			dbg_verbose("IO: ending one sector of the request with error");
			blk_mq_end_request(req, ret);
			spin_lock_irq(&msb->q_lock);
			msb->req = NULL;
			spin_unlock_irq(&msb->q_lock);
		}

		if (msb->req)
			dbg_verbose("IO: request still pending");
	}
}
1942
1943static DEFINE_IDR(msb_disk_idr); /*set of used disk numbers */
1944static DEFINE_MUTEX(msb_disk_lock); /* protects against races in open/release */
1945
1946static void msb_data_clear(struct msb_data *msb)
1947{
1948 kfree(msb->boot_page);
1949 bitmap_free(msb->used_blocks_bitmap);
1950 bitmap_free(msb->erased_blocks_bitmap);
1951 kfree(msb->lba_to_pba_table);
1952 kfree(msb->cache);
1953 msb->card = NULL;
1954}
1955
/* block_device_operations.getgeo: report the emulated CHS geometry */
static int msb_bd_getgeo(struct block_device *bdev,
				 struct hd_geometry *geo)
{
	struct msb_data *msb = bdev->bd_disk->private_data;
	*geo = msb->geometry;
	return 0;
}
1963
/*
 * Called by the block layer when the last reference to the gendisk is
 * dropped: release the disk id and the driver data itself.
 */
static void msb_bd_free_disk(struct gendisk *disk)
{
	struct msb_data *msb = disk->private_data;

	mutex_lock(&msb_disk_lock);
	idr_remove(&msb_disk_idr, msb->disk_id);
	mutex_unlock(&msb_disk_lock);

	kfree(msb);
}
1974
/*
 * blk-mq dispatch: accept at most one request at a time (stored in
 * msb->req) and hand it to the IO worker.  Requests arriving while the
 * card is gone are failed immediately; a busy slot pushes back with
 * BLK_STS_DEV_RESOURCE so blk-mq retries later.
 */
static blk_status_t msb_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct memstick_dev *card = hctx->queue->queuedata;
	struct msb_data *msb = memstick_get_drvdata(card);
	struct request *req = bd->rq;

	dbg_verbose("Submit request");

	spin_lock_irq(&msb->q_lock);

	if (msb->card_dead) {
		dbg("Refusing requests on removed card");

		WARN_ON(!msb->io_queue_stopped);

		spin_unlock_irq(&msb->q_lock);
		blk_mq_start_request(req);
		return BLK_STS_IOERR;
	}

	/* single-slot: a request is already in flight */
	if (msb->req) {
		spin_unlock_irq(&msb->q_lock);
		return BLK_STS_DEV_RESOURCE;
	}

	blk_mq_start_request(req);
	msb->req = req;

	if (!msb->io_queue_stopped)
		queue_work(msb->io_queue, &msb->io_work);

	spin_unlock_irq(&msb->q_lock);
	return BLK_STS_OK;
}
2010
2011static int msb_check_card(struct memstick_dev *card)
2012{
2013 struct msb_data *msb = memstick_get_drvdata(card);
2014
2015 return (msb->card_dead == 0);
2016}
2017
/*
 * Quiesce all IO: stop the blk-mq queues, cancel the cache flush timer,
 * drain the IO worker and requeue any request it left behind.  Used by
 * the memstick core before host access is revoked, and on suspend.
 */
static void msb_stop(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	unsigned long flags;

	dbg("Stopping all msblock IO");

	blk_mq_stop_hw_queues(msb->queue);
	spin_lock_irqsave(&msb->q_lock, flags);
	msb->io_queue_stopped = true;
	spin_unlock_irqrestore(&msb->q_lock, flags);

	del_timer_sync(&msb->cache_flush_timer);
	flush_workqueue(msb->io_queue);

	/* give back whatever request the worker did not finish */
	spin_lock_irqsave(&msb->q_lock, flags);
	if (msb->req) {
		blk_mq_requeue_request(msb->req, false);
		msb->req = NULL;
	}
	spin_unlock_irqrestore(&msb->q_lock, flags);
}
2040
/*
 * Counterpart of msb_stop(): restart queues and the IO worker after the
 * memstick core returns host access to us (also called at probe time).
 */
static void msb_start(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	unsigned long flags;

	dbg("Resuming IO from msblock");

	msb_invalidate_reg_window(msb);

	spin_lock_irqsave(&msb->q_lock, flags);
	if (!msb->io_queue_stopped || msb->card_dead) {
		spin_unlock_irqrestore(&msb->q_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&msb->q_lock, flags);

	/* Kick cache flush anyway, its harmless */
	/* NOTE(review): these flags are written after dropping q_lock -
	 * presumably safe because the queue is still stopped here; confirm
	 * before relying on it. */
	msb->need_flush_cache = true;
	msb->io_queue_stopped = false;

	blk_mq_start_hw_queues(msb->queue);

	queue_work(msb->io_queue, &msb->io_work);

}
2066
/* Operations exported to the block layer for the emulated disk */
static const struct block_device_operations msb_bdops = {
	.owner = THIS_MODULE,
	.getgeo = msb_bd_getgeo,
	.free_disk = msb_bd_free_disk,
};
2072
/* blk-mq dispatch callbacks (single hardware queue, depth 2) */
static const struct blk_mq_ops msb_mq_ops = {
	.queue_rq = msb_queue_rq,
};
2076
2077/* Registers the block device */
/* Registers the block device */
static int msb_init_disk(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	int rc;
	unsigned long capacity;

	/* reserve a small, stable minor index for msblk%d */
	mutex_lock(&msb_disk_lock);
	msb->disk_id = idr_alloc(&msb_disk_idr, card, 0, 256, GFP_KERNEL);
	mutex_unlock(&msb_disk_lock);

	if (msb->disk_id  < 0)
		return msb->disk_id;

	rc = blk_mq_alloc_sq_tag_set(&msb->tag_set, &msb_mq_ops, 2,
				     BLK_MQ_F_SHOULD_MERGE);
	if (rc)
		goto out_release_id;

	msb->disk = blk_mq_alloc_disk(&msb->tag_set, card);
	if (IS_ERR(msb->disk)) {
		rc = PTR_ERR(msb->disk);
		goto out_free_tag_set;
	}
	msb->queue = msb->disk->queue;

	/* one request may span at most one block worth of pages */
	blk_queue_max_hw_sectors(msb->queue, MS_BLOCK_MAX_PAGES);
	blk_queue_max_segments(msb->queue, MS_BLOCK_MAX_SEGS);
	blk_queue_max_segment_size(msb->queue,
				   MS_BLOCK_MAX_PAGES * msb->page_size);
	blk_queue_logical_block_size(msb->queue, msb->page_size);

	sprintf(msb->disk->disk_name, "msblk%d", msb->disk_id);
	msb->disk->fops = &msb_bdops;
	msb->disk->private_data = msb;

	/* capacity in 512-byte sectors */
	capacity = msb->pages_in_block * msb->logical_block_count;
	capacity *= (msb->page_size / 512);
	set_capacity(msb->disk, capacity);
	dbg("Set total disk size to %lu sectors", capacity);

	msb->io_queue = alloc_ordered_workqueue("ms_block", WQ_MEM_RECLAIM);
	if (!msb->io_queue) {
		rc = -ENOMEM;
		goto out_cleanup_disk;
	}

	INIT_WORK(&msb->io_work, msb_io_work);
	sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);

	if (msb->read_only)
		set_disk_ro(msb->disk, 1);

	msb_start(card);
	rc = device_add_disk(&card->dev, msb->disk, NULL);
	if (rc)
		goto out_destroy_workqueue;
	dbg("Disk added");
	return 0;

out_destroy_workqueue:
	destroy_workqueue(msb->io_queue);
out_cleanup_disk:
	put_disk(msb->disk);
out_free_tag_set:
	blk_mq_free_tag_set(&msb->tag_set);
out_release_id:
	mutex_lock(&msb_disk_lock);
	idr_remove(&msb_disk_idr, msb->disk_id);
	mutex_unlock(&msb_disk_lock);
	return rc;
}
2149
/*
 * Driver probe: allocate per-card state, bring the card up and register
 * the block device.  On failure everything allocated so far is undone.
 */
static int msb_probe(struct memstick_dev *card)
{
	struct msb_data *msb;
	int rc = 0;

	msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
	if (!msb)
		return -ENOMEM;
	memstick_set_drvdata(card, msb);
	msb->card = card;
	spin_lock_init(&msb->q_lock);

	rc = msb_init_card(card);
	if (rc)
		goto out_free;

	rc = msb_init_disk(card);
	if (!rc) {
		/* hooks used by the memstick core for card lifetime */
		card->check = msb_check_card;
		card->stop = msb_stop;
		card->start = msb_start;
		return 0;
	}
out_free:
	memstick_set_drvdata(card, NULL);
	msb_data_clear(msb);
	kfree(msb);
	return rc;
}
2179
/*
 * Driver remove: stop IO, mark the card dead so restarted queues fail
 * everything outstanding, tear down the disk and release the data.
 * The msb struct itself is freed later by msb_bd_free_disk() once the
 * last gendisk reference drops.
 */
static void msb_remove(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	unsigned long flags;

	if (!msb->io_queue_stopped)
		msb_stop(card);

	dbg("Removing the disk device");

	/* Take care of unhandled + new requests from now on */
	spin_lock_irqsave(&msb->q_lock, flags);
	msb->card_dead = true;
	spin_unlock_irqrestore(&msb->q_lock, flags);
	/* restart the queues so queued requests get errored out */
	blk_mq_start_hw_queues(msb->queue);

	/* Remove the disk */
	del_gendisk(msb->disk);
	blk_mq_free_tag_set(&msb->tag_set);
	msb->queue = NULL;

	mutex_lock(&msb_disk_lock);
	msb_data_clear(msb);
	mutex_unlock(&msb_disk_lock);

	put_disk(msb->disk);
	memstick_set_drvdata(card, NULL);
}
2208
2209#ifdef CONFIG_PM
2210
/* PM suspend: just quiesce IO; the media is re-validated on resume */
static int msb_suspend(struct memstick_dev *card, pm_message_t state)
{
	msb_stop(card);
	return 0;
}
2216
2217static int msb_resume(struct memstick_dev *card)
2218{
2219 struct msb_data *msb = memstick_get_drvdata(card);
2220 struct msb_data *new_msb = NULL;
2221 bool card_dead = true;
2222
2223#ifndef CONFIG_MEMSTICK_UNSAFE_RESUME
2224 msb->card_dead = true;
2225 return 0;
2226#endif
2227 mutex_lock(&card->host->lock);
2228
2229 new_msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
2230 if (!new_msb)
2231 goto out;
2232
2233 new_msb->card = card;
2234 memstick_set_drvdata(card, new_msb);
2235 spin_lock_init(&new_msb->q_lock);
2236 sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);
2237
2238 if (msb_init_card(card))
2239 goto out;
2240
2241 if (msb->block_size != new_msb->block_size)
2242 goto out;
2243
2244 if (memcmp(msb->boot_page, new_msb->boot_page,
2245 sizeof(struct ms_boot_page)))
2246 goto out;
2247
2248 if (msb->logical_block_count != new_msb->logical_block_count ||
2249 memcmp(msb->lba_to_pba_table, new_msb->lba_to_pba_table,
2250 msb->logical_block_count))
2251 goto out;
2252
2253 if (msb->block_count != new_msb->block_count ||
2254 !bitmap_equal(msb->used_blocks_bitmap, new_msb->used_blocks_bitmap,
2255 msb->block_count))
2256 goto out;
2257
2258 card_dead = false;
2259out:
2260 if (card_dead)
2261 dbg("Card was removed/replaced during suspend");
2262
2263 msb->card_dead = card_dead;
2264 memstick_set_drvdata(card, msb);
2265
2266 if (new_msb) {
2267 msb_data_clear(new_msb);
2268 kfree(new_msb);
2269 }
2270
2271 msb_start(card);
2272 mutex_unlock(&card->host->lock);
2273 return 0;
2274}
2275#else
2276
2277#define msb_suspend NULL
2278#define msb_resume NULL
2279
2280#endif /* CONFIG_PM */
2281
/* Cards handled by this driver: every legacy (pre-PRO) storage class,
 * including the read-only and write-protected variants, plus Duo. */
static struct memstick_device_id msb_id_tbl[] = {
	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
	 MEMSTICK_CLASS_FLASH},

	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
	 MEMSTICK_CLASS_ROM},

	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
	 MEMSTICK_CLASS_RO},

	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
	 MEMSTICK_CLASS_WP},

	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_DUO, MEMSTICK_CATEGORY_STORAGE_DUO,
	 MEMSTICK_CLASS_DUO},
	{}
};
MODULE_DEVICE_TABLE(memstick, msb_id_tbl);
2300
2301
/* Registration record handed to the memstick core */
static struct memstick_driver msb_driver = {
	.driver = {
		.name  = DRIVER_NAME,
		.owner = THIS_MODULE
	},
	.id_table = msb_id_tbl,
	.probe    = msb_probe,
	.remove   = msb_remove,
	.suspend  = msb_suspend,
	.resume   = msb_resume
};
2313
2314static int __init msb_init(void)
2315{
2316 int rc = memstick_register_driver(&msb_driver);
2317
2318 if (rc)
2319 pr_err("failed to register memstick driver (error %d)\n", rc);
2320
2321 return rc;
2322}
2323
/* Module exit: unregister the driver and drop the disk id allocator */
static void __exit msb_exit(void)
{
	memstick_unregister_driver(&msb_driver);
	idr_destroy(&msb_disk_idr);
}
2329
2330module_init(msb_init);
2331module_exit(msb_exit);
2332
/* Delay (msec) between the first cached write and the automatic flush */
module_param(cache_flush_timeout, int, S_IRUGO);
MODULE_PARM_DESC(cache_flush_timeout,
				"Cache flush timeout in msec (1000 default)");
/* Verbosity of the dbg()/dbg_verbose() output */
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level (0-2)");

/* When set, every written block is read back and compared */
module_param(verify_writes, bool, S_IRUGO);
MODULE_PARM_DESC(verify_writes, "Read back and check all data that is written");
2341
2342MODULE_LICENSE("GPL");
2343MODULE_AUTHOR("Maxim Levitsky");
2344MODULE_DESCRIPTION("Sony MemoryStick block device driver");
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * ms_block.c - Sony MemoryStick (legacy) storage support
4
5 * Copyright (C) 2013 Maxim Levitsky <maximlevitsky@gmail.com>
6 *
7 * Minor portions of the driver were copied from mspro_block.c which is
8 * Copyright (C) 2007 Alex Dubov <oakad@yahoo.com>
9 */
10#define DRIVER_NAME "ms_block"
11#define pr_fmt(fmt) DRIVER_NAME ": " fmt
12
13#include <linux/module.h>
14#include <linux/blk-mq.h>
15#include <linux/memstick.h>
16#include <linux/idr.h>
17#include <linux/hdreg.h>
18#include <linux/delay.h>
19#include <linux/slab.h>
20#include <linux/random.h>
21#include <linux/bitmap.h>
22#include <linux/scatterlist.h>
23#include <linux/jiffies.h>
24#include <linux/workqueue.h>
25#include <linux/mutex.h>
26#include "ms_block.h"
27
/* Module parameters (see module_param()/MODULE_PARM_DESC at end of file) */
static int debug;			/* debug level (0-2) */
static int cache_flush_timeout = 1000;	/* cache flush timeout, msec */
static bool verify_writes;		/* read back and verify every write */
31
32/*
33 * Copies section of 'sg_from' starting from offset 'offset' and with length
34 * 'len' To another scatterlist of to_nents enties
35 */
36static size_t msb_sg_copy(struct scatterlist *sg_from,
37 struct scatterlist *sg_to, int to_nents, size_t offset, size_t len)
38{
39 size_t copied = 0;
40
41 while (offset > 0) {
42 if (offset >= sg_from->length) {
43 if (sg_is_last(sg_from))
44 return 0;
45
46 offset -= sg_from->length;
47 sg_from = sg_next(sg_from);
48 continue;
49 }
50
51 copied = min(len, sg_from->length - offset);
52 sg_set_page(sg_to, sg_page(sg_from),
53 copied, sg_from->offset + offset);
54
55 len -= copied;
56 offset = 0;
57
58 if (sg_is_last(sg_from) || !len)
59 goto out;
60
61 sg_to = sg_next(sg_to);
62 to_nents--;
63 sg_from = sg_next(sg_from);
64 }
65
66 while (len > sg_from->length && to_nents--) {
67 len -= sg_from->length;
68 copied += sg_from->length;
69
70 sg_set_page(sg_to, sg_page(sg_from),
71 sg_from->length, sg_from->offset);
72
73 if (sg_is_last(sg_from) || !len)
74 goto out;
75
76 sg_from = sg_next(sg_from);
77 sg_to = sg_next(sg_to);
78 }
79
80 if (len && to_nents) {
81 sg_set_page(sg_to, sg_page(sg_from), len, sg_from->offset);
82 copied += len;
83 }
84out:
85 sg_mark_end(sg_to);
86 return copied;
87}
88
89/*
90 * Compares section of 'sg' starting from offset 'offset' and with length 'len'
91 * to linear buffer of length 'len' at address 'buffer'
92 * Returns 0 if equal and -1 otherwice
93 */
94static int msb_sg_compare_to_buffer(struct scatterlist *sg,
95 size_t offset, u8 *buffer, size_t len)
96{
97 int retval = 0, cmplen;
98 struct sg_mapping_iter miter;
99
100 sg_miter_start(&miter, sg, sg_nents(sg),
101 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
102
103 while (sg_miter_next(&miter) && len > 0) {
104 if (offset >= miter.length) {
105 offset -= miter.length;
106 continue;
107 }
108
109 cmplen = min(miter.length - offset, len);
110 retval = memcmp(miter.addr + offset, buffer, cmplen) ? -1 : 0;
111 if (retval)
112 break;
113
114 buffer += cmplen;
115 len -= cmplen;
116 offset = 0;
117 }
118
119 if (!retval && len)
120 retval = -1;
121
122 sg_miter_stop(&miter);
123 return retval;
124}
125
126
/* Get zone at which block with logical address 'lba' lives
 * Flash is broken into zones.
 * Each zone consists of 512 eraseblocks, out of which in first
 * zone 494 are used and 496 are for all following zones.
 * Therefore zone #0 hosts blocks 0-493, zone #1 blocks 494-988, etc...
 */
static int msb_get_zone_from_lba(int lba)
{
	return (lba < 494) ? 0 : 1 + ((lba - 494) / 496);
}
139
/* Get zone of physical block. Trivial: every zone is MS_BLOCKS_IN_ZONE pbas */
static int msb_get_zone_from_pba(int pba)
{
	return pba / MS_BLOCKS_IN_ZONE;
}
145
146/* Debug test to validate free block counts */
147static int msb_validate_used_block_bitmap(struct msb_data *msb)
148{
149 int total_free_blocks = 0;
150 int i;
151
152 if (!debug)
153 return 0;
154
155 for (i = 0; i < msb->zone_count; i++)
156 total_free_blocks += msb->free_block_count[i];
157
158 if (msb->block_count - bitmap_weight(msb->used_blocks_bitmap,
159 msb->block_count) == total_free_blocks)
160 return 0;
161
162 pr_err("BUG: free block counts don't match the bitmap");
163 msb->read_only = true;
164 return -EINVAL;
165}
166
/* Mark physical block as used; double-marking flags a bug and sets r/o mode */
static void msb_mark_block_used(struct msb_data *msb, int pba)
{
	int zone = msb_get_zone_from_pba(pba);

	if (test_bit(pba, msb->used_blocks_bitmap)) {
		pr_err(
		"BUG: attempt to mark already used pba %d as used", pba);
		msb->read_only = true;
		return;
	}

	if (msb_validate_used_block_bitmap(msb))
		return;

	/* No races because all IO is single threaded */
	__set_bit(pba, msb->used_blocks_bitmap);
	msb->free_block_count[zone]--;
}
186
/* Mark physical block as free; double-freeing flags a bug and sets r/o mode */
static void msb_mark_block_unused(struct msb_data *msb, int pba)
{
	int zone = msb_get_zone_from_pba(pba);

	if (!test_bit(pba, msb->used_blocks_bitmap)) {
		pr_err("BUG: attempt to mark already unused pba %d as unused" , pba);
		msb->read_only = true;
		return;
	}

	if (msb_validate_used_block_bitmap(msb))
		return;

	/* No races because all IO is single threaded */
	__clear_bit(pba, msb->used_blocks_bitmap);
	msb->free_block_count[zone]++;
}
205
/*
 * Invalidate current register window: reset the cached read/write window to
 * the ID register and clear addr_valid, so the next msb_read_regs/
 * msb_write_regs is forced to re-issue MS_TPC_SET_RW_REG_ADRS first.
 */
static void msb_invalidate_reg_window(struct msb_data *msb)
{
	msb->reg_addr.w_offset = offsetof(struct ms_register, id);
	msb->reg_addr.w_length = sizeof(struct ms_id_register);
	msb->reg_addr.r_offset = offsetof(struct ms_register, id);
	msb->reg_addr.r_length = sizeof(struct ms_id_register);
	msb->addr_valid = false;
}
215
/*
 * Start a state machine: install 'state_func' as the card's request handler,
 * kick the host, and block until the handler calls msb_exit_state_machine().
 * Returns the error code the state machine finished with (msb->exit_error).
 */
static int msb_run_state_machine(struct msb_data *msb, int   (*state_func)
		(struct memstick_dev *card, struct memstick_request **req))
{
	struct memstick_dev *card = msb->card;

	WARN_ON(msb->state != -1);
	msb->int_polling = false;
	msb->state = 0;
	msb->exit_error = 0;

	memset(&card->current_mrq, 0, sizeof(card->current_mrq));

	card->next_request = state_func;
	memstick_new_req(card->host);
	wait_for_completion(&card->mrq_complete);

	WARN_ON(msb->state != -1);
	return msb->exit_error;
}
236
/*
 * State machines call that to exit: record 'error' for the waiter in
 * msb_run_state_machine(), park the request handler on h_msb_default_bad and
 * wake the waiter.  Always returns -ENXIO so the handler can tail-return it
 * to tell the memstick core no further request follows.
 */
static int msb_exit_state_machine(struct msb_data *msb, int error)
{
	WARN_ON(msb->state == -1);

	msb->state = -1;
	msb->exit_error = error;
	msb->card->next_request = h_msb_default_bad;

	/* Invalidate reg window on errors */
	if (error)
		msb_invalidate_reg_window(msb);

	complete(&msb->card->mrq_complete);
	return -ENXIO;
}
253
/*
 * read INT register.  Returns 1 if a MS_TPC_GET_INT request was queued (the
 * caller must return and wait for its completion), 0 if mrq->data[0] already
 * holds the INT value (auto-INT capable host, or polling timed out — in the
 * latter case a fake MEMSTICK_INT_CMDNAK is reported).
 * 'timeout' is in msec; -1 selects the default 500 ms.
 */
static int msb_read_int_reg(struct msb_data *msb, long timeout)
{
	struct memstick_request *mrq = &msb->card->current_mrq;

	WARN_ON(msb->state == -1);

	if (!msb->int_polling) {
		msb->int_timeout = jiffies +
			msecs_to_jiffies(timeout == -1 ? 500 : timeout);
		msb->int_polling = true;
	} else if (time_after(jiffies, msb->int_timeout)) {
		/* Polling deadline passed: report command-NAK to the caller */
		mrq->data[0] = MEMSTICK_INT_CMDNAK;
		return 0;
	}

	if ((msb->caps & MEMSTICK_CAP_AUTO_GET_INT) &&
				mrq->need_card_int && !mrq->error) {
		/* Host already fetched INT with the previous transfer */
		mrq->data[0] = mrq->int_reg;
		mrq->need_card_int = false;
		return 0;
	} else {
		memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
		return 1;
	}
}
280
/*
 * Read a register.  If the current register window doesn't match, queues a
 * MS_TPC_SET_RW_REG_ADRS request to move it and returns 0 (caller must retry
 * the same state).  Otherwise queues the MS_TPC_READ_REG and returns 1.
 */
static int msb_read_regs(struct msb_data *msb, int offset, int len)
{
	struct memstick_request *req = &msb->card->current_mrq;

	if (msb->reg_addr.r_offset != offset ||
	    msb->reg_addr.r_length != len || !msb->addr_valid) {

		msb->reg_addr.r_offset = offset;
		msb->reg_addr.r_length = len;
		msb->addr_valid = true;

		memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
			&msb->reg_addr, sizeof(msb->reg_addr));
		return 0;
	}

	memstick_init_req(req, MS_TPC_READ_REG, NULL, len);
	return 1;
}
301
/*
 * Write a card register.  Same two-phase protocol as msb_read_regs(): returns
 * 0 after queuing a window update (caller retries the state), 1 after queuing
 * the actual MS_TPC_WRITE_REG of 'len' bytes from 'buf'.
 */
static int msb_write_regs(struct msb_data *msb, int offset, int len, void *buf)
{
	struct memstick_request *req = &msb->card->current_mrq;

	if (msb->reg_addr.w_offset != offset ||
		msb->reg_addr.w_length != len  || !msb->addr_valid) {

		msb->reg_addr.w_offset = offset;
		msb->reg_addr.w_length = len;
		msb->addr_valid = true;

		memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
			&msb->reg_addr, sizeof(msb->reg_addr));
		return 0;
	}

	memstick_init_req(req, MS_TPC_WRITE_REG, buf, len);
	return 1;
}
322
/* Handler for absence of IO: installed when no state machine is running, so
 * any unexpected request from the core is rejected with -ENXIO */
static int h_msb_default_bad(struct memstick_dev *card,
					struct memstick_request **mrq)
{
	return -ENXIO;
}
329
330/*
331 * This function is a handler for reads of one page from device.
332 * Writes output to msb->current_sg, takes sector address from msb->reg.param
333 * Can also be used to read extra data only. Set params accordintly.
334 */
335static int h_msb_read_page(struct memstick_dev *card,
336 struct memstick_request **out_mrq)
337{
338 struct msb_data *msb = memstick_get_drvdata(card);
339 struct memstick_request *mrq = *out_mrq = &card->current_mrq;
340 struct scatterlist sg[2];
341 u8 command, intreg;
342
343 if (mrq->error) {
344 dbg("read_page, unknown error");
345 return msb_exit_state_machine(msb, mrq->error);
346 }
347again:
348 switch (msb->state) {
349 case MSB_RP_SEND_BLOCK_ADDRESS:
350 /* msb_write_regs sometimes "fails" because it needs to update
351 the reg window, and thus it returns request for that.
352 Then we stay in this state and retry */
353 if (!msb_write_regs(msb,
354 offsetof(struct ms_register, param),
355 sizeof(struct ms_param_register),
356 (unsigned char *)&msb->regs.param))
357 return 0;
358
359 msb->state = MSB_RP_SEND_READ_COMMAND;
360 return 0;
361
362 case MSB_RP_SEND_READ_COMMAND:
363 command = MS_CMD_BLOCK_READ;
364 memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
365 msb->state = MSB_RP_SEND_INT_REQ;
366 return 0;
367
368 case MSB_RP_SEND_INT_REQ:
369 msb->state = MSB_RP_RECEIVE_INT_REQ_RESULT;
370 /* If dont actually need to send the int read request (only in
371 serial mode), then just fall through */
372 if (msb_read_int_reg(msb, -1))
373 return 0;
374 /* fallthrough */
375
376 case MSB_RP_RECEIVE_INT_REQ_RESULT:
377 intreg = mrq->data[0];
378 msb->regs.status.interrupt = intreg;
379
380 if (intreg & MEMSTICK_INT_CMDNAK)
381 return msb_exit_state_machine(msb, -EIO);
382
383 if (!(intreg & MEMSTICK_INT_CED)) {
384 msb->state = MSB_RP_SEND_INT_REQ;
385 goto again;
386 }
387
388 msb->int_polling = false;
389 msb->state = (intreg & MEMSTICK_INT_ERR) ?
390 MSB_RP_SEND_READ_STATUS_REG : MSB_RP_SEND_OOB_READ;
391 goto again;
392
393 case MSB_RP_SEND_READ_STATUS_REG:
394 /* read the status register to understand source of the INT_ERR */
395 if (!msb_read_regs(msb,
396 offsetof(struct ms_register, status),
397 sizeof(struct ms_status_register)))
398 return 0;
399
400 msb->state = MSB_RP_RECEIVE_STATUS_REG;
401 return 0;
402
403 case MSB_RP_RECEIVE_STATUS_REG:
404 msb->regs.status = *(struct ms_status_register *)mrq->data;
405 msb->state = MSB_RP_SEND_OOB_READ;
406 /* fallthrough */
407
408 case MSB_RP_SEND_OOB_READ:
409 if (!msb_read_regs(msb,
410 offsetof(struct ms_register, extra_data),
411 sizeof(struct ms_extra_data_register)))
412 return 0;
413
414 msb->state = MSB_RP_RECEIVE_OOB_READ;
415 return 0;
416
417 case MSB_RP_RECEIVE_OOB_READ:
418 msb->regs.extra_data =
419 *(struct ms_extra_data_register *) mrq->data;
420 msb->state = MSB_RP_SEND_READ_DATA;
421 /* fallthrough */
422
423 case MSB_RP_SEND_READ_DATA:
424 /* Skip that state if we only read the oob */
425 if (msb->regs.param.cp == MEMSTICK_CP_EXTRA) {
426 msb->state = MSB_RP_RECEIVE_READ_DATA;
427 goto again;
428 }
429
430 sg_init_table(sg, ARRAY_SIZE(sg));
431 msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
432 msb->current_sg_offset,
433 msb->page_size);
434
435 memstick_init_req_sg(mrq, MS_TPC_READ_LONG_DATA, sg);
436 msb->state = MSB_RP_RECEIVE_READ_DATA;
437 return 0;
438
439 case MSB_RP_RECEIVE_READ_DATA:
440 if (!(msb->regs.status.interrupt & MEMSTICK_INT_ERR)) {
441 msb->current_sg_offset += msb->page_size;
442 return msb_exit_state_machine(msb, 0);
443 }
444
445 if (msb->regs.status.status1 & MEMSTICK_UNCORR_ERROR) {
446 dbg("read_page: uncorrectable error");
447 return msb_exit_state_machine(msb, -EBADMSG);
448 }
449
450 if (msb->regs.status.status1 & MEMSTICK_CORR_ERROR) {
451 dbg("read_page: correctable error");
452 msb->current_sg_offset += msb->page_size;
453 return msb_exit_state_machine(msb, -EUCLEAN);
454 } else {
455 dbg("read_page: INT error, but no status error bits");
456 return msb_exit_state_machine(msb, -EIO);
457 }
458 }
459
460 BUG();
461}
462
463/*
464 * Handler of writes of exactly one block.
465 * Takes address from msb->regs.param.
466 * Writes same extra data to blocks, also taken
467 * from msb->regs.extra
468 * Returns -EBADMSG if write fails due to uncorrectable error, or -EIO if
469 * device refuses to take the command or something else
470 */
471static int h_msb_write_block(struct memstick_dev *card,
472 struct memstick_request **out_mrq)
473{
474 struct msb_data *msb = memstick_get_drvdata(card);
475 struct memstick_request *mrq = *out_mrq = &card->current_mrq;
476 struct scatterlist sg[2];
477 u8 intreg, command;
478
479 if (mrq->error)
480 return msb_exit_state_machine(msb, mrq->error);
481
482again:
483 switch (msb->state) {
484
485 /* HACK: Jmicon handling of TPCs between 8 and
486 * sizeof(memstick_request.data) is broken due to hardware
487 * bug in PIO mode that is used for these TPCs
488 * Therefore split the write
489 */
490
491 case MSB_WB_SEND_WRITE_PARAMS:
492 if (!msb_write_regs(msb,
493 offsetof(struct ms_register, param),
494 sizeof(struct ms_param_register),
495 &msb->regs.param))
496 return 0;
497
498 msb->state = MSB_WB_SEND_WRITE_OOB;
499 return 0;
500
501 case MSB_WB_SEND_WRITE_OOB:
502 if (!msb_write_regs(msb,
503 offsetof(struct ms_register, extra_data),
504 sizeof(struct ms_extra_data_register),
505 &msb->regs.extra_data))
506 return 0;
507 msb->state = MSB_WB_SEND_WRITE_COMMAND;
508 return 0;
509
510
511 case MSB_WB_SEND_WRITE_COMMAND:
512 command = MS_CMD_BLOCK_WRITE;
513 memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
514 msb->state = MSB_WB_SEND_INT_REQ;
515 return 0;
516
517 case MSB_WB_SEND_INT_REQ:
518 msb->state = MSB_WB_RECEIVE_INT_REQ;
519 if (msb_read_int_reg(msb, -1))
520 return 0;
521 /* fallthrough */
522
523 case MSB_WB_RECEIVE_INT_REQ:
524 intreg = mrq->data[0];
525 msb->regs.status.interrupt = intreg;
526
527 /* errors mean out of here, and fast... */
528 if (intreg & (MEMSTICK_INT_CMDNAK))
529 return msb_exit_state_machine(msb, -EIO);
530
531 if (intreg & MEMSTICK_INT_ERR)
532 return msb_exit_state_machine(msb, -EBADMSG);
533
534
535 /* for last page we need to poll CED */
536 if (msb->current_page == msb->pages_in_block) {
537 if (intreg & MEMSTICK_INT_CED)
538 return msb_exit_state_machine(msb, 0);
539 msb->state = MSB_WB_SEND_INT_REQ;
540 goto again;
541
542 }
543
544 /* for non-last page we need BREQ before writing next chunk */
545 if (!(intreg & MEMSTICK_INT_BREQ)) {
546 msb->state = MSB_WB_SEND_INT_REQ;
547 goto again;
548 }
549
550 msb->int_polling = false;
551 msb->state = MSB_WB_SEND_WRITE_DATA;
552 /* fallthrough */
553
554 case MSB_WB_SEND_WRITE_DATA:
555 sg_init_table(sg, ARRAY_SIZE(sg));
556
557 if (msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
558 msb->current_sg_offset,
559 msb->page_size) < msb->page_size)
560 return msb_exit_state_machine(msb, -EIO);
561
562 memstick_init_req_sg(mrq, MS_TPC_WRITE_LONG_DATA, sg);
563 mrq->need_card_int = 1;
564 msb->state = MSB_WB_RECEIVE_WRITE_CONFIRMATION;
565 return 0;
566
567 case MSB_WB_RECEIVE_WRITE_CONFIRMATION:
568 msb->current_page++;
569 msb->current_sg_offset += msb->page_size;
570 msb->state = MSB_WB_SEND_INT_REQ;
571 goto again;
572 default:
573 BUG();
574 }
575
576 return 0;
577}
578
579/*
580 * This function is used to send simple IO requests to device that consist
581 * of register write + command
582 */
583static int h_msb_send_command(struct memstick_dev *card,
584 struct memstick_request **out_mrq)
585{
586 struct msb_data *msb = memstick_get_drvdata(card);
587 struct memstick_request *mrq = *out_mrq = &card->current_mrq;
588 u8 intreg;
589
590 if (mrq->error) {
591 dbg("send_command: unknown error");
592 return msb_exit_state_machine(msb, mrq->error);
593 }
594again:
595 switch (msb->state) {
596
597 /* HACK: see h_msb_write_block */
598 case MSB_SC_SEND_WRITE_PARAMS: /* write param register*/
599 if (!msb_write_regs(msb,
600 offsetof(struct ms_register, param),
601 sizeof(struct ms_param_register),
602 &msb->regs.param))
603 return 0;
604 msb->state = MSB_SC_SEND_WRITE_OOB;
605 return 0;
606
607 case MSB_SC_SEND_WRITE_OOB:
608 if (!msb->command_need_oob) {
609 msb->state = MSB_SC_SEND_COMMAND;
610 goto again;
611 }
612
613 if (!msb_write_regs(msb,
614 offsetof(struct ms_register, extra_data),
615 sizeof(struct ms_extra_data_register),
616 &msb->regs.extra_data))
617 return 0;
618
619 msb->state = MSB_SC_SEND_COMMAND;
620 return 0;
621
622 case MSB_SC_SEND_COMMAND:
623 memstick_init_req(mrq, MS_TPC_SET_CMD, &msb->command_value, 1);
624 msb->state = MSB_SC_SEND_INT_REQ;
625 return 0;
626
627 case MSB_SC_SEND_INT_REQ:
628 msb->state = MSB_SC_RECEIVE_INT_REQ;
629 if (msb_read_int_reg(msb, -1))
630 return 0;
631 /* fallthrough */
632
633 case MSB_SC_RECEIVE_INT_REQ:
634 intreg = mrq->data[0];
635
636 if (intreg & MEMSTICK_INT_CMDNAK)
637 return msb_exit_state_machine(msb, -EIO);
638 if (intreg & MEMSTICK_INT_ERR)
639 return msb_exit_state_machine(msb, -EBADMSG);
640
641 if (!(intreg & MEMSTICK_INT_CED)) {
642 msb->state = MSB_SC_SEND_INT_REQ;
643 goto again;
644 }
645
646 return msb_exit_state_machine(msb, 0);
647 }
648
649 BUG();
650}
651
/* Small handler for card reset: send MS_CMD_RESET, then exit on the
 * confirmation pass (no INT polling is done for reset) */
static int h_msb_reset(struct memstick_dev *card,
					struct memstick_request **out_mrq)
{
	u8 command = MS_CMD_RESET;
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_request *mrq = *out_mrq = &card->current_mrq;

	if (mrq->error)
		return msb_exit_state_machine(msb, mrq->error);

	switch (msb->state) {
	case MSB_RS_SEND:
		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
		mrq->need_card_int = 0;
		msb->state = MSB_RS_CONFIRM;
		return 0;
	case MSB_RS_CONFIRM:
		return msb_exit_state_machine(msb, 0);
	}
	BUG();
}
674
/* This handler is used to do serial->parallel switch:
 * set MEMSTICK_SYS_PAM on the card, switch the host to PAR4, then issue a
 * dummy GET_INT to confirm the card still responds in the new mode */
static int h_msb_parallel_switch(struct memstick_dev *card,
					struct memstick_request **out_mrq)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
	struct memstick_host *host = card->host;

	if (mrq->error) {
		dbg("parallel_switch: error");
		/* Roll back the cached mode bit on failure */
		msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
		return msb_exit_state_machine(msb, mrq->error);
	}

	switch (msb->state) {
	case MSB_PS_SEND_SWITCH_COMMAND:
		/* Set the parallel interface on memstick side */
		msb->regs.param.system |= MEMSTICK_SYS_PAM;

		if (!msb_write_regs(msb,
			offsetof(struct ms_register, param),
			1,
			(unsigned char *)&msb->regs.param))
			return 0;

		msb->state = MSB_PS_SWICH_HOST;
		return 0;

	case MSB_PS_SWICH_HOST:
		 /* Set parallel interface on our side + send a dummy request
			to see if card responds */
		host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_PAR4);
		memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
		msb->state = MSB_PS_CONFIRM;
		return 0;

	case MSB_PS_CONFIRM:
		return msb_exit_state_machine(msb, 0);
	}

	BUG();
}
717
718static int msb_switch_to_parallel(struct msb_data *msb);
719
/* Reset the card, to guard against hw errors being treated as bad blocks.
 * With 'full' also power-cycles the host and drops back to serial mode; the
 * previous parallel mode is restored afterwards if it was active. */
static int msb_reset(struct msb_data *msb, bool full)
{

	bool was_parallel = msb->regs.param.system & MEMSTICK_SYS_PAM;
	struct memstick_dev *card = msb->card;
	struct memstick_host *host = card->host;
	int error;

	/* Reset the card */
	msb->regs.param.system = MEMSTICK_SYS_BAMD;

	if (full) {
		error = host->set_param(host,
					MEMSTICK_POWER, MEMSTICK_POWER_OFF);
		if (error)
			goto out_error;

		msb_invalidate_reg_window(msb);

		error = host->set_param(host,
					MEMSTICK_POWER, MEMSTICK_POWER_ON);
		if (error)
			goto out_error;

		error = host->set_param(host,
					MEMSTICK_INTERFACE, MEMSTICK_SERIAL);
		if (error) {
			/* note: label sits inside this if() so the earlier
			 * gotos share the same failure handling */
out_error:
			dbg("Failed to reset the host controller");
			msb->read_only = true;
			return -EFAULT;
		}
	}

	error = msb_run_state_machine(msb, h_msb_reset);
	if (error) {
		dbg("Failed to reset the card");
		msb->read_only = true;
		return -ENODEV;
	}

	/* Set parallel mode */
	if (was_parallel)
		msb_switch_to_parallel(msb);
	return 0;
}
767
768/* Attempts to switch interface to parallel mode */
769static int msb_switch_to_parallel(struct msb_data *msb)
770{
771 int error;
772
773 error = msb_run_state_machine(msb, h_msb_parallel_switch);
774 if (error) {
775 pr_err("Switch to parallel failed");
776 msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
777 msb_reset(msb, true);
778 return -EFAULT;
779 }
780
781 msb->caps |= MEMSTICK_CAP_AUTO_GET_INT;
782 return 0;
783}
784
/* Changes overwrite flag on a page: issues a BLOCK_WRITE in OVERWRITE mode
 * carrying only the new overwrite byte in the OOB.  Fails with -EROFS when
 * the card is in read-only mode. */
static int msb_set_overwrite_flag(struct msb_data *msb,
	u16 pba, u8 page, u8 flag)
{
	if (msb->read_only)
		return -EROFS;

	msb->regs.param.block_address = cpu_to_be16(pba);
	msb->regs.param.page_address = page;
	msb->regs.param.cp = MEMSTICK_CP_OVERWRITE;
	msb->regs.extra_data.overwrite_flag = flag;
	msb->command_value = MS_CMD_BLOCK_WRITE;
	msb->command_need_oob = true;

	dbg_verbose("changing overwrite flag to %02x for sector %d, page %d",
							flag, pba, page);
	return msb_run_state_machine(msb, h_msb_send_command);
}
803
/* Mark a whole physical block bad by clearing its BKST overwrite bit */
static int msb_mark_bad(struct msb_data *msb, int pba)
{
	pr_notice("marking pba %d as bad", pba);
	msb_reset(msb, true);
	return msb_set_overwrite_flag(
			msb, pba, 0, 0xFF & ~MEMSTICK_OVERWRITE_BKST);
}
811
/* Mark a single page bad by clearing its PGST0 overwrite bit */
static int msb_mark_page_bad(struct msb_data *msb, int pba, int page)
{
	dbg("marking page %d of pba %d as bad", page, pba);
	msb_reset(msb, true);
	return msb_set_overwrite_flag(msb,
		pba, page, ~MEMSTICK_OVERWRITE_PGST0);
}
819
/* Erases one physical block (up to two attempts, with a card reset between
 * failed attempts).  On final failure the block is marked bad. */
static int msb_erase_block(struct msb_data *msb, u16 pba)
{
	int error, try;
	if (msb->read_only)
		return -EROFS;

	dbg_verbose("erasing pba %d", pba);

	for (try = 1; try < 3; try++) {
		msb->regs.param.block_address = cpu_to_be16(pba);
		msb->regs.param.page_address = 0;
		msb->regs.param.cp = MEMSTICK_CP_BLOCK;
		msb->command_value = MS_CMD_BLOCK_ERASE;
		msb->command_need_oob = false;


		error = msb_run_state_machine(msb, h_msb_send_command);
		if (!error || msb_reset(msb, true))
			break;
	}

	if (error) {
		pr_err("erase failed, marking pba %d as bad", pba);
		msb_mark_bad(msb, pba);
	}

	/* NOTE(review): even on failure the block is marked unused/erased
	 * below (after being marked bad above) — confirm this is intended */
	dbg_verbose("erase success, marking pba %d as unused", pba);
	msb_mark_block_unused(msb, pba);
	__set_bit(pba, msb->erased_blocks_bitmap);
	return error;
}
852
/*
 * Reads one page from device into 'sg' at 'offset'; the OOB is stored in
 * *extra when non-NULL.  An unmapped pba (MS_BLOCK_INVALID) is emulated by
 * filling the output with 0xFF.  Retries once after a card reset; correctable
 * ECC errors are logged and treated as success, uncorrectable ones mark the
 * page bad and return -EBADMSG.
 */
static int msb_read_page(struct msb_data *msb,
	u16 pba, u8 page, struct ms_extra_data_register *extra,
					struct scatterlist *sg,  int offset)
{
	int try, error;

	if (pba == MS_BLOCK_INVALID) {
		unsigned long flags;
		struct sg_mapping_iter miter;
		size_t len = msb->page_size;

		dbg_verbose("read unmapped sector. returning 0xFF");

		local_irq_save(flags);
		sg_miter_start(&miter, sg, sg_nents(sg),
				SG_MITER_ATOMIC | SG_MITER_TO_SG);

		while (sg_miter_next(&miter) && len > 0) {

			int chunklen;

			if (offset && offset >= miter.length) {
				offset -= miter.length;
				continue;
			}

			chunklen = min(miter.length - offset, len);
			memset(miter.addr + offset, 0xFF, chunklen);
			len -= chunklen;
			offset = 0;
		}

		sg_miter_stop(&miter);
		local_irq_restore(flags);

		/* 'offset' still nonzero means sg was too short to reach it */
		if (offset)
			return -EFAULT;

		if (extra)
			memset(extra, 0xFF, sizeof(*extra));
		return 0;
	}

	if (pba >= msb->block_count) {
		pr_err("BUG: attempt to read beyond the end of the card at pba %d", pba);
		return -EINVAL;
	}

	for (try = 1; try < 3; try++) {
		msb->regs.param.block_address = cpu_to_be16(pba);
		msb->regs.param.page_address = page;
		msb->regs.param.cp = MEMSTICK_CP_PAGE;

		msb->current_sg = sg;
		msb->current_sg_offset = offset;
		error = msb_run_state_machine(msb, h_msb_read_page);


		if (error == -EUCLEAN) {
			pr_notice("correctable error on pba %d, page %d",
				pba, page);
			error = 0;
		}

		if (!error && extra)
			*extra = msb->regs.extra_data;

		if (!error || msb_reset(msb, true))
			break;

	}

	/* Mark bad pages */
	if (error == -EBADMSG) {
		pr_err("uncorrectable error on read of pba %d, page %d",
			pba, page);

		if (msb->regs.extra_data.overwrite_flag &
					MEMSTICK_OVERWRITE_PGST0)
			msb_mark_page_bad(msb, pba, page);
		return -EBADMSG;
	}

	if (error)
		pr_err("read of pba %d, page %d failed with error %d",
			pba, page, error);
	return error;
}
942
943/* Reads oob of page only */
944static int msb_read_oob(struct msb_data *msb, u16 pba, u16 page,
945 struct ms_extra_data_register *extra)
946{
947 int error;
948
949 BUG_ON(!extra);
950 msb->regs.param.block_address = cpu_to_be16(pba);
951 msb->regs.param.page_address = page;
952 msb->regs.param.cp = MEMSTICK_CP_EXTRA;
953
954 if (pba > msb->block_count) {
955 pr_err("BUG: attempt to read beyond the end of card at pba %d", pba);
956 return -EINVAL;
957 }
958
959 error = msb_run_state_machine(msb, h_msb_read_page);
960 *extra = msb->regs.extra_data;
961
962 if (error == -EUCLEAN) {
963 pr_notice("correctable error on pba %d, page %d",
964 pba, page);
965 return 0;
966 }
967
968 return error;
969}
970
971/* Reads a block and compares it with data contained in scatterlist orig_sg */
972static int msb_verify_block(struct msb_data *msb, u16 pba,
973 struct scatterlist *orig_sg, int offset)
974{
975 struct scatterlist sg;
976 int page = 0, error;
977
978 sg_init_one(&sg, msb->block_buffer, msb->block_size);
979
980 while (page < msb->pages_in_block) {
981
982 error = msb_read_page(msb, pba, page,
983 NULL, &sg, page * msb->page_size);
984 if (error)
985 return error;
986 page++;
987 }
988
989 if (msb_sg_compare_to_buffer(orig_sg, offset,
990 msb->block_buffer, msb->block_size))
991 return -EIO;
992 return 0;
993}
994
/* Writes exactly one block + oob.  The target pba must be valid, in the same
 * zone as 'lba', and not a boot block.  Data that wasn't written to a
 * known-erased block (or always, with verify_writes set) is read back and
 * compared; one erase-and-retry is attempted on failure. */
static int msb_write_block(struct msb_data *msb,
			u16 pba, u32 lba, struct scatterlist *sg, int offset)
{
	int error, current_try = 1;
	BUG_ON(sg->length < msb->page_size);

	if (msb->read_only)
		return -EROFS;

	if (pba == MS_BLOCK_INVALID) {
		pr_err(
			"BUG: write: attempt to write MS_BLOCK_INVALID block");
		return -EINVAL;
	}

	if (pba >= msb->block_count || lba >= msb->logical_block_count) {
		pr_err(
		"BUG: write: attempt to write beyond the end of device");
		return -EINVAL;
	}

	if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
		pr_err("BUG: write: lba zone mismatch");
		return -EINVAL;
	}

	if (pba == msb->boot_block_locations[0] ||
		pba == msb->boot_block_locations[1]) {
		pr_err("BUG: write: attempt to write to boot blocks!");
		return -EINVAL;
	}

	while (1) {

		if (msb->read_only)
			return -EROFS;

		msb->regs.param.cp = MEMSTICK_CP_BLOCK;
		msb->regs.param.page_address = 0;
		msb->regs.param.block_address = cpu_to_be16(pba);

		msb->regs.extra_data.management_flag = 0xFF;
		msb->regs.extra_data.overwrite_flag = 0xF8;
		msb->regs.extra_data.logical_address = cpu_to_be16(lba);

		msb->current_sg = sg;
		msb->current_sg_offset = offset;
		msb->current_page = 0;

		error = msb_run_state_machine(msb, h_msb_write_block);

		/* Sector we just wrote to is assumed erased since its pba
			was erased. If it wasn't erased, write will succeed
			and will just clear the bits that were set in the block
			thus test that what we have written,
			matches what we expect.
			We do trust the blocks that we erased */
		if (!error && (verify_writes ||
				!test_bit(pba, msb->erased_blocks_bitmap)))
			error = msb_verify_block(msb, pba, sg, offset);

		if (!error)
			break;

		/* Only one retry; give up too if the card reset fails */
		if (current_try > 1 || msb_reset(msb, true))
			break;

		pr_err("write failed, trying to erase the pba %d", pba);
		error = msb_erase_block(msb, pba);
		if (error)
			break;

		current_try++;
	}
	return error;
}
1072
/* Finds a free block for write replacement: picks a random free block inside
 * 'zone', marks it used and returns its pba.  Returns MS_BLOCK_INVALID (and
 * switches to r/o mode) when the zone has no free blocks left. */
static u16 msb_get_free_block(struct msb_data *msb, int zone)
{
	u16 pos;
	int pba = zone * MS_BLOCKS_IN_ZONE;
	int i;

	get_random_bytes(&pos, sizeof(pos));

	if (!msb->free_block_count[zone]) {
		pr_err("NO free blocks in the zone %d, to use for a write, (media is WORN out) switching to RO mode", zone);
		msb->read_only = true;
		return MS_BLOCK_INVALID;
	}

	pos %= msb->free_block_count[zone];

	dbg_verbose("have %d choices for a free block, selected randomly: %d",
		msb->free_block_count[zone], pos);

	/* Walk the used-blocks bitmap to the pos-th clear bit in the zone */
	pba = find_next_zero_bit(msb->used_blocks_bitmap,
			msb->block_count, pba);
	for (i = 0; i < pos; ++i)
		pba = find_next_zero_bit(msb->used_blocks_bitmap,
						msb->block_count, pba + 1);

	dbg_verbose("result of the free blocks scan: pba %d", pba);

	/* Scan must not run off the card or cross into the next zone */
	if (pba == msb->block_count || (msb_get_zone_from_pba(pba)) != zone) {
		pr_err("BUG: cant get a free block");
		msb->read_only = true;
		return MS_BLOCK_INVALID;
	}

	msb_mark_block_used(msb, pba);
	return pba;
}
1110
/* Rewrite logical block 'lba' with the data in 'sg' at 'offset': flag the old
 * pba as being updated, write the data to a freshly allocated free block
 * (up to 3 attempts, marking failed blocks bad), then erase the old block
 * and update the lba->pba mapping.  Switches to r/o mode if all tries fail. */
static int msb_update_block(struct msb_data *msb, u16 lba,
	struct scatterlist *sg, int offset)
{
	u16 pba, new_pba;
	int error, try;

	pba = msb->lba_to_pba_table[lba];
	dbg_verbose("start of a block update at lba  %d, pba %d", lba, pba);

	if (pba != MS_BLOCK_INVALID) {
		dbg_verbose("setting the update flag on the block");
		msb_set_overwrite_flag(msb, pba, 0,
				0xFF & ~MEMSTICK_OVERWRITE_UDST);
	}

	for (try = 0; try < 3; try++) {
		new_pba = msb_get_free_block(msb,
			msb_get_zone_from_lba(lba));

		if (new_pba == MS_BLOCK_INVALID) {
			error = -EIO;
			goto out;
		}

		dbg_verbose("block update: writing updated block to the pba %d",
								new_pba);
		error = msb_write_block(msb, new_pba, lba, sg, offset);
		if (error == -EBADMSG) {
			/* Bad new block: mark it and try another free block */
			msb_mark_bad(msb, new_pba);
			continue;
		}

		if (error)
			goto out;

		dbg_verbose("block update: erasing the old block");
		msb_erase_block(msb, pba);
		msb->lba_to_pba_table[lba] = new_pba;
		return 0;
	}
out:
	if (error) {
		pr_err("block update error after %d tries,  switching to r/o mode", try);
		msb->read_only = true;
	}
	return error;
}
1158
/* Converts endianness in the boot block for easy use: all multi-byte fields
 * are stored big-endian on the card and are converted to CPU order in place
 * (so this must be called exactly once per freshly read boot page) */
static void msb_fix_boot_page_endianness(struct ms_boot_page *p)
{
	p->header.block_id = be16_to_cpu(p->header.block_id);
	p->header.format_reserved = be16_to_cpu(p->header.format_reserved);
	p->entry.disabled_block.start_addr
		= be32_to_cpu(p->entry.disabled_block.start_addr);
	p->entry.disabled_block.data_size
		= be32_to_cpu(p->entry.disabled_block.data_size);
	p->entry.cis_idi.start_addr
		= be32_to_cpu(p->entry.cis_idi.start_addr);
	p->entry.cis_idi.data_size
		= be32_to_cpu(p->entry.cis_idi.data_size);
	p->attr.block_size = be16_to_cpu(p->attr.block_size);
	p->attr.number_of_blocks = be16_to_cpu(p->attr.number_of_blocks);
	p->attr.number_of_effective_blocks
		= be16_to_cpu(p->attr.number_of_effective_blocks);
	p->attr.page_size = be16_to_cpu(p->attr.page_size);
	p->attr.memory_manufacturer_code
		= be16_to_cpu(p->attr.memory_manufacturer_code);
	p->attr.memory_device_code = be16_to_cpu(p->attr.memory_device_code);
	p->attr.implemented_capacity
		= be16_to_cpu(p->attr.implemented_capacity);
	p->attr.controller_number = be16_to_cpu(p->attr.controller_number);
	p->attr.controller_function = be16_to_cpu(p->attr.controller_function);
}
1185
/*
 * Scan the first MS_BLOCK_MAX_BOOT_ADDR physical blocks for up to two
 * boot blocks (identified by MS_BLOCK_BOOT_ID and the SYSFLG management
 * bit), storing their locations and byte-swapped contents in @msb.
 * Returns 0 if at least one boot block was found, -EIO otherwise,
 * -ENOMEM on allocation failure.
 */
static int msb_read_boot_blocks(struct msb_data *msb)
{
	int pba = 0;
	struct scatterlist sg;
	struct ms_extra_data_register extra;
	struct ms_boot_page *page;

	msb->boot_block_locations[0] = MS_BLOCK_INVALID;
	msb->boot_block_locations[1] = MS_BLOCK_INVALID;
	msb->boot_block_count = 0;

	dbg_verbose("Start of a scan for the boot blocks");

	/* Buffer for two boot pages; reused on a rescan (e.g. resume) */
	if (!msb->boot_page) {
		page = kmalloc_array(2, sizeof(struct ms_boot_page),
				     GFP_KERNEL);
		if (!page)
			return -ENOMEM;

		msb->boot_page = page;
	} else
		page = msb->boot_page;

	/* Temporary block_count so msb_read_page accepts these pba values;
	   the real count is taken from the boot page afterwards. */
	msb->block_count = MS_BLOCK_MAX_BOOT_ADDR;

	for (pba = 0; pba < MS_BLOCK_MAX_BOOT_ADDR; pba++) {

		sg_init_one(&sg, page, sizeof(*page));
		if (msb_read_page(msb, pba, 0, &extra, &sg, 0)) {
			dbg("boot scan: can't read pba %d", pba);
			continue;
		}

		/* Boot blocks have the SYSFLG bit cleared */
		if (extra.management_flag & MEMSTICK_MANAGEMENT_SYSFLG) {
			dbg("management flag doesn't indicate boot block %d",
									pba);
			continue;
		}

		if (be16_to_cpu(page->header.block_id) != MS_BLOCK_BOOT_ID) {
			dbg("the pba at %d doesn' contain boot block ID", pba);
			continue;
		}

		msb_fix_boot_page_endianness(page);
		msb->boot_block_locations[msb->boot_block_count] = pba;

		/* advance to the second slot of the kmalloc'ed pair */
		page++;
		msb->boot_block_count++;

		if (msb->boot_block_count == 2)
			break;
	}

	if (!msb->boot_block_count) {
		pr_err("media doesn't contain master page, aborting");
		return -EIO;
	}

	dbg_verbose("End of scan for boot blocks");
	return 0;
}
1248
/*
 * Read the factory bad-block table stored inside boot block @block_nr
 * (0 = primary, 1 = backup) and mark every listed block as used so the
 * FTL never allocates it.  Returns 0 on success or if the table is
 * empty, negative errno otherwise.
 */
static int msb_read_bad_block_table(struct msb_data *msb, int block_nr)
{
	struct ms_boot_page *boot_block;
	struct scatterlist sg;
	u16 *buffer = NULL;
	int offset = 0;
	int i, error = 0;
	int data_size, data_offset, page, page_offset, size_to_read;
	u16 pba;

	BUG_ON(block_nr > 1);
	boot_block = &msb->boot_page[block_nr];
	pba = msb->boot_block_locations[block_nr];

	if (msb->boot_block_locations[block_nr] == MS_BLOCK_INVALID)
		return -EINVAL;

	/* Table location is expressed relative to the end of the boot page */
	data_size = boot_block->entry.disabled_block.data_size;
	data_offset = sizeof(struct ms_boot_page) +
			boot_block->entry.disabled_block.start_addr;
	if (!data_size)
		return 0;

	/* Split the byte offset into a page number + offset within a page,
	   and round the read up to whole pages. */
	page = data_offset / msb->page_size;
	page_offset = data_offset % msb->page_size;
	size_to_read =
		DIV_ROUND_UP(data_size + page_offset, msb->page_size) *
			msb->page_size;

	dbg("reading bad block of boot block at pba %d, offset %d len %d",
		pba, data_offset, data_size);

	buffer = kzalloc(size_to_read, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* Read the buffer */
	sg_init_one(&sg, buffer, size_to_read);

	while (offset < size_to_read) {
		error = msb_read_page(msb, pba, page, NULL, &sg, offset);
		if (error)
			goto out;

		page++;
		offset += msb->page_size;

		if (page == msb->pages_in_block) {
			pr_err(
			"bad block table extends beyond the boot block");
			break;
		}
	}

	/* Process the bad block table */
	/* NOTE(review): page_offset is a BYTE offset but is used here as
	   a u16 index into buffer, and the table does not start at index 0
	   of the buffer - looks like a unit mismatch; TODO confirm against
	   the MemoryStick boot block layout before changing. */
	for (i = page_offset; i < data_size / sizeof(u16); i++) {

		u16 bad_block = be16_to_cpu(buffer[i]);

		if (bad_block >= msb->block_count) {
			dbg("bad block table contains invalid block %d",
								bad_block);
			continue;
		}

		if (test_bit(bad_block, msb->used_blocks_bitmap)) {
			dbg("duplicate bad block %d in the table",
				bad_block);
			continue;
		}

		dbg("block %d is marked as factory bad", bad_block);
		msb_mark_block_used(msb, bad_block);
	}
out:
	kfree(buffer);
	return error;
}
1327
1328static int msb_ftl_initialize(struct msb_data *msb)
1329{
1330 int i;
1331
1332 if (msb->ftl_initialized)
1333 return 0;
1334
1335 msb->zone_count = msb->block_count / MS_BLOCKS_IN_ZONE;
1336 msb->logical_block_count = msb->zone_count * 496 - 2;
1337
1338 msb->used_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
1339 msb->erased_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
1340 msb->lba_to_pba_table =
1341 kmalloc_array(msb->logical_block_count, sizeof(u16),
1342 GFP_KERNEL);
1343
1344 if (!msb->used_blocks_bitmap || !msb->lba_to_pba_table ||
1345 !msb->erased_blocks_bitmap) {
1346 kfree(msb->used_blocks_bitmap);
1347 kfree(msb->lba_to_pba_table);
1348 kfree(msb->erased_blocks_bitmap);
1349 return -ENOMEM;
1350 }
1351
1352 for (i = 0; i < msb->zone_count; i++)
1353 msb->free_block_count[i] = MS_BLOCKS_IN_ZONE;
1354
1355 memset(msb->lba_to_pba_table, MS_BLOCK_INVALID,
1356 msb->logical_block_count * sizeof(u16));
1357
1358 dbg("initial FTL tables created. Zone count = %d, Logical block count = %d",
1359 msb->zone_count, msb->logical_block_count);
1360
1361 msb->ftl_initialized = true;
1362 return 0;
1363}
1364
/*
 * Full media scan: read the OOB area of every physical block and build
 * the lba->pba table.  Classifies each block as boot / factory-bad /
 * bad / system / temporary / free / mapped, erasing stale or temporary
 * blocks as it goes.  When two physical blocks claim the same lba, the
 * one with the "updated" (UDST) bit still set loses.
 * Returns 0 or a negative errno on an unrecoverable read error.
 */
static int msb_ftl_scan(struct msb_data *msb)
{
	u16 pba, lba, other_block;
	u8 overwrite_flag, management_flag, other_overwrite_flag;
	int error;
	struct ms_extra_data_register extra;
	/* Remember each block's overwrite flag so collisions can be
	   resolved without re-reading the earlier block's OOB. */
	u8 *overwrite_flags = kzalloc(msb->block_count, GFP_KERNEL);

	if (!overwrite_flags)
		return -ENOMEM;

	dbg("Start of media scanning");
	for (pba = 0; pba < msb->block_count; pba++) {

		if (pba == msb->boot_block_locations[0] ||
			pba == msb->boot_block_locations[1]) {
			dbg_verbose("pba %05d -> [boot block]", pba);
			msb_mark_block_used(msb, pba);
			continue;
		}

		/* Already marked used => listed in the factory bad table */
		if (test_bit(pba, msb->used_blocks_bitmap)) {
			dbg_verbose("pba %05d -> [factory bad]", pba);
			continue;
		}

		memset(&extra, 0, sizeof(extra));
		error = msb_read_oob(msb, pba, 0, &extra);

		/* can't trust the page if we can't read the oob */
		if (error == -EBADMSG) {
			pr_notice(
			"oob of pba %d damaged, will try to erase it", pba);
			msb_mark_block_used(msb, pba);
			msb_erase_block(msb, pba);
			continue;
		} else if (error) {
			pr_err("unknown error %d on read of oob of pba %d - aborting",
				error, pba);

			kfree(overwrite_flags);
			return error;
		}

		lba = be16_to_cpu(extra.logical_address);
		management_flag = extra.management_flag;
		overwrite_flag = extra.overwrite_flag;
		overwrite_flags[pba] = overwrite_flag;

		/* Skip bad blocks */
		if (!(overwrite_flag & MEMSTICK_OVERWRITE_BKST)) {
			dbg("pba %05d -> [BAD]", pba);
			msb_mark_block_used(msb, pba);
			continue;
		}

		/* Skip system/drm blocks */
		if ((management_flag & MEMSTICK_MANAGEMENT_FLAG_NORMAL) !=
			MEMSTICK_MANAGEMENT_FLAG_NORMAL) {
			dbg("pba %05d -> [reserved management flag %02x]",
							pba, management_flag);
			msb_mark_block_used(msb, pba);
			continue;
		}

		/* Erase temporary tables */
		if (!(management_flag & MEMSTICK_MANAGEMENT_ATFLG)) {
			dbg("pba %05d -> [temp table] - will erase", pba);

			msb_mark_block_used(msb, pba);
			msb_erase_block(msb, pba);
			continue;
		}

		if (lba == MS_BLOCK_INVALID) {
			dbg_verbose("pba %05d -> [free]", pba);
			continue;
		}

		msb_mark_block_used(msb, pba);

		/* Block has LBA not according to zoning*/
		if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
			pr_notice("pba %05d -> [bad lba %05d] - will erase",
								pba, lba);
			msb_erase_block(msb, pba);
			continue;
		}

		/* No collisions - great */
		if (msb->lba_to_pba_table[lba] == MS_BLOCK_INVALID) {
			dbg_verbose("pba %05d -> [lba %05d]", pba, lba);
			msb->lba_to_pba_table[lba] = pba;
			continue;
		}

		/* Two physical blocks claim the same lba: keep the one
		   whose UDST ("update in progress") bit is clear. */
		other_block = msb->lba_to_pba_table[lba];
		other_overwrite_flag = overwrite_flags[other_block];

		pr_notice("Collision between pba %d and pba %d",
			pba, other_block);

		if (!(overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
			pr_notice("pba %d is marked as stable, use it", pba);
			msb_erase_block(msb, other_block);
			msb->lba_to_pba_table[lba] = pba;
			continue;
		}

		if (!(other_overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
			pr_notice("pba %d is marked as stable, use it",
								other_block);
			msb_erase_block(msb, pba);
			continue;
		}

		/* Neither copy is stable: arbitrarily keep the later one */
		pr_notice("collision between blocks %d and %d, without stable flag set on both, erasing pba %d",
				pba, other_block, other_block);

		msb_erase_block(msb, other_block);
		msb->lba_to_pba_table[lba] = pba;
	}

	dbg("End of media scanning");
	kfree(overwrite_flags);
	return 0;
}
1492
/*
 * Cache flush timer callback: request a flush and kick the IO worker,
 * which performs the actual flush in process context.
 */
static void msb_cache_flush_timer(struct timer_list *t)
{
	struct msb_data *msb = from_timer(msb, t, cache_flush_timer);
	msb->need_flush_cache = true;
	queue_work(msb->io_queue, &msb->io_work);
}
1499
1500
/*
 * Drop the write cache without writing it back: cancel the pending flush
 * timer, unbind the cached lba and invalidate all cached pages.
 * No-op when nothing is cached.
 */
static void msb_cache_discard(struct msb_data *msb)
{
	if (msb->cache_block_lba == MS_BLOCK_INVALID)
		return;

	del_timer_sync(&msb->cache_flush_timer);

	dbg_verbose("Discarding the write cache");
	msb->cache_block_lba = MS_BLOCK_INVALID;
	bitmap_zero(&msb->valid_cache_bitmap, msb->pages_in_block);
}
1512
/*
 * Set up the single-block write cache: arm the flush timer and allocate
 * the cache buffer (kept across re-init, e.g. on resume), then start
 * with an empty cache.  Returns 0 or -ENOMEM.
 */
static int msb_cache_init(struct msb_data *msb)
{
	timer_setup(&msb->cache_flush_timer, msb_cache_flush_timer, 0);

	if (!msb->cache)
		msb->cache = kzalloc(msb->block_size, GFP_KERNEL);
	if (!msb->cache)
		return -ENOMEM;

	msb_cache_discard(msb);
	return 0;
}
1525
/*
 * Write the cached block back to the media.  Pages the caller never
 * wrote are first read from the current physical block so a complete
 * block can be written; unreadable/bad source pages are written anyway
 * and then marked damaged in the new block's overwrite flags.
 * Returns 0, -EROFS in read-only mode, or a negative errno.
 */
static int msb_cache_flush(struct msb_data *msb)
{
	struct scatterlist sg;
	struct ms_extra_data_register extra;
	int page, offset, error;
	u16 pba, lba;

	if (msb->read_only)
		return -EROFS;

	if (msb->cache_block_lba == MS_BLOCK_INVALID)
		return 0;

	lba = msb->cache_block_lba;
	pba = msb->lba_to_pba_table[lba];

	dbg_verbose("Flushing the write cache of pba %d (LBA %d)",
						pba, msb->cache_block_lba);

	sg_init_one(&sg, msb->cache , msb->block_size);

	/* Read all missing pages in cache */
	for (page = 0; page < msb->pages_in_block; page++) {

		if (test_bit(page, &msb->valid_cache_bitmap))
			continue;

		offset = page * msb->page_size;

		dbg_verbose("reading non-present sector %d of cache block %d",
			page, lba);
		error = msb_read_page(msb, pba, page, &extra, &sg, offset);

		/* Bad pages are copied with 00 page status */
		if (error == -EBADMSG) {
			pr_err("read error on sector %d, contents probably damaged", page);
			continue;
		}

		if (error)
			return error;

		/* Source page already marked bad: leave it "invalid" so it
		   gets re-flagged as damaged in the new block below. */
		if ((extra.overwrite_flag & MEMSTICK_OV_PG_NORMAL) !=
			MEMSTICK_OV_PG_NORMAL) {
			dbg("page %d is marked as bad", page);
			continue;
		}

		set_bit(page, &msb->valid_cache_bitmap);
	}

	/* Write the cache now */
	error = msb_update_block(msb, msb->cache_block_lba, &sg, 0);
	/* msb_update_block remapped the lba; fetch the new pba */
	pba = msb->lba_to_pba_table[msb->cache_block_lba];

	/* Mark invalid pages */
	if (!error) {
		for (page = 0; page < msb->pages_in_block; page++) {

			if (test_bit(page, &msb->valid_cache_bitmap))
				continue;

			dbg("marking page %d as containing damaged data",
				page);
			msb_set_overwrite_flag(msb,
				pba , page, 0xFF & ~MEMSTICK_OV_PG_NORMAL);
		}
	}

	msb_cache_discard(msb);
	return error;
}
1598
/*
 * Store one page into the write cache.  If @add_to_cache_only is set,
 * the page is only recorded when its block is already the cached one
 * (used to opportunistically populate the cache on reads).  Writing a
 * page of a different block first flushes the current cache contents.
 * Returns 0, -EROFS in read-only mode, or a flush error.
 */
static int msb_cache_write(struct msb_data *msb, int lba,
	int page, bool add_to_cache_only, struct scatterlist *sg, int offset)
{
	int error;
	struct scatterlist sg_tmp[10];

	if (msb->read_only)
		return -EROFS;

	/* NOTE: intentionally two nested ifs - "cache miss AND
	   add_to_cache_only" is a silent no-op. */
	if (msb->cache_block_lba == MS_BLOCK_INVALID ||
						lba != msb->cache_block_lba)
		if (add_to_cache_only)
			return 0;

	/* If we need to write different block */
	if (msb->cache_block_lba != MS_BLOCK_INVALID &&
						lba != msb->cache_block_lba) {
		dbg_verbose("first flush the cache");
		error = msb_cache_flush(msb);
		if (error)
			return error;
	}

	/* Bind the cache to this lba and arm the delayed flush */
	if (msb->cache_block_lba  == MS_BLOCK_INVALID) {
		msb->cache_block_lba  = lba;
		mod_timer(&msb->cache_flush_timer,
			jiffies + msecs_to_jiffies(cache_flush_timeout));
	}

	dbg_verbose("Write of LBA %d page %d to cache ", lba, page);

	/* Carve the page-sized window at @offset out of @sg, then copy it
	   into the cache buffer. */
	sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
	msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp), offset, msb->page_size);

	sg_copy_to_buffer(sg_tmp, sg_nents(sg_tmp),
		msb->cache + page * msb->page_size, msb->page_size);

	set_bit(page, &msb->valid_cache_bitmap);
	return 0;
}
1639
/*
 * Read one page, serving it from the write cache when present.  On a
 * cache miss the page is read from the media and opportunistically
 * added to the cache (add_to_cache_only, so only if its block is the
 * cached one).  Returns 0 or a negative errno from the media read.
 */
static int msb_cache_read(struct msb_data *msb, int lba,
				int page, struct scatterlist *sg, int offset)
{
	int pba = msb->lba_to_pba_table[lba];
	struct scatterlist sg_tmp[10];
	int error = 0;

	if (lba == msb->cache_block_lba &&
	    test_bit(page, &msb->valid_cache_bitmap)) {

		dbg_verbose("Read of LBA %d (pba %d) sector %d from cache",
							lba, pba, page);

		/* Copy straight from the cache buffer into the caller's sg */
		sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
		msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp),
			offset, msb->page_size);
		sg_copy_from_buffer(sg_tmp, sg_nents(sg_tmp),
			msb->cache + msb->page_size * page,
							msb->page_size);
	} else {
		dbg_verbose("Read of LBA %d (pba %d) sector %d from device",
							lba, pba, page);

		error = msb_read_page(msb, pba, page, NULL, sg, offset);
		if (error)
			return error;

		/* best effort; errors here are deliberately ignored */
		msb_cache_write(msb, lba, page, true, sg, offset);
	}
	return error;
}
1671
/* Emulated geometry table
 * This table content isn't that important,
 * One could put here different values, providing that they still
 * cover whole disk.
 * 64 MB entry is what windows reports for my 64M memstick */

static const struct chs_entry chs_table[] = {
/*        size (MB)  sectors  cylinders  heads */
	{ 4,    16,    247,    2  },
	{ 8,    16,    495,    2  },
	{ 16,   16,    495,    4  },
	{ 32,   16,    991,    4  },
	{ 64,   16,    991,    8  },
	{128,   16,    991,    16 },
	{ 0 }	/* terminator */
};
1688
/* Load information about the card */
/*
 * Full card initialization: reset, optional switch to parallel mode,
 * read the boot blocks, extract the geometry attributes, set up the
 * cache and FTL, load the factory bad block table (falling back to the
 * backup boot block), and finally scan the media to build the lba->pba
 * map.  Returns 0 or a negative errno.
 */
static int msb_init_card(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_host *host = card->host;
	struct ms_boot_page *boot_block;
	int error = 0, i, raw_size_in_megs;

	msb->caps = 0;

	/* NOTE(review): ">= ROM && <= ROM" is equivalent to "== ROM";
	   presumably the upper bound was meant to cover the other
	   read-only classes (RO/WP) from the id table - confirm against
	   the MEMSTICK_CLASS_* constants before changing. */
	if (card->id.class >= MEMSTICK_CLASS_ROM &&
				card->id.class <= MEMSTICK_CLASS_ROM)
		msb->read_only = true;

	msb->state = -1;
	error = msb_reset(msb, false);
	if (error)
		return error;

	/* Due to a bug in Jmicron driver written by Alex Dubov,
	 its serial mode barely works,
	 so we switch to parallel mode right away */
	if (host->caps & MEMSTICK_CAP_PAR4)
		msb_switch_to_parallel(msb);

	/* Provisional page size so the boot page itself can be read */
	msb->page_size = sizeof(struct ms_boot_page);

	/* Read the boot page */
	error = msb_read_boot_blocks(msb);
	if (error)
		return -EIO;

	boot_block = &msb->boot_page[0];

	/* Save interesting attributes from boot page */
	msb->block_count = boot_block->attr.number_of_blocks;
	msb->page_size = boot_block->attr.page_size;

	/* block_size attribute is in units of 2 pages */
	msb->pages_in_block = boot_block->attr.block_size * 2;
	msb->block_size = msb->page_size * msb->pages_in_block;

	if (msb->page_size > PAGE_SIZE) {
		/* this isn't supported by linux at all, anyway*/
		dbg("device page %d size isn't supported", msb->page_size);
		return -EINVAL;
	}

	msb->block_buffer = kzalloc(msb->block_size, GFP_KERNEL);
	if (!msb->block_buffer)
		return -ENOMEM;

	/* Pick a fake CHS geometry matching the raw capacity */
	raw_size_in_megs = (msb->block_size * msb->block_count) >> 20;

	for (i = 0; chs_table[i].size; i++) {

		if (chs_table[i].size != raw_size_in_megs)
			continue;

		msb->geometry.cylinders = chs_table[i].cyl;
		msb->geometry.heads = chs_table[i].head;
		msb->geometry.sectors = chs_table[i].sec;
		break;
	}

	if (boot_block->attr.transfer_supporting == 1)
		msb->caps |= MEMSTICK_CAP_PAR4;

	if (boot_block->attr.device_type & 0x03)
		msb->read_only = true;

	dbg("Total block count = %d", msb->block_count);
	dbg("Each block consists of %d pages", msb->pages_in_block);
	dbg("Page size = %d bytes", msb->page_size);
	dbg("Parallel mode supported: %d", !!(msb->caps & MEMSTICK_CAP_PAR4));
	dbg("Read only: %d", msb->read_only);

#if 0
	/* Now we can switch the interface */
	if (host->caps & msb->caps & MEMSTICK_CAP_PAR4)
		msb_switch_to_parallel(msb);
#endif

	error = msb_cache_init(msb);
	if (error)
		return error;

	error = msb_ftl_initialize(msb);
	if (error)
		return error;


	/* Read the bad block table */
	error = msb_read_bad_block_table(msb, 0);

	if (error && error != -ENOMEM) {
		dbg("failed to read bad block table from primary boot block, trying from backup");
		error = msb_read_bad_block_table(msb, 1);
	}

	if (error)
		return error;

	/* *drum roll* Scan the media */
	error = msb_ftl_scan(msb);
	if (error) {
		pr_err("Scan of media failed");
		return error;
	}

	return 0;

}
1801
/*
 * Write @len bytes from @sg starting at logical (@lba, @page).
 * Whole-block, block-aligned spans bypass the cache and go straight to
 * msb_update_block; everything else goes page-by-page through the write
 * cache.  *sucessfuly_written reports bytes completed even on error.
 * Returns 0 or a negative errno.
 */
static int msb_do_write_request(struct msb_data *msb, int lba,
	int page, struct scatterlist *sg, size_t len, int *sucessfuly_written)
{
	int error = 0;
	off_t offset = 0;
	*sucessfuly_written = 0;

	while (offset < len) {
		/* Fast path: a full block starting at page 0 */
		if (page == 0 && len - offset >= msb->block_size) {

			/* The direct write would bypass stale cached data */
			if (msb->cache_block_lba == lba)
				msb_cache_discard(msb);

			dbg_verbose("Writing whole lba %d", lba);
			error = msb_update_block(msb, lba, sg, offset);
			if (error)
				return error;

			offset += msb->block_size;
			*sucessfuly_written += msb->block_size;
			lba++;
			continue;
		}

		/* Slow path: one page through the write cache */
		error = msb_cache_write(msb, lba, page, false, sg, offset);
		if (error)
			return error;

		offset += msb->page_size;
		*sucessfuly_written += msb->page_size;

		page++;
		if (page == msb->pages_in_block) {
			page = 0;
			lba++;
		}
	}
	return 0;
}
1841
1842static int msb_do_read_request(struct msb_data *msb, int lba,
1843 int page, struct scatterlist *sg, int len, int *sucessfuly_read)
1844{
1845 int error = 0;
1846 int offset = 0;
1847 *sucessfuly_read = 0;
1848
1849 while (offset < len) {
1850
1851 error = msb_cache_read(msb, lba, page, sg, offset);
1852 if (error)
1853 return error;
1854
1855 offset += msb->page_size;
1856 *sucessfuly_read += msb->page_size;
1857
1858 page++;
1859 if (page == msb->pages_in_block) {
1860 page = 0;
1861 lba++;
1862 }
1863 }
1864 return 0;
1865}
1866
/*
 * IO worker: runs in the ordered workqueue and services, in a loop,
 * pending cache flush requests and the single outstanding block request
 * (msb->req).  q_lock protects msb->req and need_flush_cache; the
 * actual media IO runs unlocked.
 */
static void msb_io_work(struct work_struct *work)
{
	struct msb_data *msb = container_of(work, struct msb_data, io_work);
	int page, error, len;
	sector_t lba;
	struct scatterlist *sg = msb->prealloc_sg;
	struct request *req;

	dbg_verbose("IO: work started");

	while (1) {
		spin_lock_irq(&msb->q_lock);

		if (msb->need_flush_cache) {
			msb->need_flush_cache = false;
			spin_unlock_irq(&msb->q_lock);
			msb_cache_flush(msb);
			continue;
		}

		req = msb->req;
		if (!req) {
			dbg_verbose("IO: no more requests exiting");
			spin_unlock_irq(&msb->q_lock);
			return;
		}

		spin_unlock_irq(&msb->q_lock);

		/* process the request */
		dbg_verbose("IO: processing new request");
		blk_rq_map_sg(msb->queue, req, sg);

		/* Convert the 512-byte sector position into
		   (logical block, page-within-block) coordinates. */
		lba = blk_rq_pos(req);

		sector_div(lba, msb->page_size / 512);
		page = sector_div(lba, msb->pages_in_block);

		if (rq_data_dir(msb->req) == READ)
			error = msb_do_read_request(msb, lba, page, sg,
				blk_rq_bytes(req), &len);
		else
			error = msb_do_write_request(msb, lba, page, sg,
				blk_rq_bytes(req), &len);

		/* Complete the transferred portion; if that was the whole
		   request, finish it and release the slot. */
		if (len && !blk_update_request(req, BLK_STS_OK, len)) {
			__blk_mq_end_request(req, BLK_STS_OK);
			spin_lock_irq(&msb->q_lock);
			msb->req = NULL;
			spin_unlock_irq(&msb->q_lock);
		}

		/* An error ends whatever part of the request remains */
		if (error && msb->req) {
			blk_status_t ret = errno_to_blk_status(error);

			dbg_verbose("IO: ending one sector of the request with error");
			blk_mq_end_request(req, ret);
			spin_lock_irq(&msb->q_lock);
			msb->req = NULL;
			spin_unlock_irq(&msb->q_lock);
		}

		if (msb->req)
			dbg_verbose("IO: request still pending");
	}
}
1933
static DEFINE_IDR(msb_disk_idr); /* set of used disk numbers */
static DEFINE_MUTEX(msb_disk_lock); /* protects against races in open/release */
1936
/*
 * Block device open: take a usage reference if the card is still bound,
 * so the msb_data stays alive while the device node is held open.
 * Always succeeds.
 */
static int msb_bd_open(struct block_device *bdev, fmode_t mode)
{
	struct gendisk *disk = bdev->bd_disk;
	struct msb_data *msb = disk->private_data;

	dbg_verbose("block device open");

	mutex_lock(&msb_disk_lock);

	/* msb/msb->card may already be gone after hot removal */
	if (msb && msb->card)
		msb->usage_count++;

	mutex_unlock(&msb_disk_lock);
	return 0;
}
1952
1953static void msb_data_clear(struct msb_data *msb)
1954{
1955 kfree(msb->boot_page);
1956 kfree(msb->used_blocks_bitmap);
1957 kfree(msb->lba_to_pba_table);
1958 kfree(msb->cache);
1959 msb->card = NULL;
1960}
1961
/*
 * Drop one usage reference on the disk; when the last reference goes
 * away, release the disk id, put the gendisk and free the msb_data.
 * Serialized against open via msb_disk_lock.  Always returns 0.
 */
static int msb_disk_release(struct gendisk *disk)
{
	struct msb_data *msb = disk->private_data;

	dbg_verbose("block device release");
	mutex_lock(&msb_disk_lock);

	if (msb) {
		if (msb->usage_count)
			msb->usage_count--;

		if (!msb->usage_count) {
			disk->private_data = NULL;
			idr_remove(&msb_disk_idr, msb->disk_id);
			put_disk(disk);
			kfree(msb);
		}
	}
	mutex_unlock(&msb_disk_lock);
	return 0;
}
1983
/* Block device release hook: just drops one usage reference. */
static void msb_bd_release(struct gendisk *disk, fmode_t mode)
{
	msb_disk_release(disk);
}
1988
1989static int msb_bd_getgeo(struct block_device *bdev,
1990 struct hd_geometry *geo)
1991{
1992 struct msb_data *msb = bdev->bd_disk->private_data;
1993 *geo = msb->geometry;
1994 return 0;
1995}
1996
/*
 * blk-mq queue_rq hook.  Only one request is in flight at a time
 * (msb->req); a second one is pushed back with BLK_STS_DEV_RESOURCE.
 * Requests arriving after card removal fail with BLK_STS_IOERR.
 * The actual IO happens in msb_io_work.
 */
static blk_status_t msb_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct memstick_dev *card = hctx->queue->queuedata;
	struct msb_data *msb = memstick_get_drvdata(card);
	struct request *req = bd->rq;

	dbg_verbose("Submit request");

	spin_lock_irq(&msb->q_lock);

	if (msb->card_dead) {
		dbg("Refusing requests on removed card");

		WARN_ON(!msb->io_queue_stopped);

		spin_unlock_irq(&msb->q_lock);
		blk_mq_start_request(req);
		return BLK_STS_IOERR;
	}

	/* Slot busy: ask the core to retry when the slot frees up */
	if (msb->req) {
		spin_unlock_irq(&msb->q_lock);
		return BLK_STS_DEV_RESOURCE;
	}

	blk_mq_start_request(req);
	msb->req = req;

	if (!msb->io_queue_stopped)
		queue_work(msb->io_queue, &msb->io_work);

	spin_unlock_irq(&msb->q_lock);
	return BLK_STS_OK;
}
2032
2033static int msb_check_card(struct memstick_dev *card)
2034{
2035 struct msb_data *msb = memstick_get_drvdata(card);
2036 return (msb->card_dead == 0);
2037}
2038
/*
 * Quiesce all IO: stop the blk-mq hardware queues, mark the internal
 * worker stopped, cancel the cache flush timer, drain the IO workqueue,
 * and requeue any request left in flight so it is not lost.
 */
static void msb_stop(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	unsigned long flags;

	dbg("Stopping all msblock IO");

	blk_mq_stop_hw_queues(msb->queue);
	spin_lock_irqsave(&msb->q_lock, flags);
	msb->io_queue_stopped = true;
	spin_unlock_irqrestore(&msb->q_lock, flags);

	del_timer_sync(&msb->cache_flush_timer);
	flush_workqueue(msb->io_queue);

	/* The worker has drained; hand any leftover request back to blk-mq */
	spin_lock_irqsave(&msb->q_lock, flags);
	if (msb->req) {
		blk_mq_requeue_request(msb->req, false);
		msb->req = NULL;
	}
	spin_unlock_irqrestore(&msb->q_lock, flags);
}
2061
/*
 * Resume IO after msb_stop (or start it at probe time): invalidate the
 * cached register window, restart the blk-mq queues and kick the worker
 * with a harmless cache flush.  No-op if IO was never stopped or the
 * card is dead.
 */
static void msb_start(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	unsigned long flags;

	dbg("Resuming IO from msblock");

	msb_invalidate_reg_window(msb);

	spin_lock_irqsave(&msb->q_lock, flags);
	if (!msb->io_queue_stopped || msb->card_dead) {
		spin_unlock_irqrestore(&msb->q_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&msb->q_lock, flags);

	/* Kick cache flush anyway, its harmless */
	/* NOTE(review): these two flags are written after dropping q_lock;
	   presumably safe because the queues are still stopped here, but
	   verify against msb_queue_rq's locking. */
	msb->need_flush_cache = true;
	msb->io_queue_stopped = false;

	blk_mq_start_hw_queues(msb->queue);

	queue_work(msb->io_queue, &msb->io_work);

}
2087
/* Block device operations: open/release refcounting + fake geometry */
static const struct block_device_operations msb_bdops = {
	.open    = msb_bd_open,
	.release = msb_bd_release,
	.getgeo  = msb_bd_getgeo,
	.owner   = THIS_MODULE
};
2094
/* blk-mq dispatch: all requests funnel through msb_queue_rq */
static const struct blk_mq_ops msb_mq_ops = {
	.queue_rq	= msb_queue_rq,
};
2098
2099/* Registers the block device */
2100static int msb_init_disk(struct memstick_dev *card)
2101{
2102 struct msb_data *msb = memstick_get_drvdata(card);
2103 int rc;
2104 unsigned long capacity;
2105
2106 mutex_lock(&msb_disk_lock);
2107 msb->disk_id = idr_alloc(&msb_disk_idr, card, 0, 256, GFP_KERNEL);
2108 mutex_unlock(&msb_disk_lock);
2109
2110 if (msb->disk_id < 0)
2111 return msb->disk_id;
2112
2113 msb->disk = alloc_disk(0);
2114 if (!msb->disk) {
2115 rc = -ENOMEM;
2116 goto out_release_id;
2117 }
2118
2119 msb->queue = blk_mq_init_sq_queue(&msb->tag_set, &msb_mq_ops, 2,
2120 BLK_MQ_F_SHOULD_MERGE);
2121 if (IS_ERR(msb->queue)) {
2122 rc = PTR_ERR(msb->queue);
2123 msb->queue = NULL;
2124 goto out_put_disk;
2125 }
2126
2127 msb->queue->queuedata = card;
2128
2129 blk_queue_max_hw_sectors(msb->queue, MS_BLOCK_MAX_PAGES);
2130 blk_queue_max_segments(msb->queue, MS_BLOCK_MAX_SEGS);
2131 blk_queue_max_segment_size(msb->queue,
2132 MS_BLOCK_MAX_PAGES * msb->page_size);
2133 blk_queue_logical_block_size(msb->queue, msb->page_size);
2134
2135 sprintf(msb->disk->disk_name, "msblk%d", msb->disk_id);
2136 msb->disk->fops = &msb_bdops;
2137 msb->disk->private_data = msb;
2138 msb->disk->queue = msb->queue;
2139 msb->disk->flags |= GENHD_FL_EXT_DEVT;
2140
2141 capacity = msb->pages_in_block * msb->logical_block_count;
2142 capacity *= (msb->page_size / 512);
2143 set_capacity(msb->disk, capacity);
2144 dbg("Set total disk size to %lu sectors", capacity);
2145
2146 msb->usage_count = 1;
2147 msb->io_queue = alloc_ordered_workqueue("ms_block", WQ_MEM_RECLAIM);
2148 INIT_WORK(&msb->io_work, msb_io_work);
2149 sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);
2150
2151 if (msb->read_only)
2152 set_disk_ro(msb->disk, 1);
2153
2154 msb_start(card);
2155 device_add_disk(&card->dev, msb->disk, NULL);
2156 dbg("Disk added");
2157 return 0;
2158
2159out_put_disk:
2160 put_disk(msb->disk);
2161out_release_id:
2162 mutex_lock(&msb_disk_lock);
2163 idr_remove(&msb_disk_idr, msb->disk_id);
2164 mutex_unlock(&msb_disk_lock);
2165 return rc;
2166}
2167
/*
 * Driver probe: allocate per-card state, initialize the card and
 * register the block device.  On success installs the check/stop/start
 * callbacks used by the memstick core; on failure frees everything.
 * Returns 0 or a negative errno.
 */
static int msb_probe(struct memstick_dev *card)
{
	struct msb_data *msb;
	int rc = 0;

	msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
	if (!msb)
		return -ENOMEM;
	memstick_set_drvdata(card, msb);
	msb->card = card;
	spin_lock_init(&msb->q_lock);

	rc = msb_init_card(card);
	if (rc)
		goto out_free;

	rc = msb_init_disk(card);
	if (!rc) {
		card->check = msb_check_card;
		card->stop = msb_stop;
		card->start = msb_start;
		return 0;
	}
out_free:
	memstick_set_drvdata(card, NULL);
	msb_data_clear(msb);
	kfree(msb);
	return rc;
}
2197
/*
 * Driver remove: stop IO, mark the card dead so new requests fail fast,
 * tear down the disk and blk-mq queue, free the card state, and drop
 * the probe-time disk reference (the msb_data itself is freed when the
 * last opener releases the disk).
 */
static void msb_remove(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	unsigned long flags;

	if (!msb->io_queue_stopped)
		msb_stop(card);

	dbg("Removing the disk device");

	/* Take care of unhandled + new requests from now on */
	spin_lock_irqsave(&msb->q_lock, flags);
	msb->card_dead = true;
	spin_unlock_irqrestore(&msb->q_lock, flags);
	/* restart the queues so queued requests get errored out */
	blk_mq_start_hw_queues(msb->queue);

	/* Remove the disk */
	del_gendisk(msb->disk);
	blk_cleanup_queue(msb->queue);
	blk_mq_free_tag_set(&msb->tag_set);
	msb->queue = NULL;

	mutex_lock(&msb_disk_lock);
	msb_data_clear(msb);
	mutex_unlock(&msb_disk_lock);

	msb_disk_release(msb->disk);
	memstick_set_drvdata(card, NULL);
}
2227
2228#ifdef CONFIG_PM
2229
/* PM suspend hook: just quiesce IO; state is revalidated on resume. */
static int msb_suspend(struct memstick_dev *card, pm_message_t state)
{
	msb_stop(card);
	return 0;
}
2235
2236static int msb_resume(struct memstick_dev *card)
2237{
2238 struct msb_data *msb = memstick_get_drvdata(card);
2239 struct msb_data *new_msb = NULL;
2240 bool card_dead = true;
2241
2242#ifndef CONFIG_MEMSTICK_UNSAFE_RESUME
2243 msb->card_dead = true;
2244 return 0;
2245#endif
2246 mutex_lock(&card->host->lock);
2247
2248 new_msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
2249 if (!new_msb)
2250 goto out;
2251
2252 new_msb->card = card;
2253 memstick_set_drvdata(card, new_msb);
2254 spin_lock_init(&new_msb->q_lock);
2255 sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);
2256
2257 if (msb_init_card(card))
2258 goto out;
2259
2260 if (msb->block_size != new_msb->block_size)
2261 goto out;
2262
2263 if (memcmp(msb->boot_page, new_msb->boot_page,
2264 sizeof(struct ms_boot_page)))
2265 goto out;
2266
2267 if (msb->logical_block_count != new_msb->logical_block_count ||
2268 memcmp(msb->lba_to_pba_table, new_msb->lba_to_pba_table,
2269 msb->logical_block_count))
2270 goto out;
2271
2272 if (msb->block_count != new_msb->block_count ||
2273 memcmp(msb->used_blocks_bitmap, new_msb->used_blocks_bitmap,
2274 msb->block_count / 8))
2275 goto out;
2276
2277 card_dead = false;
2278out:
2279 if (card_dead)
2280 dbg("Card was removed/replaced during suspend");
2281
2282 msb->card_dead = card_dead;
2283 memstick_set_drvdata(card, msb);
2284
2285 if (new_msb) {
2286 msb_data_clear(new_msb);
2287 kfree(new_msb);
2288 }
2289
2290 msb_start(card);
2291 mutex_unlock(&card->host->lock);
2292 return 0;
2293}
2294#else
2295
2296#define msb_suspend NULL
2297#define msb_resume NULL
2298
2299#endif /* CONFIG_PM */
2300
/* Cards handled by this driver: legacy flash and the read-only
 * variants (ROM/RO/WP), plus legacy Duo storage. */
static struct memstick_device_id msb_id_tbl[] = {
	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
	 MEMSTICK_CLASS_FLASH},

	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
	 MEMSTICK_CLASS_ROM},

	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
	 MEMSTICK_CLASS_RO},

	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
	 MEMSTICK_CLASS_WP},

	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_DUO, MEMSTICK_CATEGORY_STORAGE_DUO,
	 MEMSTICK_CLASS_DUO},
	{}
};
MODULE_DEVICE_TABLE(memstick, msb_id_tbl);
2319
2320
/* memstick core driver registration record */
static struct memstick_driver msb_driver = {
	.driver = {
		.name  = DRIVER_NAME,
		.owner = THIS_MODULE
	},
	.id_table = msb_id_tbl,
	.probe    = msb_probe,
	.remove   = msb_remove,
	.suspend  = msb_suspend,
	.resume   = msb_resume
};
2332
2333static int __init msb_init(void)
2334{
2335 int rc = memstick_register_driver(&msb_driver);
2336 if (rc)
2337 pr_err("failed to register memstick driver (error %d)\n", rc);
2338
2339 return rc;
2340}
2341
/* Module exit: unbind from the core and drop the disk id allocator. */
static void __exit msb_exit(void)
{
	memstick_unregister_driver(&msb_driver);
	idr_destroy(&msb_disk_idr);
}
2347
module_init(msb_init);
module_exit(msb_exit);

/* Module parameters (read-only after load except debug) */
module_param(cache_flush_timeout, int, S_IRUGO);
MODULE_PARM_DESC(cache_flush_timeout,
				"Cache flush timeout in msec (1000 default)");
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level (0-2)");

module_param(verify_writes, bool, S_IRUGO);
MODULE_PARM_DESC(verify_writes, "Read back and check all data that is written");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Maxim Levitsky");
MODULE_DESCRIPTION("Sony MemoryStick block device driver");