// SPDX-License-Identifier: GPL-2.0-only
/*
 * ms_block.c - Sony MemoryStick (legacy) storage support
 *
 * Copyright (C) 2013 Maxim Levitsky <maximlevitsky@gmail.com>
 *
 * Minor portions of the driver were copied from mspro_block.c which is
 * Copyright (C) 2007 Alex Dubov <oakad@yahoo.com>
 */
#define DRIVER_NAME "ms_block"
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/memstick.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/bitmap.h>
#include <linux/scatterlist.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include "ms_block.h"

static int debug;
static int cache_flush_timeout = 1000;
static bool verify_writes;

/*
 * Copies a section of 'sg_from' starting at offset 'offset' and of length
 * 'len' to another scatterlist of 'to_nents' entries
 */
static size_t msb_sg_copy(struct scatterlist *sg_from,
	struct scatterlist *sg_to, int to_nents, size_t offset, size_t len)
{
	size_t copied = 0;

	while (offset > 0) {
		if (offset >= sg_from->length) {
			if (sg_is_last(sg_from))
				return 0;

			offset -= sg_from->length;
			sg_from = sg_next(sg_from);
			continue;
		}

		copied = min(len, sg_from->length - offset);
		sg_set_page(sg_to, sg_page(sg_from),
			copied, sg_from->offset + offset);

		len -= copied;
		offset = 0;

		if (sg_is_last(sg_from) || !len)
			goto out;

		sg_to = sg_next(sg_to);
		to_nents--;
		sg_from = sg_next(sg_from);
	}

	while (len > sg_from->length && to_nents--) {
		len -= sg_from->length;
		copied += sg_from->length;

		sg_set_page(sg_to, sg_page(sg_from),
			sg_from->length, sg_from->offset);

		if (sg_is_last(sg_from) || !len)
			goto out;

		sg_from = sg_next(sg_from);
		sg_to = sg_next(sg_to);
	}

	if (len && to_nents) {
		sg_set_page(sg_to, sg_page(sg_from), len, sg_from->offset);
		copied += len;
	}
out:
	sg_mark_end(sg_to);
	return copied;
}
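
/*
 * Illustrative note (added commentary, not part of the original driver):
 * a typical caller carves one page-sized window out of a request's
 * scatterlist without copying any data, e.g.:
 *
 *	struct scatterlist sg[2];
 *
 *	sg_init_table(sg, ARRAY_SIZE(sg));
 *	msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
 *		    msb->current_sg_offset, msb->page_size);
 *
 * Two destination entries suffice here on the assumption that a
 * page-sized window spans at most one segment boundary, which the block
 * queue limits set in msb_init_disk() are meant to guarantee.
 */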

/*
 * Compares a section of 'sg' starting at offset 'offset' and of length 'len'
 * to a linear buffer of length 'len' at address 'buffer'
 * Returns 0 if equal and -1 otherwise
 */
static int msb_sg_compare_to_buffer(struct scatterlist *sg,
					size_t offset, u8 *buffer, size_t len)
{
	int retval = 0, cmplen;
	struct sg_mapping_iter miter;

	sg_miter_start(&miter, sg, sg_nents(sg),
					SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	while (sg_miter_next(&miter) && len > 0) {
		if (offset >= miter.length) {
			offset -= miter.length;
			continue;
		}

		cmplen = min(miter.length - offset, len);
		retval = memcmp(miter.addr + offset, buffer, cmplen) ? -1 : 0;
		if (retval)
			break;

		buffer += cmplen;
		len -= cmplen;
		offset = 0;
	}

	if (!retval && len)
		retval = -1;

	sg_miter_stop(&miter);
	return retval;
}


/* Get the zone at which the block with logical address 'lba' lives
 * Flash is broken into zones.
 * Each zone consists of 512 eraseblocks; the first zone uses 494 of them
 * for logical blocks and every following zone uses 496.
 * Therefore zone #0 hosts blocks 0-493, zone #1 blocks 494-989, etc...
 */
static int msb_get_zone_from_lba(int lba)
{
	if (lba < 494)
		return 0;
	return ((lba - 494) / 496) + 1;
}
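
/*
 * Worked example (added for illustration): lba 0..493 map to zone 0;
 * lba 494 gives (494 - 494) / 496 + 1 = zone 1; lba 989 is still zone 1
 * since 495 / 496 == 0; lba 990 gives (496 / 496) + 1 = zone 2.
 */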

/* Get zone of physical block. Trivial */
static int msb_get_zone_from_pba(int pba)
{
	return pba / MS_BLOCKS_IN_ZONE;
}

/* Debug test to validate free block counts */
static int msb_validate_used_block_bitmap(struct msb_data *msb)
{
	int total_free_blocks = 0;
	int i;

	if (!debug)
		return 0;

	for (i = 0; i < msb->zone_count; i++)
		total_free_blocks += msb->free_block_count[i];

	if (msb->block_count - bitmap_weight(msb->used_blocks_bitmap,
					msb->block_count) == total_free_blocks)
		return 0;

	pr_err("BUG: free block counts don't match the bitmap");
	msb->read_only = true;
	return -EINVAL;
}

/* Mark physical block as used */
static void msb_mark_block_used(struct msb_data *msb, int pba)
{
	int zone = msb_get_zone_from_pba(pba);

	if (test_bit(pba, msb->used_blocks_bitmap)) {
		pr_err("BUG: attempt to mark already used pba %d as used", pba);
		msb->read_only = true;
		return;
	}

	if (msb_validate_used_block_bitmap(msb))
		return;

	/* No races because all IO is single threaded */
	__set_bit(pba, msb->used_blocks_bitmap);
	msb->free_block_count[zone]--;
}

/* Mark physical block as free */
static void msb_mark_block_unused(struct msb_data *msb, int pba)
{
	int zone = msb_get_zone_from_pba(pba);

	if (!test_bit(pba, msb->used_blocks_bitmap)) {
		pr_err("BUG: attempt to mark already unused pba %d as unused", pba);
		msb->read_only = true;
		return;
	}

	if (msb_validate_used_block_bitmap(msb))
		return;

	/* No races because all IO is single threaded */
	__clear_bit(pba, msb->used_blocks_bitmap);
	msb->free_block_count[zone]++;
}

/* Invalidate current register window */
static void msb_invalidate_reg_window(struct msb_data *msb)
{
	msb->reg_addr.w_offset = offsetof(struct ms_register, id);
	msb->reg_addr.w_length = sizeof(struct ms_id_register);
	msb->reg_addr.r_offset = offsetof(struct ms_register, id);
	msb->reg_addr.r_length = sizeof(struct ms_id_register);
	msb->addr_valid = false;
}

/* Start a state machine */
static int msb_run_state_machine(struct msb_data *msb, int (*state_func)
		(struct memstick_dev *card, struct memstick_request **req))
{
	struct memstick_dev *card = msb->card;

	WARN_ON(msb->state != -1);
	msb->int_polling = false;
	msb->state = 0;
	msb->exit_error = 0;

	memset(&card->current_mrq, 0, sizeof(card->current_mrq));

	card->next_request = state_func;
	memstick_new_req(card->host);
	wait_for_completion(&card->mrq_complete);

	WARN_ON(msb->state != -1);
	return msb->exit_error;
}
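
/*
 * Added commentary (not from the original source): the memstick core
 * invokes the handler installed in card->next_request each time a request
 * completes; the handler advances msb->state, queues the next TPC, and
 * eventually calls msb_exit_state_machine(), which completes mrq_complete
 * and lets msb_run_state_machine() return the recorded exit_error.
 */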

/* State machine handlers call this to exit the state machine */
static int msb_exit_state_machine(struct msb_data *msb, int error)
{
	WARN_ON(msb->state == -1);

	msb->state = -1;
	msb->exit_error = error;
	msb->card->next_request = h_msb_default_bad;

	/* Invalidate reg window on errors */
	if (error)
		msb_invalidate_reg_window(msb);

	complete(&msb->card->mrq_complete);
	return -ENXIO;
}

/* read INT register */
static int msb_read_int_reg(struct msb_data *msb, long timeout)
{
	struct memstick_request *mrq = &msb->card->current_mrq;

	WARN_ON(msb->state == -1);

	if (!msb->int_polling) {
		msb->int_timeout = jiffies +
			msecs_to_jiffies(timeout == -1 ? 500 : timeout);
		msb->int_polling = true;
	} else if (time_after(jiffies, msb->int_timeout)) {
		mrq->data[0] = MEMSTICK_INT_CMDNAK;
		return 0;
	}

	if ((msb->caps & MEMSTICK_CAP_AUTO_GET_INT) &&
				mrq->need_card_int && !mrq->error) {
		mrq->data[0] = mrq->int_reg;
		mrq->need_card_int = false;
		return 0;
	} else {
		memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
		return 1;
	}
}

/* Read a register */
static int msb_read_regs(struct msb_data *msb, int offset, int len)
{
	struct memstick_request *req = &msb->card->current_mrq;

	if (msb->reg_addr.r_offset != offset ||
	    msb->reg_addr.r_length != len || !msb->addr_valid) {

		msb->reg_addr.r_offset = offset;
		msb->reg_addr.r_length = len;
		msb->addr_valid = true;

		memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
			&msb->reg_addr, sizeof(msb->reg_addr));
		return 0;
	}

	memstick_init_req(req, MS_TPC_READ_REG, NULL, len);
	return 1;
}

/* Write a card register */
static int msb_write_regs(struct msb_data *msb, int offset, int len, void *buf)
{
	struct memstick_request *req = &msb->card->current_mrq;

	if (msb->reg_addr.w_offset != offset ||
	    msb->reg_addr.w_length != len || !msb->addr_valid) {

		msb->reg_addr.w_offset = offset;
		msb->reg_addr.w_length = len;
		msb->addr_valid = true;

		memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
			&msb->reg_addr, sizeof(msb->reg_addr));
		return 0;
	}

	memstick_init_req(req, MS_TPC_WRITE_REG, buf, len);
	return 1;
}
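
/*
 * Added commentary (not in the original file): both helpers above return
 * 0 when they had to queue an MS_TPC_SET_RW_REG_ADRS request to move the
 * register window first, and 1 when the actual register read/write was
 * queued. This is why the state machine handlers below stay in the same
 * state and retry whenever these helpers return 0.
 */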

/* Handler for absence of IO */
static int h_msb_default_bad(struct memstick_dev *card,
					struct memstick_request **mrq)
{
	return -ENXIO;
}

/*
 * This function is a handler for reads of one page from the device.
 * Writes the output to msb->current_sg, takes the sector address from
 * msb->regs.param. Can also be used to read extra data only; set the
 * params accordingly.
 */
static int h_msb_read_page(struct memstick_dev *card,
					struct memstick_request **out_mrq)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
	struct scatterlist sg[2];
	u8 command, intreg;

	if (mrq->error) {
		dbg("read_page, unknown error");
		return msb_exit_state_machine(msb, mrq->error);
	}
again:
	switch (msb->state) {
	case MSB_RP_SEND_BLOCK_ADDRESS:
		/* msb_write_regs sometimes "fails" because it needs to update
		 * the reg window, and thus it returns a request for that.
		 * Then we stay in this state and retry.
		 */
		if (!msb_write_regs(msb,
			offsetof(struct ms_register, param),
			sizeof(struct ms_param_register),
			(unsigned char *)&msb->regs.param))
			return 0;

		msb->state = MSB_RP_SEND_READ_COMMAND;
		return 0;

	case MSB_RP_SEND_READ_COMMAND:
		command = MS_CMD_BLOCK_READ;
		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
		msb->state = MSB_RP_SEND_INT_REQ;
		return 0;

	case MSB_RP_SEND_INT_REQ:
		msb->state = MSB_RP_RECEIVE_INT_REQ_RESULT;
		/* If we don't actually need to send the int read request
		 * (only in serial mode), then just fall through.
		 */
		if (msb_read_int_reg(msb, -1))
			return 0;
		/* fallthrough */

	case MSB_RP_RECEIVE_INT_REQ_RESULT:
		intreg = mrq->data[0];
		msb->regs.status.interrupt = intreg;

		if (intreg & MEMSTICK_INT_CMDNAK)
			return msb_exit_state_machine(msb, -EIO);

		if (!(intreg & MEMSTICK_INT_CED)) {
			msb->state = MSB_RP_SEND_INT_REQ;
			goto again;
		}

		msb->int_polling = false;
		msb->state = (intreg & MEMSTICK_INT_ERR) ?
			MSB_RP_SEND_READ_STATUS_REG : MSB_RP_SEND_OOB_READ;
		goto again;

	case MSB_RP_SEND_READ_STATUS_REG:
		/* read the status register to understand source of the INT_ERR */
		if (!msb_read_regs(msb,
			offsetof(struct ms_register, status),
			sizeof(struct ms_status_register)))
			return 0;

		msb->state = MSB_RP_RECEIVE_STATUS_REG;
		return 0;

	case MSB_RP_RECEIVE_STATUS_REG:
		msb->regs.status = *(struct ms_status_register *)mrq->data;
		msb->state = MSB_RP_SEND_OOB_READ;
		/* fallthrough */

	case MSB_RP_SEND_OOB_READ:
		if (!msb_read_regs(msb,
			offsetof(struct ms_register, extra_data),
			sizeof(struct ms_extra_data_register)))
			return 0;

		msb->state = MSB_RP_RECEIVE_OOB_READ;
		return 0;

	case MSB_RP_RECEIVE_OOB_READ:
		msb->regs.extra_data =
			*(struct ms_extra_data_register *) mrq->data;
		msb->state = MSB_RP_SEND_READ_DATA;
		/* fallthrough */

	case MSB_RP_SEND_READ_DATA:
		/* Skip that state if we only read the oob */
		if (msb->regs.param.cp == MEMSTICK_CP_EXTRA) {
			msb->state = MSB_RP_RECEIVE_READ_DATA;
			goto again;
		}

		sg_init_table(sg, ARRAY_SIZE(sg));
		msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
			msb->current_sg_offset,
			msb->page_size);

		memstick_init_req_sg(mrq, MS_TPC_READ_LONG_DATA, sg);
		msb->state = MSB_RP_RECEIVE_READ_DATA;
		return 0;

	case MSB_RP_RECEIVE_READ_DATA:
		if (!(msb->regs.status.interrupt & MEMSTICK_INT_ERR)) {
			msb->current_sg_offset += msb->page_size;
			return msb_exit_state_machine(msb, 0);
		}

		if (msb->regs.status.status1 & MEMSTICK_UNCORR_ERROR) {
			dbg("read_page: uncorrectable error");
			return msb_exit_state_machine(msb, -EBADMSG);
		}

		if (msb->regs.status.status1 & MEMSTICK_CORR_ERROR) {
			dbg("read_page: correctable error");
			msb->current_sg_offset += msb->page_size;
			return msb_exit_state_machine(msb, -EUCLEAN);
		} else {
			dbg("read_page: INT error, but no status error bits");
			return msb_exit_state_machine(msb, -EIO);
		}
	}

	BUG();
}
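
/*
 * Added summary (not from the original source): in the error-free case the
 * read path above boils down to the TPC sequence
 *
 *	SET_RW_REG_ADRS/WRITE_REG  (param: pba, page, cp)
 *	SET_CMD                    (MS_CMD_BLOCK_READ)
 *	GET_INT                    (polled until CED is set)
 *	READ_REG                   (extra data / oob)
 *	READ_LONG_DATA             (one page into msb->current_sg)
 *
 * with the status-register detour taken only when INT_ERR was reported.
 */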

/*
 * Handler of writes of exactly one block.
 * Takes the address from msb->regs.param.
 * Writes the same extra data to all pages, also taken
 * from msb->regs.extra_data.
 * Returns -EBADMSG if the write fails due to an uncorrectable error, or
 * -EIO if the device refuses to take the command or something else fails.
 */
static int h_msb_write_block(struct memstick_dev *card,
					struct memstick_request **out_mrq)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
	struct scatterlist sg[2];
	u8 intreg, command;

	if (mrq->error)
		return msb_exit_state_machine(msb, mrq->error);

again:
	switch (msb->state) {

	/* HACK: JMicron handling of TPCs between 8 and
	 * sizeof(memstick_request.data) is broken due to a hardware
	 * bug in PIO mode that is used for these TPCs.
	 * Therefore split the write.
	 */

	case MSB_WB_SEND_WRITE_PARAMS:
		if (!msb_write_regs(msb,
			offsetof(struct ms_register, param),
			sizeof(struct ms_param_register),
			&msb->regs.param))
			return 0;

		msb->state = MSB_WB_SEND_WRITE_OOB;
		return 0;

	case MSB_WB_SEND_WRITE_OOB:
		if (!msb_write_regs(msb,
			offsetof(struct ms_register, extra_data),
			sizeof(struct ms_extra_data_register),
			&msb->regs.extra_data))
			return 0;
		msb->state = MSB_WB_SEND_WRITE_COMMAND;
		return 0;

	case MSB_WB_SEND_WRITE_COMMAND:
		command = MS_CMD_BLOCK_WRITE;
		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
		msb->state = MSB_WB_SEND_INT_REQ;
		return 0;

	case MSB_WB_SEND_INT_REQ:
		msb->state = MSB_WB_RECEIVE_INT_REQ;
		if (msb_read_int_reg(msb, -1))
			return 0;
		/* fallthrough */

	case MSB_WB_RECEIVE_INT_REQ:
		intreg = mrq->data[0];
		msb->regs.status.interrupt = intreg;

		/* errors mean out of here, and fast... */
		if (intreg & (MEMSTICK_INT_CMDNAK))
			return msb_exit_state_machine(msb, -EIO);

		if (intreg & MEMSTICK_INT_ERR)
			return msb_exit_state_machine(msb, -EBADMSG);

		/* for the last page we need to poll CED */
		if (msb->current_page == msb->pages_in_block) {
			if (intreg & MEMSTICK_INT_CED)
				return msb_exit_state_machine(msb, 0);
			msb->state = MSB_WB_SEND_INT_REQ;
			goto again;
		}

		/* for a non-last page we need BREQ before writing the next chunk */
		if (!(intreg & MEMSTICK_INT_BREQ)) {
			msb->state = MSB_WB_SEND_INT_REQ;
			goto again;
		}

		msb->int_polling = false;
		msb->state = MSB_WB_SEND_WRITE_DATA;
		/* fallthrough */

	case MSB_WB_SEND_WRITE_DATA:
		sg_init_table(sg, ARRAY_SIZE(sg));

		if (msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
			msb->current_sg_offset,
			msb->page_size) < msb->page_size)
			return msb_exit_state_machine(msb, -EIO);

		memstick_init_req_sg(mrq, MS_TPC_WRITE_LONG_DATA, sg);
		mrq->need_card_int = 1;
		msb->state = MSB_WB_RECEIVE_WRITE_CONFIRMATION;
		return 0;

	case MSB_WB_RECEIVE_WRITE_CONFIRMATION:
		msb->current_page++;
		msb->current_sg_offset += msb->page_size;
		msb->state = MSB_WB_SEND_INT_REQ;
		goto again;
	default:
		BUG();
	}

	return 0;
}
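
/*
 * Added note (not in the original file): unlike the read handler, which
 * transfers a single page per state machine run, this handler loops via
 * MSB_WB_SEND_INT_REQ and streams all msb->pages_in_block pages of one
 * block in a single run, handshaking on BREQ between pages and polling
 * CED only after the final one.
 */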

/*
 * This function is used to send simple IO requests to the device, which
 * consist of a register write + command
 */
static int h_msb_send_command(struct memstick_dev *card,
					struct memstick_request **out_mrq)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
	u8 intreg;

	if (mrq->error) {
		dbg("send_command: unknown error");
		return msb_exit_state_machine(msb, mrq->error);
	}
again:
	switch (msb->state) {

	/* HACK: see h_msb_write_block */
	case MSB_SC_SEND_WRITE_PARAMS: /* write param register*/
		if (!msb_write_regs(msb,
			offsetof(struct ms_register, param),
			sizeof(struct ms_param_register),
			&msb->regs.param))
			return 0;
		msb->state = MSB_SC_SEND_WRITE_OOB;
		return 0;

	case MSB_SC_SEND_WRITE_OOB:
		if (!msb->command_need_oob) {
			msb->state = MSB_SC_SEND_COMMAND;
			goto again;
		}

		if (!msb_write_regs(msb,
			offsetof(struct ms_register, extra_data),
			sizeof(struct ms_extra_data_register),
			&msb->regs.extra_data))
			return 0;

		msb->state = MSB_SC_SEND_COMMAND;
		return 0;

	case MSB_SC_SEND_COMMAND:
		memstick_init_req(mrq, MS_TPC_SET_CMD, &msb->command_value, 1);
		msb->state = MSB_SC_SEND_INT_REQ;
		return 0;

	case MSB_SC_SEND_INT_REQ:
		msb->state = MSB_SC_RECEIVE_INT_REQ;
		if (msb_read_int_reg(msb, -1))
			return 0;
		/* fallthrough */

	case MSB_SC_RECEIVE_INT_REQ:
		intreg = mrq->data[0];

		if (intreg & MEMSTICK_INT_CMDNAK)
			return msb_exit_state_machine(msb, -EIO);
		if (intreg & MEMSTICK_INT_ERR)
			return msb_exit_state_machine(msb, -EBADMSG);

		if (!(intreg & MEMSTICK_INT_CED)) {
			msb->state = MSB_SC_SEND_INT_REQ;
			goto again;
		}

		return msb_exit_state_machine(msb, 0);
	}

	BUG();
}

/* Small handler for card reset */
static int h_msb_reset(struct memstick_dev *card,
					struct memstick_request **out_mrq)
{
	u8 command = MS_CMD_RESET;
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_request *mrq = *out_mrq = &card->current_mrq;

	if (mrq->error)
		return msb_exit_state_machine(msb, mrq->error);

	switch (msb->state) {
	case MSB_RS_SEND:
		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
		mrq->need_card_int = 0;
		msb->state = MSB_RS_CONFIRM;
		return 0;
	case MSB_RS_CONFIRM:
		return msb_exit_state_machine(msb, 0);
	}
	BUG();
}

/* This handler is used to do the serial->parallel switch */
static int h_msb_parallel_switch(struct memstick_dev *card,
					struct memstick_request **out_mrq)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
	struct memstick_host *host = card->host;

	if (mrq->error) {
		dbg("parallel_switch: error");
		msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
		return msb_exit_state_machine(msb, mrq->error);
	}

	switch (msb->state) {
	case MSB_PS_SEND_SWITCH_COMMAND:
		/* Set the parallel interface on memstick side */
		msb->regs.param.system |= MEMSTICK_SYS_PAM;

		if (!msb_write_regs(msb,
			offsetof(struct ms_register, param),
			1,
			(unsigned char *)&msb->regs.param))
			return 0;

		msb->state = MSB_PS_SWICH_HOST;
		return 0;

	case MSB_PS_SWICH_HOST:
		/* Set the parallel interface on our side + send a dummy
		 * request to see if the card responds
		 */
		host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_PAR4);
		memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
		msb->state = MSB_PS_CONFIRM;
		return 0;

	case MSB_PS_CONFIRM:
		return msb_exit_state_machine(msb, 0);
	}

	BUG();
}

static int msb_switch_to_parallel(struct msb_data *msb);

/* Reset the card, to guard against hw errors being treated as bad blocks */
static int msb_reset(struct msb_data *msb, bool full)
{
	bool was_parallel = msb->regs.param.system & MEMSTICK_SYS_PAM;
	struct memstick_dev *card = msb->card;
	struct memstick_host *host = card->host;
	int error;

	/* Reset the card */
	msb->regs.param.system = MEMSTICK_SYS_BAMD;

	if (full) {
		error = host->set_param(host,
					MEMSTICK_POWER, MEMSTICK_POWER_OFF);
		if (error)
			goto out_error;

		msb_invalidate_reg_window(msb);

		error = host->set_param(host,
					MEMSTICK_POWER, MEMSTICK_POWER_ON);
		if (error)
			goto out_error;

		error = host->set_param(host,
					MEMSTICK_INTERFACE, MEMSTICK_SERIAL);
		if (error) {
out_error:
			dbg("Failed to reset the host controller");
			msb->read_only = true;
			return -EFAULT;
		}
	}

	error = msb_run_state_machine(msb, h_msb_reset);
	if (error) {
		dbg("Failed to reset the card");
		msb->read_only = true;
		return -ENODEV;
	}

	/* Set parallel mode */
	if (was_parallel)
		msb_switch_to_parallel(msb);
	return 0;
}

/* Attempts to switch the interface to parallel mode */
static int msb_switch_to_parallel(struct msb_data *msb)
{
	int error;

	error = msb_run_state_machine(msb, h_msb_parallel_switch);
	if (error) {
		pr_err("Switch to parallel failed");
		msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
		msb_reset(msb, true);
		return -EFAULT;
	}

	msb->caps |= MEMSTICK_CAP_AUTO_GET_INT;
	return 0;
}

/* Changes the overwrite flag on a page */
static int msb_set_overwrite_flag(struct msb_data *msb,
						u16 pba, u8 page, u8 flag)
{
	if (msb->read_only)
		return -EROFS;

	msb->regs.param.block_address = cpu_to_be16(pba);
	msb->regs.param.page_address = page;
	msb->regs.param.cp = MEMSTICK_CP_OVERWRITE;
	msb->regs.extra_data.overwrite_flag = flag;
	msb->command_value = MS_CMD_BLOCK_WRITE;
	msb->command_need_oob = true;

	dbg_verbose("changing overwrite flag to %02x for sector %d, page %d",
							flag, pba, page);
	return msb_run_state_machine(msb, h_msb_send_command);
}
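
/*
 * Added note (assuming the usual overwrite-flag bit layout from
 * linux/memstick.h, where the flags are active-low): marking a whole
 * block bad clears MEMSTICK_OVERWRITE_BKST, which is why msb_mark_bad()
 * below writes 0xFF & ~MEMSTICK_OVERWRITE_BKST, while marking a single
 * page bad clears its MEMSTICK_OVERWRITE_PGST0 bit instead.
 */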

static int msb_mark_bad(struct msb_data *msb, int pba)
{
	pr_notice("marking pba %d as bad", pba);
	msb_reset(msb, true);
	return msb_set_overwrite_flag(
			msb, pba, 0, 0xFF & ~MEMSTICK_OVERWRITE_BKST);
}

static int msb_mark_page_bad(struct msb_data *msb, int pba, int page)
{
	dbg("marking page %d of pba %d as bad", page, pba);
	msb_reset(msb, true);
	return msb_set_overwrite_flag(msb,
		pba, page, ~MEMSTICK_OVERWRITE_PGST0);
}

/* Erases one physical block */
static int msb_erase_block(struct msb_data *msb, u16 pba)
{
	int error, try;

	if (msb->read_only)
		return -EROFS;

	dbg_verbose("erasing pba %d", pba);

	for (try = 1; try < 3; try++) {
		msb->regs.param.block_address = cpu_to_be16(pba);
		msb->regs.param.page_address = 0;
		msb->regs.param.cp = MEMSTICK_CP_BLOCK;
		msb->command_value = MS_CMD_BLOCK_ERASE;
		msb->command_need_oob = false;

		error = msb_run_state_machine(msb, h_msb_send_command);
		if (!error || msb_reset(msb, true))
			break;
	}

	if (error) {
		pr_err("erase failed, marking pba %d as bad", pba);
		msb_mark_bad(msb, pba);
	}

	dbg_verbose("erase success, marking pba %d as unused", pba);
	msb_mark_block_unused(msb, pba);
	__set_bit(pba, msb->erased_blocks_bitmap);
	return error;
}

/* Reads one page from the device */
static int msb_read_page(struct msb_data *msb,
	u16 pba, u8 page, struct ms_extra_data_register *extra,
					struct scatterlist *sg, int offset)
{
	int try, error;

	if (pba == MS_BLOCK_INVALID) {
		unsigned long flags;
		struct sg_mapping_iter miter;
		size_t len = msb->page_size;

		dbg_verbose("read unmapped sector. returning 0xFF");

		local_irq_save(flags);
		sg_miter_start(&miter, sg, sg_nents(sg),
				SG_MITER_ATOMIC | SG_MITER_TO_SG);

		while (sg_miter_next(&miter) && len > 0) {

			int chunklen;

			if (offset && offset >= miter.length) {
				offset -= miter.length;
				continue;
			}

			chunklen = min(miter.length - offset, len);
			memset(miter.addr + offset, 0xFF, chunklen);
			len -= chunklen;
			offset = 0;
		}

		sg_miter_stop(&miter);
		local_irq_restore(flags);

		if (offset)
			return -EFAULT;

		if (extra)
			memset(extra, 0xFF, sizeof(*extra));
		return 0;
	}

	if (pba >= msb->block_count) {
		pr_err("BUG: attempt to read beyond the end of the card at pba %d", pba);
		return -EINVAL;
	}

	for (try = 1; try < 3; try++) {
		msb->regs.param.block_address = cpu_to_be16(pba);
		msb->regs.param.page_address = page;
		msb->regs.param.cp = MEMSTICK_CP_PAGE;

		msb->current_sg = sg;
		msb->current_sg_offset = offset;
		error = msb_run_state_machine(msb, h_msb_read_page);

		if (error == -EUCLEAN) {
			pr_notice("correctable error on pba %d, page %d",
				pba, page);
			error = 0;
		}

		if (!error && extra)
			*extra = msb->regs.extra_data;

		if (!error || msb_reset(msb, true))
			break;
	}

	/* Mark bad pages */
	if (error == -EBADMSG) {
		pr_err("uncorrectable error on read of pba %d, page %d",
			pba, page);

		if (msb->regs.extra_data.overwrite_flag &
					MEMSTICK_OVERWRITE_PGST0)
			msb_mark_page_bad(msb, pba, page);
		return -EBADMSG;
	}

	if (error)
		pr_err("read of pba %d, page %d failed with error %d",
			pba, page, error);
	return error;
}

/* Reads the oob of a page only */
static int msb_read_oob(struct msb_data *msb, u16 pba, u16 page,
	struct ms_extra_data_register *extra)
{
	int error;

	BUG_ON(!extra);
	msb->regs.param.block_address = cpu_to_be16(pba);
	msb->regs.param.page_address = page;
	msb->regs.param.cp = MEMSTICK_CP_EXTRA;

	if (pba > msb->block_count) {
		pr_err("BUG: attempt to read beyond the end of card at pba %d", pba);
		return -EINVAL;
	}

	error = msb_run_state_machine(msb, h_msb_read_page);
	*extra = msb->regs.extra_data;

	if (error == -EUCLEAN) {
		pr_notice("correctable error on pba %d, page %d",
			pba, page);
		return 0;
	}

	return error;
}

/* Reads a block and compares it with data contained in scatterlist orig_sg */
static int msb_verify_block(struct msb_data *msb, u16 pba,
				struct scatterlist *orig_sg, int offset)
{
	struct scatterlist sg;
	int page = 0, error;

	sg_init_one(&sg, msb->block_buffer, msb->block_size);

	while (page < msb->pages_in_block) {

		error = msb_read_page(msb, pba, page,
				NULL, &sg, page * msb->page_size);
		if (error)
			return error;
		page++;
	}

	if (msb_sg_compare_to_buffer(orig_sg, offset,
				msb->block_buffer, msb->block_size))
		return -EIO;
	return 0;
}

/* Writes exactly one block + oob */
static int msb_write_block(struct msb_data *msb,
			u16 pba, u32 lba, struct scatterlist *sg, int offset)
{
	int error, current_try = 1;

	BUG_ON(sg->length < msb->page_size);

	if (msb->read_only)
		return -EROFS;

	if (pba == MS_BLOCK_INVALID) {
		pr_err("BUG: write: attempt to write MS_BLOCK_INVALID block");
		return -EINVAL;
	}

	if (pba >= msb->block_count || lba >= msb->logical_block_count) {
		pr_err("BUG: write: attempt to write beyond the end of device");
		return -EINVAL;
	}

	if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
		pr_err("BUG: write: lba zone mismatch");
		return -EINVAL;
	}

	if (pba == msb->boot_block_locations[0] ||
		pba == msb->boot_block_locations[1]) {
		pr_err("BUG: write: attempt to write to boot blocks!");
		return -EINVAL;
	}

	while (1) {

		if (msb->read_only)
			return -EROFS;

		msb->regs.param.cp = MEMSTICK_CP_BLOCK;
		msb->regs.param.page_address = 0;
		msb->regs.param.block_address = cpu_to_be16(pba);

		msb->regs.extra_data.management_flag = 0xFF;
		msb->regs.extra_data.overwrite_flag = 0xF8;
		msb->regs.extra_data.logical_address = cpu_to_be16(lba);

		msb->current_sg = sg;
		msb->current_sg_offset = offset;
		msb->current_page = 0;

		error = msb_run_state_machine(msb, h_msb_write_block);

		/* The block we just wrote to is assumed erased, since its
		 * pba was erased. If it wasn't actually erased, the write
		 * will succeed but only clear the bits that were set in
		 * the block, so we verify that what we have written
		 * matches what we expect.
		 * We do trust the blocks that we erased ourselves.
		 */
		if (!error && (verify_writes ||
				!test_bit(pba, msb->erased_blocks_bitmap)))
			error = msb_verify_block(msb, pba, sg, offset);

		if (!error)
			break;

		if (current_try > 1 || msb_reset(msb, true))
			break;

		pr_err("write failed, trying to erase the pba %d", pba);
		error = msb_erase_block(msb, pba);
		if (error)
			break;

		current_try++;
	}
	return error;
}

/* Finds a free block for write replacement */
static u16 msb_get_free_block(struct msb_data *msb, int zone)
{
	u16 pos;
	int pba = zone * MS_BLOCKS_IN_ZONE;
	int i;

	get_random_bytes(&pos, sizeof(pos));

	if (!msb->free_block_count[zone]) {
		pr_err("no free blocks in zone %d to use for a write (media is worn out), switching to RO mode", zone);
		msb->read_only = true;
		return MS_BLOCK_INVALID;
	}

	pos %= msb->free_block_count[zone];

	dbg_verbose("have %d choices for a free block, selected randomly: %d",
		msb->free_block_count[zone], pos);

	pba = find_next_zero_bit(msb->used_blocks_bitmap,
							msb->block_count, pba);
	for (i = 0; i < pos; ++i)
		pba = find_next_zero_bit(msb->used_blocks_bitmap,
						msb->block_count, pba + 1);

	dbg_verbose("result of the free blocks scan: pba %d", pba);

	if (pba == msb->block_count || (msb_get_zone_from_pba(pba)) != zone) {
		pr_err("BUG: can't get a free block");
		msb->read_only = true;
		return MS_BLOCK_INVALID;
	}

	msb_mark_block_used(msb, pba);
	return pba;
}
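
/*
 * Added explanation (not from the original source): the helper above picks
 * the pos-th clear bit of used_blocks_bitmap inside the zone, where pos is
 * uniform in [0, free_block_count[zone]). Randomizing the choice spreads
 * writes across the zone, which acts as a simple form of wear leveling.
 */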

static int msb_update_block(struct msb_data *msb, u16 lba,
	struct scatterlist *sg, int offset)
{
	u16 pba, new_pba;
	int error, try;

	pba = msb->lba_to_pba_table[lba];
	dbg_verbose("start of a block update at lba %d, pba %d", lba, pba);

	if (pba != MS_BLOCK_INVALID) {
		dbg_verbose("setting the update flag on the block");
		msb_set_overwrite_flag(msb, pba, 0,
				0xFF & ~MEMSTICK_OVERWRITE_UDST);
	}

	for (try = 0; try < 3; try++) {
		new_pba = msb_get_free_block(msb,
			msb_get_zone_from_lba(lba));

		if (new_pba == MS_BLOCK_INVALID) {
			error = -EIO;
			goto out;
		}

		dbg_verbose("block update: writing updated block to the pba %d",
								new_pba);
		error = msb_write_block(msb, new_pba, lba, sg, offset);
		if (error == -EBADMSG) {
			msb_mark_bad(msb, new_pba);
			continue;
		}

		if (error)
			goto out;

		dbg_verbose("block update: erasing the old block");
		msb_erase_block(msb, pba);
		msb->lba_to_pba_table[lba] = new_pba;
		return 0;
	}
out:
	if (error) {
		pr_err("block update error after %d tries, switching to r/o mode", try);
		msb->read_only = true;
	}
	return error;
}
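
/*
 * Added note, describing the behavior of the code as written (not from
 * the original source): the update is ordered like a journal - the old
 * block first has its MEMSTICK_OVERWRITE_UDST bit cleared, then the new
 * copy is written, and only then is the old block erased. If power is
 * lost in between, two blocks carry the same lba at the next scan, and
 * the collision handling in msb_ftl_scan() below prefers the block with
 * UDST cleared (the old one), falling back to the pre-update contents.
 */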

/* Converts endianness in the boot block for easy use */
static void msb_fix_boot_page_endianness(struct ms_boot_page *p)
{
	p->header.block_id = be16_to_cpu(p->header.block_id);
	p->header.format_reserved = be16_to_cpu(p->header.format_reserved);
	p->entry.disabled_block.start_addr
		= be32_to_cpu(p->entry.disabled_block.start_addr);
	p->entry.disabled_block.data_size
		= be32_to_cpu(p->entry.disabled_block.data_size);
	p->entry.cis_idi.start_addr
		= be32_to_cpu(p->entry.cis_idi.start_addr);
	p->entry.cis_idi.data_size
		= be32_to_cpu(p->entry.cis_idi.data_size);
	p->attr.block_size = be16_to_cpu(p->attr.block_size);
	p->attr.number_of_blocks = be16_to_cpu(p->attr.number_of_blocks);
	p->attr.number_of_effective_blocks
		= be16_to_cpu(p->attr.number_of_effective_blocks);
	p->attr.page_size = be16_to_cpu(p->attr.page_size);
	p->attr.memory_manufacturer_code
		= be16_to_cpu(p->attr.memory_manufacturer_code);
	p->attr.memory_device_code = be16_to_cpu(p->attr.memory_device_code);
	p->attr.implemented_capacity
		= be16_to_cpu(p->attr.implemented_capacity);
	p->attr.controller_number = be16_to_cpu(p->attr.controller_number);
	p->attr.controller_function = be16_to_cpu(p->attr.controller_function);
}

static int msb_read_boot_blocks(struct msb_data *msb)
{
	int pba = 0;
	struct scatterlist sg;
	struct ms_extra_data_register extra;
	struct ms_boot_page *page;

	msb->boot_block_locations[0] = MS_BLOCK_INVALID;
	msb->boot_block_locations[1] = MS_BLOCK_INVALID;
	msb->boot_block_count = 0;

	dbg_verbose("Start of a scan for the boot blocks");

	if (!msb->boot_page) {
		page = kmalloc_array(2, sizeof(struct ms_boot_page),
				     GFP_KERNEL);
		if (!page)
			return -ENOMEM;

		msb->boot_page = page;
	} else
		page = msb->boot_page;

	msb->block_count = MS_BLOCK_MAX_BOOT_ADDR;

	for (pba = 0; pba < MS_BLOCK_MAX_BOOT_ADDR; pba++) {

		sg_init_one(&sg, page, sizeof(*page));
		if (msb_read_page(msb, pba, 0, &extra, &sg, 0)) {
			dbg("boot scan: can't read pba %d", pba);
			continue;
		}

		if (extra.management_flag & MEMSTICK_MANAGEMENT_SYSFLG) {
			dbg("management flag doesn't indicate boot block %d",
									pba);
			continue;
		}

		if (be16_to_cpu(page->header.block_id) != MS_BLOCK_BOOT_ID) {
			dbg("the pba at %d doesn't contain a boot block ID",
									pba);
			continue;
		}

		msb_fix_boot_page_endianness(page);
		msb->boot_block_locations[msb->boot_block_count] = pba;

		page++;
		msb->boot_block_count++;

		if (msb->boot_block_count == 2)
			break;
	}

	if (!msb->boot_block_count) {
		pr_err("media doesn't contain master page, aborting");
		return -EIO;
	}

	dbg_verbose("End of scan for boot blocks");
	return 0;
}

static int msb_read_bad_block_table(struct msb_data *msb, int block_nr)
{
	struct ms_boot_page *boot_block;
	struct scatterlist sg;
	u16 *buffer = NULL;
	int offset = 0;
	int i, error = 0;
	int data_size, data_offset, page, page_offset, size_to_read;
	u16 pba;

	BUG_ON(block_nr > 1);
	boot_block = &msb->boot_page[block_nr];
	pba = msb->boot_block_locations[block_nr];

	if (msb->boot_block_locations[block_nr] == MS_BLOCK_INVALID)
		return -EINVAL;

	data_size = boot_block->entry.disabled_block.data_size;
	data_offset = sizeof(struct ms_boot_page) +
			boot_block->entry.disabled_block.start_addr;
	if (!data_size)
		return 0;

	page = data_offset / msb->page_size;
	page_offset = data_offset % msb->page_size;
	size_to_read =
		DIV_ROUND_UP(data_size + page_offset, msb->page_size) *
			msb->page_size;

	dbg("reading bad block of boot block at pba %d, offset %d len %d",
		pba, data_offset, data_size);

	buffer = kzalloc(size_to_read, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* Read the buffer */
	sg_init_one(&sg, buffer, size_to_read);

	while (offset < size_to_read) {
		error = msb_read_page(msb, pba, page, NULL, &sg, offset);
		if (error)
			goto out;

		page++;
		offset += msb->page_size;

		if (page == msb->pages_in_block) {
			pr_err("bad block table extends beyond the boot block");
			break;
		}
	}

	/* Process the bad block table */
	for (i = page_offset; i < data_size / sizeof(u16); i++) {

		u16 bad_block = be16_to_cpu(buffer[i]);

		if (bad_block >= msb->block_count) {
			dbg("bad block table contains invalid block %d",
								bad_block);
			continue;
		}

		if (test_bit(bad_block, msb->used_blocks_bitmap)) {
			dbg("duplicate bad block %d in the table",
				bad_block);
			continue;
		}

		dbg("block %d is marked as factory bad", bad_block);
		msb_mark_block_used(msb, bad_block);
	}
out:
	kfree(buffer);
	return error;
}

static int msb_ftl_initialize(struct msb_data *msb)
{
	int i;

	if (msb->ftl_initialized)
		return 0;

	msb->zone_count = msb->block_count / MS_BLOCKS_IN_ZONE;
	msb->logical_block_count = msb->zone_count * 496 - 2;

	msb->used_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
	msb->erased_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
	msb->lba_to_pba_table =
		kmalloc_array(msb->logical_block_count, sizeof(u16),
			      GFP_KERNEL);

	if (!msb->used_blocks_bitmap || !msb->lba_to_pba_table ||
						!msb->erased_blocks_bitmap) {
		kfree(msb->used_blocks_bitmap);
		kfree(msb->lba_to_pba_table);
		kfree(msb->erased_blocks_bitmap);
		return -ENOMEM;
	}

	for (i = 0; i < msb->zone_count; i++)
		msb->free_block_count[i] = MS_BLOCKS_IN_ZONE;

	memset(msb->lba_to_pba_table, MS_BLOCK_INVALID,
			msb->logical_block_count * sizeof(u16));

	dbg("initial FTL tables created. Zone count = %d, Logical block count = %d",
		msb->zone_count, msb->logical_block_count);

	msb->ftl_initialized = true;
	return 0;
}
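
/*
 * Added arithmetic note (derived from the code above): the first zone
 * exports 494 logical blocks and every later zone 496, so the total is
 * 494 + (zone_count - 1) * 496 = zone_count * 496 - 2, which is where
 * the "- 2" in logical_block_count comes from.
 */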

static int msb_ftl_scan(struct msb_data *msb)
{
	u16 pba, lba, other_block;
	u8 overwrite_flag, management_flag, other_overwrite_flag;
	int error;
	struct ms_extra_data_register extra;
	u8 *overwrite_flags = kzalloc(msb->block_count, GFP_KERNEL);

	if (!overwrite_flags)
		return -ENOMEM;

	dbg("Start of media scanning");
	for (pba = 0; pba < msb->block_count; pba++) {

		if (pba == msb->boot_block_locations[0] ||
			pba == msb->boot_block_locations[1]) {
			dbg_verbose("pba %05d -> [boot block]", pba);
			msb_mark_block_used(msb, pba);
			continue;
		}

		if (test_bit(pba, msb->used_blocks_bitmap)) {
			dbg_verbose("pba %05d -> [factory bad]", pba);
			continue;
		}

		memset(&extra, 0, sizeof(extra));
		error = msb_read_oob(msb, pba, 0, &extra);

		/* can't trust the page if we can't read the oob */
		if (error == -EBADMSG) {
			pr_notice(
			"oob of pba %d damaged, will try to erase it", pba);
			msb_mark_block_used(msb, pba);
			msb_erase_block(msb, pba);
			continue;
		} else if (error) {
			pr_err("unknown error %d on read of oob of pba %d - aborting",
				error, pba);

			kfree(overwrite_flags);
			return error;
		}

		lba = be16_to_cpu(extra.logical_address);
		management_flag = extra.management_flag;
		overwrite_flag = extra.overwrite_flag;
		overwrite_flags[pba] = overwrite_flag;

		/* Skip bad blocks */
		if (!(overwrite_flag & MEMSTICK_OVERWRITE_BKST)) {
			dbg("pba %05d -> [BAD]", pba);
			msb_mark_block_used(msb, pba);
			continue;
		}

		/* Skip system/drm blocks */
		if ((management_flag & MEMSTICK_MANAGEMENT_FLAG_NORMAL) !=
			MEMSTICK_MANAGEMENT_FLAG_NORMAL) {
			dbg("pba %05d -> [reserved management flag %02x]",
							pba, management_flag);
			msb_mark_block_used(msb, pba);
			continue;
		}

		/* Erase temporary tables */
		if (!(management_flag & MEMSTICK_MANAGEMENT_ATFLG)) {
			dbg("pba %05d -> [temp table] - will erase", pba);

			msb_mark_block_used(msb, pba);
			msb_erase_block(msb, pba);
			continue;
		}

		if (lba == MS_BLOCK_INVALID) {
			dbg_verbose("pba %05d -> [free]", pba);
			continue;
		}

		msb_mark_block_used(msb, pba);

		/* Block has an LBA that disagrees with its zone */
		if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
			pr_notice("pba %05d -> [bad lba %05d] - will erase",
								pba, lba);
			msb_erase_block(msb, pba);
			continue;
		}

		/* No collisions - great */
		if (msb->lba_to_pba_table[lba] == MS_BLOCK_INVALID) {
			dbg_verbose("pba %05d -> [lba %05d]", pba, lba);
			msb->lba_to_pba_table[lba] = pba;
			continue;
		}

		other_block = msb->lba_to_pba_table[lba];
		other_overwrite_flag = overwrite_flags[other_block];

		pr_notice("Collision between pba %d and pba %d",
			pba, other_block);

		if (!(overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
			pr_notice("pba %d is marked as stable, use it", pba);
			msb_erase_block(msb, other_block);
			msb->lba_to_pba_table[lba] = pba;
			continue;
		}

		if (!(other_overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
			pr_notice("pba %d is marked as stable, use it",
								other_block);
			msb_erase_block(msb, pba);
			continue;
		}

		pr_notice("collision between blocks %d and %d, without stable flag set on either, erasing pba %d",
				pba, other_block, other_block);

		msb_erase_block(msb, other_block);
		msb->lba_to_pba_table[lba] = pba;
	}

	dbg("End of media scanning");
	kfree(overwrite_flags);
	return 0;
}

static void msb_cache_flush_timer(struct timer_list *t)
{
	struct msb_data *msb = from_timer(msb, t, cache_flush_timer);

	msb->need_flush_cache = true;
	queue_work(msb->io_queue, &msb->io_work);
}

static void msb_cache_discard(struct msb_data *msb)
{
	if (msb->cache_block_lba == MS_BLOCK_INVALID)
		return;

	del_timer_sync(&msb->cache_flush_timer);

	dbg_verbose("Discarding the write cache");
	msb->cache_block_lba = MS_BLOCK_INVALID;
	bitmap_zero(&msb->valid_cache_bitmap, msb->pages_in_block);
}

static int msb_cache_init(struct msb_data *msb)
{
	timer_setup(&msb->cache_flush_timer, msb_cache_flush_timer, 0);

	if (!msb->cache)
		msb->cache = kzalloc(msb->block_size, GFP_KERNEL);
	if (!msb->cache)
		return -ENOMEM;

	msb_cache_discard(msb);
	return 0;
}

static int msb_cache_flush(struct msb_data *msb)
{
	struct scatterlist sg;
	struct ms_extra_data_register extra;
	int page, offset, error;
	u16 pba, lba;

	if (msb->read_only)
		return -EROFS;

	if (msb->cache_block_lba == MS_BLOCK_INVALID)
		return 0;

	lba = msb->cache_block_lba;
	pba = msb->lba_to_pba_table[lba];

	dbg_verbose("Flushing the write cache of pba %d (LBA %d)",
						pba, msb->cache_block_lba);

	sg_init_one(&sg, msb->cache, msb->block_size);

	/* Read all missing pages in cache */
	for (page = 0; page < msb->pages_in_block; page++) {

		if (test_bit(page, &msb->valid_cache_bitmap))
			continue;

		offset = page * msb->page_size;

		dbg_verbose("reading non-present sector %d of cache block %d",
			page, lba);
		error = msb_read_page(msb, pba, page, &extra, &sg, offset);

		/* Bad pages are copied with 00 page status */
		if (error == -EBADMSG) {
			pr_err("read error on sector %d, contents probably damaged", page);
			continue;
		}

		if (error)
			return error;

		if ((extra.overwrite_flag & MEMSTICK_OV_PG_NORMAL) !=
							MEMSTICK_OV_PG_NORMAL) {
			dbg("page %d is marked as bad", page);
			continue;
		}

		set_bit(page, &msb->valid_cache_bitmap);
	}

	/* Write the cache now */
	error = msb_update_block(msb, msb->cache_block_lba, &sg, 0);
	pba = msb->lba_to_pba_table[msb->cache_block_lba];

	/* Mark invalid pages */
	if (!error) {
		for (page = 0; page < msb->pages_in_block; page++) {

			if (test_bit(page, &msb->valid_cache_bitmap))
				continue;

			dbg("marking page %d as containing damaged data",
				page);
			msb_set_overwrite_flag(msb,
				pba, page, 0xFF & ~MEMSTICK_OV_PG_NORMAL);
		}
	}

	msb_cache_discard(msb);
	return error;
}

static int msb_cache_write(struct msb_data *msb, int lba,
	int page, bool add_to_cache_only, struct scatterlist *sg, int offset)
{
	int error;
	struct scatterlist sg_tmp[10];

	if (msb->read_only)
		return -EROFS;

	if (msb->cache_block_lba == MS_BLOCK_INVALID ||
						lba != msb->cache_block_lba)
		if (add_to_cache_only)
			return 0;

	/* If we need to write a different block */
	if (msb->cache_block_lba != MS_BLOCK_INVALID &&
						lba != msb->cache_block_lba) {
		dbg_verbose("first flush the cache");
		error = msb_cache_flush(msb);
		if (error)
			return error;
	}

	if (msb->cache_block_lba == MS_BLOCK_INVALID) {
		msb->cache_block_lba = lba;
		mod_timer(&msb->cache_flush_timer,
			jiffies + msecs_to_jiffies(cache_flush_timeout));
	}

	dbg_verbose("Write of LBA %d page %d to cache ", lba, page);

	sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
	msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp), offset, msb->page_size);

	sg_copy_to_buffer(sg_tmp, sg_nents(sg_tmp),
		msb->cache + page * msb->page_size, msb->page_size);

	set_bit(page, &msb->valid_cache_bitmap);
	return 0;
}
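
/*
 * Added note (not from the original source): the cache is a classic
 * one-block write-back cache. Pages written by the host land in
 * msb->cache and are marked in valid_cache_bitmap; on flush, pages that
 * were never written are first read back from the old physical block, so
 * that msb_update_block() can rewrite the whole eraseblock in one pass.
 * The cache_flush_timeout module parameter (1000 ms by default) bounds
 * how long dirty data may sit in the cache.
 */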

static int msb_cache_read(struct msb_data *msb, int lba,
				int page, struct scatterlist *sg, int offset)
{
	int pba = msb->lba_to_pba_table[lba];
	struct scatterlist sg_tmp[10];
	int error = 0;

	if (lba == msb->cache_block_lba &&
	    test_bit(page, &msb->valid_cache_bitmap)) {

		dbg_verbose("Read of LBA %d (pba %d) sector %d from cache",
							lba, pba, page);

		sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
		msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp),
			offset, msb->page_size);
		sg_copy_from_buffer(sg_tmp, sg_nents(sg_tmp),
			msb->cache + msb->page_size * page,
			msb->page_size);
	} else {
		dbg_verbose("Read of LBA %d (pba %d) sector %d from device",
							lba, pba, page);

		error = msb_read_page(msb, pba, page, NULL, sg, offset);
		if (error)
			return error;

		msb_cache_write(msb, lba, page, true, sg, offset);
	}
	return error;
}

/* Emulated geometry table
 * This table's content isn't that important;
 * one could put different values here, provided that they still
 * cover the whole disk.
 * The 64 MB entry is what Windows reports for my 64M memstick.
 */

static const struct chs_entry chs_table[] = {
/*        size sectors cylinders heads */
	{ 4,    16,    247,      2  },
	{ 8,    16,    495,      2  },
	{ 16,   16,    495,      4  },
	{ 32,   16,    991,      4  },
	{ 64,   16,    991,      8  },
	{128,   16,    991,      16 },
	{ 0 }
};
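
/*
 * Added sanity check (illustration only): each row's CHS product must span
 * the advertised size in 512-byte sectors, e.g. for the 4 MB entry
 * 16 * 247 * 2 * 512 = 4046848 bytes (just under 4 MiB), and for the
 * 64 MB entry 16 * 991 * 8 * 512 = 64946176 bytes.
 */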

/* Load information about the card */
static int msb_init_card(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_host *host = card->host;
	struct ms_boot_page *boot_block;
	int error = 0, i, raw_size_in_megs;

	msb->caps = 0;

	if (card->id.class >= MEMSTICK_CLASS_ROM &&
				card->id.class <= MEMSTICK_CLASS_ROM)
		msb->read_only = true;

	msb->state = -1;
	error = msb_reset(msb, false);
	if (error)
		return error;

	/* Due to a bug in the JMicron driver written by Alex Dubov,
	 * its serial mode barely works,
	 * so we switch to parallel mode right away
	 */
	if (host->caps & MEMSTICK_CAP_PAR4)
		msb_switch_to_parallel(msb);

	msb->page_size = sizeof(struct ms_boot_page);

	/* Read the boot page */
	error = msb_read_boot_blocks(msb);
	if (error)
		return -EIO;

	boot_block = &msb->boot_page[0];

	/* Save interesting attributes from the boot page */
	msb->block_count = boot_block->attr.number_of_blocks;
	msb->page_size = boot_block->attr.page_size;

	msb->pages_in_block = boot_block->attr.block_size * 2;
	msb->block_size = msb->page_size * msb->pages_in_block;

	if (msb->page_size > PAGE_SIZE) {
		/* this isn't supported by Linux at all, anyway */
		dbg("device page %d size isn't supported", msb->page_size);
		return -EINVAL;
	}

	msb->block_buffer = kzalloc(msb->block_size, GFP_KERNEL);
	if (!msb->block_buffer)
		return -ENOMEM;

	raw_size_in_megs = (msb->block_size * msb->block_count) >> 20;

	for (i = 0; chs_table[i].size; i++) {

		if (chs_table[i].size != raw_size_in_megs)
			continue;

		msb->geometry.cylinders = chs_table[i].cyl;
		msb->geometry.heads = chs_table[i].head;
		msb->geometry.sectors = chs_table[i].sec;
		break;
	}

	if (boot_block->attr.transfer_supporting == 1)
		msb->caps |= MEMSTICK_CAP_PAR4;

	if (boot_block->attr.device_type & 0x03)
		msb->read_only = true;

	dbg("Total block count = %d", msb->block_count);
	dbg("Each block consists of %d pages", msb->pages_in_block);
	dbg("Page size = %d bytes", msb->page_size);
	dbg("Parallel mode supported: %d", !!(msb->caps & MEMSTICK_CAP_PAR4));
	dbg("Read only: %d", msb->read_only);

#if 0
	/* Now we can switch the interface */
	if (host->caps & msb->caps & MEMSTICK_CAP_PAR4)
		msb_switch_to_parallel(msb);
#endif

	error = msb_cache_init(msb);
	if (error)
		return error;

	error = msb_ftl_initialize(msb);
	if (error)
		return error;

	/* Read the bad block table */
	error = msb_read_bad_block_table(msb, 0);

	if (error && error != -ENOMEM) {
		dbg("failed to read bad block table from primary boot block, trying from backup");
		error = msb_read_bad_block_table(msb, 1);
	}

	if (error)
		return error;

	/* *drum roll* Scan the media */
	error = msb_ftl_scan(msb);
	if (error) {
		pr_err("Scan of media failed");
		return error;
	}

	return 0;
}

static int msb_do_write_request(struct msb_data *msb, int lba,
	int page, struct scatterlist *sg, size_t len, int *sucessfuly_written)
{
	int error = 0;
	off_t offset = 0;

	*sucessfuly_written = 0;

	while (offset < len) {
		if (page == 0 && len - offset >= msb->block_size) {

			if (msb->cache_block_lba == lba)
				msb_cache_discard(msb);

			dbg_verbose("Writing whole lba %d", lba);
			error = msb_update_block(msb, lba, sg, offset);
			if (error)
				return error;

			offset += msb->block_size;
			*sucessfuly_written += msb->block_size;
			lba++;
			continue;
		}

		error = msb_cache_write(msb, lba, page, false, sg, offset);
		if (error)
			return error;

		offset += msb->page_size;
		*sucessfuly_written += msb->page_size;

		page++;
		if (page == msb->pages_in_block) {
			page = 0;
			lba++;
		}
	}
	return 0;
}

static int msb_do_read_request(struct msb_data *msb, int lba,
		int page, struct scatterlist *sg, int len, int *sucessfuly_read)
{
	int error = 0;
	int offset = 0;

	*sucessfuly_read = 0;

	while (offset < len) {

		error = msb_cache_read(msb, lba, page, sg, offset);
		if (error)
			return error;

		offset += msb->page_size;
		*sucessfuly_read += msb->page_size;

		page++;
		if (page == msb->pages_in_block) {
			page = 0;
			lba++;
		}
	}
	return 0;
}

static void msb_io_work(struct work_struct *work)
{
	struct msb_data *msb = container_of(work, struct msb_data, io_work);
	int page, error, len;
	sector_t lba;
	struct scatterlist *sg = msb->prealloc_sg;
	struct request *req;

	dbg_verbose("IO: work started");

	while (1) {
		spin_lock_irq(&msb->q_lock);

		if (msb->need_flush_cache) {
			msb->need_flush_cache = false;
			spin_unlock_irq(&msb->q_lock);
			msb_cache_flush(msb);
			continue;
		}

		req = msb->req;
		if (!req) {
			dbg_verbose("IO: no more requests exiting");
			spin_unlock_irq(&msb->q_lock);
			return;
		}

		spin_unlock_irq(&msb->q_lock);

		/* process the request */
		dbg_verbose("IO: processing new request");
		blk_rq_map_sg(msb->queue, req, sg);

		lba = blk_rq_pos(req);

		sector_div(lba, msb->page_size / 512);
		page = sector_div(lba, msb->pages_in_block);

		if (rq_data_dir(msb->req) == READ)
			error = msb_do_read_request(msb, lba, page, sg,
				blk_rq_bytes(req), &len);
		else
			error = msb_do_write_request(msb, lba, page, sg,
				blk_rq_bytes(req), &len);

		if (len && !blk_update_request(req, BLK_STS_OK, len)) {
			__blk_mq_end_request(req, BLK_STS_OK);
			spin_lock_irq(&msb->q_lock);
			msb->req = NULL;
			spin_unlock_irq(&msb->q_lock);
		}

		if (error && msb->req) {
			blk_status_t ret = errno_to_blk_status(error);

			dbg_verbose("IO: ending one sector of the request with error");
			blk_mq_end_request(req, ret);
			spin_lock_irq(&msb->q_lock);
			msb->req = NULL;
			spin_unlock_irq(&msb->q_lock);
		}

		if (msb->req)
			dbg_verbose("IO: request still pending");
	}
}
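
/*
 * Added worked example (illustration only): the two sector_div() calls
 * convert a 512-byte request position into (lba, page). With a
 * hypothetical 2048-byte page and 16 pages per block, request sector 100
 * first becomes page-sized unit 100 / 4 = 25, and 25 = 1 * 16 + 9 then
 * yields lba = 1, page = 9.
 */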

static DEFINE_IDR(msb_disk_idr); /* set of used disk numbers */
static DEFINE_MUTEX(msb_disk_lock); /* protects against races in open/release */

static int msb_bd_open(struct block_device *bdev, fmode_t mode)
{
	struct gendisk *disk = bdev->bd_disk;
	struct msb_data *msb = disk->private_data;

	dbg_verbose("block device open");

	mutex_lock(&msb_disk_lock);

	if (msb && msb->card)
		msb->usage_count++;

	mutex_unlock(&msb_disk_lock);
	return 0;
}

static void msb_data_clear(struct msb_data *msb)
{
	kfree(msb->boot_page);
	kfree(msb->used_blocks_bitmap);
	kfree(msb->lba_to_pba_table);
	kfree(msb->cache);
	msb->card = NULL;
}

static int msb_disk_release(struct gendisk *disk)
{
	struct msb_data *msb = disk->private_data;

	dbg_verbose("block device release");
	mutex_lock(&msb_disk_lock);

	if (msb) {
		if (msb->usage_count)
			msb->usage_count--;

		if (!msb->usage_count) {
			disk->private_data = NULL;
			idr_remove(&msb_disk_idr, msb->disk_id);
			put_disk(disk);
			kfree(msb);
		}
	}
	mutex_unlock(&msb_disk_lock);
	return 0;
}

static void msb_bd_release(struct gendisk *disk, fmode_t mode)
{
	msb_disk_release(disk);
}

static int msb_bd_getgeo(struct block_device *bdev,
				struct hd_geometry *geo)
{
	struct msb_data *msb = bdev->bd_disk->private_data;

	*geo = msb->geometry;
	return 0;
}

static blk_status_t msb_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct memstick_dev *card = hctx->queue->queuedata;
	struct msb_data *msb = memstick_get_drvdata(card);
	struct request *req = bd->rq;

	dbg_verbose("Submit request");

	spin_lock_irq(&msb->q_lock);

	if (msb->card_dead) {
		dbg("Refusing requests on removed card");

		WARN_ON(!msb->io_queue_stopped);

		spin_unlock_irq(&msb->q_lock);
		blk_mq_start_request(req);
		return BLK_STS_IOERR;
	}

	if (msb->req) {
		spin_unlock_irq(&msb->q_lock);
		return BLK_STS_DEV_RESOURCE;
	}

	blk_mq_start_request(req);
	msb->req = req;

	if (!msb->io_queue_stopped)
		queue_work(msb->io_queue, &msb->io_work);

	spin_unlock_irq(&msb->q_lock);
	return BLK_STS_OK;
}

static int msb_check_card(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);

	return (msb->card_dead == 0);
}

static void msb_stop(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	unsigned long flags;

	dbg("Stopping all msblock IO");

	blk_mq_stop_hw_queues(msb->queue);
	spin_lock_irqsave(&msb->q_lock, flags);
	msb->io_queue_stopped = true;
	spin_unlock_irqrestore(&msb->q_lock, flags);

	del_timer_sync(&msb->cache_flush_timer);
	flush_workqueue(msb->io_queue);

	spin_lock_irqsave(&msb->q_lock, flags);
	if (msb->req) {
		blk_mq_requeue_request(msb->req, false);
		msb->req = NULL;
	}
	spin_unlock_irqrestore(&msb->q_lock, flags);
}

static void msb_start(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	unsigned long flags;

	dbg("Resuming IO from msblock");

	msb_invalidate_reg_window(msb);

	spin_lock_irqsave(&msb->q_lock, flags);
	if (!msb->io_queue_stopped || msb->card_dead) {
		spin_unlock_irqrestore(&msb->q_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&msb->q_lock, flags);

	/* Kick a cache flush anyway, it's harmless */
	msb->need_flush_cache = true;
	msb->io_queue_stopped = false;

	blk_mq_start_hw_queues(msb->queue);

	queue_work(msb->io_queue, &msb->io_work);
}

static const struct block_device_operations msb_bdops = {
	.open    = msb_bd_open,
	.release = msb_bd_release,
	.getgeo  = msb_bd_getgeo,
	.owner   = THIS_MODULE
};

static const struct blk_mq_ops msb_mq_ops = {
	.queue_rq = msb_queue_rq,
};

/* Registers the block device */
static int msb_init_disk(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	int rc;
	unsigned long capacity;

	mutex_lock(&msb_disk_lock);
	msb->disk_id = idr_alloc(&msb_disk_idr, card, 0, 256, GFP_KERNEL);
	mutex_unlock(&msb_disk_lock);

	if (msb->disk_id < 0)
		return msb->disk_id;

	msb->disk = alloc_disk(0);
	if (!msb->disk) {
		rc = -ENOMEM;
		goto out_release_id;
	}

	msb->queue = blk_mq_init_sq_queue(&msb->tag_set, &msb_mq_ops, 2,
					  BLK_MQ_F_SHOULD_MERGE);
	if (IS_ERR(msb->queue)) {
		rc = PTR_ERR(msb->queue);
		msb->queue = NULL;
		goto out_put_disk;
	}

	msb->queue->queuedata = card;

	blk_queue_max_hw_sectors(msb->queue, MS_BLOCK_MAX_PAGES);
	blk_queue_max_segments(msb->queue, MS_BLOCK_MAX_SEGS);
	blk_queue_max_segment_size(msb->queue,
				   MS_BLOCK_MAX_PAGES * msb->page_size);
	blk_queue_logical_block_size(msb->queue, msb->page_size);

	sprintf(msb->disk->disk_name, "msblk%d", msb->disk_id);
	msb->disk->fops = &msb_bdops;
	msb->disk->private_data = msb;
	msb->disk->queue = msb->queue;
	msb->disk->flags |= GENHD_FL_EXT_DEVT;

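	/* set_capacity() expects the device size in 512-byte sectors */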
	capacity = msb->pages_in_block * msb->logical_block_count;
	capacity *= (msb->page_size / 512);
	set_capacity(msb->disk, capacity);
	dbg("Set total disk size to %lu sectors", capacity);

	msb->usage_count = 1;
	msb->io_queue = alloc_ordered_workqueue("ms_block", WQ_MEM_RECLAIM);
	if (!msb->io_queue) {
		rc = -ENOMEM;
		goto out_cleanup_queue;
	}

	INIT_WORK(&msb->io_work, msb_io_work);
	sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);

	if (msb->read_only)
		set_disk_ro(msb->disk, 1);

	msb_start(card);
	device_add_disk(&card->dev, msb->disk, NULL);
	dbg("Disk added");
	return 0;

out_cleanup_queue:
	blk_cleanup_queue(msb->queue);
	blk_mq_free_tag_set(&msb->tag_set);
	msb->queue = NULL;
out_put_disk:
	put_disk(msb->disk);
out_release_id:
	mutex_lock(&msb_disk_lock);
	idr_remove(&msb_disk_idr, msb->disk_id);
	mutex_unlock(&msb_disk_lock);
	return rc;
}

static int msb_probe(struct memstick_dev *card)
{
	struct msb_data *msb;
	int rc = 0;

	msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
	if (!msb)
		return -ENOMEM;
	memstick_set_drvdata(card, msb);
	msb->card = card;
	spin_lock_init(&msb->q_lock);

	rc = msb_init_card(card);
	if (rc)
		goto out_free;

	rc = msb_init_disk(card);
	if (!rc) {
		card->check = msb_check_card;
		card->stop = msb_stop;
		card->start = msb_start;
		return 0;
	}
out_free:
	memstick_set_drvdata(card, NULL);
	msb_data_clear(msb);
	kfree(msb);
	return rc;
}

static void msb_remove(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	unsigned long flags;

	if (!msb->io_queue_stopped)
		msb_stop(card);

	dbg("Removing the disk device");

	/* Take care of unhandled + new requests from now on */
	spin_lock_irqsave(&msb->q_lock, flags);
	msb->card_dead = true;
	spin_unlock_irqrestore(&msb->q_lock, flags);
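	/* Restart dispatch so queued requests fail quickly with an IO error */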
	blk_mq_start_hw_queues(msb->queue);

	/* Remove the disk */
	del_gendisk(msb->disk);
	blk_cleanup_queue(msb->queue);
	blk_mq_free_tag_set(&msb->tag_set);
	msb->queue = NULL;

	mutex_lock(&msb_disk_lock);
	msb_data_clear(msb);
	mutex_unlock(&msb_disk_lock);

	msb_disk_release(msb->disk);
	memstick_set_drvdata(card, NULL);
}

#ifdef CONFIG_PM

static int msb_suspend(struct memstick_dev *card, pm_message_t state)
{
	msb_stop(card);
	return 0;
}

static int msb_resume(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct msb_data *new_msb = NULL;
	bool card_dead = true;

#ifndef CONFIG_MEMSTICK_UNSAFE_RESUME
	msb->card_dead = true;
	return 0;
#endif
	mutex_lock(&card->host->lock);

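	/*
	 * Bring up the (possibly different) card in a scratch msb_data and
	 * compare its boot page, FTL tables and geometry against the saved
	 * state; on any mismatch, assume the card was swapped while we
	 * slept and mark it dead rather than risk corrupting it.
	 */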
	new_msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
	if (!new_msb)
		goto out;

	new_msb->card = card;
	memstick_set_drvdata(card, new_msb);
	spin_lock_init(&new_msb->q_lock);
	sg_init_table(new_msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);

	if (msb_init_card(card))
		goto out;

	if (msb->block_size != new_msb->block_size)
		goto out;

	if (memcmp(msb->boot_page, new_msb->boot_page,
					sizeof(struct ms_boot_page)))
		goto out;

	if (msb->logical_block_count != new_msb->logical_block_count ||
		memcmp(msb->lba_to_pba_table, new_msb->lba_to_pba_table,
						msb->logical_block_count))
		goto out;

	if (msb->block_count != new_msb->block_count ||
		memcmp(msb->used_blocks_bitmap, new_msb->used_blocks_bitmap,
							msb->block_count / 8))
		goto out;

	card_dead = false;
out:
	if (card_dead)
		dbg("Card was removed/replaced during suspend");

	msb->card_dead = card_dead;
	memstick_set_drvdata(card, msb);

	if (new_msb) {
		msb_data_clear(new_msb);
		kfree(new_msb);
	}

	msb_start(card);
	mutex_unlock(&card->host->lock);
	return 0;
}
#else

#define msb_suspend NULL
#define msb_resume NULL

#endif /* CONFIG_PM */

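/*
 * Bind to any legacy MemoryStick storage card, including the read-only
 * (ROM/RO) and write-protected variants; Duo cards speak the same
 * legacy protocol in a smaller form factor.
 */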
static struct memstick_device_id msb_id_tbl[] = {
	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
	 MEMSTICK_CLASS_FLASH},

	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
	 MEMSTICK_CLASS_ROM},

	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
	 MEMSTICK_CLASS_RO},

	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
	 MEMSTICK_CLASS_WP},

	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_DUO, MEMSTICK_CATEGORY_STORAGE_DUO,
	 MEMSTICK_CLASS_DUO},
	{}
};
MODULE_DEVICE_TABLE(memstick, msb_id_tbl);

static struct memstick_driver msb_driver = {
	.driver = {
		.name  = DRIVER_NAME,
		.owner = THIS_MODULE
	},
	.id_table = msb_id_tbl,
	.probe    = msb_probe,
	.remove   = msb_remove,
	.suspend  = msb_suspend,
	.resume   = msb_resume
};

static int __init msb_init(void)
{
	int rc = memstick_register_driver(&msb_driver);

	if (rc)
		pr_err("failed to register memstick driver (error %d)\n", rc);

	return rc;
}

static void __exit msb_exit(void)
{
	memstick_unregister_driver(&msb_driver);
	idr_destroy(&msb_disk_idr);
}

module_init(msb_init);
module_exit(msb_exit);

module_param(cache_flush_timeout, int, S_IRUGO);
MODULE_PARM_DESC(cache_flush_timeout,
	"Cache flush timeout in msec (1000 default)");
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level (0-2)");

module_param(verify_writes, bool, S_IRUGO);
MODULE_PARM_DESC(verify_writes, "Read back and check all data that is written");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Maxim Levitsky");
MODULE_DESCRIPTION("Sony MemoryStick block device driver");
1/*
2 * ms_block.c - Sony MemoryStick (legacy) storage support
3
4 * Copyright (C) 2013 Maxim Levitsky <maximlevitsky@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * Minor portions of the driver were copied from mspro_block.c which is
11 * Copyright (C) 2007 Alex Dubov <oakad@yahoo.com>
12 *
13 */
14#define DRIVER_NAME "ms_block"
15#define pr_fmt(fmt) DRIVER_NAME ": " fmt
16
17#include <linux/module.h>
18#include <linux/blkdev.h>
19#include <linux/memstick.h>
20#include <linux/idr.h>
21#include <linux/hdreg.h>
22#include <linux/delay.h>
23#include <linux/slab.h>
24#include <linux/random.h>
25#include <linux/bitmap.h>
26#include <linux/scatterlist.h>
27#include <linux/jiffies.h>
28#include <linux/workqueue.h>
29#include <linux/mutex.h>
30#include "ms_block.h"
31
32static int debug;
33static int cache_flush_timeout = 1000;
34static bool verify_writes;
35
36/*
37 * Copies section of 'sg_from' starting from offset 'offset' and with length
38 * 'len' To another scatterlist of to_nents enties
39 */
40static size_t msb_sg_copy(struct scatterlist *sg_from,
41 struct scatterlist *sg_to, int to_nents, size_t offset, size_t len)
42{
43 size_t copied = 0;
44
45 while (offset > 0) {
46 if (offset >= sg_from->length) {
47 if (sg_is_last(sg_from))
48 return 0;
49
50 offset -= sg_from->length;
51 sg_from = sg_next(sg_from);
52 continue;
53 }
54
55 copied = min(len, sg_from->length - offset);
56 sg_set_page(sg_to, sg_page(sg_from),
57 copied, sg_from->offset + offset);
58
59 len -= copied;
60 offset = 0;
61
62 if (sg_is_last(sg_from) || !len)
63 goto out;
64
65 sg_to = sg_next(sg_to);
66 to_nents--;
67 sg_from = sg_next(sg_from);
68 }
69
70 while (len > sg_from->length && to_nents--) {
71 len -= sg_from->length;
72 copied += sg_from->length;
73
74 sg_set_page(sg_to, sg_page(sg_from),
75 sg_from->length, sg_from->offset);
76
77 if (sg_is_last(sg_from) || !len)
78 goto out;
79
80 sg_from = sg_next(sg_from);
81 sg_to = sg_next(sg_to);
82 }
83
84 if (len && to_nents) {
85 sg_set_page(sg_to, sg_page(sg_from), len, sg_from->offset);
86 copied += len;
87 }
88out:
89 sg_mark_end(sg_to);
90 return copied;
91}
92
93/*
94 * Compares section of 'sg' starting from offset 'offset' and with length 'len'
95 * to linear buffer of length 'len' at address 'buffer'
96 * Returns 0 if equal and -1 otherwice
97 */
98static int msb_sg_compare_to_buffer(struct scatterlist *sg,
99 size_t offset, u8 *buffer, size_t len)
100{
101 int retval = 0, cmplen;
102 struct sg_mapping_iter miter;
103
104 sg_miter_start(&miter, sg, sg_nents(sg),
105 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
106
107 while (sg_miter_next(&miter) && len > 0) {
108 if (offset >= miter.length) {
109 offset -= miter.length;
110 continue;
111 }
112
113 cmplen = min(miter.length - offset, len);
114 retval = memcmp(miter.addr + offset, buffer, cmplen) ? -1 : 0;
115 if (retval)
116 break;
117
118 buffer += cmplen;
119 len -= cmplen;
120 offset = 0;
121 }
122
123 if (!retval && len)
124 retval = -1;
125
126 sg_miter_stop(&miter);
127 return retval;
128}
129
130
131/* Get zone at which block with logical address 'lba' lives
132 * Flash is broken into zones.
133 * Each zone consists of 512 eraseblocks, out of which in first
134 * zone 494 are used and 496 are for all following zones.
135 * Therefore zone #0 hosts blocks 0-493, zone #1 blocks 494-988, etc...
136*/
137static int msb_get_zone_from_lba(int lba)
138{
139 if (lba < 494)
140 return 0;
141 return ((lba - 494) / 496) + 1;
142}
143
144/* Get zone of physical block. Trivial */
145static int msb_get_zone_from_pba(int pba)
146{
147 return pba / MS_BLOCKS_IN_ZONE;
148}
149
150/* Debug test to validate free block counts */
151static int msb_validate_used_block_bitmap(struct msb_data *msb)
152{
153 int total_free_blocks = 0;
154 int i;
155
156 if (!debug)
157 return 0;
158
159 for (i = 0; i < msb->zone_count; i++)
160 total_free_blocks += msb->free_block_count[i];
161
162 if (msb->block_count - bitmap_weight(msb->used_blocks_bitmap,
163 msb->block_count) == total_free_blocks)
164 return 0;
165
166 pr_err("BUG: free block counts don't match the bitmap");
167 msb->read_only = true;
168 return -EINVAL;
169}
170
171/* Mark physical block as used */
172static void msb_mark_block_used(struct msb_data *msb, int pba)
173{
174 int zone = msb_get_zone_from_pba(pba);
175
176 if (test_bit(pba, msb->used_blocks_bitmap)) {
177 pr_err(
178 "BUG: attempt to mark already used pba %d as used", pba);
179 msb->read_only = true;
180 return;
181 }
182
183 if (msb_validate_used_block_bitmap(msb))
184 return;
185
186 /* No races because all IO is single threaded */
187 __set_bit(pba, msb->used_blocks_bitmap);
188 msb->free_block_count[zone]--;
189}
190
191/* Mark physical block as free */
192static void msb_mark_block_unused(struct msb_data *msb, int pba)
193{
194 int zone = msb_get_zone_from_pba(pba);
195
196 if (!test_bit(pba, msb->used_blocks_bitmap)) {
197 pr_err("BUG: attempt to mark already unused pba %d as unused" , pba);
198 msb->read_only = true;
199 return;
200 }
201
202 if (msb_validate_used_block_bitmap(msb))
203 return;
204
205 /* No races because all IO is single threaded */
206 __clear_bit(pba, msb->used_blocks_bitmap);
207 msb->free_block_count[zone]++;
208}
209
210/* Invalidate current register window */
211static void msb_invalidate_reg_window(struct msb_data *msb)
212{
213 msb->reg_addr.w_offset = offsetof(struct ms_register, id);
214 msb->reg_addr.w_length = sizeof(struct ms_id_register);
215 msb->reg_addr.r_offset = offsetof(struct ms_register, id);
216 msb->reg_addr.r_length = sizeof(struct ms_id_register);
217 msb->addr_valid = false;
218}
219
220/* Start a state machine */
221static int msb_run_state_machine(struct msb_data *msb, int (*state_func)
222 (struct memstick_dev *card, struct memstick_request **req))
223{
224 struct memstick_dev *card = msb->card;
225
226 WARN_ON(msb->state != -1);
227 msb->int_polling = false;
228 msb->state = 0;
229 msb->exit_error = 0;
230
231 memset(&card->current_mrq, 0, sizeof(card->current_mrq));
232
233 card->next_request = state_func;
234 memstick_new_req(card->host);
235 wait_for_completion(&card->mrq_complete);
236
237 WARN_ON(msb->state != -1);
238 return msb->exit_error;
239}
240
241/* State machines call that to exit */
242static int msb_exit_state_machine(struct msb_data *msb, int error)
243{
244 WARN_ON(msb->state == -1);
245
246 msb->state = -1;
247 msb->exit_error = error;
248 msb->card->next_request = h_msb_default_bad;
249
250 /* Invalidate reg window on errors */
251 if (error)
252 msb_invalidate_reg_window(msb);
253
254 complete(&msb->card->mrq_complete);
255 return -ENXIO;
256}
257
258/* read INT register */
259static int msb_read_int_reg(struct msb_data *msb, long timeout)
260{
261 struct memstick_request *mrq = &msb->card->current_mrq;
262
263 WARN_ON(msb->state == -1);
264
265 if (!msb->int_polling) {
266 msb->int_timeout = jiffies +
267 msecs_to_jiffies(timeout == -1 ? 500 : timeout);
268 msb->int_polling = true;
269 } else if (time_after(jiffies, msb->int_timeout)) {
270 mrq->data[0] = MEMSTICK_INT_CMDNAK;
271 return 0;
272 }
273
274 if ((msb->caps & MEMSTICK_CAP_AUTO_GET_INT) &&
275 mrq->need_card_int && !mrq->error) {
276 mrq->data[0] = mrq->int_reg;
277 mrq->need_card_int = false;
278 return 0;
279 } else {
280 memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
281 return 1;
282 }
283}
284
285/* Read a register */
286static int msb_read_regs(struct msb_data *msb, int offset, int len)
287{
288 struct memstick_request *req = &msb->card->current_mrq;
289
290 if (msb->reg_addr.r_offset != offset ||
291 msb->reg_addr.r_length != len || !msb->addr_valid) {
292
293 msb->reg_addr.r_offset = offset;
294 msb->reg_addr.r_length = len;
295 msb->addr_valid = true;
296
297 memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
298 &msb->reg_addr, sizeof(msb->reg_addr));
299 return 0;
300 }
301
302 memstick_init_req(req, MS_TPC_READ_REG, NULL, len);
303 return 1;
304}
305
306/* Write a card register */
307static int msb_write_regs(struct msb_data *msb, int offset, int len, void *buf)
308{
309 struct memstick_request *req = &msb->card->current_mrq;
310
311 if (msb->reg_addr.w_offset != offset ||
312 msb->reg_addr.w_length != len || !msb->addr_valid) {
313
314 msb->reg_addr.w_offset = offset;
315 msb->reg_addr.w_length = len;
316 msb->addr_valid = true;
317
318 memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
319 &msb->reg_addr, sizeof(msb->reg_addr));
320 return 0;
321 }
322
323 memstick_init_req(req, MS_TPC_WRITE_REG, buf, len);
324 return 1;
325}
326
327/* Handler for absence of IO */
328static int h_msb_default_bad(struct memstick_dev *card,
329 struct memstick_request **mrq)
330{
331 return -ENXIO;
332}
333
334/*
335 * This function is a handler for reads of one page from device.
336 * Writes output to msb->current_sg, takes sector address from msb->reg.param
337 * Can also be used to read extra data only. Set params accordintly.
338 */
339static int h_msb_read_page(struct memstick_dev *card,
340 struct memstick_request **out_mrq)
341{
342 struct msb_data *msb = memstick_get_drvdata(card);
343 struct memstick_request *mrq = *out_mrq = &card->current_mrq;
344 struct scatterlist sg[2];
345 u8 command, intreg;
346
347 if (mrq->error) {
348 dbg("read_page, unknown error");
349 return msb_exit_state_machine(msb, mrq->error);
350 }
351again:
352 switch (msb->state) {
353 case MSB_RP_SEND_BLOCK_ADDRESS:
354 /* msb_write_regs sometimes "fails" because it needs to update
355 the reg window, and thus it returns request for that.
356 Then we stay in this state and retry */
357 if (!msb_write_regs(msb,
358 offsetof(struct ms_register, param),
359 sizeof(struct ms_param_register),
360 (unsigned char *)&msb->regs.param))
361 return 0;
362
363 msb->state = MSB_RP_SEND_READ_COMMAND;
364 return 0;
365
366 case MSB_RP_SEND_READ_COMMAND:
367 command = MS_CMD_BLOCK_READ;
368 memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
369 msb->state = MSB_RP_SEND_INT_REQ;
370 return 0;
371
372 case MSB_RP_SEND_INT_REQ:
373 msb->state = MSB_RP_RECEIVE_INT_REQ_RESULT;
374 /* If dont actually need to send the int read request (only in
375 serial mode), then just fall through */
376 if (msb_read_int_reg(msb, -1))
377 return 0;
378 /* fallthrough */
379
380 case MSB_RP_RECEIVE_INT_REQ_RESULT:
381 intreg = mrq->data[0];
382 msb->regs.status.interrupt = intreg;
383
384 if (intreg & MEMSTICK_INT_CMDNAK)
385 return msb_exit_state_machine(msb, -EIO);
386
387 if (!(intreg & MEMSTICK_INT_CED)) {
388 msb->state = MSB_RP_SEND_INT_REQ;
389 goto again;
390 }
391
392 msb->int_polling = false;
393 msb->state = (intreg & MEMSTICK_INT_ERR) ?
394 MSB_RP_SEND_READ_STATUS_REG : MSB_RP_SEND_OOB_READ;
395 goto again;
396
397 case MSB_RP_SEND_READ_STATUS_REG:
398 /* read the status register to understand source of the INT_ERR */
399 if (!msb_read_regs(msb,
400 offsetof(struct ms_register, status),
401 sizeof(struct ms_status_register)))
402 return 0;
403
404 msb->state = MSB_RP_RECEIVE_STATUS_REG;
405 return 0;
406
407 case MSB_RP_RECEIVE_STATUS_REG:
408 msb->regs.status = *(struct ms_status_register *)mrq->data;
409 msb->state = MSB_RP_SEND_OOB_READ;
410 /* fallthrough */
411
412 case MSB_RP_SEND_OOB_READ:
413 if (!msb_read_regs(msb,
414 offsetof(struct ms_register, extra_data),
415 sizeof(struct ms_extra_data_register)))
416 return 0;
417
418 msb->state = MSB_RP_RECEIVE_OOB_READ;
419 return 0;
420
421 case MSB_RP_RECEIVE_OOB_READ:
422 msb->regs.extra_data =
423 *(struct ms_extra_data_register *) mrq->data;
424 msb->state = MSB_RP_SEND_READ_DATA;
425 /* fallthrough */
426
427 case MSB_RP_SEND_READ_DATA:
428 /* Skip that state if we only read the oob */
429 if (msb->regs.param.cp == MEMSTICK_CP_EXTRA) {
430 msb->state = MSB_RP_RECEIVE_READ_DATA;
431 goto again;
432 }
433
434 sg_init_table(sg, ARRAY_SIZE(sg));
435 msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
436 msb->current_sg_offset,
437 msb->page_size);
438
439 memstick_init_req_sg(mrq, MS_TPC_READ_LONG_DATA, sg);
440 msb->state = MSB_RP_RECEIVE_READ_DATA;
441 return 0;
442
443 case MSB_RP_RECEIVE_READ_DATA:
444 if (!(msb->regs.status.interrupt & MEMSTICK_INT_ERR)) {
445 msb->current_sg_offset += msb->page_size;
446 return msb_exit_state_machine(msb, 0);
447 }
448
449 if (msb->regs.status.status1 & MEMSTICK_UNCORR_ERROR) {
450 dbg("read_page: uncorrectable error");
451 return msb_exit_state_machine(msb, -EBADMSG);
452 }
453
454 if (msb->regs.status.status1 & MEMSTICK_CORR_ERROR) {
455 dbg("read_page: correctable error");
456 msb->current_sg_offset += msb->page_size;
457 return msb_exit_state_machine(msb, -EUCLEAN);
458 } else {
459 dbg("read_page: INT error, but no status error bits");
460 return msb_exit_state_machine(msb, -EIO);
461 }
462 }
463
464 BUG();
465}
466
467/*
468 * Handler of writes of exactly one block.
469 * Takes address from msb->regs.param.
470 * Writes same extra data to blocks, also taken
471 * from msb->regs.extra
472 * Returns -EBADMSG if write fails due to uncorrectable error, or -EIO if
473 * device refuses to take the command or something else
474 */
475static int h_msb_write_block(struct memstick_dev *card,
476 struct memstick_request **out_mrq)
477{
478 struct msb_data *msb = memstick_get_drvdata(card);
479 struct memstick_request *mrq = *out_mrq = &card->current_mrq;
480 struct scatterlist sg[2];
481 u8 intreg, command;
482
483 if (mrq->error)
484 return msb_exit_state_machine(msb, mrq->error);
485
486again:
487 switch (msb->state) {
488
489 /* HACK: Jmicon handling of TPCs between 8 and
490 * sizeof(memstick_request.data) is broken due to hardware
491 * bug in PIO mode that is used for these TPCs
492 * Therefore split the write
493 */
494
495 case MSB_WB_SEND_WRITE_PARAMS:
496 if (!msb_write_regs(msb,
497 offsetof(struct ms_register, param),
498 sizeof(struct ms_param_register),
499 &msb->regs.param))
500 return 0;
501
502 msb->state = MSB_WB_SEND_WRITE_OOB;
503 return 0;
504
505 case MSB_WB_SEND_WRITE_OOB:
506 if (!msb_write_regs(msb,
507 offsetof(struct ms_register, extra_data),
508 sizeof(struct ms_extra_data_register),
509 &msb->regs.extra_data))
510 return 0;
511 msb->state = MSB_WB_SEND_WRITE_COMMAND;
512 return 0;
513
514
515 case MSB_WB_SEND_WRITE_COMMAND:
516 command = MS_CMD_BLOCK_WRITE;
517 memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
518 msb->state = MSB_WB_SEND_INT_REQ;
519 return 0;
520
521 case MSB_WB_SEND_INT_REQ:
522 msb->state = MSB_WB_RECEIVE_INT_REQ;
523 if (msb_read_int_reg(msb, -1))
524 return 0;
525 /* fallthrough */
526
527 case MSB_WB_RECEIVE_INT_REQ:
528 intreg = mrq->data[0];
529 msb->regs.status.interrupt = intreg;
530
531 /* errors mean out of here, and fast... */
532 if (intreg & (MEMSTICK_INT_CMDNAK))
533 return msb_exit_state_machine(msb, -EIO);
534
535 if (intreg & MEMSTICK_INT_ERR)
536 return msb_exit_state_machine(msb, -EBADMSG);
537
538
539 /* for last page we need to poll CED */
540 if (msb->current_page == msb->pages_in_block) {
541 if (intreg & MEMSTICK_INT_CED)
542 return msb_exit_state_machine(msb, 0);
543 msb->state = MSB_WB_SEND_INT_REQ;
544 goto again;
545
546 }
547
548 /* for non-last page we need BREQ before writing next chunk */
549 if (!(intreg & MEMSTICK_INT_BREQ)) {
550 msb->state = MSB_WB_SEND_INT_REQ;
551 goto again;
552 }
553
554 msb->int_polling = false;
555 msb->state = MSB_WB_SEND_WRITE_DATA;
556 /* fallthrough */
557
558 case MSB_WB_SEND_WRITE_DATA:
559 sg_init_table(sg, ARRAY_SIZE(sg));
560
561 if (msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
562 msb->current_sg_offset,
563 msb->page_size) < msb->page_size)
564 return msb_exit_state_machine(msb, -EIO);
565
566 memstick_init_req_sg(mrq, MS_TPC_WRITE_LONG_DATA, sg);
567 mrq->need_card_int = 1;
568 msb->state = MSB_WB_RECEIVE_WRITE_CONFIRMATION;
569 return 0;
570
571 case MSB_WB_RECEIVE_WRITE_CONFIRMATION:
572 msb->current_page++;
573 msb->current_sg_offset += msb->page_size;
574 msb->state = MSB_WB_SEND_INT_REQ;
575 goto again;
576 default:
577 BUG();
578 }
579
580 return 0;
581}
582
583/*
584 * This function is used to send simple IO requests to device that consist
585 * of register write + command
586 */
587static int h_msb_send_command(struct memstick_dev *card,
588 struct memstick_request **out_mrq)
589{
590 struct msb_data *msb = memstick_get_drvdata(card);
591 struct memstick_request *mrq = *out_mrq = &card->current_mrq;
592 u8 intreg;
593
594 if (mrq->error) {
595 dbg("send_command: unknown error");
596 return msb_exit_state_machine(msb, mrq->error);
597 }
598again:
599 switch (msb->state) {
600
601 /* HACK: see h_msb_write_block */
602 case MSB_SC_SEND_WRITE_PARAMS: /* write param register*/
603 if (!msb_write_regs(msb,
604 offsetof(struct ms_register, param),
605 sizeof(struct ms_param_register),
606 &msb->regs.param))
607 return 0;
608 msb->state = MSB_SC_SEND_WRITE_OOB;
609 return 0;
610
611 case MSB_SC_SEND_WRITE_OOB:
612 if (!msb->command_need_oob) {
613 msb->state = MSB_SC_SEND_COMMAND;
614 goto again;
615 }
616
617 if (!msb_write_regs(msb,
618 offsetof(struct ms_register, extra_data),
619 sizeof(struct ms_extra_data_register),
620 &msb->regs.extra_data))
621 return 0;
622
623 msb->state = MSB_SC_SEND_COMMAND;
624 return 0;
625
626 case MSB_SC_SEND_COMMAND:
627 memstick_init_req(mrq, MS_TPC_SET_CMD, &msb->command_value, 1);
628 msb->state = MSB_SC_SEND_INT_REQ;
629 return 0;
630
631 case MSB_SC_SEND_INT_REQ:
632 msb->state = MSB_SC_RECEIVE_INT_REQ;
633 if (msb_read_int_reg(msb, -1))
634 return 0;
635 /* fallthrough */
636
637 case MSB_SC_RECEIVE_INT_REQ:
638 intreg = mrq->data[0];
639
640 if (intreg & MEMSTICK_INT_CMDNAK)
641 return msb_exit_state_machine(msb, -EIO);
642 if (intreg & MEMSTICK_INT_ERR)
643 return msb_exit_state_machine(msb, -EBADMSG);
644
645 if (!(intreg & MEMSTICK_INT_CED)) {
646 msb->state = MSB_SC_SEND_INT_REQ;
647 goto again;
648 }
649
650 return msb_exit_state_machine(msb, 0);
651 }
652
653 BUG();
654}
655
656/* Small handler for card reset */
657static int h_msb_reset(struct memstick_dev *card,
658 struct memstick_request **out_mrq)
659{
660 u8 command = MS_CMD_RESET;
661 struct msb_data *msb = memstick_get_drvdata(card);
662 struct memstick_request *mrq = *out_mrq = &card->current_mrq;
663
664 if (mrq->error)
665 return msb_exit_state_machine(msb, mrq->error);
666
667 switch (msb->state) {
668 case MSB_RS_SEND:
669 memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
670 mrq->need_card_int = 0;
671 msb->state = MSB_RS_CONFIRM;
672 return 0;
673 case MSB_RS_CONFIRM:
674 return msb_exit_state_machine(msb, 0);
675 }
676 BUG();
677}
678
679/* This handler is used to do serial->parallel switch */
680static int h_msb_parallel_switch(struct memstick_dev *card,
681 struct memstick_request **out_mrq)
682{
683 struct msb_data *msb = memstick_get_drvdata(card);
684 struct memstick_request *mrq = *out_mrq = &card->current_mrq;
685 struct memstick_host *host = card->host;
686
687 if (mrq->error) {
688 dbg("parallel_switch: error");
689 msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
690 return msb_exit_state_machine(msb, mrq->error);
691 }
692
693 switch (msb->state) {
694 case MSB_PS_SEND_SWITCH_COMMAND:
695 /* Set the parallel interface on memstick side */
696 msb->regs.param.system |= MEMSTICK_SYS_PAM;
697
698 if (!msb_write_regs(msb,
699 offsetof(struct ms_register, param),
700 1,
701 (unsigned char *)&msb->regs.param))
702 return 0;
703
704 msb->state = MSB_PS_SWICH_HOST;
705 return 0;
706
707 case MSB_PS_SWICH_HOST:
708 /* Set parallel interface on our side + send a dummy request
709 to see if card responds */
710 host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_PAR4);
711 memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
712 msb->state = MSB_PS_CONFIRM;
713 return 0;
714
715 case MSB_PS_CONFIRM:
716 return msb_exit_state_machine(msb, 0);
717 }
718
719 BUG();
720}
721
722static int msb_switch_to_parallel(struct msb_data *msb);
723
724/* Reset the card, to guard against hw errors beeing treated as bad blocks */
725static int msb_reset(struct msb_data *msb, bool full)
726{
727
728 bool was_parallel = msb->regs.param.system & MEMSTICK_SYS_PAM;
729 struct memstick_dev *card = msb->card;
730 struct memstick_host *host = card->host;
731 int error;
732
733 /* Reset the card */
734 msb->regs.param.system = MEMSTICK_SYS_BAMD;
735
736 if (full) {
737 error = host->set_param(host,
738 MEMSTICK_POWER, MEMSTICK_POWER_OFF);
739 if (error)
740 goto out_error;
741
742 msb_invalidate_reg_window(msb);
743
744 error = host->set_param(host,
745 MEMSTICK_POWER, MEMSTICK_POWER_ON);
746 if (error)
747 goto out_error;
748
749 error = host->set_param(host,
750 MEMSTICK_INTERFACE, MEMSTICK_SERIAL);
751 if (error) {
752out_error:
753 dbg("Failed to reset the host controller");
754 msb->read_only = true;
755 return -EFAULT;
756 }
757 }
758
759 error = msb_run_state_machine(msb, h_msb_reset);
760 if (error) {
761 dbg("Failed to reset the card");
762 msb->read_only = true;
763 return -ENODEV;
764 }
765
766 /* Set parallel mode */
767 if (was_parallel)
768 msb_switch_to_parallel(msb);
769 return 0;
770}
771
772/* Attempts to switch interface to parallel mode */
773static int msb_switch_to_parallel(struct msb_data *msb)
774{
775 int error;
776
777 error = msb_run_state_machine(msb, h_msb_parallel_switch);
778 if (error) {
779 pr_err("Switch to parallel failed");
780 msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
781 msb_reset(msb, true);
782 return -EFAULT;
783 }
784
785 msb->caps |= MEMSTICK_CAP_AUTO_GET_INT;
786 return 0;
787}
788
789/* Changes overwrite flag on a page */
790static int msb_set_overwrite_flag(struct msb_data *msb,
791 u16 pba, u8 page, u8 flag)
792{
793 if (msb->read_only)
794 return -EROFS;
795
796 msb->regs.param.block_address = cpu_to_be16(pba);
797 msb->regs.param.page_address = page;
798 msb->regs.param.cp = MEMSTICK_CP_OVERWRITE;
799 msb->regs.extra_data.overwrite_flag = flag;
800 msb->command_value = MS_CMD_BLOCK_WRITE;
801 msb->command_need_oob = true;
802
803 dbg_verbose("changing overwrite flag to %02x for sector %d, page %d",
804 flag, pba, page);
805 return msb_run_state_machine(msb, h_msb_send_command);
806}
807
808static int msb_mark_bad(struct msb_data *msb, int pba)
809{
810 pr_notice("marking pba %d as bad", pba);
811 msb_reset(msb, true);
812 return msb_set_overwrite_flag(
813 msb, pba, 0, 0xFF & ~MEMSTICK_OVERWRITE_BKST);
814}
815
816static int msb_mark_page_bad(struct msb_data *msb, int pba, int page)
817{
818 dbg("marking page %d of pba %d as bad", page, pba);
819 msb_reset(msb, true);
820 return msb_set_overwrite_flag(msb,
821 pba, page, ~MEMSTICK_OVERWRITE_PGST0);
822}
823
824/* Erases one physical block */
825static int msb_erase_block(struct msb_data *msb, u16 pba)
826{
827 int error, try;
828 if (msb->read_only)
829 return -EROFS;
830
831 dbg_verbose("erasing pba %d", pba);
832
833 for (try = 1; try < 3; try++) {
834 msb->regs.param.block_address = cpu_to_be16(pba);
835 msb->regs.param.page_address = 0;
836 msb->regs.param.cp = MEMSTICK_CP_BLOCK;
837 msb->command_value = MS_CMD_BLOCK_ERASE;
838 msb->command_need_oob = false;
839
840
841 error = msb_run_state_machine(msb, h_msb_send_command);
842 if (!error || msb_reset(msb, true))
843 break;
844 }
845
846 if (error) {
847 pr_err("erase failed, marking pba %d as bad", pba);
848 msb_mark_bad(msb, pba);
849 }
850
851 dbg_verbose("erase success, marking pba %d as unused", pba);
852 msb_mark_block_unused(msb, pba);
853 __set_bit(pba, msb->erased_blocks_bitmap);
854 return error;
855}
856
857/* Reads one page from device */
858static int msb_read_page(struct msb_data *msb,
859 u16 pba, u8 page, struct ms_extra_data_register *extra,
860 struct scatterlist *sg, int offset)
861{
862 int try, error;
863
864 if (pba == MS_BLOCK_INVALID) {
865 unsigned long flags;
866 struct sg_mapping_iter miter;
867 size_t len = msb->page_size;
868
869 dbg_verbose("read unmapped sector. returning 0xFF");
870
871 local_irq_save(flags);
872 sg_miter_start(&miter, sg, sg_nents(sg),
873 SG_MITER_ATOMIC | SG_MITER_TO_SG);
874
875 while (sg_miter_next(&miter) && len > 0) {
876
877 int chunklen;
878
879 if (offset && offset >= miter.length) {
880 offset -= miter.length;
881 continue;
882 }
883
884 chunklen = min(miter.length - offset, len);
885 memset(miter.addr + offset, 0xFF, chunklen);
886 len -= chunklen;
887 offset = 0;
888 }
889
890 sg_miter_stop(&miter);
891 local_irq_restore(flags);
892
893 if (offset)
894 return -EFAULT;
895
896 if (extra)
897 memset(extra, 0xFF, sizeof(*extra));
898 return 0;
899 }
900
901 if (pba >= msb->block_count) {
902 pr_err("BUG: attempt to read beyond the end of the card at pba %d", pba);
903 return -EINVAL;
904 }
905
906 for (try = 1; try < 3; try++) {
907 msb->regs.param.block_address = cpu_to_be16(pba);
908 msb->regs.param.page_address = page;
909 msb->regs.param.cp = MEMSTICK_CP_PAGE;
910
911 msb->current_sg = sg;
912 msb->current_sg_offset = offset;
913 error = msb_run_state_machine(msb, h_msb_read_page);
914
915
916 if (error == -EUCLEAN) {
917 pr_notice("correctable error on pba %d, page %d",
918 pba, page);
919 error = 0;
920 }
921
922 if (!error && extra)
923 *extra = msb->regs.extra_data;
924
925 if (!error || msb_reset(msb, true))
926 break;
927
928 }
929
930 /* Mark bad pages */
931 if (error == -EBADMSG) {
932 pr_err("uncorrectable error on read of pba %d, page %d",
933 pba, page);
934
935 if (msb->regs.extra_data.overwrite_flag &
936 MEMSTICK_OVERWRITE_PGST0)
937 msb_mark_page_bad(msb, pba, page);
938 return -EBADMSG;
939 }
940
941 if (error)
942 pr_err("read of pba %d, page %d failed with error %d",
943 pba, page, error);
944 return error;
945}
946
947/* Reads oob of page only */
948static int msb_read_oob(struct msb_data *msb, u16 pba, u16 page,
949 struct ms_extra_data_register *extra)
950{
951 int error;
952
953 BUG_ON(!extra);
954 msb->regs.param.block_address = cpu_to_be16(pba);
955 msb->regs.param.page_address = page;
956 msb->regs.param.cp = MEMSTICK_CP_EXTRA;
957
958 if (pba > msb->block_count) {
959 pr_err("BUG: attempt to read beyond the end of card at pba %d", pba);
960 return -EINVAL;
961 }
962
963 error = msb_run_state_machine(msb, h_msb_read_page);
964 *extra = msb->regs.extra_data;
965
966 if (error == -EUCLEAN) {
967 pr_notice("correctable error on pba %d, page %d",
968 pba, page);
969 return 0;
970 }
971
972 return error;
973}
974
975/* Reads a block and compares it with data contained in scatterlist orig_sg */
976static int msb_verify_block(struct msb_data *msb, u16 pba,
977 struct scatterlist *orig_sg, int offset)
978{
979 struct scatterlist sg;
980 int page = 0, error;
981
982 sg_init_one(&sg, msb->block_buffer, msb->block_size);
983
984 while (page < msb->pages_in_block) {
985
986 error = msb_read_page(msb, pba, page,
987 NULL, &sg, page * msb->page_size);
988 if (error)
989 return error;
990 page++;
991 }
992
993 if (msb_sg_compare_to_buffer(orig_sg, offset,
994 msb->block_buffer, msb->block_size))
995 return -EIO;
996 return 0;
997}
998
999/* Writes exectly one block + oob */
1000static int msb_write_block(struct msb_data *msb,
1001 u16 pba, u32 lba, struct scatterlist *sg, int offset)
1002{
1003 int error, current_try = 1;
1004 BUG_ON(sg->length < msb->page_size);
1005
1006 if (msb->read_only)
1007 return -EROFS;
1008
1009 if (pba == MS_BLOCK_INVALID) {
1010 pr_err(
1011 "BUG: write: attempt to write MS_BLOCK_INVALID block");
1012 return -EINVAL;
1013 }
1014
1015 if (pba >= msb->block_count || lba >= msb->logical_block_count) {
1016 pr_err(
1017 "BUG: write: attempt to write beyond the end of device");
1018 return -EINVAL;
1019 }
1020
1021 if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
1022 pr_err("BUG: write: lba zone mismatch");
1023 return -EINVAL;
1024 }
1025
1026 if (pba == msb->boot_block_locations[0] ||
1027 pba == msb->boot_block_locations[1]) {
1028 pr_err("BUG: write: attempt to write to boot blocks!");
1029 return -EINVAL;
1030 }
1031
1032 while (1) {
1033
1034 if (msb->read_only)
1035 return -EROFS;
1036
1037 msb->regs.param.cp = MEMSTICK_CP_BLOCK;
1038 msb->regs.param.page_address = 0;
1039 msb->regs.param.block_address = cpu_to_be16(pba);
1040
1041 msb->regs.extra_data.management_flag = 0xFF;
1042 msb->regs.extra_data.overwrite_flag = 0xF8;
1043 msb->regs.extra_data.logical_address = cpu_to_be16(lba);
1044
1045 msb->current_sg = sg;
1046 msb->current_sg_offset = offset;
1047 msb->current_page = 0;
1048
1049 error = msb_run_state_machine(msb, h_msb_write_block);
1050
1051 /* Sector we just wrote to is assumed erased since its pba
1052 was erased. If it wasn't erased, write will succeed
1053 and will just clear the bits that were set in the block
1054 thus test that what we have written,
1055 matches what we expect.
1056 We do trust the blocks that we erased */
1057 if (!error && (verify_writes ||
1058 !test_bit(pba, msb->erased_blocks_bitmap)))
1059 error = msb_verify_block(msb, pba, sg, offset);
1060
1061 if (!error)
1062 break;
1063
1064 if (current_try > 1 || msb_reset(msb, true))
1065 break;
1066
1067 pr_err("write failed, trying to erase the pba %d", pba);
1068 error = msb_erase_block(msb, pba);
1069 if (error)
1070 break;
1071
1072 current_try++;
1073 }
1074 return error;
1075}
1076
1077/* Finds a free block for write replacement */
1078static u16 msb_get_free_block(struct msb_data *msb, int zone)
1079{
1080 u16 pos;
1081 int pba = zone * MS_BLOCKS_IN_ZONE;
1082 int i;
1083
1084 get_random_bytes(&pos, sizeof(pos));
1085
1086 if (!msb->free_block_count[zone]) {
1087 pr_err("NO free blocks in the zone %d, to use for a write, (media is WORN out) switching to RO mode", zone);
1088 msb->read_only = true;
1089 return MS_BLOCK_INVALID;
1090 }
1091
1092 pos %= msb->free_block_count[zone];
1093
1094 dbg_verbose("have %d choices for a free block, selected randomally: %d",
1095 msb->free_block_count[zone], pos);
1096
1097 pba = find_next_zero_bit(msb->used_blocks_bitmap,
1098 msb->block_count, pba);
1099 for (i = 0; i < pos; ++i)
1100 pba = find_next_zero_bit(msb->used_blocks_bitmap,
1101 msb->block_count, pba + 1);
1102
1103 dbg_verbose("result of the free blocks scan: pba %d", pba);
1104
1105 if (pba == msb->block_count || (msb_get_zone_from_pba(pba)) != zone) {
1106 pr_err("BUG: cant get a free block");
1107 msb->read_only = true;
1108 return MS_BLOCK_INVALID;
1109 }
1110
1111 msb_mark_block_used(msb, pba);
1112 return pba;
1113}
1114
1115static int msb_update_block(struct msb_data *msb, u16 lba,
1116 struct scatterlist *sg, int offset)
1117{
1118 u16 pba, new_pba;
1119 int error, try;
1120
1121 pba = msb->lba_to_pba_table[lba];
1122 dbg_verbose("start of a block update at lba %d, pba %d", lba, pba);
1123
1124 if (pba != MS_BLOCK_INVALID) {
1125 dbg_verbose("setting the update flag on the block");
1126 msb_set_overwrite_flag(msb, pba, 0,
1127 0xFF & ~MEMSTICK_OVERWRITE_UDST);
1128 }
1129
1130 for (try = 0; try < 3; try++) {
1131 new_pba = msb_get_free_block(msb,
1132 msb_get_zone_from_lba(lba));
1133
1134 if (new_pba == MS_BLOCK_INVALID) {
1135 error = -EIO;
1136 goto out;
1137 }
1138
1139 dbg_verbose("block update: writing updated block to the pba %d",
1140 new_pba);
1141 error = msb_write_block(msb, new_pba, lba, sg, offset);
1142 if (error == -EBADMSG) {
1143 msb_mark_bad(msb, new_pba);
1144 continue;
1145 }
1146
1147 if (error)
1148 goto out;
1149
1150 dbg_verbose("block update: erasing the old block");
1151 msb_erase_block(msb, pba);
1152 msb->lba_to_pba_table[lba] = new_pba;
1153 return 0;
1154 }
1155out:
1156 if (error) {
1157 pr_err("block update error after %d tries, switching to r/o mode", try);
1158 msb->read_only = true;
1159 }
1160 return error;
1161}
1162
1163/* Converts endiannes in the boot block for easy use */
1164static void msb_fix_boot_page_endianness(struct ms_boot_page *p)
1165{
1166 p->header.block_id = be16_to_cpu(p->header.block_id);
1167 p->header.format_reserved = be16_to_cpu(p->header.format_reserved);
1168 p->entry.disabled_block.start_addr
1169 = be32_to_cpu(p->entry.disabled_block.start_addr);
1170 p->entry.disabled_block.data_size
1171 = be32_to_cpu(p->entry.disabled_block.data_size);
1172 p->entry.cis_idi.start_addr
1173 = be32_to_cpu(p->entry.cis_idi.start_addr);
1174 p->entry.cis_idi.data_size
1175 = be32_to_cpu(p->entry.cis_idi.data_size);
1176 p->attr.block_size = be16_to_cpu(p->attr.block_size);
1177 p->attr.number_of_blocks = be16_to_cpu(p->attr.number_of_blocks);
1178 p->attr.number_of_effective_blocks
1179 = be16_to_cpu(p->attr.number_of_effective_blocks);
1180 p->attr.page_size = be16_to_cpu(p->attr.page_size);
1181 p->attr.memory_manufacturer_code
1182 = be16_to_cpu(p->attr.memory_manufacturer_code);
1183 p->attr.memory_device_code = be16_to_cpu(p->attr.memory_device_code);
1184 p->attr.implemented_capacity
1185 = be16_to_cpu(p->attr.implemented_capacity);
1186 p->attr.controller_number = be16_to_cpu(p->attr.controller_number);
1187 p->attr.controller_function = be16_to_cpu(p->attr.controller_function);
1188}
1189
1190static int msb_read_boot_blocks(struct msb_data *msb)
1191{
1192 int pba = 0;
1193 struct scatterlist sg;
1194 struct ms_extra_data_register extra;
1195 struct ms_boot_page *page;
1196
1197 msb->boot_block_locations[0] = MS_BLOCK_INVALID;
1198 msb->boot_block_locations[1] = MS_BLOCK_INVALID;
1199 msb->boot_block_count = 0;
1200
1201 dbg_verbose("Start of a scan for the boot blocks");
1202
1203 if (!msb->boot_page) {
1204 page = kmalloc(sizeof(struct ms_boot_page)*2, GFP_KERNEL);
1205 if (!page)
1206 return -ENOMEM;
1207
1208 msb->boot_page = page;
1209 } else
1210 page = msb->boot_page;
1211
1212 msb->block_count = MS_BLOCK_MAX_BOOT_ADDR;
1213
1214 for (pba = 0; pba < MS_BLOCK_MAX_BOOT_ADDR; pba++) {
1215
1216 sg_init_one(&sg, page, sizeof(*page));
1217 if (msb_read_page(msb, pba, 0, &extra, &sg, 0)) {
1218 dbg("boot scan: can't read pba %d", pba);
1219 continue;
1220 }
1221
1222 if (extra.management_flag & MEMSTICK_MANAGEMENT_SYSFLG) {
1223 dbg("management flag doesn't indicate boot block %d",
1224 pba);
1225 continue;
1226 }
1227
1228 if (be16_to_cpu(page->header.block_id) != MS_BLOCK_BOOT_ID) {
1229 dbg("the pba at %d doesn' contain boot block ID", pba);
1230 continue;
1231 }
1232
1233 msb_fix_boot_page_endianness(page);
1234 msb->boot_block_locations[msb->boot_block_count] = pba;
1235
1236 page++;
1237 msb->boot_block_count++;
1238
1239 if (msb->boot_block_count == 2)
1240 break;
1241 }
1242
1243 if (!msb->boot_block_count) {
1244 pr_err("media doesn't contain master page, aborting");
1245 return -EIO;
1246 }
1247
1248 dbg_verbose("End of scan for boot blocks");
1249 return 0;
1250}
1251
1252static int msb_read_bad_block_table(struct msb_data *msb, int block_nr)
1253{
1254 struct ms_boot_page *boot_block;
1255 struct scatterlist sg;
1256 u16 *buffer = NULL;
1257 int offset = 0;
1258 int i, error = 0;
1259 int data_size, data_offset, page, page_offset, size_to_read;
1260 u16 pba;
1261
1262 BUG_ON(block_nr > 1);
1263 boot_block = &msb->boot_page[block_nr];
1264 pba = msb->boot_block_locations[block_nr];
1265
1266 if (msb->boot_block_locations[block_nr] == MS_BLOCK_INVALID)
1267 return -EINVAL;
1268
1269 data_size = boot_block->entry.disabled_block.data_size;
1270 data_offset = sizeof(struct ms_boot_page) +
1271 boot_block->entry.disabled_block.start_addr;
1272 if (!data_size)
1273 return 0;
1274
1275 page = data_offset / msb->page_size;
1276 page_offset = data_offset % msb->page_size;
1277 size_to_read =
1278 DIV_ROUND_UP(data_size + page_offset, msb->page_size) *
1279 msb->page_size;
1280
1281 dbg("reading bad block of boot block at pba %d, offset %d len %d",
1282 pba, data_offset, data_size);
1283
1284 buffer = kzalloc(size_to_read, GFP_KERNEL);
1285 if (!buffer)
1286 return -ENOMEM;
1287
1288 /* Read the buffer */
1289 sg_init_one(&sg, buffer, size_to_read);
1290
1291 while (offset < size_to_read) {
1292 error = msb_read_page(msb, pba, page, NULL, &sg, offset);
1293 if (error)
1294 goto out;
1295
1296 page++;
1297 offset += msb->page_size;
1298
1299 if (page == msb->pages_in_block) {
1300 pr_err(
1301 "bad block table extends beyond the boot block");
1302 break;
1303 }
1304 }
1305
1306 /* Process the bad block table */
1307 for (i = page_offset; i < data_size / sizeof(u16); i++) {
1308
1309 u16 bad_block = be16_to_cpu(buffer[i]);
1310
1311 if (bad_block >= msb->block_count) {
1312 dbg("bad block table contains invalid block %d",
1313 bad_block);
1314 continue;
1315 }
1316
1317 if (test_bit(bad_block, msb->used_blocks_bitmap)) {
1318 dbg("duplicate bad block %d in the table",
1319 bad_block);
1320 continue;
1321 }
1322
1323 dbg("block %d is marked as factory bad", bad_block);
1324 msb_mark_block_used(msb, bad_block);
1325 }
1326out:
1327 kfree(buffer);
1328 return error;
1329}
1330
1331static int msb_ftl_initialize(struct msb_data *msb)
1332{
1333 int i;
1334
1335 if (msb->ftl_initialized)
1336 return 0;
1337
1338 msb->zone_count = msb->block_count / MS_BLOCKS_IN_ZONE;
1339 msb->logical_block_count = msb->zone_count * 496 - 2;
1340
1341 msb->used_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
1342 msb->erased_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
1343 msb->lba_to_pba_table =
1344 kmalloc(msb->logical_block_count * sizeof(u16), GFP_KERNEL);
1345
1346 if (!msb->used_blocks_bitmap || !msb->lba_to_pba_table ||
1347 !msb->erased_blocks_bitmap) {
1348 kfree(msb->used_blocks_bitmap);
1349 kfree(msb->lba_to_pba_table);
1350 kfree(msb->erased_blocks_bitmap);
1351 return -ENOMEM;
1352 }
1353
1354 for (i = 0; i < msb->zone_count; i++)
1355 msb->free_block_count[i] = MS_BLOCKS_IN_ZONE;
1356
1357 memset(msb->lba_to_pba_table, MS_BLOCK_INVALID,
1358 msb->logical_block_count * sizeof(u16));
1359
1360 dbg("initial FTL tables created. Zone count = %d, Logical block count = %d",
1361 msb->zone_count, msb->logical_block_count);
1362
1363 msb->ftl_initialized = true;
1364 return 0;
1365}
1366
1367static int msb_ftl_scan(struct msb_data *msb)
1368{
1369 u16 pba, lba, other_block;
1370 u8 overwrite_flag, management_flag, other_overwrite_flag;
1371 int error;
1372 struct ms_extra_data_register extra;
1373 u8 *overwrite_flags = kzalloc(msb->block_count, GFP_KERNEL);
1374
1375 if (!overwrite_flags)
1376 return -ENOMEM;
1377
1378 dbg("Start of media scanning");
1379 for (pba = 0; pba < msb->block_count; pba++) {
1380
1381 if (pba == msb->boot_block_locations[0] ||
1382 pba == msb->boot_block_locations[1]) {
1383 dbg_verbose("pba %05d -> [boot block]", pba);
1384 msb_mark_block_used(msb, pba);
1385 continue;
1386 }
1387
1388 if (test_bit(pba, msb->used_blocks_bitmap)) {
1389 dbg_verbose("pba %05d -> [factory bad]", pba);
1390 continue;
1391 }
1392
1393 memset(&extra, 0, sizeof(extra));
1394 error = msb_read_oob(msb, pba, 0, &extra);
1395
1396 /* can't trust the page if we can't read the oob */
1397 if (error == -EBADMSG) {
1398 pr_notice(
1399 "oob of pba %d damaged, will try to erase it", pba);
1400 msb_mark_block_used(msb, pba);
1401 msb_erase_block(msb, pba);
1402 continue;
1403 } else if (error) {
1404 pr_err("unknown error %d on read of oob of pba %d - aborting",
1405 error, pba);
1406
1407 kfree(overwrite_flags);
1408 return error;
1409 }
1410
1411 lba = be16_to_cpu(extra.logical_address);
1412 management_flag = extra.management_flag;
1413 overwrite_flag = extra.overwrite_flag;
1414 overwrite_flags[pba] = overwrite_flag;
1415
1416 /* Skip bad blocks */
1417 if (!(overwrite_flag & MEMSTICK_OVERWRITE_BKST)) {
1418 dbg("pba %05d -> [BAD]", pba);
1419 msb_mark_block_used(msb, pba);
1420 continue;
1421 }
1422
1423 /* Skip system/drm blocks */
1424 if ((management_flag & MEMSTICK_MANAGEMENT_FLAG_NORMAL) !=
1425 MEMSTICK_MANAGEMENT_FLAG_NORMAL) {
1426 dbg("pba %05d -> [reserved management flag %02x]",
1427 pba, management_flag);
1428 msb_mark_block_used(msb, pba);
1429 continue;
1430 }
1431
1432 /* Erase temporary tables */
1433 if (!(management_flag & MEMSTICK_MANAGEMENT_ATFLG)) {
1434 dbg("pba %05d -> [temp table] - will erase", pba);
1435
1436 msb_mark_block_used(msb, pba);
1437 msb_erase_block(msb, pba);
1438 continue;
1439 }
1440
1441 if (lba == MS_BLOCK_INVALID) {
1442 dbg_verbose("pba %05d -> [free]", pba);
1443 continue;
1444 }
1445
1446 msb_mark_block_used(msb, pba);
1447
1448 /* Block has LBA not according to zoning*/
1449 if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
1450 pr_notice("pba %05d -> [bad lba %05d] - will erase",
1451 pba, lba);
1452 msb_erase_block(msb, pba);
1453 continue;
1454 }
1455
1456 /* No collisions - great */
1457 if (msb->lba_to_pba_table[lba] == MS_BLOCK_INVALID) {
1458 dbg_verbose("pba %05d -> [lba %05d]", pba, lba);
1459 msb->lba_to_pba_table[lba] = pba;
1460 continue;
1461 }
1462
1463 other_block = msb->lba_to_pba_table[lba];
1464 other_overwrite_flag = overwrite_flags[other_block];
1465
1466 pr_notice("Collision between pba %d and pba %d",
1467 pba, other_block);
1468
1469 if (!(overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
1470 pr_notice("pba %d is marked as stable, use it", pba);
1471 msb_erase_block(msb, other_block);
1472 msb->lba_to_pba_table[lba] = pba;
1473 continue;
1474 }
1475
1476 if (!(other_overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
1477 pr_notice("pba %d is marked as stable, use it",
1478 other_block);
1479 msb_erase_block(msb, pba);
1480 continue;
1481 }
1482
1483 pr_notice("collision between blocks %d and %d, without stable flag set on both, erasing pba %d",
1484 pba, other_block, other_block);
1485
1486 msb_erase_block(msb, other_block);
1487 msb->lba_to_pba_table[lba] = pba;
1488 }
1489
1490 dbg("End of media scanning");
1491 kfree(overwrite_flags);
1492 return 0;
1493}
1494
1495static void msb_cache_flush_timer(unsigned long data)
1496{
1497 struct msb_data *msb = (struct msb_data *)data;
1498 msb->need_flush_cache = true;
1499 queue_work(msb->io_queue, &msb->io_work);
1500}
1501
1502
1503static void msb_cache_discard(struct msb_data *msb)
1504{
1505 if (msb->cache_block_lba == MS_BLOCK_INVALID)
1506 return;
1507
1508 del_timer_sync(&msb->cache_flush_timer);
1509
1510 dbg_verbose("Discarding the write cache");
1511 msb->cache_block_lba = MS_BLOCK_INVALID;
1512 bitmap_zero(&msb->valid_cache_bitmap, msb->pages_in_block);
1513}
1514
1515static int msb_cache_init(struct msb_data *msb)
1516{
1517 setup_timer(&msb->cache_flush_timer, msb_cache_flush_timer,
1518 (unsigned long)msb);
1519
1520 if (!msb->cache)
1521 msb->cache = kzalloc(msb->block_size, GFP_KERNEL);
1522 if (!msb->cache)
1523 return -ENOMEM;
1524
1525 msb_cache_discard(msb);
1526 return 0;
1527}
1528
1529static int msb_cache_flush(struct msb_data *msb)
1530{
1531 struct scatterlist sg;
1532 struct ms_extra_data_register extra;
1533 int page, offset, error;
1534 u16 pba, lba;
1535
1536 if (msb->read_only)
1537 return -EROFS;
1538
1539 if (msb->cache_block_lba == MS_BLOCK_INVALID)
1540 return 0;
1541
1542 lba = msb->cache_block_lba;
1543 pba = msb->lba_to_pba_table[lba];
1544
1545 dbg_verbose("Flushing the write cache of pba %d (LBA %d)",
1546 pba, msb->cache_block_lba);
1547
1548 sg_init_one(&sg, msb->cache , msb->block_size);
1549
1550 /* Read all missing pages in cache */
1551 for (page = 0; page < msb->pages_in_block; page++) {
1552
1553 if (test_bit(page, &msb->valid_cache_bitmap))
1554 continue;
1555
1556 offset = page * msb->page_size;
1557
1558 dbg_verbose("reading non-present sector %d of cache block %d",
1559 page, lba);
1560 error = msb_read_page(msb, pba, page, &extra, &sg, offset);
1561
1562 /* Bad pages are copied with 00 page status */
1563 if (error == -EBADMSG) {
1564 pr_err("read error on sector %d, contents probably damaged", page);
1565 continue;
1566 }
1567
1568 if (error)
1569 return error;
1570
1571 if ((extra.overwrite_flag & MEMSTICK_OV_PG_NORMAL) !=
1572 MEMSTICK_OV_PG_NORMAL) {
1573 dbg("page %d is marked as bad", page);
1574 continue;
1575 }
1576
1577 set_bit(page, &msb->valid_cache_bitmap);
1578 }
1579
1580 /* Write the cache now */
1581 error = msb_update_block(msb, msb->cache_block_lba, &sg, 0);
1582 pba = msb->lba_to_pba_table[msb->cache_block_lba];
1583
1584 /* Mark invalid pages */
1585 if (!error) {
1586 for (page = 0; page < msb->pages_in_block; page++) {
1587
1588 if (test_bit(page, &msb->valid_cache_bitmap))
1589 continue;
1590
1591 dbg("marking page %d as containing damaged data",
1592 page);
1593 msb_set_overwrite_flag(msb,
1594 pba , page, 0xFF & ~MEMSTICK_OV_PG_NORMAL);
1595 }
1596 }
1597
1598 msb_cache_discard(msb);
1599 return error;
1600}
1601
1602static int msb_cache_write(struct msb_data *msb, int lba,
1603 int page, bool add_to_cache_only, struct scatterlist *sg, int offset)
1604{
1605 int error;
1606 struct scatterlist sg_tmp[10];
1607
1608 if (msb->read_only)
1609 return -EROFS;
1610
1611 if (msb->cache_block_lba == MS_BLOCK_INVALID ||
1612 lba != msb->cache_block_lba)
1613 if (add_to_cache_only)
1614 return 0;
1615
1616 /* If we need to write different block */
1617 if (msb->cache_block_lba != MS_BLOCK_INVALID &&
1618 lba != msb->cache_block_lba) {
1619 dbg_verbose("first flush the cache");
1620 error = msb_cache_flush(msb);
1621 if (error)
1622 return error;
1623 }
1624
1625 if (msb->cache_block_lba == MS_BLOCK_INVALID) {
1626 msb->cache_block_lba = lba;
1627 mod_timer(&msb->cache_flush_timer,
1628 jiffies + msecs_to_jiffies(cache_flush_timeout));
1629 }
1630
1631 dbg_verbose("Write of LBA %d page %d to cache ", lba, page);
1632
1633 sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
1634 msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp), offset, msb->page_size);
1635
1636 sg_copy_to_buffer(sg_tmp, sg_nents(sg_tmp),
1637 msb->cache + page * msb->page_size, msb->page_size);
1638
1639 set_bit(page, &msb->valid_cache_bitmap);
1640 return 0;
1641}
1642
1643static int msb_cache_read(struct msb_data *msb, int lba,
1644 int page, struct scatterlist *sg, int offset)
1645{
1646 int pba = msb->lba_to_pba_table[lba];
1647 struct scatterlist sg_tmp[10];
1648 int error = 0;
1649
1650 if (lba == msb->cache_block_lba &&
1651 test_bit(page, &msb->valid_cache_bitmap)) {
1652
1653 dbg_verbose("Read of LBA %d (pba %d) sector %d from cache",
1654 lba, pba, page);
1655
1656 sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
1657 msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp),
1658 offset, msb->page_size);
1659 sg_copy_from_buffer(sg_tmp, sg_nents(sg_tmp),
1660 msb->cache + msb->page_size * page,
1661 msb->page_size);
1662 } else {
1663 dbg_verbose("Read of LBA %d (pba %d) sector %d from device",
1664 lba, pba, page);
1665
1666 error = msb_read_page(msb, pba, page, NULL, sg, offset);
1667 if (error)
1668 return error;
1669
1670 msb_cache_write(msb, lba, page, true, sg, offset);
1671 }
1672 return error;
1673}
1674
1675/* Emulated geometry table
1676 * This table content isn't that importaint,
1677 * One could put here different values, providing that they still
1678 * cover whole disk.
1679 * 64 MB entry is what windows reports for my 64M memstick */
1680
1681static const struct chs_entry chs_table[] = {
1682/* size sectors cylynders heads */
1683 { 4, 16, 247, 2 },
1684 { 8, 16, 495, 2 },
1685 { 16, 16, 495, 4 },
1686 { 32, 16, 991, 4 },
1687 { 64, 16, 991, 8 },
1688 {128, 16, 991, 16 },
1689 { 0 }
1690};
1691
1692/* Load information about the card */
1693static int msb_init_card(struct memstick_dev *card)
1694{
1695 struct msb_data *msb = memstick_get_drvdata(card);
1696 struct memstick_host *host = card->host;
1697 struct ms_boot_page *boot_block;
1698 int error = 0, i, raw_size_in_megs;
1699
1700 msb->caps = 0;
1701
1702 if (card->id.class >= MEMSTICK_CLASS_ROM &&
1703 card->id.class <= MEMSTICK_CLASS_ROM)
1704 msb->read_only = true;
1705
1706 msb->state = -1;
1707 error = msb_reset(msb, false);
1708 if (error)
1709 return error;
1710
1711 /* Due to a bug in Jmicron driver written by Alex Dubov,
1712 its serial mode barely works,
1713 so we switch to parallel mode right away */
1714 if (host->caps & MEMSTICK_CAP_PAR4)
1715 msb_switch_to_parallel(msb);
1716
1717 msb->page_size = sizeof(struct ms_boot_page);
1718
1719 /* Read the boot page */
1720 error = msb_read_boot_blocks(msb);
1721 if (error)
1722 return -EIO;
1723
1724 boot_block = &msb->boot_page[0];
1725
1726 /* Save intersting attributes from boot page */
1727 msb->block_count = boot_block->attr.number_of_blocks;
1728 msb->page_size = boot_block->attr.page_size;
1729
1730 msb->pages_in_block = boot_block->attr.block_size * 2;
1731 msb->block_size = msb->page_size * msb->pages_in_block;
1732
1733 if (msb->page_size > PAGE_SIZE) {
1734 /* this isn't supported by linux at all, anyway*/
1735 dbg("device page %d size isn't supported", msb->page_size);
1736 return -EINVAL;
1737 }
1738
1739 msb->block_buffer = kzalloc(msb->block_size, GFP_KERNEL);
1740 if (!msb->block_buffer)
1741 return -ENOMEM;
1742
1743 raw_size_in_megs = (msb->block_size * msb->block_count) >> 20;
1744
1745 for (i = 0; chs_table[i].size; i++) {
1746
1747 if (chs_table[i].size != raw_size_in_megs)
1748 continue;
1749
1750 msb->geometry.cylinders = chs_table[i].cyl;
1751 msb->geometry.heads = chs_table[i].head;
1752 msb->geometry.sectors = chs_table[i].sec;
1753 break;
1754 }
1755
1756 if (boot_block->attr.transfer_supporting == 1)
1757 msb->caps |= MEMSTICK_CAP_PAR4;
1758
1759 if (boot_block->attr.device_type & 0x03)
1760 msb->read_only = true;
1761
1762 dbg("Total block count = %d", msb->block_count);
1763 dbg("Each block consists of %d pages", msb->pages_in_block);
1764 dbg("Page size = %d bytes", msb->page_size);
1765 dbg("Parallel mode supported: %d", !!(msb->caps & MEMSTICK_CAP_PAR4));
1766 dbg("Read only: %d", msb->read_only);
1767
1774 error = msb_cache_init(msb);
1775 if (error)
1776 return error;
1777
1778 error = msb_ftl_initialize(msb);
1779 if (error)
		return error;

	/* Read the bad block table */
	error = msb_read_bad_block_table(msb, 0);
	if (error && error != -ENOMEM) {
1787 dbg("failed to read bad block table from primary boot block, trying from backup");
1788 error = msb_read_bad_block_table(msb, 1);
1789 }
1790
1791 if (error)
1792 return error;
1793
1794 /* *drum roll* Scan the media */
1795 error = msb_ftl_scan(msb);
1796 if (error) {
		pr_err("Scan of media failed\n");
1798 return error;
1799 }
1800
	return 0;
}
1804
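/*
 * Write 'len' bytes starting at (lba, page). Spans that are block
 * aligned and cover a whole block are written directly with
 * msb_update_block() (discarding any stale cache contents first);
 * everything else goes page by page through the write-back cache.
 */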
static int msb_do_write_request(struct msb_data *msb, int lba,
	int page, struct scatterlist *sg, size_t len, int *successfully_written)
{
	int error = 0;
	off_t offset = 0;
	*successfully_written = 0;
1811
1812 while (offset < len) {
1813 if (page == 0 && len - offset >= msb->block_size) {
1814
1815 if (msb->cache_block_lba == lba)
1816 msb_cache_discard(msb);
1817
1818 dbg_verbose("Writing whole lba %d", lba);
1819 error = msb_update_block(msb, lba, sg, offset);
1820 if (error)
1821 return error;
1822
1823 offset += msb->block_size;
			*successfully_written += msb->block_size;
1825 lba++;
1826 continue;
1827 }
1828
1829 error = msb_cache_write(msb, lba, page, false, sg, offset);
1830 if (error)
1831 return error;
1832
1833 offset += msb->page_size;
		*successfully_written += msb->page_size;
1835
1836 page++;
1837 if (page == msb->pages_in_block) {
1838 page = 0;
1839 lba++;
1840 }
1841 }
1842 return 0;
1843}
1844
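/*
 * Read 'len' bytes starting at (lba, page), one page at a time through
 * the cache, advancing to the next logical block whenever the page
 * index wraps around.
 */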
static int msb_do_read_request(struct msb_data *msb, int lba,
		int page, struct scatterlist *sg, int len, int *successfully_read)
{
	int error = 0;
	int offset = 0;
	*successfully_read = 0;
1851
1852 while (offset < len) {
1853
1854 error = msb_cache_read(msb, lba, page, sg, offset);
1855 if (error)
1856 return error;
1857
1858 offset += msb->page_size;
		*successfully_read += msb->page_size;
1860
1861 page++;
1862 if (page == msb->pages_in_block) {
1863 page = 0;
1864 lba++;
1865 }
1866 }
1867 return 0;
1868}
1869
1870static void msb_io_work(struct work_struct *work)
1871{
1872 struct msb_data *msb = container_of(work, struct msb_data, io_work);
1873 int page, error, len;
1874 sector_t lba;
1875 unsigned long flags;
1876 struct scatterlist *sg = msb->prealloc_sg;
1877
1878 dbg_verbose("IO: work started");
1879
1880 while (1) {
1881 spin_lock_irqsave(&msb->q_lock, flags);
1882
1883 if (msb->need_flush_cache) {
1884 msb->need_flush_cache = false;
1885 spin_unlock_irqrestore(&msb->q_lock, flags);
1886 msb_cache_flush(msb);
1887 continue;
1888 }
1889
1890 if (!msb->req) {
1891 msb->req = blk_fetch_request(msb->queue);
1892 if (!msb->req) {
1893 dbg_verbose("IO: no more requests exiting");
1894 spin_unlock_irqrestore(&msb->q_lock, flags);
1895 return;
1896 }
1897 }
1898
1899 spin_unlock_irqrestore(&msb->q_lock, flags);
1900
1901 /* If card was removed meanwhile */
1902 if (!msb->req)
1903 return;
1904
1905 /* process the request */
1906 dbg_verbose("IO: processing new request");
1907 blk_rq_map_sg(msb->queue, msb->req, sg);
1908
1909 lba = blk_rq_pos(msb->req);
1910
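		/*
		 * blk_rq_pos() is in 512-byte sectors: convert it to a
		 * device page index first, then split that into a logical
		 * block number plus the page within that block. E.g. with
		 * 512-byte pages and 16 pages per block, sector 35 maps
		 * to lba 2, page 3.
		 */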
1911 sector_div(lba, msb->page_size / 512);
1912 page = sector_div(lba, msb->pages_in_block);
1913
1914 if (rq_data_dir(msb->req) == READ)
1915 error = msb_do_read_request(msb, lba, page, sg,
1916 blk_rq_bytes(msb->req), &len);
1917 else
1918 error = msb_do_write_request(msb, lba, page, sg,
1919 blk_rq_bytes(msb->req), &len);
1920
1921 spin_lock_irqsave(&msb->q_lock, flags);
1922
		if (len && !__blk_end_request(msb->req, 0, len))
			msb->req = NULL;
1926
1927 if (error && msb->req) {
1928 dbg_verbose("IO: ending one sector of the request with error");
1929 if (!__blk_end_request(msb->req, error, msb->page_size))
1930 msb->req = NULL;
1931 }
1932
1933 if (msb->req)
1934 dbg_verbose("IO: request still pending");
1935
1936 spin_unlock_irqrestore(&msb->q_lock, flags);
1937 }
1938}
1939
static DEFINE_IDR(msb_disk_idr); /* set of used disk numbers */
1941static DEFINE_MUTEX(msb_disk_lock); /* protects against races in open/release */
1942
1943static int msb_bd_open(struct block_device *bdev, fmode_t mode)
1944{
1945 struct gendisk *disk = bdev->bd_disk;
1946 struct msb_data *msb = disk->private_data;
1947
1948 dbg_verbose("block device open");
1949
1950 mutex_lock(&msb_disk_lock);
1951
1952 if (msb && msb->card)
1953 msb->usage_count++;
1954
1955 mutex_unlock(&msb_disk_lock);
1956 return 0;
1957}
1958
1959static void msb_data_clear(struct msb_data *msb)
1960{
1961 kfree(msb->boot_page);
1962 kfree(msb->used_blocks_bitmap);
1963 kfree(msb->lba_to_pba_table);
1964 kfree(msb->cache);
1965 msb->card = NULL;
1966}
1967
1968static int msb_disk_release(struct gendisk *disk)
1969{
1970 struct msb_data *msb = disk->private_data;
1971
1972 dbg_verbose("block device release");
1973 mutex_lock(&msb_disk_lock);
1974
1975 if (msb) {
1976 if (msb->usage_count)
1977 msb->usage_count--;
1978
1979 if (!msb->usage_count) {
1980 disk->private_data = NULL;
1981 idr_remove(&msb_disk_idr, msb->disk_id);
1982 put_disk(disk);
1983 kfree(msb);
1984 }
1985 }
1986 mutex_unlock(&msb_disk_lock);
1987 return 0;
1988}
1989
1990static void msb_bd_release(struct gendisk *disk, fmode_t mode)
1991{
1992 msb_disk_release(disk);
1993}
1994
1995static int msb_bd_getgeo(struct block_device *bdev,
1996 struct hd_geometry *geo)
1997{
1998 struct msb_data *msb = bdev->bd_disk->private_data;
1999 *geo = msb->geometry;
2000 return 0;
2001}
2002
2003static int msb_prepare_req(struct request_queue *q, struct request *req)
2004{
2005 if (req->cmd_type != REQ_TYPE_FS) {
2006 blk_dump_rq_flags(req, "MS unsupported request");
2007 return BLKPREP_KILL;
2008 }
2009 req->rq_flags |= RQF_DONTPREP;
2010 return BLKPREP_OK;
2011}
2012
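/*
 * The request function runs with the queue lock held and thus cannot
 * sleep, while MemoryStick transfers do; so all it does is hand the
 * queue over to the I/O work item.
 */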
2013static void msb_submit_req(struct request_queue *q)
2014{
2015 struct memstick_dev *card = q->queuedata;
2016 struct msb_data *msb = memstick_get_drvdata(card);
2017 struct request *req = NULL;
2018
2019 dbg_verbose("Submit request");
2020
2021 if (msb->card_dead) {
2022 dbg("Refusing requests on removed card");
2023
2024 WARN_ON(!msb->io_queue_stopped);
2025
2026 while ((req = blk_fetch_request(q)) != NULL)
2027 __blk_end_request_all(req, -ENODEV);
2028 return;
2029 }
2030
2031 if (msb->req)
2032 return;
2033
2034 if (!msb->io_queue_stopped)
2035 queue_work(msb->io_queue, &msb->io_work);
2036}
2037
2038static int msb_check_card(struct memstick_dev *card)
2039{
2040 struct msb_data *msb = memstick_get_drvdata(card);
2041 return (msb->card_dead == 0);
2042}
2043
2044static void msb_stop(struct memstick_dev *card)
2045{
2046 struct msb_data *msb = memstick_get_drvdata(card);
2047 unsigned long flags;
2048
2049 dbg("Stopping all msblock IO");
2050
2051 spin_lock_irqsave(&msb->q_lock, flags);
2052 blk_stop_queue(msb->queue);
2053 msb->io_queue_stopped = true;
2054 spin_unlock_irqrestore(&msb->q_lock, flags);
2055
2056 del_timer_sync(&msb->cache_flush_timer);
2057 flush_workqueue(msb->io_queue);
2058
2059 if (msb->req) {
2060 spin_lock_irqsave(&msb->q_lock, flags);
2061 blk_requeue_request(msb->queue, msb->req);
2062 msb->req = NULL;
2063 spin_unlock_irqrestore(&msb->q_lock, flags);
2064 }
2065
2066}
2067
2068static void msb_start(struct memstick_dev *card)
2069{
2070 struct msb_data *msb = memstick_get_drvdata(card);
2071 unsigned long flags;
2072
2073 dbg("Resuming IO from msblock");
2074
2075 msb_invalidate_reg_window(msb);
2076
2077 spin_lock_irqsave(&msb->q_lock, flags);
2078 if (!msb->io_queue_stopped || msb->card_dead) {
2079 spin_unlock_irqrestore(&msb->q_lock, flags);
2080 return;
2081 }
2082 spin_unlock_irqrestore(&msb->q_lock, flags);
2083
	/* Kick the cache flush anyway, it's harmless */
2085 msb->need_flush_cache = true;
2086 msb->io_queue_stopped = false;
2087
2088 spin_lock_irqsave(&msb->q_lock, flags);
2089 blk_start_queue(msb->queue);
2090 spin_unlock_irqrestore(&msb->q_lock, flags);
2091
	queue_work(msb->io_queue, &msb->io_work);
}
2095
2096static const struct block_device_operations msb_bdops = {
2097 .open = msb_bd_open,
2098 .release = msb_bd_release,
2099 .getgeo = msb_bd_getgeo,
2100 .owner = THIS_MODULE
2101};
2102
2103/* Registers the block device */
2104static int msb_init_disk(struct memstick_dev *card)
2105{
2106 struct msb_data *msb = memstick_get_drvdata(card);
2107 struct memstick_host *host = card->host;
2108 int rc;
2109 u64 limit = BLK_BOUNCE_HIGH;
2110 unsigned long capacity;
2111
2112 if (host->dev.dma_mask && *(host->dev.dma_mask))
2113 limit = *(host->dev.dma_mask);
2114
2115 mutex_lock(&msb_disk_lock);
2116 msb->disk_id = idr_alloc(&msb_disk_idr, card, 0, 256, GFP_KERNEL);
2117 mutex_unlock(&msb_disk_lock);
2118
2119 if (msb->disk_id < 0)
2120 return msb->disk_id;
2121
2122 msb->disk = alloc_disk(0);
2123 if (!msb->disk) {
2124 rc = -ENOMEM;
2125 goto out_release_id;
2126 }
2127
2128 msb->queue = blk_init_queue(msb_submit_req, &msb->q_lock);
2129 if (!msb->queue) {
2130 rc = -ENOMEM;
2131 goto out_put_disk;
2132 }
2133
2134 msb->queue->queuedata = card;
2135 blk_queue_prep_rq(msb->queue, msb_prepare_req);
2136
2137 blk_queue_bounce_limit(msb->queue, limit);
2138 blk_queue_max_hw_sectors(msb->queue, MS_BLOCK_MAX_PAGES);
2139 blk_queue_max_segments(msb->queue, MS_BLOCK_MAX_SEGS);
2140 blk_queue_max_segment_size(msb->queue,
2141 MS_BLOCK_MAX_PAGES * msb->page_size);
2142 blk_queue_logical_block_size(msb->queue, msb->page_size);
2143
2144 sprintf(msb->disk->disk_name, "msblk%d", msb->disk_id);
2145 msb->disk->fops = &msb_bdops;
2146 msb->disk->private_data = msb;
2147 msb->disk->queue = msb->queue;
2148 msb->disk->flags |= GENHD_FL_EXT_DEVT;
2149
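	/*
	 * Disk capacity is expressed in 512-byte sectors: usable logical
	 * blocks times pages per block times sectors per page.
	 */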
2150 capacity = msb->pages_in_block * msb->logical_block_count;
2151 capacity *= (msb->page_size / 512);
2152 set_capacity(msb->disk, capacity);
2153 dbg("Set total disk size to %lu sectors", capacity);
2154
	msb->usage_count = 1;
	msb->io_queue = alloc_ordered_workqueue("ms_block", WQ_MEM_RECLAIM);
	if (!msb->io_queue) {
		rc = -ENOMEM;
		goto out_cleanup_queue;
	}
2157 INIT_WORK(&msb->io_work, msb_io_work);
2158 sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);
2159
2160 if (msb->read_only)
2161 set_disk_ro(msb->disk, 1);
2162
2163 msb_start(card);
2164 device_add_disk(&card->dev, msb->disk);
2165 dbg("Disk added");
2166 return 0;
2167
out_cleanup_queue:
	blk_cleanup_queue(msb->queue);
out_put_disk:
	put_disk(msb->disk);
2170out_release_id:
2171 mutex_lock(&msb_disk_lock);
2172 idr_remove(&msb_disk_idr, msb->disk_id);
2173 mutex_unlock(&msb_disk_lock);
2174 return rc;
2175}
2176
2177static int msb_probe(struct memstick_dev *card)
2178{
2179 struct msb_data *msb;
2180 int rc = 0;
2181
2182 msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
2183 if (!msb)
2184 return -ENOMEM;
2185 memstick_set_drvdata(card, msb);
2186 msb->card = card;
2187 spin_lock_init(&msb->q_lock);
2188
2189 rc = msb_init_card(card);
2190 if (rc)
2191 goto out_free;
2192
2193 rc = msb_init_disk(card);
2194 if (!rc) {
2195 card->check = msb_check_card;
2196 card->stop = msb_stop;
2197 card->start = msb_start;
2198 return 0;
2199 }
2200out_free:
2201 memstick_set_drvdata(card, NULL);
2202 msb_data_clear(msb);
2203 kfree(msb);
2204 return rc;
2205}
2206
2207static void msb_remove(struct memstick_dev *card)
2208{
2209 struct msb_data *msb = memstick_get_drvdata(card);
2210 unsigned long flags;
2211
2212 if (!msb->io_queue_stopped)
2213 msb_stop(card);
2214
2215 dbg("Removing the disk device");
2216
2217 /* Take care of unhandled + new requests from now on */
2218 spin_lock_irqsave(&msb->q_lock, flags);
2219 msb->card_dead = true;
2220 blk_start_queue(msb->queue);
2221 spin_unlock_irqrestore(&msb->q_lock, flags);
2222
2223 /* Remove the disk */
2224 del_gendisk(msb->disk);
2225 blk_cleanup_queue(msb->queue);
2226 msb->queue = NULL;
2227
2228 mutex_lock(&msb_disk_lock);
2229 msb_data_clear(msb);
2230 mutex_unlock(&msb_disk_lock);
2231
2232 msb_disk_release(msb->disk);
2233 memstick_set_drvdata(card, NULL);
2234}
2235
2236#ifdef CONFIG_PM
2237
2238static int msb_suspend(struct memstick_dev *card, pm_message_t state)
2239{
2240 msb_stop(card);
2241 return 0;
2242}
2243
2244static int msb_resume(struct memstick_dev *card)
2245{
2246 struct msb_data *msb = memstick_get_drvdata(card);
2247 struct msb_data *new_msb = NULL;
2248 bool card_dead = true;
2249
2250#ifndef CONFIG_MEMSTICK_UNSAFE_RESUME
2251 msb->card_dead = true;
2252 return 0;
2253#endif
2254 mutex_lock(&card->host->lock);
2255
2256 new_msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
2257 if (!new_msb)
2258 goto out;
2259
2260 new_msb->card = card;
2261 memstick_set_drvdata(card, new_msb);
2262 spin_lock_init(&new_msb->q_lock);
2263 sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);
2264
2265 if (msb_init_card(card))
2266 goto out;
2267
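	/*
	 * Verify that the same card was re-inserted: compare the boot
	 * page, the FTL state and the geometry of the old and the freshly
	 * probed instance; any mismatch means the media was swapped while
	 * we were suspended.
	 */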
2268 if (msb->block_size != new_msb->block_size)
2269 goto out;
2270
2271 if (memcmp(msb->boot_page, new_msb->boot_page,
2272 sizeof(struct ms_boot_page)))
2273 goto out;
2274
	if (msb->logical_block_count != new_msb->logical_block_count ||
	    memcmp(msb->lba_to_pba_table, new_msb->lba_to_pba_table,
			msb->logical_block_count * sizeof(u16)))
		goto out;
2279
2280 if (msb->block_count != new_msb->block_count ||
2281 memcmp(msb->used_blocks_bitmap, new_msb->used_blocks_bitmap,
2282 msb->block_count / 8))
2283 goto out;
2284
2285 card_dead = false;
2286out:
2287 if (card_dead)
2288 dbg("Card was removed/replaced during suspend");
2289
2290 msb->card_dead = card_dead;
2291 memstick_set_drvdata(card, msb);
2292
2293 if (new_msb) {
2294 msb_data_clear(new_msb);
2295 kfree(new_msb);
2296 }
2297
2298 msb_start(card);
2299 mutex_unlock(&card->host->lock);
2300 return 0;
2301}
2302#else
2303
2304#define msb_suspend NULL
2305#define msb_resume NULL
2306
2307#endif /* CONFIG_PM */
2308
2309static struct memstick_device_id msb_id_tbl[] = {
2310 {MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
2311 MEMSTICK_CLASS_FLASH},
2312
2313 {MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
2314 MEMSTICK_CLASS_ROM},
2315
2316 {MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
2317 MEMSTICK_CLASS_RO},
2318
2319 {MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
2320 MEMSTICK_CLASS_WP},
2321
2322 {MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_DUO, MEMSTICK_CATEGORY_STORAGE_DUO,
2323 MEMSTICK_CLASS_DUO},
2324 {}
2325};
MODULE_DEVICE_TABLE(memstick, msb_id_tbl);

static struct memstick_driver msb_driver = {
2330 .driver = {
2331 .name = DRIVER_NAME,
2332 .owner = THIS_MODULE
2333 },
2334 .id_table = msb_id_tbl,
2335 .probe = msb_probe,
2336 .remove = msb_remove,
2337 .suspend = msb_suspend,
2338 .resume = msb_resume
2339};
2340
2341static int __init msb_init(void)
2342{
	int rc = memstick_register_driver(&msb_driver);

	if (rc)
2345 pr_err("failed to register memstick driver (error %d)\n", rc);
2346
2347 return rc;
2348}
2349
2350static void __exit msb_exit(void)
2351{
2352 memstick_unregister_driver(&msb_driver);
2353 idr_destroy(&msb_disk_idr);
2354}
2355
2356module_init(msb_init);
2357module_exit(msb_exit);
2358
2359module_param(cache_flush_timeout, int, S_IRUGO);
2360MODULE_PARM_DESC(cache_flush_timeout,
2361 "Cache flush timeout in msec (1000 default)");
2362module_param(debug, int, S_IRUGO | S_IWUSR);
2363MODULE_PARM_DESC(debug, "Debug level (0-2)");
2364
2365module_param(verify_writes, bool, S_IRUGO);
2366MODULE_PARM_DESC(verify_writes, "Read back and check all data that is written");
2367
2368MODULE_LICENSE("GPL");
2369MODULE_AUTHOR("Maxim Levitsky");
2370MODULE_DESCRIPTION("Sony MemoryStick block device driver");