// SPDX-License-Identifier: GPL-2.0
/*
 * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
 *
 * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
 *
 * Based on the original DAC960 driver,
 * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
 * Portions Copyright 2002 by Mylex (An IBM Business Unit)
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/raid_class.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include "myrb.h"

static struct raid_template *myrb_raid_template;

static void myrb_monitor(struct work_struct *work);
static inline void myrb_translate_devstate(void *DeviceState);

static inline int myrb_logical_channel(struct Scsi_Host *shost)
{
	return shost->max_channel - 1;
}
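
/*
 * Channel numbering, as used throughout this driver: channels
 * 0 .. max_channel - 2 address physical (pass-through) devices, while
 * the highest channel carries the logical drives exported by the
 * controller; see myrb_queuecommand() below for the dispatch.
 */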

static struct myrb_devstate_name_entry {
	enum myrb_devstate state;
	const char *name;
} myrb_devstate_name_list[] = {
	{ MYRB_DEVICE_DEAD, "Dead" },
	{ MYRB_DEVICE_WO, "WriteOnly" },
	{ MYRB_DEVICE_ONLINE, "Online" },
	{ MYRB_DEVICE_CRITICAL, "Critical" },
	{ MYRB_DEVICE_STANDBY, "Standby" },
	{ MYRB_DEVICE_OFFLINE, "Offline" },
};

static const char *myrb_devstate_name(enum myrb_devstate state)
{
	struct myrb_devstate_name_entry *entry = myrb_devstate_name_list;
	int i;

	for (i = 0; i < ARRAY_SIZE(myrb_devstate_name_list); i++) {
		if (entry[i].state == state)
			return entry[i].name;
	}
	return "Unknown";
}

static struct myrb_raidlevel_name_entry {
	enum myrb_raidlevel level;
	const char *name;
} myrb_raidlevel_name_list[] = {
	{ MYRB_RAID_LEVEL0, "RAID0" },
	{ MYRB_RAID_LEVEL1, "RAID1" },
	{ MYRB_RAID_LEVEL3, "RAID3" },
	{ MYRB_RAID_LEVEL5, "RAID5" },
	{ MYRB_RAID_LEVEL6, "RAID6" },
	{ MYRB_RAID_JBOD, "JBOD" },
};

static const char *myrb_raidlevel_name(enum myrb_raidlevel level)
{
	struct myrb_raidlevel_name_entry *entry = myrb_raidlevel_name_list;
	int i;

	for (i = 0; i < ARRAY_SIZE(myrb_raidlevel_name_list); i++) {
		if (entry[i].level == level)
			return entry[i].name;
	}
	return NULL;
}

/*
 * myrb_create_mempools - allocates auxiliary data structures
 *
 * Return: true on success, false otherwise.
 */
static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb)
{
	size_t elem_size, elem_align;

	elem_align = sizeof(struct myrb_sge);
	elem_size = cb->host->sg_tablesize * elem_align;
	cb->sg_pool = dma_pool_create("myrb_sg", &pdev->dev,
				      elem_size, elem_align, 0);
	if (cb->sg_pool == NULL) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate SG pool\n");
		return false;
	}

	cb->dcdb_pool = dma_pool_create("myrb_dcdb", &pdev->dev,
					sizeof(struct myrb_dcdb),
					sizeof(unsigned int), 0);
	if (!cb->dcdb_pool) {
		dma_pool_destroy(cb->sg_pool);
		cb->sg_pool = NULL;
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate DCDB pool\n");
		return false;
	}

	snprintf(cb->work_q_name, sizeof(cb->work_q_name),
		 "myrb_wq_%d", cb->host->host_no);
	cb->work_q = create_singlethread_workqueue(cb->work_q_name);
	if (!cb->work_q) {
		dma_pool_destroy(cb->dcdb_pool);
		cb->dcdb_pool = NULL;
		dma_pool_destroy(cb->sg_pool);
		cb->sg_pool = NULL;
		shost_printk(KERN_ERR, cb->host,
			     "Failed to create workqueue\n");
		return false;
	}

	/*
	 * Initialize the Monitoring Timer.
	 */
	INIT_DELAYED_WORK(&cb->monitor_work, myrb_monitor);
	queue_delayed_work(cb->work_q, &cb->monitor_work, 1);

	return true;
}

/*
 * myrb_destroy_mempools - tears down the memory pools for the controller
 */
static void myrb_destroy_mempools(struct myrb_hba *cb)
{
	cancel_delayed_work_sync(&cb->monitor_work);
	destroy_workqueue(cb->work_q);

	dma_pool_destroy(cb->sg_pool);
	dma_pool_destroy(cb->dcdb_pool);
}

/*
 * myrb_reset_cmd - reset command block
 */
static inline void myrb_reset_cmd(struct myrb_cmdblk *cmd_blk)
{
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;

	memset(mbox, 0, sizeof(union myrb_cmd_mbox));
	cmd_blk->status = 0;
}

/*
 * myrb_qcmd - queues command block for execution
 */
static void myrb_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
	void __iomem *base = cb->io_base;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	union myrb_cmd_mbox *next_mbox = cb->next_cmd_mbox;

	cb->write_cmd_mbox(next_mbox, mbox);
	if (cb->prev_cmd_mbox1->words[0] == 0 ||
	    cb->prev_cmd_mbox2->words[0] == 0)
		cb->get_cmd_mbox(base);
	cb->prev_cmd_mbox2 = cb->prev_cmd_mbox1;
	cb->prev_cmd_mbox1 = next_mbox;
	if (++next_mbox > cb->last_cmd_mbox)
		next_mbox = cb->first_cmd_mbox;
	cb->next_cmd_mbox = next_mbox;
}
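
/*
 * Note on the mailbox ring above: all callers take cb->queue_lock
 * around cb->qcmd(). A slot whose words[0] has been cleared was
 * already consumed, so the "new command" doorbell (get_cmd_mbox) is
 * only rung when one of the two most recently submitted slots is
 * free; otherwise the controller is presumably still scanning the
 * ring and will pick up the new entry on its own.
 */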

/*
 * myrb_exec_cmd - executes command block and waits for completion.
 *
 * Return: command status
 */
static unsigned short myrb_exec_cmd(struct myrb_hba *cb,
		struct myrb_cmdblk *cmd_blk)
{
	DECLARE_COMPLETION_ONSTACK(cmpl);
	unsigned long flags;

	cmd_blk->completion = &cmpl;

	spin_lock_irqsave(&cb->queue_lock, flags);
	cb->qcmd(cb, cmd_blk);
	spin_unlock_irqrestore(&cb->queue_lock, flags);

	wait_for_completion(&cmpl);
	return cmd_blk->status;
}
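
/*
 * myrb_exec_cmd() sleeps in wait_for_completion(), so it must only be
 * called from process context; cmd_blk->completion is expected to be
 * completed by the status-mailbox interrupt path once the controller
 * posts the command status.
 */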

/*
 * myrb_exec_type3 - executes a type 3 command and waits for completion.
 *
 * Return: command status
 */
static unsigned short myrb_exec_type3(struct myrb_hba *cb,
		enum myrb_cmd_opcode op, dma_addr_t addr)
{
	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;

	mutex_lock(&cb->dcmd_mutex);
	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_DCMD_TAG;
	mbox->type3.opcode = op;
	mbox->type3.addr = addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	mutex_unlock(&cb->dcmd_mutex);
	return status;
}
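
/*
 * A caller only has to pass the opcode and a DMA address for the
 * result buffer. For example, myrb_hba_enquiry() below refreshes the
 * controller status with:
 *
 *	status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY, cb->enquiry_addr);
 *	if (status != MYRB_STATUS_SUCCESS)
 *		return status;
 */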

/*
 * myrb_exec_type3D - executes a type 3D command and waits for completion.
 *
 * Return: command status
 */
static unsigned short myrb_exec_type3D(struct myrb_hba *cb,
		enum myrb_cmd_opcode op, struct scsi_device *sdev,
		struct myrb_pdev_state *pdev_info)
{
	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;
	dma_addr_t pdev_info_addr;

	pdev_info_addr = dma_map_single(&cb->pdev->dev, pdev_info,
					sizeof(struct myrb_pdev_state),
					DMA_FROM_DEVICE);
	if (dma_mapping_error(&cb->pdev->dev, pdev_info_addr))
		return MYRB_STATUS_SUBSYS_FAILED;

	mutex_lock(&cb->dcmd_mutex);
	myrb_reset_cmd(cmd_blk);
	mbox->type3D.id = MYRB_DCMD_TAG;
	mbox->type3D.opcode = op;
	mbox->type3D.channel = sdev->channel;
	mbox->type3D.target = sdev->id;
	mbox->type3D.addr = pdev_info_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	mutex_unlock(&cb->dcmd_mutex);
	dma_unmap_single(&cb->pdev->dev, pdev_info_addr,
			 sizeof(struct myrb_pdev_state), DMA_FROM_DEVICE);
	if (status == MYRB_STATUS_SUCCESS &&
	    mbox->type3D.opcode == MYRB_CMD_GET_DEVICE_STATE_OLD)
		myrb_translate_devstate(pdev_info);

	return status;
}

static char *myrb_event_msg[] = {
	"killed because write recovery failed",
	"killed because of SCSI bus reset failure",
	"killed because of double check condition",
	"killed because it was removed",
	"killed because of gross error on SCSI chip",
	"killed because of bad tag returned from drive",
	"killed because of timeout on SCSI command",
	"killed because of reset SCSI command issued from system",
	"killed because busy or parity error count exceeded limit",
	"killed because of 'kill drive' command from system",
	"killed because of selection timeout",
	"killed due to SCSI phase sequence error",
	"killed due to unknown status",
};

/**
 * myrb_get_event - get event log from HBA
 * @cb: pointer to the hba structure
 * @event: number of the event
 *
 * Executes a type 3E command and logs the event message.
 */
static void myrb_get_event(struct myrb_hba *cb, unsigned int event)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_log_entry *ev_buf;
	dma_addr_t ev_addr;
	unsigned short status;

	ev_buf = dma_alloc_coherent(&cb->pdev->dev,
				    sizeof(struct myrb_log_entry),
				    &ev_addr, GFP_KERNEL);
	if (!ev_buf)
		return;

	myrb_reset_cmd(cmd_blk);
	mbox->type3E.id = MYRB_MCMD_TAG;
	mbox->type3E.opcode = MYRB_CMD_EVENT_LOG_OPERATION;
	mbox->type3E.optype = DAC960_V1_GetEventLogEntry;
	mbox->type3E.opqual = 1;
	mbox->type3E.ev_seq = event;
	mbox->type3E.addr = ev_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (status != MYRB_STATUS_SUCCESS)
		shost_printk(KERN_INFO, cb->host,
			     "Failed to get event log %d, status %04x\n",
			     event, status);
	else if (ev_buf->seq_num == event) {
		struct scsi_sense_hdr sshdr;

		memset(&sshdr, 0, sizeof(sshdr));
		scsi_normalize_sense(ev_buf->sense, 32, &sshdr);

		if (sshdr.sense_key == VENDOR_SPECIFIC &&
		    sshdr.asc == 0x80 &&
		    sshdr.ascq < ARRAY_SIZE(myrb_event_msg))
			shost_printk(KERN_CRIT, cb->host,
				     "Physical drive %d:%d: %s\n",
				     ev_buf->channel, ev_buf->target,
				     myrb_event_msg[sshdr.ascq]);
		else
			shost_printk(KERN_CRIT, cb->host,
				     "Physical drive %d:%d: Sense: %X/%02X/%02X\n",
				     ev_buf->channel, ev_buf->target,
				     sshdr.sense_key, sshdr.asc, sshdr.ascq);
	}

	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_log_entry),
			  ev_buf, ev_addr);
}

/*
 * myrb_get_errtable - retrieves the error table from the controller
 *
 * Executes a type 3 command and logs the error table from the controller.
 */
static void myrb_get_errtable(struct myrb_hba *cb)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;
	struct myrb_error_entry old_table[MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS];

	memcpy(&old_table, cb->err_table, sizeof(old_table));

	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_MCMD_TAG;
	mbox->type3.opcode = MYRB_CMD_GET_ERROR_TABLE;
	mbox->type3.addr = cb->err_table_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (status == MYRB_STATUS_SUCCESS) {
		struct myrb_error_entry *table = cb->err_table;
		struct myrb_error_entry *new, *old;
		size_t err_table_offset;
		struct scsi_device *sdev;

		shost_for_each_device(sdev, cb->host) {
			if (sdev->channel >= myrb_logical_channel(cb->host))
				continue;
			err_table_offset = sdev->channel * MYRB_MAX_TARGETS
				+ sdev->id;
			new = table + err_table_offset;
			old = &old_table[err_table_offset];
			if (new->parity_err == old->parity_err &&
			    new->soft_err == old->soft_err &&
			    new->hard_err == old->hard_err &&
			    new->misc_err == old->misc_err)
				continue;
			sdev_printk(KERN_CRIT, sdev,
				    "Errors: Parity = %d, Soft = %d, Hard = %d, Misc = %d\n",
				    new->parity_err, new->soft_err,
				    new->hard_err, new->misc_err);
		}
	}
}

/*
 * myrb_get_ldev_info - retrieves the logical device table from the controller
 *
 * Executes a type 3 command and updates the logical device table.
 *
 * Return: command status
 */
static unsigned short myrb_get_ldev_info(struct myrb_hba *cb)
{
	unsigned short status;
	int ldev_num, ldev_cnt = cb->enquiry->ldev_count;
	struct Scsi_Host *shost = cb->host;

	status = myrb_exec_type3(cb, MYRB_CMD_GET_LDEV_INFO,
				 cb->ldev_info_addr);
	if (status != MYRB_STATUS_SUCCESS)
		return status;

	for (ldev_num = 0; ldev_num < ldev_cnt; ldev_num++) {
		struct myrb_ldev_info *old = NULL;
		struct myrb_ldev_info *new = cb->ldev_info_buf + ldev_num;
		struct scsi_device *sdev;

		sdev = scsi_device_lookup(shost, myrb_logical_channel(shost),
					  ldev_num, 0);
		if (!sdev) {
			if (new->state == MYRB_DEVICE_OFFLINE)
				continue;
			shost_printk(KERN_INFO, shost,
				     "Adding Logical Drive %d in state %s\n",
				     ldev_num, myrb_devstate_name(new->state));
			scsi_add_device(shost, myrb_logical_channel(shost),
					ldev_num, 0);
			continue;
		}
		old = sdev->hostdata;
		if (new->state != old->state)
			shost_printk(KERN_INFO, shost,
				     "Logical Drive %d is now %s\n",
				     ldev_num, myrb_devstate_name(new->state));
		if (new->wb_enabled != old->wb_enabled)
			sdev_printk(KERN_INFO, sdev,
				    "Logical Drive is now WRITE %s\n",
				    (new->wb_enabled ? "BACK" : "THRU"));
		memcpy(old, new, sizeof(*new));
		scsi_device_put(sdev);
	}
	return status;
}

/*
 * myrb_get_rbld_progress - get rebuild progress information
 *
 * Executes a type 3 command and returns the rebuild progress
 * information.
 *
 * Return: command status
 */
static unsigned short myrb_get_rbld_progress(struct myrb_hba *cb,
		struct myrb_rbld_progress *rbld)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_rbld_progress *rbld_buf;
	dma_addr_t rbld_addr;
	unsigned short status;

	rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
				      sizeof(struct myrb_rbld_progress),
				      &rbld_addr, GFP_KERNEL);
	if (!rbld_buf)
		return MYRB_STATUS_RBLD_NOT_CHECKED;

	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_MCMD_TAG;
	mbox->type3.opcode = MYRB_CMD_GET_REBUILD_PROGRESS;
	mbox->type3.addr = rbld_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (rbld)
		memcpy(rbld, rbld_buf, sizeof(struct myrb_rbld_progress));
	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
			  rbld_buf, rbld_addr);
	return status;
}

/*
 * myrb_update_rbld_progress - updates the rebuild status
 *
 * Updates the rebuild status for the attached logical devices.
 */
static void myrb_update_rbld_progress(struct myrb_hba *cb)
{
	struct myrb_rbld_progress rbld_buf;
	unsigned short status;

	status = myrb_get_rbld_progress(cb, &rbld_buf);
	if (status == MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS &&
	    cb->last_rbld_status == MYRB_STATUS_SUCCESS)
		status = MYRB_STATUS_RBLD_SUCCESS;
	if (status != MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS) {
		unsigned int blocks_done =
			rbld_buf.ldev_size - rbld_buf.blocks_left;
		struct scsi_device *sdev;

		sdev = scsi_device_lookup(cb->host,
					  myrb_logical_channel(cb->host),
					  rbld_buf.ldev_num, 0);
		if (!sdev)
			return;

		switch (status) {
		case MYRB_STATUS_SUCCESS:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild in Progress, %d%% completed\n",
				    (100 * (blocks_done >> 7))
				    / (rbld_buf.ldev_size >> 7));
			break;
		case MYRB_STATUS_RBLD_FAILED_LDEV_FAILURE:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Logical Drive Failure\n");
			break;
		case MYRB_STATUS_RBLD_FAILED_BADBLOCKS:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Bad Blocks on Other Drives\n");
			break;
		case MYRB_STATUS_RBLD_FAILED_NEW_DRIVE_FAILED:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Failure of Drive Being Rebuilt\n");
			break;
		case MYRB_STATUS_RBLD_SUCCESS:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Completed Successfully\n");
			break;
		case MYRB_STATUS_RBLD_SUCCESS_TERMINATED:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Successfully Terminated\n");
			break;
		default:
			break;
		}
		scsi_device_put(sdev);
	}
	cb->last_rbld_status = status;
}
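
/*
 * The ">> 7" in the percentage calculations above (and in
 * myrb_get_cc_progress() below) scales both block counts down by 128
 * before multiplying by 100, so the intermediate product cannot
 * overflow 32 bits even for logical drives with close to 2^32 blocks.
 */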

/*
 * myrb_get_cc_progress - retrieves the consistency check progress
 *
 * Executes a type 3 command and fetches the rebuild / consistency
 * check progress.
 */
static void myrb_get_cc_progress(struct myrb_hba *cb)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_rbld_progress *rbld_buf;
	dma_addr_t rbld_addr;
	unsigned short status;

	rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
				      sizeof(struct myrb_rbld_progress),
				      &rbld_addr, GFP_KERNEL);
	if (!rbld_buf) {
		cb->need_cc_status = true;
		return;
	}
	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_MCMD_TAG;
	mbox->type3.opcode = MYRB_CMD_REBUILD_STAT;
	mbox->type3.addr = rbld_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (status == MYRB_STATUS_SUCCESS) {
		unsigned int ldev_num = rbld_buf->ldev_num;
		unsigned int ldev_size = rbld_buf->ldev_size;
		unsigned int blocks_done =
			ldev_size - rbld_buf->blocks_left;
		struct scsi_device *sdev;

		sdev = scsi_device_lookup(cb->host,
					  myrb_logical_channel(cb->host),
					  ldev_num, 0);
		if (sdev) {
			sdev_printk(KERN_INFO, sdev,
				    "Consistency Check in Progress: %d%% completed\n",
				    (100 * (blocks_done >> 7))
				    / (ldev_size >> 7));
			scsi_device_put(sdev);
		}
	}
	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
			  rbld_buf, rbld_addr);
}

/*
 * myrb_bgi_control - updates background initialisation status
 *
 * Executes a type 3B command and updates the background initialisation status.
 */
static void myrb_bgi_control(struct myrb_hba *cb)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_bgi_status *bgi, *last_bgi;
	dma_addr_t bgi_addr;
	struct scsi_device *sdev = NULL;
	unsigned short status;

	bgi = dma_alloc_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
				 &bgi_addr, GFP_KERNEL);
	if (!bgi) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate bgi memory\n");
		return;
	}
	myrb_reset_cmd(cmd_blk);
	mbox->type3B.id = MYRB_DCMD_TAG;
	mbox->type3B.opcode = MYRB_CMD_BGI_CONTROL;
	mbox->type3B.optype = 0x20;
	mbox->type3B.addr = bgi_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	last_bgi = &cb->bgi_status;
	sdev = scsi_device_lookup(cb->host,
				  myrb_logical_channel(cb->host),
				  bgi->ldev_num, 0);
	switch (status) {
	case MYRB_STATUS_SUCCESS:
		switch (bgi->status) {
		case MYRB_BGI_INVALID:
			break;
		case MYRB_BGI_STARTED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Started\n");
			break;
		case MYRB_BGI_INPROGRESS:
			if (!sdev)
				break;
			if (bgi->blocks_done == last_bgi->blocks_done &&
			    bgi->ldev_num == last_bgi->ldev_num)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization in Progress: %d%% completed\n",
				    (100 * (bgi->blocks_done >> 7))
				    / (bgi->ldev_size >> 7));
			break;
		case MYRB_BGI_SUSPENDED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Suspended\n");
			break;
		case MYRB_BGI_CANCELLED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Cancelled\n");
			break;
		}
		memcpy(&cb->bgi_status, bgi, sizeof(struct myrb_bgi_status));
		break;
	case MYRB_STATUS_BGI_SUCCESS:
		if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Completed Successfully\n");
		cb->bgi_status.status = MYRB_BGI_INVALID;
		break;
	case MYRB_STATUS_BGI_ABORTED:
		if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Aborted\n");
		fallthrough;
	case MYRB_STATUS_NO_BGI_INPROGRESS:
		cb->bgi_status.status = MYRB_BGI_INVALID;
		break;
	}
	if (sdev)
		scsi_device_put(sdev);
	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
			  bgi, bgi_addr);
}

/*
 * myrb_hba_enquiry - updates the controller status
 *
 * Executes a DAC_V1_Enquiry command and updates the controller status.
 *
 * Return: command status
 */
static unsigned short myrb_hba_enquiry(struct myrb_hba *cb)
{
	struct myrb_enquiry old, *new;
	unsigned short status;

	memcpy(&old, cb->enquiry, sizeof(struct myrb_enquiry));

	status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY, cb->enquiry_addr);
	if (status != MYRB_STATUS_SUCCESS)
		return status;

	new = cb->enquiry;
	if (new->ldev_count > old.ldev_count) {
		int ldev_num = old.ldev_count - 1;

		while (++ldev_num < new->ldev_count)
			shost_printk(KERN_CRIT, cb->host,
				     "Logical Drive %d Now Exists\n",
				     ldev_num);
	}
	if (new->ldev_count < old.ldev_count) {
		int ldev_num = new->ldev_count - 1;

		while (++ldev_num < old.ldev_count)
			shost_printk(KERN_CRIT, cb->host,
				     "Logical Drive %d No Longer Exists\n",
				     ldev_num);
	}
	if (new->status.deferred != old.status.deferred)
		shost_printk(KERN_CRIT, cb->host,
			     "Deferred Write Error Flag is now %s\n",
			     (new->status.deferred ? "TRUE" : "FALSE"));
	if (new->ev_seq != old.ev_seq) {
		cb->new_ev_seq = new->ev_seq;
		cb->need_err_info = true;
		shost_printk(KERN_INFO, cb->host,
			     "Event log %d/%d (%d/%d) available\n",
			     cb->old_ev_seq, cb->new_ev_seq,
			     old.ev_seq, new->ev_seq);
	}
	if ((new->ldev_critical > 0 &&
	     new->ldev_critical != old.ldev_critical) ||
	    (new->ldev_offline > 0 &&
	     new->ldev_offline != old.ldev_offline) ||
	    (new->ldev_count != old.ldev_count)) {
		shost_printk(KERN_INFO, cb->host,
			     "Logical drive count changed (%d/%d/%d)\n",
			     new->ldev_critical,
			     new->ldev_offline,
			     new->ldev_count);
		cb->need_ldev_info = true;
	}
	if (new->pdev_dead > 0 ||
	    new->pdev_dead != old.pdev_dead ||
	    time_after_eq(jiffies, cb->secondary_monitor_time
			  + MYRB_SECONDARY_MONITOR_INTERVAL)) {
		cb->need_bgi_status = cb->bgi_status_supported;
		cb->secondary_monitor_time = jiffies;
	}
	if (new->rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
	    new->rbld == MYRB_BG_RBLD_IN_PROGRESS ||
	    old.rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
	    old.rbld == MYRB_BG_RBLD_IN_PROGRESS) {
		cb->need_rbld = true;
		cb->rbld_first = (new->ldev_critical < old.ldev_critical);
	}
	if (old.rbld == MYRB_BG_CHECK_IN_PROGRESS)
		switch (new->rbld) {
		case MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Completed Successfully\n");
			break;
		case MYRB_STDBY_RBLD_IN_PROGRESS:
		case MYRB_BG_RBLD_IN_PROGRESS:
			break;
		case MYRB_BG_CHECK_IN_PROGRESS:
			cb->need_cc_status = true;
			break;
		case MYRB_STDBY_RBLD_COMPLETED_WITH_ERROR:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Completed with Error\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_DRIVE_FAILED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Physical Device Failed\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_LDEV_FAILED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Logical Drive Failed\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_OTHER:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Other Causes\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_SUCCESS_TERMINATED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Successfully Terminated\n");
			break;
		}
	else if (new->rbld == MYRB_BG_CHECK_IN_PROGRESS)
		cb->need_cc_status = true;

	return MYRB_STATUS_SUCCESS;
}

/*
 * myrb_set_pdev_state - sets the device state for a physical device
 *
 * Return: command status
 */
static unsigned short myrb_set_pdev_state(struct myrb_hba *cb,
		struct scsi_device *sdev, enum myrb_devstate state)
{
	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;

	mutex_lock(&cb->dcmd_mutex);
	mbox->type3D.opcode = MYRB_CMD_START_DEVICE;
	mbox->type3D.id = MYRB_DCMD_TAG;
	mbox->type3D.channel = sdev->channel;
	mbox->type3D.target = sdev->id;
	mbox->type3D.state = state & 0x1F;
	status = myrb_exec_cmd(cb, cmd_blk);
	mutex_unlock(&cb->dcmd_mutex);

	return status;
}

/*
 * myrb_enable_mmio - enables the Memory Mailbox Interface
 *
 * PD and P controller types have no memory mailbox, but still need the
 * other dma mapped memory.
 *
 * Return: true on success, false otherwise.
 */
static bool myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn)
{
	void __iomem *base = cb->io_base;
	struct pci_dev *pdev = cb->pdev;
	size_t err_table_size;
	size_t ldev_info_size;
	union myrb_cmd_mbox *cmd_mbox_mem;
	struct myrb_stat_mbox *stat_mbox_mem;
	union myrb_cmd_mbox mbox;
	unsigned short status;

	memset(&mbox, 0, sizeof(union myrb_cmd_mbox));

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
		dev_err(&pdev->dev, "DMA mask out of range\n");
		return false;
	}

	cb->enquiry = dma_alloc_coherent(&pdev->dev,
					 sizeof(struct myrb_enquiry),
					 &cb->enquiry_addr, GFP_KERNEL);
	if (!cb->enquiry)
		return false;

	err_table_size = sizeof(struct myrb_error_entry) *
		MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
	cb->err_table = dma_alloc_coherent(&pdev->dev, err_table_size,
					   &cb->err_table_addr, GFP_KERNEL);
	if (!cb->err_table)
		return false;

	ldev_info_size = sizeof(struct myrb_ldev_info) * MYRB_MAX_LDEVS;
	cb->ldev_info_buf = dma_alloc_coherent(&pdev->dev, ldev_info_size,
					       &cb->ldev_info_addr, GFP_KERNEL);
	if (!cb->ldev_info_buf)
		return false;

	/*
	 * Skip mailbox initialisation for PD and P Controllers
	 */
	if (!mmio_init_fn)
		return true;

	/* These are the base addresses for the command memory mailbox array */
	cb->cmd_mbox_size = MYRB_CMD_MBOX_COUNT * sizeof(union myrb_cmd_mbox);
	cb->first_cmd_mbox = dma_alloc_coherent(&pdev->dev,
						cb->cmd_mbox_size,
						&cb->cmd_mbox_addr,
						GFP_KERNEL);
	if (!cb->first_cmd_mbox)
		return false;

	cmd_mbox_mem = cb->first_cmd_mbox;
	cmd_mbox_mem += MYRB_CMD_MBOX_COUNT - 1;
	cb->last_cmd_mbox = cmd_mbox_mem;
	cb->next_cmd_mbox = cb->first_cmd_mbox;
	cb->prev_cmd_mbox1 = cb->last_cmd_mbox;
	cb->prev_cmd_mbox2 = cb->last_cmd_mbox - 1;

	/* These are the base addresses for the status memory mailbox array */
	cb->stat_mbox_size = MYRB_STAT_MBOX_COUNT *
		sizeof(struct myrb_stat_mbox);
	cb->first_stat_mbox = dma_alloc_coherent(&pdev->dev,
						 cb->stat_mbox_size,
						 &cb->stat_mbox_addr,
						 GFP_KERNEL);
	if (!cb->first_stat_mbox)
		return false;

	stat_mbox_mem = cb->first_stat_mbox;
	stat_mbox_mem += MYRB_STAT_MBOX_COUNT - 1;
	cb->last_stat_mbox = stat_mbox_mem;
	cb->next_stat_mbox = cb->first_stat_mbox;

	/* Enable the Memory Mailbox Interface. */
	cb->dual_mode_interface = true;
	mbox.typeX.opcode = 0x2B;
	mbox.typeX.id = 0;
	mbox.typeX.opcode2 = 0x14;
	mbox.typeX.cmd_mbox_addr = cb->cmd_mbox_addr;
	mbox.typeX.stat_mbox_addr = cb->stat_mbox_addr;

	status = mmio_init_fn(pdev, base, &mbox);
	if (status != MYRB_STATUS_SUCCESS) {
		cb->dual_mode_interface = false;
		mbox.typeX.opcode2 = 0x10;
		status = mmio_init_fn(pdev, base, &mbox);
		if (status != MYRB_STATUS_SUCCESS) {
			dev_err(&pdev->dev,
895 "Failed to enable mailbox, statux %02X\n",
				status);
			return false;
		}
	}
	return true;
}
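
/*
 * The 0x2B/0x14 mailbox-init combination above requests the dual-mode
 * memory mailbox interface; if the firmware rejects it, the driver
 * retries with opcode2 0x10, the single-mode variant, and
 * cb->dual_mode_interface records which one is active. The magic
 * numbers are carried over from the original DAC960 driver.
 */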

/*
 * myrb_get_hba_config - reads the configuration information
 *
 * Reads the configuration information from the controller and
 * initializes the controller structure.
 *
 * Return: 0 on success, errno otherwise
 */
static int myrb_get_hba_config(struct myrb_hba *cb)
{
	struct myrb_enquiry2 *enquiry2;
	dma_addr_t enquiry2_addr;
	struct myrb_config2 *config2;
	dma_addr_t config2_addr;
	struct Scsi_Host *shost = cb->host;
	struct pci_dev *pdev = cb->pdev;
	int pchan_max = 0, pchan_cur = 0;
	unsigned short status;
	int ret = -ENODEV, memsize = 0;

	enquiry2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
				      &enquiry2_addr, GFP_KERNEL);
	if (!enquiry2) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate V1 enquiry2 memory\n");
		return -ENOMEM;
	}
	config2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_config2),
				     &config2_addr, GFP_KERNEL);
	if (!config2) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate V1 config2 memory\n");
		dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
				  enquiry2, enquiry2_addr);
		return -ENOMEM;
	}
	mutex_lock(&cb->dma_mutex);
	status = myrb_hba_enquiry(cb);
	mutex_unlock(&cb->dma_mutex);
	if (status != MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_WARNING, cb->host,
944 "Failed it issue V1 Enquiry\n");
		goto out_free;
	}

	status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY2, enquiry2_addr);
	if (status != MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_WARNING, cb->host,
			     "Failed to issue V1 Enquiry2\n");
		goto out_free;
	}

	status = myrb_exec_type3(cb, MYRB_CMD_READ_CONFIG2, config2_addr);
	if (status != MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_WARNING, cb->host,
			     "Failed to issue ReadConfig2\n");
		goto out_free;
	}

	status = myrb_get_ldev_info(cb);
	if (status != MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_WARNING, cb->host,
			     "Failed to get logical drive information\n");
		goto out_free;
	}

	/*
	 * Initialize the Controller Model Name and Full Model Name fields.
	 */
	switch (enquiry2->hw.sub_model) {
	case DAC960_V1_P_PD_PU:
		if (enquiry2->scsi_cap.bus_speed == MYRB_SCSI_SPEED_ULTRA)
			strcpy(cb->model_name, "DAC960PU");
		else
			strcpy(cb->model_name, "DAC960PD");
		break;
	case DAC960_V1_PL:
		strcpy(cb->model_name, "DAC960PL");
		break;
	case DAC960_V1_PG:
		strcpy(cb->model_name, "DAC960PG");
		break;
	case DAC960_V1_PJ:
		strcpy(cb->model_name, "DAC960PJ");
		break;
	case DAC960_V1_PR:
		strcpy(cb->model_name, "DAC960PR");
		break;
	case DAC960_V1_PT:
		strcpy(cb->model_name, "DAC960PT");
		break;
	case DAC960_V1_PTL0:
		strcpy(cb->model_name, "DAC960PTL0");
		break;
	case DAC960_V1_PRL:
		strcpy(cb->model_name, "DAC960PRL");
		break;
	case DAC960_V1_PTL1:
		strcpy(cb->model_name, "DAC960PTL1");
		break;
	case DAC960_V1_1164P:
		strcpy(cb->model_name, "eXtremeRAID 1100");
		break;
	default:
		shost_printk(KERN_WARNING, cb->host,
			     "Unknown Model %X\n",
			     enquiry2->hw.sub_model);
		goto out;
	}
	/*
	 * Initialize the Controller Firmware Version field and verify that it
	 * is a supported firmware version.
	 * The supported firmware versions are:
	 *
	 * DAC1164P		5.06 and above
	 * DAC960PTL/PRL/PJ/PG	4.06 and above
	 * DAC960PU/PD/PL	3.51 and above
	 * DAC960PU/PD/PL/P	2.73 and above
	 */
#if defined(CONFIG_ALPHA)
	/*
	 * DEC Alpha machines were often equipped with DAC960 cards that were
	 * OEMed from Mylex, and had their own custom firmware. Version 2.70,
	 * the last custom FW revision to be released by DEC for these older
	 * controllers, appears to work quite well with this driver.
	 *
	 * Cards tested successfully were several versions each of the PD and
	 * PU, called by DEC the KZPSC and KZPAC, respectively, and having
	 * the Manufacturer Numbers (from Mylex), usually on a sticker on the
	 * back of the board, of:
	 *
	 * KZPSC: D040347 (1-channel) or D040348 (2-channel)
	 *        or D040349 (3-channel)
	 * KZPAC: D040395 (1-channel) or D040396 (2-channel)
	 *        or D040397 (3-channel)
	 */
# define FIRMWARE_27X	"2.70"
#else
# define FIRMWARE_27X	"2.73"
#endif

	if (enquiry2->fw.major_version == 0) {
		enquiry2->fw.major_version = cb->enquiry->fw_major_version;
		enquiry2->fw.minor_version = cb->enquiry->fw_minor_version;
		enquiry2->fw.firmware_type = '0';
		enquiry2->fw.turn_id = 0;
	}
	snprintf(cb->fw_version, sizeof(cb->fw_version),
		 "%u.%02u-%c-%02u",
		 enquiry2->fw.major_version,
		 enquiry2->fw.minor_version,
		 enquiry2->fw.firmware_type,
		 enquiry2->fw.turn_id);
	if (!((enquiry2->fw.major_version == 5 &&
	       enquiry2->fw.minor_version >= 6) ||
	      (enquiry2->fw.major_version == 4 &&
	       enquiry2->fw.minor_version >= 6) ||
	      (enquiry2->fw.major_version == 3 &&
	       enquiry2->fw.minor_version >= 51) ||
	      (enquiry2->fw.major_version == 2 &&
	       strcmp(cb->fw_version, FIRMWARE_27X) >= 0))) {
		shost_printk(KERN_WARNING, cb->host,
			     "Firmware Version '%s' unsupported\n",
			     cb->fw_version);
		goto out;
	}
	/*
	 * Initialize the Channels, Targets, Memory Size, and SAF-TE
	 * Enclosure Management Enabled fields.
	 */
	switch (enquiry2->hw.model) {
	case MYRB_5_CHANNEL_BOARD:
		pchan_max = 5;
		break;
	case MYRB_3_CHANNEL_BOARD:
	case MYRB_3_CHANNEL_ASIC_DAC:
		pchan_max = 3;
		break;
	case MYRB_2_CHANNEL_BOARD:
		pchan_max = 2;
		break;
	default:
		pchan_max = enquiry2->cfg_chan;
		break;
	}
	pchan_cur = enquiry2->cur_chan;
	if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_32BIT)
		cb->bus_width = 32;
	else if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_16BIT)
		cb->bus_width = 16;
	else
		cb->bus_width = 8;
	cb->ldev_block_size = enquiry2->ldev_block_size;
	shost->max_channel = pchan_cur;
	shost->max_id = enquiry2->max_targets;
	memsize = enquiry2->mem_size >> 20;
	cb->safte_enabled = (enquiry2->fault_mgmt == MYRB_FAULT_SAFTE);
	/*
	 * Initialize the Controller Queue Depth, Driver Queue Depth,
	 * Logical Drive Count, Maximum Blocks per Command, Controller
	 * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
	 * The Driver Queue Depth must be at most one less than the
	 * Controller Queue Depth to allow for an automatic drive
	 * rebuild operation.
	 */
	shost->can_queue = cb->enquiry->max_tcq;
	if (shost->can_queue < 3)
		shost->can_queue = enquiry2->max_cmds;
	if (shost->can_queue < 3)
		/* Play safe and disable TCQ */
		shost->can_queue = 1;

	if (shost->can_queue > MYRB_CMD_MBOX_COUNT - 2)
		shost->can_queue = MYRB_CMD_MBOX_COUNT - 2;
	shost->max_sectors = enquiry2->max_sectors;
	shost->sg_tablesize = enquiry2->max_sge;
	if (shost->sg_tablesize > MYRB_SCATTER_GATHER_LIMIT)
		shost->sg_tablesize = MYRB_SCATTER_GATHER_LIMIT;
	/*
	 * Initialize the Stripe Size, Segment Size, and Geometry Translation.
	 */
	cb->stripe_size = config2->blocks_per_stripe * config2->block_factor
		>> (10 - MYRB_BLKSIZE_BITS);
	cb->segment_size = config2->blocks_per_cacheline * config2->block_factor
		>> (10 - MYRB_BLKSIZE_BITS);
	/* Assume 255/63 translation */
	cb->ldev_geom_heads = 255;
	cb->ldev_geom_sectors = 63;
	if (config2->drive_geometry) {
		cb->ldev_geom_heads = 128;
		cb->ldev_geom_sectors = 32;
	}

	/*
	 * Initialize the Background Initialization Status.
	 */
	if ((cb->fw_version[0] == '4' &&
	     strcmp(cb->fw_version, "4.08") >= 0) ||
	    (cb->fw_version[0] == '5' &&
	     strcmp(cb->fw_version, "5.08") >= 0)) {
		cb->bgi_status_supported = true;
		myrb_bgi_control(cb);
	}
	cb->last_rbld_status = MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS;
	ret = 0;

out:
	shost_printk(KERN_INFO, cb->host,
		     "Configuring %s PCI RAID Controller\n", cb->model_name);
	shost_printk(KERN_INFO, cb->host,
		     " Firmware Version: %s, Memory Size: %dMB\n",
		     cb->fw_version, memsize);
	if (cb->io_addr == 0)
		shost_printk(KERN_INFO, cb->host,
			     " I/O Address: n/a, PCI Address: 0x%lX, IRQ Channel: %d\n",
			     (unsigned long)cb->pci_addr, cb->irq);
	else
		shost_printk(KERN_INFO, cb->host,
			     " I/O Address: 0x%lX, PCI Address: 0x%lX, IRQ Channel: %d\n",
			     (unsigned long)cb->io_addr, (unsigned long)cb->pci_addr,
			     cb->irq);
	shost_printk(KERN_INFO, cb->host,
		     " Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
		     cb->host->can_queue, cb->host->max_sectors);
	shost_printk(KERN_INFO, cb->host,
		     " Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
		     cb->host->can_queue, cb->host->sg_tablesize,
		     MYRB_SCATTER_GATHER_LIMIT);
	shost_printk(KERN_INFO, cb->host,
		     " Stripe Size: %dKB, Segment Size: %dKB, BIOS Geometry: %d/%d%s\n",
		     cb->stripe_size, cb->segment_size,
		     cb->ldev_geom_heads, cb->ldev_geom_sectors,
		     cb->safte_enabled ?
		     " SAF-TE Enclosure Management Enabled" : "");
	shost_printk(KERN_INFO, cb->host,
		     " Physical: %d/%d channels %d/%d/%d devices\n",
		     pchan_cur, pchan_max, 0, cb->enquiry->pdev_dead,
		     cb->host->max_id);

	shost_printk(KERN_INFO, cb->host,
		     " Logical: 1/1 channels, %d/%d disks\n",
		     cb->enquiry->ldev_count, MYRB_MAX_LDEVS);

out_free:
	dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
			  enquiry2, enquiry2_addr);
	dma_free_coherent(&pdev->dev, sizeof(struct myrb_config2),
			  config2, config2_addr);

	return ret;
}

/*
 * myrb_unmap - unmaps controller structures
 */
static void myrb_unmap(struct myrb_hba *cb)
{
	if (cb->ldev_info_buf) {
		size_t ldev_info_size = sizeof(struct myrb_ldev_info) *
			MYRB_MAX_LDEVS;
		dma_free_coherent(&cb->pdev->dev, ldev_info_size,
				  cb->ldev_info_buf, cb->ldev_info_addr);
		cb->ldev_info_buf = NULL;
	}
	if (cb->err_table) {
		size_t err_table_size = sizeof(struct myrb_error_entry) *
			MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
		dma_free_coherent(&cb->pdev->dev, err_table_size,
				  cb->err_table, cb->err_table_addr);
		cb->err_table = NULL;
	}
	if (cb->enquiry) {
		dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_enquiry),
				  cb->enquiry, cb->enquiry_addr);
		cb->enquiry = NULL;
	}
	if (cb->first_stat_mbox) {
		dma_free_coherent(&cb->pdev->dev, cb->stat_mbox_size,
				  cb->first_stat_mbox, cb->stat_mbox_addr);
		cb->first_stat_mbox = NULL;
	}
	if (cb->first_cmd_mbox) {
		dma_free_coherent(&cb->pdev->dev, cb->cmd_mbox_size,
				  cb->first_cmd_mbox, cb->cmd_mbox_addr);
		cb->first_cmd_mbox = NULL;
	}
}

/*
 * myrb_cleanup - cleanup controller structures
 */
static void myrb_cleanup(struct myrb_hba *cb)
{
	struct pci_dev *pdev = cb->pdev;

	/* Free the memory mailbox, status, and related structures */
	myrb_unmap(cb);

	if (cb->mmio_base) {
		if (cb->disable_intr)
			cb->disable_intr(cb->io_base);
		iounmap(cb->mmio_base);
	}
	if (cb->irq)
		free_irq(cb->irq, cb);
	if (cb->io_addr)
		release_region(cb->io_addr, 0x80);
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);
	scsi_host_put(cb->host);
}

static int myrb_host_reset(struct scsi_cmnd *scmd)
{
	struct Scsi_Host *shost = scmd->device->host;
	struct myrb_hba *cb = shost_priv(shost);

	cb->reset(cb->io_base);
	return SUCCESS;
}

static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
		struct scsi_cmnd *scmd)
{
	struct request *rq = scsi_cmd_to_rq(scmd);
	struct myrb_hba *cb = shost_priv(shost);
	struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_dcdb *dcdb;
	dma_addr_t dcdb_addr;
	struct scsi_device *sdev = scmd->device;
	struct scatterlist *sgl;
	unsigned long flags;
	int nsge;

	myrb_reset_cmd(cmd_blk);
	dcdb = dma_pool_alloc(cb->dcdb_pool, GFP_ATOMIC, &dcdb_addr);
	if (!dcdb)
		return SCSI_MLQUEUE_HOST_BUSY;
	nsge = scsi_dma_map(scmd);
	if (nsge > 1) {
		dma_pool_free(cb->dcdb_pool, dcdb, dcdb_addr);
		scmd->result = (DID_ERROR << 16);
		scsi_done(scmd);
		return 0;
	}

	mbox->type3.opcode = MYRB_CMD_DCDB;
	mbox->type3.id = rq->tag + 3;
	mbox->type3.addr = dcdb_addr;
	dcdb->channel = sdev->channel;
	dcdb->target = sdev->id;
	switch (scmd->sc_data_direction) {
	case DMA_NONE:
		dcdb->data_xfer = MYRB_DCDB_XFER_NONE;
		break;
	case DMA_TO_DEVICE:
		dcdb->data_xfer = MYRB_DCDB_XFER_SYSTEM_TO_DEVICE;
		break;
	case DMA_FROM_DEVICE:
		dcdb->data_xfer = MYRB_DCDB_XFER_DEVICE_TO_SYSTEM;
		break;
	default:
		dcdb->data_xfer = MYRB_DCDB_XFER_ILLEGAL;
		break;
	}
	dcdb->early_status = false;
	if (rq->timeout <= 10)
		dcdb->timeout = MYRB_DCDB_TMO_10_SECS;
	else if (rq->timeout <= 60)
		dcdb->timeout = MYRB_DCDB_TMO_60_SECS;
	else if (rq->timeout <= 600)
		dcdb->timeout = MYRB_DCDB_TMO_10_MINS;
	else
		dcdb->timeout = MYRB_DCDB_TMO_24_HRS;
	dcdb->no_autosense = false;
	dcdb->allow_disconnect = true;
	sgl = scsi_sglist(scmd);
	dcdb->dma_addr = sg_dma_address(sgl);
	if (sg_dma_len(sgl) > USHRT_MAX) {
		dcdb->xfer_len_lo = sg_dma_len(sgl) & 0xffff;
		dcdb->xfer_len_hi4 = sg_dma_len(sgl) >> 16;
	} else {
		dcdb->xfer_len_lo = sg_dma_len(sgl);
		dcdb->xfer_len_hi4 = 0;
	}
	dcdb->cdb_len = scmd->cmd_len;
	dcdb->sense_len = sizeof(dcdb->sense);
	memcpy(&dcdb->cdb, scmd->cmnd, scmd->cmd_len);

	spin_lock_irqsave(&cb->queue_lock, flags);
	cb->qcmd(cb, cmd_blk);
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return 0;
}
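
/*
 * The "+ 3" in the command id above (and in myrb_ldev_queuecommand())
 * offsets the block layer tag so that SCSI command ids stay clear of
 * MYRB_DCMD_TAG and MYRB_MCMD_TAG, which are reserved for the
 * driver's internal direct and monitoring commands.
 */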

static void myrb_inquiry(struct myrb_hba *cb,
		struct scsi_cmnd *scmd)
{
	unsigned char inq[36] = {
		0x00, 0x00, 0x03, 0x02, 0x20, 0x00, 0x01, 0x00,
		0x4d, 0x59, 0x4c, 0x45, 0x58, 0x20, 0x20, 0x20,
		0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
		0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
		0x20, 0x20, 0x20, 0x20,
	};

	if (cb->bus_width > 16)
		inq[7] |= 1 << 6;
	if (cb->bus_width > 8)
		inq[7] |= 1 << 5;
	memcpy(&inq[16], cb->model_name, 16);
	memcpy(&inq[32], cb->fw_version, 1);
	memcpy(&inq[33], &cb->fw_version[2], 2);
	memcpy(&inq[35], &cb->fw_version[7], 1);

	scsi_sg_copy_from_buffer(scmd, (void *)inq, 36);
}
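
/*
 * The INQUIRY response for logical drives is synthesized rather than
 * passed to the hardware: bytes 8-15 already spell the vendor id
 * "MYLEX   ", bytes 16-31 receive the model name, bytes 32-35 a
 * condensed firmware revision, and bits 6/5 of byte 7 advertise
 * 32- or 16-bit wide transfers depending on the backing bus width.
 */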

static void
myrb_mode_sense(struct myrb_hba *cb, struct scsi_cmnd *scmd,
		struct myrb_ldev_info *ldev_info)
{
	unsigned char modes[32], *mode_pg;
	bool dbd;
	size_t mode_len;

	dbd = (scmd->cmnd[1] & 0x08) == 0x08;
	if (dbd) {
		mode_len = 24;
		mode_pg = &modes[4];
	} else {
		mode_len = 32;
		mode_pg = &modes[12];
	}
	memset(modes, 0, sizeof(modes));
	modes[0] = mode_len - 1;
	if (!dbd) {
		unsigned char *block_desc = &modes[4];

		modes[3] = 8;
		put_unaligned_be32(ldev_info->size, &block_desc[0]);
		put_unaligned_be32(cb->ldev_block_size, &block_desc[5]);
	}
	mode_pg[0] = 0x08;
	mode_pg[1] = 0x12;
	if (ldev_info->wb_enabled)
		mode_pg[2] |= 0x04;
	if (cb->segment_size) {
		mode_pg[2] |= 0x08;
		put_unaligned_be16(cb->segment_size, &mode_pg[14]);
	}

	scsi_sg_copy_from_buffer(scmd, modes, mode_len);
}
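
/*
 * Only the caching mode page (0x08) is emulated above: the WCE bit
 * (0x04 in byte 2 of the page) mirrors the logical drive's write-back
 * setting, and the SIZE bit (0x08) plus the cache segment size field
 * are filled in when the controller reports a segment size. The
 * optional block descriptor carries the drive size and block size.
 */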

static void myrb_request_sense(struct myrb_hba *cb,
		struct scsi_cmnd *scmd)
{
	scsi_build_sense(scmd, 0, NO_SENSE, 0, 0);
	scsi_sg_copy_from_buffer(scmd, scmd->sense_buffer,
				 SCSI_SENSE_BUFFERSIZE);
}

static void myrb_read_capacity(struct myrb_hba *cb, struct scsi_cmnd *scmd,
		struct myrb_ldev_info *ldev_info)
{
	unsigned char data[8];

	dev_dbg(&scmd->device->sdev_gendev,
		"Capacity %u, blocksize %u\n",
		ldev_info->size, cb->ldev_block_size);
	put_unaligned_be32(ldev_info->size - 1, &data[0]);
	put_unaligned_be32(cb->ldev_block_size, &data[4]);
	scsi_sg_copy_from_buffer(scmd, data, 8);
}

static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
		struct scsi_cmnd *scmd)
{
	struct myrb_hba *cb = shost_priv(shost);
	struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_ldev_info *ldev_info;
	struct scsi_device *sdev = scmd->device;
	struct scatterlist *sgl;
	unsigned long flags;
	u64 lba;
	u32 block_cnt;
	int nsge;

	ldev_info = sdev->hostdata;
	if (!ldev_info ||
	    (ldev_info->state != MYRB_DEVICE_ONLINE &&
	     ldev_info->state != MYRB_DEVICE_WO)) {
		dev_dbg(&shost->shost_gendev, "ldev %u in state %x, skip\n",
			sdev->id, ldev_info ? ldev_info->state : 0xff);
		scmd->result = (DID_BAD_TARGET << 16);
		scsi_done(scmd);
		return 0;
	}
	switch (scmd->cmnd[0]) {
	case TEST_UNIT_READY:
		scmd->result = (DID_OK << 16);
		scsi_done(scmd);
		return 0;
	case INQUIRY:
		if (scmd->cmnd[1] & 1) {
			/* Illegal request, invalid field in CDB */
			scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
		} else {
			myrb_inquiry(cb, scmd);
			scmd->result = (DID_OK << 16);
		}
		scsi_done(scmd);
		return 0;
	case SYNCHRONIZE_CACHE:
		scmd->result = (DID_OK << 16);
		scsi_done(scmd);
		return 0;
	case MODE_SENSE:
		if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
		    (scmd->cmnd[2] & 0x3F) != 0x08) {
			/* Illegal request, invalid field in CDB */
			scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
		} else {
			myrb_mode_sense(cb, scmd, ldev_info);
			scmd->result = (DID_OK << 16);
		}
		scsi_done(scmd);
		return 0;
	case READ_CAPACITY:
		if ((scmd->cmnd[1] & 1) ||
		    (scmd->cmnd[8] & 1)) {
			/* Illegal request, invalid field in CDB */
			scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
			scsi_done(scmd);
			return 0;
		}
		lba = get_unaligned_be32(&scmd->cmnd[2]);
		if (lba) {
			/* Illegal request, invalid field in CDB */
			scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
			scsi_done(scmd);
			return 0;
		}
		myrb_read_capacity(cb, scmd, ldev_info);
		scsi_done(scmd);
		return 0;
	case REQUEST_SENSE:
		myrb_request_sense(cb, scmd);
		scmd->result = (DID_OK << 16);
		return 0;
	case SEND_DIAGNOSTIC:
		if (scmd->cmnd[1] != 0x04) {
			/* Illegal request, invalid field in CDB */
			scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
		} else {
			/* Assume good status */
			scmd->result = (DID_OK << 16);
		}
		scsi_done(scmd);
		return 0;
	case READ_6:
		if (ldev_info->state == MYRB_DEVICE_WO) {
			/* Data protect, attempt to read invalid data */
			scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
			scsi_done(scmd);
			return 0;
		}
		fallthrough;
	case WRITE_6:
		lba = (((scmd->cmnd[1] & 0x1F) << 16) |
		       (scmd->cmnd[2] << 8) |
		       scmd->cmnd[3]);
		block_cnt = scmd->cmnd[4];
		break;
	case READ_10:
		if (ldev_info->state == MYRB_DEVICE_WO) {
			/* Data protect, attempt to read invalid data */
			scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
			scsi_done(scmd);
			return 0;
		}
		fallthrough;
	case WRITE_10:
	case VERIFY:		/* 0x2F */
	case WRITE_VERIFY:	/* 0x2E */
		lba = get_unaligned_be32(&scmd->cmnd[2]);
		block_cnt = get_unaligned_be16(&scmd->cmnd[7]);
		break;
	case READ_12:
		if (ldev_info->state == MYRB_DEVICE_WO) {
			/* Data protect, attempt to read invalid data */
			scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
			scsi_done(scmd);
			return 0;
		}
		fallthrough;
	case WRITE_12:
	case VERIFY_12:		/* 0xAF */
	case WRITE_VERIFY_12:	/* 0xAE */
		lba = get_unaligned_be32(&scmd->cmnd[2]);
		block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
		break;
	default:
		/* Illegal request, invalid opcode */
		scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x20, 0);
		scsi_done(scmd);
		return 0;
	}

	myrb_reset_cmd(cmd_blk);
	mbox->type5.id = scsi_cmd_to_rq(scmd)->tag + 3;
	if (scmd->sc_data_direction == DMA_NONE)
		goto submit;
	nsge = scsi_dma_map(scmd);
	if (nsge == 1) {
		sgl = scsi_sglist(scmd);
		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
			mbox->type5.opcode = MYRB_CMD_READ;
		else
			mbox->type5.opcode = MYRB_CMD_WRITE;

		mbox->type5.ld.xfer_len = block_cnt;
		mbox->type5.ld.ldev_num = sdev->id;
		mbox->type5.lba = lba;
		mbox->type5.addr = (u32)sg_dma_address(sgl);
	} else {
		struct myrb_sge *hw_sgl;
		dma_addr_t hw_sgl_addr;
		int i;

		hw_sgl = dma_pool_alloc(cb->sg_pool, GFP_ATOMIC, &hw_sgl_addr);
		if (!hw_sgl)
			return SCSI_MLQUEUE_HOST_BUSY;

		cmd_blk->sgl = hw_sgl;
		cmd_blk->sgl_addr = hw_sgl_addr;

		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
			mbox->type5.opcode = MYRB_CMD_READ_SG;
		else
			mbox->type5.opcode = MYRB_CMD_WRITE_SG;

		mbox->type5.ld.xfer_len = block_cnt;
		mbox->type5.ld.ldev_num = sdev->id;
		mbox->type5.lba = lba;
		mbox->type5.addr = hw_sgl_addr;
		mbox->type5.sg_count = nsge;

		scsi_for_each_sg(scmd, sgl, nsge, i) {
			hw_sgl->sge_addr = (u32)sg_dma_address(sgl);
			hw_sgl->sge_count = (u32)sg_dma_len(sgl);
			hw_sgl++;
		}
	}
submit:
	spin_lock_irqsave(&cb->queue_lock, flags);
	cb->qcmd(cb, cmd_blk);
	spin_unlock_irqrestore(&cb->queue_lock, flags);

	return 0;
}
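
/*
 * Read/write dispatch above: a request that maps to a single DMA
 * segment uses the plain READ/WRITE opcodes with the buffer address
 * in the mailbox itself; larger requests switch to the scatter/gather
 * opcode variants with a hardware SG list allocated from cb->sg_pool
 * and stashed in cmd_blk->sgl so it can be released once the command
 * completes.
 */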

static int myrb_queuecommand(struct Scsi_Host *shost,
		struct scsi_cmnd *scmd)
{
	struct scsi_device *sdev = scmd->device;

	if (sdev->channel > myrb_logical_channel(shost)) {
		scmd->result = (DID_BAD_TARGET << 16);
		scsi_done(scmd);
		return 0;
	}
	if (sdev->channel == myrb_logical_channel(shost))
		return myrb_ldev_queuecommand(shost, scmd);

	return myrb_pthru_queuecommand(shost, scmd);
}

static int myrb_ldev_slave_alloc(struct scsi_device *sdev)
{
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_ldev_info *ldev_info;
	unsigned short ldev_num = sdev->id;
	enum raid_level level;

	ldev_info = cb->ldev_info_buf + ldev_num;
	if (!ldev_info)
		return -ENXIO;

	sdev->hostdata = kzalloc(sizeof(*ldev_info), GFP_KERNEL);
	if (!sdev->hostdata)
		return -ENOMEM;
	dev_dbg(&sdev->sdev_gendev,
		"slave alloc ldev %d state %x\n",
		ldev_num, ldev_info->state);
	memcpy(sdev->hostdata, ldev_info,
	       sizeof(*ldev_info));
	switch (ldev_info->raid_level) {
	case MYRB_RAID_LEVEL0:
		level = RAID_LEVEL_LINEAR;
		break;
	case MYRB_RAID_LEVEL1:
		level = RAID_LEVEL_1;
		break;
	case MYRB_RAID_LEVEL3:
		level = RAID_LEVEL_3;
		break;
	case MYRB_RAID_LEVEL5:
		level = RAID_LEVEL_5;
		break;
	case MYRB_RAID_LEVEL6:
		level = RAID_LEVEL_6;
		break;
	case MYRB_RAID_JBOD:
		level = RAID_LEVEL_JBOD;
		break;
	default:
		level = RAID_LEVEL_UNKNOWN;
		break;
	}
	raid_set_level(myrb_raid_template, &sdev->sdev_gendev, level);
	return 0;
}

static int myrb_pdev_slave_alloc(struct scsi_device *sdev)
{
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_pdev_state *pdev_info;
	unsigned short status;

	if (sdev->id > MYRB_MAX_TARGETS)
		return -ENXIO;

	pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL);
	if (!pdev_info)
		return -ENOMEM;

	status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
				  sdev, pdev_info);
	if (status != MYRB_STATUS_SUCCESS) {
		dev_dbg(&sdev->sdev_gendev,
			"Failed to get device state, status %x\n",
			status);
		kfree(pdev_info);
		return -ENXIO;
	}
	if (!pdev_info->present) {
		dev_dbg(&sdev->sdev_gendev,
			"device not present, skip\n");
		kfree(pdev_info);
		return -ENXIO;
	}
	dev_dbg(&sdev->sdev_gendev,
		"slave alloc pdev %d:%d state %x\n",
		sdev->channel, sdev->id, pdev_info->state);
	sdev->hostdata = pdev_info;

	return 0;
}

static int myrb_slave_alloc(struct scsi_device *sdev)
{
	if (sdev->channel > myrb_logical_channel(sdev->host))
		return -ENXIO;

	if (sdev->lun > 0)
		return -ENXIO;

	if (sdev->channel == myrb_logical_channel(sdev->host))
		return myrb_ldev_slave_alloc(sdev);

	return myrb_pdev_slave_alloc(sdev);
}

static int myrb_slave_configure(struct scsi_device *sdev)
{
	struct myrb_ldev_info *ldev_info;

	if (sdev->channel > myrb_logical_channel(sdev->host))
		return -ENXIO;

	if (sdev->channel < myrb_logical_channel(sdev->host)) {
		sdev->no_uld_attach = 1;
		return 0;
	}
	if (sdev->lun != 0)
		return -ENXIO;

	ldev_info = sdev->hostdata;
	if (!ldev_info)
		return -ENXIO;
	if (ldev_info->state != MYRB_DEVICE_ONLINE)
		sdev_printk(KERN_INFO, sdev,
			    "Logical drive is %s\n",
			    myrb_devstate_name(ldev_info->state));

	sdev->tagged_supported = 1;
	return 0;
}

static void myrb_slave_destroy(struct scsi_device *sdev)
{
	kfree(sdev->hostdata);
}

static int myrb_biosparam(struct scsi_device *sdev, struct block_device *bdev,
		sector_t capacity, int geom[])
{
	struct myrb_hba *cb = shost_priv(sdev->host);

	geom[0] = cb->ldev_geom_heads;
	geom[1] = cb->ldev_geom_sectors;
	geom[2] = sector_div(capacity, geom[0] * geom[1]);

	return 0;
}
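
/*
 * The BIOS geometry reported above is purely synthetic: 255 heads and
 * 63 sectors per track by default, or 128/32 when config2 requests
 * the alternate drive geometry translation (see myrb_get_hba_config()),
 * with the cylinder count derived from the remaining capacity.
 */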
1761
1762static ssize_t raid_state_show(struct device *dev,
1763 struct device_attribute *attr, char *buf)
1764{
1765 struct scsi_device *sdev = to_scsi_device(dev);
1766 struct myrb_hba *cb = shost_priv(sdev->host);
1767 int ret;
1768
1769 if (!sdev->hostdata)
1770 return snprintf(buf, 16, "Unknown\n");
1771
1772 if (sdev->channel == myrb_logical_channel(sdev->host)) {
1773 struct myrb_ldev_info *ldev_info = sdev->hostdata;
1774 const char *name;
1775
1776 name = myrb_devstate_name(ldev_info->state);
1777 if (name)
1778 ret = snprintf(buf, 32, "%s\n", name);
1779 else
1780 ret = snprintf(buf, 32, "Invalid (%02X)\n",
1781 ldev_info->state);
1782 } else {
1783 struct myrb_pdev_state *pdev_info = sdev->hostdata;
1784 unsigned short status;
1785 const char *name;
1786
1787 status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
1788 sdev, pdev_info);
1789 if (status != MYRB_STATUS_SUCCESS)
1790 sdev_printk(KERN_INFO, sdev,
1791 "Failed to get device state, status %x\n",
1792 status);
1793
1794 if (!pdev_info->present)
1795 name = "Removed";
1796 else
1797 name = myrb_devstate_name(pdev_info->state);
1798 if (name)
1799 ret = snprintf(buf, 32, "%s\n", name);
1800 else
1801 ret = snprintf(buf, 32, "Invalid (%02X)\n",
1802 pdev_info->state);
1803 }
1804 return ret;
1805}
1806
1807static ssize_t raid_state_store(struct device *dev,
1808 struct device_attribute *attr, const char *buf, size_t count)
1809{
1810 struct scsi_device *sdev = to_scsi_device(dev);
1811 struct myrb_hba *cb = shost_priv(sdev->host);
1812 struct myrb_pdev_state *pdev_info;
1813 enum myrb_devstate new_state;
1814 unsigned short status;
1815
1816 if (!strncmp(buf, "kill", 4) ||
1817 !strncmp(buf, "offline", 7))
1818 new_state = MYRB_DEVICE_DEAD;
1819 else if (!strncmp(buf, "online", 6))
1820 new_state = MYRB_DEVICE_ONLINE;
1821 else if (!strncmp(buf, "standby", 7))
1822 new_state = MYRB_DEVICE_STANDBY;
1823 else
1824 return -EINVAL;
1825
1826 pdev_info = sdev->hostdata;
1827 if (!pdev_info) {
1828 sdev_printk(KERN_INFO, sdev,
1829 "Failed - no physical device information\n");
1830 return -ENXIO;
1831 }
1832 if (!pdev_info->present) {
1833 sdev_printk(KERN_INFO, sdev,
1834 "Failed - device not present\n");
1835 return -ENXIO;
1836 }
1837
1838 if (pdev_info->state == new_state)
1839 return count;
1840
1841 status = myrb_set_pdev_state(cb, sdev, new_state);
1842 switch (status) {
1843 case MYRB_STATUS_SUCCESS:
1844 break;
1845 case MYRB_STATUS_START_DEVICE_FAILED:
1846 sdev_printk(KERN_INFO, sdev,
1847 "Failed - Unable to Start Device\n");
1848 count = -EAGAIN;
1849 break;
1850 case MYRB_STATUS_NO_DEVICE:
1851 sdev_printk(KERN_INFO, sdev,
1852 "Failed - No Device at Address\n");
1853 count = -ENODEV;
1854 break;
1855 case MYRB_STATUS_INVALID_CHANNEL_OR_TARGET:
1856 sdev_printk(KERN_INFO, sdev,
1857 "Failed - Invalid Channel or Target or Modifier\n");
1858 count = -EINVAL;
1859 break;
1860 case MYRB_STATUS_CHANNEL_BUSY:
1861 sdev_printk(KERN_INFO, sdev,
1862 "Failed - Channel Busy\n");
1863 count = -EBUSY;
1864 break;
1865 default:
1866 sdev_printk(KERN_INFO, sdev,
1867 "Failed - Unexpected Status %04X\n", status);
1868 count = -EIO;
1869 break;
1870 }
1871 return count;
1872}
1873static DEVICE_ATTR_RW(raid_state);
1874
1875static ssize_t raid_level_show(struct device *dev,
1876 struct device_attribute *attr, char *buf)
1877{
1878 struct scsi_device *sdev = to_scsi_device(dev);
1879
1880 if (sdev->channel == myrb_logical_channel(sdev->host)) {
1881 struct myrb_ldev_info *ldev_info = sdev->hostdata;
1882 const char *name;
1883
1884 if (!ldev_info)
1885 return -ENXIO;
1886
1887 name = myrb_raidlevel_name(ldev_info->raid_level);
1888 if (!name)
1889 return snprintf(buf, 32, "Invalid (%02X)\n",
					ldev_info->raid_level);
1891 return snprintf(buf, 32, "%s\n", name);
1892 }
1893 return snprintf(buf, 32, "Physical Drive\n");
1894}
1895static DEVICE_ATTR_RO(raid_level);
1896
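/*
 * rebuild_show - sysfs 'rebuild' attribute, reports rebuild progress
 * for the selected logical drive.
 */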
1897static ssize_t rebuild_show(struct device *dev,
1898 struct device_attribute *attr, char *buf)
1899{
1900 struct scsi_device *sdev = to_scsi_device(dev);
1901 struct myrb_hba *cb = shost_priv(sdev->host);
1902 struct myrb_rbld_progress rbld_buf;
	unsigned short status;
1904
1905 if (sdev->channel < myrb_logical_channel(sdev->host))
		return snprintf(buf, 64, "physical device - not rebuilding\n");
1907
1908 status = myrb_get_rbld_progress(cb, &rbld_buf);
1909
	/* check status first: rbld_buf is only valid on success */
	if (status != MYRB_STATUS_SUCCESS ||
	    rbld_buf.ldev_num != sdev->id)
1912 return snprintf(buf, 32, "not rebuilding\n");
1913
	return snprintf(buf, 64, "rebuilding block %u of %u\n",
			rbld_buf.ldev_size - rbld_buf.blocks_left,
			rbld_buf.ldev_size);
1917}
1918
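/*
 * rebuild_store - sysfs 'rebuild' attribute
 *
 * Writing a non-zero value starts a rebuild of the selected physical
 * drive; writing zero cancels it. Cancellation works by setting the
 * rebuild rate to 0xFF via MYRB_CMD_REBUILD_CONTROL.
 */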
1919static ssize_t rebuild_store(struct device *dev,
1920 struct device_attribute *attr, const char *buf, size_t count)
1921{
1922 struct scsi_device *sdev = to_scsi_device(dev);
1923 struct myrb_hba *cb = shost_priv(sdev->host);
1924 struct myrb_cmdblk *cmd_blk;
1925 union myrb_cmd_mbox *mbox;
1926 unsigned short status;
1927 int rc, start;
1928 const char *msg;
1929
1930 rc = kstrtoint(buf, 0, &start);
1931 if (rc)
1932 return rc;
1933
1934 if (sdev->channel >= myrb_logical_channel(sdev->host))
1935 return -ENXIO;
1936
1937 status = myrb_get_rbld_progress(cb, NULL);
1938 if (start) {
1939 if (status == MYRB_STATUS_SUCCESS) {
1940 sdev_printk(KERN_INFO, sdev,
1941 "Rebuild Not Initiated; already in progress\n");
1942 return -EALREADY;
1943 }
1944 mutex_lock(&cb->dcmd_mutex);
1945 cmd_blk = &cb->dcmd_blk;
1946 myrb_reset_cmd(cmd_blk);
1947 mbox = &cmd_blk->mbox;
1948 mbox->type3D.opcode = MYRB_CMD_REBUILD_ASYNC;
1949 mbox->type3D.id = MYRB_DCMD_TAG;
1950 mbox->type3D.channel = sdev->channel;
1951 mbox->type3D.target = sdev->id;
1952 status = myrb_exec_cmd(cb, cmd_blk);
1953 mutex_unlock(&cb->dcmd_mutex);
1954 } else {
1955 struct pci_dev *pdev = cb->pdev;
1956 unsigned char *rate;
1957 dma_addr_t rate_addr;
1958
1959 if (status != MYRB_STATUS_SUCCESS) {
1960 sdev_printk(KERN_INFO, sdev,
1961 "Rebuild Not Cancelled; not in progress\n");
			/* consume the write; returning 0 would make userspace retry */
			return count;
1963 }
1964
1965 rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
1966 &rate_addr, GFP_KERNEL);
1967 if (rate == NULL) {
1968 sdev_printk(KERN_INFO, sdev,
1969 "Cancellation of Rebuild Failed - Out of Memory\n");
1970 return -ENOMEM;
1971 }
1972 mutex_lock(&cb->dcmd_mutex);
1973 cmd_blk = &cb->dcmd_blk;
1974 myrb_reset_cmd(cmd_blk);
1975 mbox = &cmd_blk->mbox;
1976 mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
1977 mbox->type3R.id = MYRB_DCMD_TAG;
1978 mbox->type3R.rbld_rate = 0xFF;
1979 mbox->type3R.addr = rate_addr;
1980 status = myrb_exec_cmd(cb, cmd_blk);
1981 dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
1982 mutex_unlock(&cb->dcmd_mutex);
1983 }
1984 if (status == MYRB_STATUS_SUCCESS) {
1985 sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
1986 start ? "Initiated" : "Cancelled");
1987 return count;
1988 }
1989 if (!start) {
1990 sdev_printk(KERN_INFO, sdev,
1991 "Rebuild Not Cancelled, status 0x%x\n",
1992 status);
1993 return -EIO;
1994 }
1995
1996 switch (status) {
1997 case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
1998 msg = "Attempt to Rebuild Online or Unresponsive Drive";
1999 break;
2000 case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
2001 msg = "New Disk Failed During Rebuild";
2002 break;
2003 case MYRB_STATUS_INVALID_ADDRESS:
2004 msg = "Invalid Device Address";
2005 break;
2006 case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
2007 msg = "Already in Progress";
2008 break;
2009 default:
2010 msg = NULL;
2011 break;
2012 }
2013 if (msg)
2014 sdev_printk(KERN_INFO, sdev,
2015 "Rebuild Failed - %s\n", msg);
2016 else
2017 sdev_printk(KERN_INFO, sdev,
2018 "Rebuild Failed, status 0x%x\n", status);
2019
2020 return -EIO;
2021}
2022static DEVICE_ATTR_RW(rebuild);
2023
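/*
 * consistency_check_store - sysfs 'consistency_check' attribute
 *
 * Writing a non-zero value starts a consistency check of the selected
 * logical drive; writing zero cancels a check in progress.
 */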
2024static ssize_t consistency_check_store(struct device *dev,
2025 struct device_attribute *attr, const char *buf, size_t count)
2026{
2027 struct scsi_device *sdev = to_scsi_device(dev);
2028 struct myrb_hba *cb = shost_priv(sdev->host);
2029 struct myrb_rbld_progress rbld_buf;
2030 struct myrb_cmdblk *cmd_blk;
2031 union myrb_cmd_mbox *mbox;
2032 unsigned short ldev_num = 0xFFFF;
2033 unsigned short status;
2034 int rc, start;
2035 const char *msg;
2036
2037 rc = kstrtoint(buf, 0, &start);
2038 if (rc)
2039 return rc;
2040
2041 if (sdev->channel < myrb_logical_channel(sdev->host))
2042 return -ENXIO;
2043
	status = myrb_get_rbld_progress(cb, &rbld_buf);
	/* remember which logical drive is being checked, for the cancel path */
	if (status == MYRB_STATUS_SUCCESS)
		ldev_num = rbld_buf.ldev_num;
2045 if (start) {
2046 if (status == MYRB_STATUS_SUCCESS) {
2047 sdev_printk(KERN_INFO, sdev,
2048 "Check Consistency Not Initiated; already in progress\n");
2049 return -EALREADY;
2050 }
2051 mutex_lock(&cb->dcmd_mutex);
2052 cmd_blk = &cb->dcmd_blk;
2053 myrb_reset_cmd(cmd_blk);
2054 mbox = &cmd_blk->mbox;
2055 mbox->type3C.opcode = MYRB_CMD_CHECK_CONSISTENCY_ASYNC;
2056 mbox->type3C.id = MYRB_DCMD_TAG;
2057 mbox->type3C.ldev_num = sdev->id;
2058 mbox->type3C.auto_restore = true;
2059
2060 status = myrb_exec_cmd(cb, cmd_blk);
2061 mutex_unlock(&cb->dcmd_mutex);
2062 } else {
2063 struct pci_dev *pdev = cb->pdev;
2064 unsigned char *rate;
2065 dma_addr_t rate_addr;
2066
2067 if (ldev_num != sdev->id) {
2068 sdev_printk(KERN_INFO, sdev,
2069 "Check Consistency Not Cancelled; not in progress\n");
			/* consume the write; returning 0 would make userspace retry */
			return count;
2071 }
2072 rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
2073 &rate_addr, GFP_KERNEL);
2074 if (rate == NULL) {
2075 sdev_printk(KERN_INFO, sdev,
2076 "Cancellation of Check Consistency Failed - Out of Memory\n");
2077 return -ENOMEM;
2078 }
2079 mutex_lock(&cb->dcmd_mutex);
2080 cmd_blk = &cb->dcmd_blk;
2081 myrb_reset_cmd(cmd_blk);
2082 mbox = &cmd_blk->mbox;
2083 mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
2084 mbox->type3R.id = MYRB_DCMD_TAG;
2085 mbox->type3R.rbld_rate = 0xFF;
2086 mbox->type3R.addr = rate_addr;
2087 status = myrb_exec_cmd(cb, cmd_blk);
2088 dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
2089 mutex_unlock(&cb->dcmd_mutex);
2090 }
2091 if (status == MYRB_STATUS_SUCCESS) {
2092 sdev_printk(KERN_INFO, sdev, "Check Consistency %s\n",
2093 start ? "Initiated" : "Cancelled");
2094 return count;
2095 }
2096 if (!start) {
2097 sdev_printk(KERN_INFO, sdev,
2098 "Check Consistency Not Cancelled, status 0x%x\n",
2099 status);
2100 return -EIO;
2101 }
2102
2103 switch (status) {
2104 case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
2105 msg = "Dependent Physical Device is DEAD";
2106 break;
2107 case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
2108 msg = "New Disk Failed During Rebuild";
2109 break;
2110 case MYRB_STATUS_INVALID_ADDRESS:
2111 msg = "Invalid or Nonredundant Logical Drive";
2112 break;
2113 case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
2114 msg = "Already in Progress";
2115 break;
2116 default:
2117 msg = NULL;
2118 break;
2119 }
2120 if (msg)
2121 sdev_printk(KERN_INFO, sdev,
2122 "Check Consistency Failed - %s\n", msg);
2123 else
2124 sdev_printk(KERN_INFO, sdev,
2125 "Check Consistency Failed, status 0x%x\n", status);
2126
2127 return -EIO;
2128}
2129
2130static ssize_t consistency_check_show(struct device *dev,
2131 struct device_attribute *attr, char *buf)
2132{
2133 return rebuild_show(dev, attr, buf);
2134}
2135static DEVICE_ATTR_RW(consistency_check);
2136
2137static ssize_t ctlr_num_show(struct device *dev,
2138 struct device_attribute *attr, char *buf)
2139{
2140 struct Scsi_Host *shost = class_to_shost(dev);
2141 struct myrb_hba *cb = shost_priv(shost);
2142
2143 return snprintf(buf, 20, "%u\n", cb->ctlr_num);
2144}
2145static DEVICE_ATTR_RO(ctlr_num);
2146
2147static ssize_t firmware_show(struct device *dev,
2148 struct device_attribute *attr, char *buf)
2149{
2150 struct Scsi_Host *shost = class_to_shost(dev);
2151 struct myrb_hba *cb = shost_priv(shost);
2152
2153 return snprintf(buf, 16, "%s\n", cb->fw_version);
2154}
2155static DEVICE_ATTR_RO(firmware);
2156
2157static ssize_t model_show(struct device *dev,
2158 struct device_attribute *attr, char *buf)
2159{
2160 struct Scsi_Host *shost = class_to_shost(dev);
2161 struct myrb_hba *cb = shost_priv(shost);
2162
2163 return snprintf(buf, 16, "%s\n", cb->model_name);
2164}
2165static DEVICE_ATTR_RO(model);
2166
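/*
 * flush_cache_store - sysfs 'flush_cache' attribute
 *
 * Any write triggers a controller cache flush.
 */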
2167static ssize_t flush_cache_store(struct device *dev,
2168 struct device_attribute *attr, const char *buf, size_t count)
2169{
2170 struct Scsi_Host *shost = class_to_shost(dev);
2171 struct myrb_hba *cb = shost_priv(shost);
2172 unsigned short status;
2173
2174 status = myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
2175 if (status == MYRB_STATUS_SUCCESS) {
2176 shost_printk(KERN_INFO, shost,
2177 "Cache Flush Completed\n");
2178 return count;
2179 }
2180 shost_printk(KERN_INFO, shost,
2181 "Cache Flush Failed, status %x\n", status);
2182 return -EIO;
2183}
2184static DEVICE_ATTR_WO(flush_cache);
2185
2186static struct attribute *myrb_sdev_attrs[] = {
2187 &dev_attr_rebuild.attr,
2188 &dev_attr_consistency_check.attr,
2189 &dev_attr_raid_state.attr,
2190 &dev_attr_raid_level.attr,
2191 NULL,
2192};
2193
2194ATTRIBUTE_GROUPS(myrb_sdev);
2195
2196static struct attribute *myrb_shost_attrs[] = {
2197 &dev_attr_ctlr_num.attr,
2198 &dev_attr_model.attr,
2199 &dev_attr_firmware.attr,
2200 &dev_attr_flush_cache.attr,
2201 NULL,
2202};
2203
2204ATTRIBUTE_GROUPS(myrb_shost);
2205
2206static const struct scsi_host_template myrb_template = {
2207 .module = THIS_MODULE,
2208 .name = "DAC960",
2209 .proc_name = "myrb",
2210 .queuecommand = myrb_queuecommand,
2211 .eh_host_reset_handler = myrb_host_reset,
2212 .slave_alloc = myrb_slave_alloc,
2213 .slave_configure = myrb_slave_configure,
2214 .slave_destroy = myrb_slave_destroy,
2215 .bios_param = myrb_biosparam,
2216 .cmd_size = sizeof(struct myrb_cmdblk),
2217 .shost_groups = myrb_shost_groups,
2218 .sdev_groups = myrb_sdev_groups,
2219 .this_id = -1,
2220};
2221
2222/**
2223 * myrb_is_raid - return boolean indicating device is raid volume
2224 * @dev: the device struct object
2225 */
2226static int myrb_is_raid(struct device *dev)
2227{
2228 struct scsi_device *sdev = to_scsi_device(dev);
2229
2230 return sdev->channel == myrb_logical_channel(sdev->host);
2231}
2232
2233/**
2234 * myrb_get_resync - get raid volume resync percent complete
2235 * @dev: the device struct object
2236 */
2237static void myrb_get_resync(struct device *dev)
2238{
2239 struct scsi_device *sdev = to_scsi_device(dev);
2240 struct myrb_hba *cb = shost_priv(sdev->host);
2241 struct myrb_rbld_progress rbld_buf;
2242 unsigned int percent_complete = 0;
2243 unsigned short status;
2244 unsigned int ldev_size = 0, remaining = 0;
2245
2246 if (sdev->channel < myrb_logical_channel(sdev->host))
2247 return;
2248 status = myrb_get_rbld_progress(cb, &rbld_buf);
2249 if (status == MYRB_STATUS_SUCCESS) {
2250 if (rbld_buf.ldev_num == sdev->id) {
2251 ldev_size = rbld_buf.ldev_size;
2252 remaining = rbld_buf.blocks_left;
2253 }
2254 }
2255 if (remaining && ldev_size)
2256 percent_complete = (ldev_size - remaining) * 100 / ldev_size;
2257 raid_set_resync(myrb_raid_template, dev, percent_complete);
2258}
2259
2260/**
2261 * myrb_get_state - get raid volume status
2262 * @dev: the device struct object
2263 */
2264static void myrb_get_state(struct device *dev)
2265{
2266 struct scsi_device *sdev = to_scsi_device(dev);
2267 struct myrb_hba *cb = shost_priv(sdev->host);
2268 struct myrb_ldev_info *ldev_info = sdev->hostdata;
2269 enum raid_state state = RAID_STATE_UNKNOWN;
2270 unsigned short status;
2271
2272 if (sdev->channel < myrb_logical_channel(sdev->host) || !ldev_info)
2273 state = RAID_STATE_UNKNOWN;
2274 else {
2275 status = myrb_get_rbld_progress(cb, NULL);
2276 if (status == MYRB_STATUS_SUCCESS)
2277 state = RAID_STATE_RESYNCING;
2278 else {
2279 switch (ldev_info->state) {
2280 case MYRB_DEVICE_ONLINE:
2281 state = RAID_STATE_ACTIVE;
2282 break;
2283 case MYRB_DEVICE_WO:
2284 case MYRB_DEVICE_CRITICAL:
2285 state = RAID_STATE_DEGRADED;
2286 break;
2287 default:
2288 state = RAID_STATE_OFFLINE;
2289 }
2290 }
2291 }
2292 raid_set_state(myrb_raid_template, dev, state);
2293}
2294
2295static struct raid_function_template myrb_raid_functions = {
2296 .cookie = &myrb_template,
2297 .is_raid = myrb_is_raid,
2298 .get_resync = myrb_get_resync,
2299 .get_state = myrb_get_state,
2300};
2301
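/*
 * myrb_handle_scsi - completes a SCSI command
 *
 * Unmaps the data buffers, translates the controller status into a
 * SCSI result (building sense data where appropriate) and finishes
 * the command.
 */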
2302static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk,
2303 struct scsi_cmnd *scmd)
2304{
2305 unsigned short status;
2306
2307 if (!cmd_blk)
2308 return;
2309
2310 scsi_dma_unmap(scmd);
2311
2312 if (cmd_blk->dcdb) {
2313 memcpy(scmd->sense_buffer, &cmd_blk->dcdb->sense, 64);
2314 dma_pool_free(cb->dcdb_pool, cmd_blk->dcdb,
2315 cmd_blk->dcdb_addr);
2316 cmd_blk->dcdb = NULL;
2317 }
2318 if (cmd_blk->sgl) {
2319 dma_pool_free(cb->sg_pool, cmd_blk->sgl, cmd_blk->sgl_addr);
2320 cmd_blk->sgl = NULL;
2321 cmd_blk->sgl_addr = 0;
2322 }
2323 status = cmd_blk->status;
2324 switch (status) {
2325 case MYRB_STATUS_SUCCESS:
2326 case MYRB_STATUS_DEVICE_BUSY:
2327 scmd->result = (DID_OK << 16) | status;
2328 break;
2329 case MYRB_STATUS_BAD_DATA:
2330 dev_dbg(&scmd->device->sdev_gendev,
2331 "Bad Data Encountered\n");
2332 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
2333 /* Unrecovered read error */
2334 scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x11, 0);
2335 else
2336 /* Write error */
2337 scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x0C, 0);
2338 break;
2339 case MYRB_STATUS_IRRECOVERABLE_DATA_ERROR:
2340 scmd_printk(KERN_ERR, scmd, "Irrecoverable Data Error\n");
2341 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
2342 /* Unrecovered read error, auto-reallocation failed */
2343 scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x11, 0x04);
2344 else
2345 /* Write error, auto-reallocation failed */
2346 scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x0C, 0x02);
2347 break;
2348 case MYRB_STATUS_LDRV_NONEXISTENT_OR_OFFLINE:
2349 dev_dbg(&scmd->device->sdev_gendev,
			"Logical Drive Nonexistent or Offline\n");
2351 scmd->result = (DID_BAD_TARGET << 16);
2352 break;
2353 case MYRB_STATUS_ACCESS_BEYOND_END_OF_LDRV:
2354 dev_dbg(&scmd->device->sdev_gendev,
			"Attempt to Access Beyond End of Logical Drive\n");
2356 /* Logical block address out of range */
		scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x21, 0);
2358 break;
2359 case MYRB_STATUS_DEVICE_NONRESPONSIVE:
2360 dev_dbg(&scmd->device->sdev_gendev, "Device nonresponsive\n");
2361 scmd->result = (DID_BAD_TARGET << 16);
2362 break;
2363 default:
2364 scmd_printk(KERN_ERR, scmd,
			"Unexpected Error Status %04X\n", status);
2366 scmd->result = (DID_ERROR << 16);
2367 break;
2368 }
2369 scsi_done(scmd);
2370}
2371
2372static void myrb_handle_cmdblk(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
2373{
2374 if (!cmd_blk)
2375 return;
2376
2377 if (cmd_blk->completion) {
2378 complete(cmd_blk->completion);
2379 cmd_blk->completion = NULL;
2380 }
2381}
2382
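/*
 * myrb_monitor - periodic monitoring work
 *
 * Handles one pending condition (event log, error table, rebuild,
 * logical drive info, consistency check or background init status)
 * per invocation and reschedules itself, dropping the interval while
 * work remains outstanding.
 */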
2383static void myrb_monitor(struct work_struct *work)
2384{
2385 struct myrb_hba *cb = container_of(work,
2386 struct myrb_hba, monitor_work.work);
2387 struct Scsi_Host *shost = cb->host;
2388 unsigned long interval = MYRB_PRIMARY_MONITOR_INTERVAL;
2389
2390 dev_dbg(&shost->shost_gendev, "monitor tick\n");
2391
2392 if (cb->new_ev_seq > cb->old_ev_seq) {
2393 int event = cb->old_ev_seq;
2394
2395 dev_dbg(&shost->shost_gendev,
2396 "get event log no %d/%d\n",
2397 cb->new_ev_seq, event);
2398 myrb_get_event(cb, event);
2399 cb->old_ev_seq = event + 1;
2400 interval = 10;
2401 } else if (cb->need_err_info) {
2402 cb->need_err_info = false;
2403 dev_dbg(&shost->shost_gendev, "get error table\n");
2404 myrb_get_errtable(cb);
2405 interval = 10;
2406 } else if (cb->need_rbld && cb->rbld_first) {
2407 cb->need_rbld = false;
2408 dev_dbg(&shost->shost_gendev,
2409 "get rebuild progress\n");
2410 myrb_update_rbld_progress(cb);
2411 interval = 10;
2412 } else if (cb->need_ldev_info) {
2413 cb->need_ldev_info = false;
2414 dev_dbg(&shost->shost_gendev,
2415 "get logical drive info\n");
2416 myrb_get_ldev_info(cb);
2417 interval = 10;
2418 } else if (cb->need_rbld) {
2419 cb->need_rbld = false;
2420 dev_dbg(&shost->shost_gendev,
2421 "get rebuild progress\n");
2422 myrb_update_rbld_progress(cb);
2423 interval = 10;
2424 } else if (cb->need_cc_status) {
2425 cb->need_cc_status = false;
2426 dev_dbg(&shost->shost_gendev,
2427 "get consistency check progress\n");
2428 myrb_get_cc_progress(cb);
2429 interval = 10;
2430 } else if (cb->need_bgi_status) {
2431 cb->need_bgi_status = false;
2432 dev_dbg(&shost->shost_gendev, "get background init status\n");
2433 myrb_bgi_control(cb);
2434 interval = 10;
2435 } else {
2436 dev_dbg(&shost->shost_gendev, "new enquiry\n");
2437 mutex_lock(&cb->dma_mutex);
2438 myrb_hba_enquiry(cb);
2439 mutex_unlock(&cb->dma_mutex);
2440 if ((cb->new_ev_seq - cb->old_ev_seq > 0) ||
2441 cb->need_err_info || cb->need_rbld ||
2442 cb->need_ldev_info || cb->need_cc_status ||
2443 cb->need_bgi_status) {
2444 dev_dbg(&shost->shost_gendev,
2445 "reschedule monitor\n");
2446 interval = 0;
2447 }
2448 }
2449 if (interval > 1)
2450 cb->primary_monitor_time = jiffies;
2451 queue_delayed_work(cb->work_q, &cb->monitor_work, interval);
2452}
2453
2454/*
2455 * myrb_err_status - reports controller BIOS messages
2456 *
2457 * Controller BIOS messages are passed through the Error Status Register
2458 * when the driver performs the BIOS handshaking.
2459 *
2460 * Return: true for fatal errors and false otherwise.
2461 */
2462static bool myrb_err_status(struct myrb_hba *cb, unsigned char error,
2463 unsigned char parm0, unsigned char parm1)
2464{
2465 struct pci_dev *pdev = cb->pdev;
2466
2467 switch (error) {
2468 case 0x00:
2469 dev_info(&pdev->dev,
2470 "Physical Device %d:%d Not Responding\n",
2471 parm1, parm0);
2472 break;
2473 case 0x08:
2474 dev_notice(&pdev->dev, "Spinning Up Drives\n");
2475 break;
2476 case 0x30:
2477 dev_notice(&pdev->dev, "Configuration Checksum Error\n");
2478 break;
2479 case 0x60:
2480 dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
2481 break;
2482 case 0x70:
2483 dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
2484 break;
2485 case 0x90:
2486 dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
2487 parm1, parm0);
2488 break;
2489 case 0xA0:
2490 dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
2491 break;
2492 case 0xB0:
2493 dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
2494 break;
2495 case 0xD0:
2496 dev_notice(&pdev->dev, "New Controller Configuration Found\n");
2497 break;
2498 case 0xF0:
2499 dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
2500 return true;
2501 default:
2502 dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
2503 error);
2504 return true;
2505 }
2506 return false;
2507}
2508
2509/*
2510 * Hardware-specific functions
2511 */
2512
2513/*
2514 * DAC960 LA Series Controllers
2515 */
2516
2517static inline void DAC960_LA_hw_mbox_new_cmd(void __iomem *base)
2518{
2519 writeb(DAC960_LA_IDB_HWMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
2520}
2521
2522static inline void DAC960_LA_ack_hw_mbox_status(void __iomem *base)
2523{
2524 writeb(DAC960_LA_IDB_HWMBOX_ACK_STS, base + DAC960_LA_IDB_OFFSET);
2525}
2526
2527static inline void DAC960_LA_reset_ctrl(void __iomem *base)
2528{
2529 writeb(DAC960_LA_IDB_CTRL_RESET, base + DAC960_LA_IDB_OFFSET);
2530}
2531
2532static inline void DAC960_LA_mem_mbox_new_cmd(void __iomem *base)
2533{
2534 writeb(DAC960_LA_IDB_MMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
2535}
2536
2537static inline bool DAC960_LA_hw_mbox_is_full(void __iomem *base)
2538{
2539 unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);
2540
2541 return !(idb & DAC960_LA_IDB_HWMBOX_EMPTY);
2542}
2543
2544static inline bool DAC960_LA_init_in_progress(void __iomem *base)
2545{
2546 unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);
2547
2548 return !(idb & DAC960_LA_IDB_INIT_DONE);
2549}
2550
2551static inline void DAC960_LA_ack_hw_mbox_intr(void __iomem *base)
2552{
2553 writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
2554}
2555
2556static inline void DAC960_LA_ack_intr(void __iomem *base)
2557{
2558 writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ | DAC960_LA_ODB_MMBOX_ACK_IRQ,
2559 base + DAC960_LA_ODB_OFFSET);
2560}
2561
2562static inline bool DAC960_LA_hw_mbox_status_available(void __iomem *base)
2563{
2564 unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);
2565
2566 return odb & DAC960_LA_ODB_HWMBOX_STS_AVAIL;
2567}
2568
static inline void DAC960_LA_enable_intr(void __iomem *base)
{
	unsigned char imask = 0xFF;

	imask &= ~DAC960_LA_IRQMASK_DISABLE_IRQ;
	writeb(imask, base + DAC960_LA_IRQMASK_OFFSET);
}

static inline void DAC960_LA_disable_intr(void __iomem *base)
{
	unsigned char imask = 0xFF;

	imask |= DAC960_LA_IRQMASK_DISABLE_IRQ;
	writeb(imask, base + DAC960_LA_IRQMASK_OFFSET);
}
2584
2585static inline void DAC960_LA_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
2586 union myrb_cmd_mbox *mbox)
2587{
2588 mem_mbox->words[1] = mbox->words[1];
2589 mem_mbox->words[2] = mbox->words[2];
2590 mem_mbox->words[3] = mbox->words[3];
2591 /* Memory barrier to prevent reordering */
2592 wmb();
2593 mem_mbox->words[0] = mbox->words[0];
2594 /* Memory barrier to force PCI access */
2595 mb();
2596}
2597
2598static inline void DAC960_LA_write_hw_mbox(void __iomem *base,
2599 union myrb_cmd_mbox *mbox)
2600{
2601 writel(mbox->words[0], base + DAC960_LA_CMDOP_OFFSET);
2602 writel(mbox->words[1], base + DAC960_LA_MBOX4_OFFSET);
2603 writel(mbox->words[2], base + DAC960_LA_MBOX8_OFFSET);
2604 writeb(mbox->bytes[12], base + DAC960_LA_MBOX12_OFFSET);
2605}
2606
2607static inline unsigned short DAC960_LA_read_status(void __iomem *base)
2608{
2609 return readw(base + DAC960_LA_STS_OFFSET);
2610}
2611
2612static inline bool
2613DAC960_LA_read_error_status(void __iomem *base, unsigned char *error,
2614 unsigned char *param0, unsigned char *param1)
2615{
2616 unsigned char errsts = readb(base + DAC960_LA_ERRSTS_OFFSET);
2617
2618 if (!(errsts & DAC960_LA_ERRSTS_PENDING))
2619 return false;
2620 errsts &= ~DAC960_LA_ERRSTS_PENDING;
2621
2622 *error = errsts;
2623 *param0 = readb(base + DAC960_LA_CMDOP_OFFSET);
2624 *param1 = readb(base + DAC960_LA_CMDID_OFFSET);
2625 writeb(0xFF, base + DAC960_LA_ERRSTS_OFFSET);
2626 return true;
2627}
2628
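/*
 * DAC960_LA_mbox_init - performs the hardware mailbox handshake
 *
 * Polls for a free inbound mailbox, posts the command and waits for
 * the status to become available. Only used during initialization,
 * before interrupts are enabled, hence the busy-waiting.
 */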
2629static inline unsigned short
2630DAC960_LA_mbox_init(struct pci_dev *pdev, void __iomem *base,
2631 union myrb_cmd_mbox *mbox)
2632{
2633 unsigned short status;
2634 int timeout = 0;
2635
2636 while (timeout < MYRB_MAILBOX_TIMEOUT) {
2637 if (!DAC960_LA_hw_mbox_is_full(base))
2638 break;
2639 udelay(10);
2640 timeout++;
2641 }
2642 if (DAC960_LA_hw_mbox_is_full(base)) {
2643 dev_err(&pdev->dev,
2644 "Timeout waiting for empty mailbox\n");
2645 return MYRB_STATUS_SUBSYS_TIMEOUT;
2646 }
2647 DAC960_LA_write_hw_mbox(base, mbox);
2648 DAC960_LA_hw_mbox_new_cmd(base);
2649 timeout = 0;
2650 while (timeout < MYRB_MAILBOX_TIMEOUT) {
2651 if (DAC960_LA_hw_mbox_status_available(base))
2652 break;
2653 udelay(10);
2654 timeout++;
2655 }
2656 if (!DAC960_LA_hw_mbox_status_available(base)) {
2657 dev_err(&pdev->dev, "Timeout waiting for mailbox status\n");
2658 return MYRB_STATUS_SUBSYS_TIMEOUT;
2659 }
2660 status = DAC960_LA_read_status(base);
2661 DAC960_LA_ack_hw_mbox_intr(base);
2662 DAC960_LA_ack_hw_mbox_status(base);
2663
2664 return status;
2665}
2666
2667static int DAC960_LA_hw_init(struct pci_dev *pdev,
2668 struct myrb_hba *cb, void __iomem *base)
2669{
2670 int timeout = 0;
2671 unsigned char error, parm0, parm1;
2672
2673 DAC960_LA_disable_intr(base);
2674 DAC960_LA_ack_hw_mbox_status(base);
2675 udelay(1000);
2676 while (DAC960_LA_init_in_progress(base) &&
2677 timeout < MYRB_MAILBOX_TIMEOUT) {
2678 if (DAC960_LA_read_error_status(base, &error,
2679 &parm0, &parm1) &&
2680 myrb_err_status(cb, error, parm0, parm1))
2681 return -ENODEV;
2682 udelay(10);
2683 timeout++;
2684 }
2685 if (timeout == MYRB_MAILBOX_TIMEOUT) {
2686 dev_err(&pdev->dev,
2687 "Timeout waiting for Controller Initialisation\n");
2688 return -ETIMEDOUT;
2689 }
2690 if (!myrb_enable_mmio(cb, DAC960_LA_mbox_init)) {
2691 dev_err(&pdev->dev,
2692 "Unable to Enable Memory Mailbox Interface\n");
2693 DAC960_LA_reset_ctrl(base);
2694 return -ENODEV;
2695 }
2696 DAC960_LA_enable_intr(base);
2697 cb->qcmd = myrb_qcmd;
2698 cb->write_cmd_mbox = DAC960_LA_write_cmd_mbox;
2699 if (cb->dual_mode_interface)
2700 cb->get_cmd_mbox = DAC960_LA_mem_mbox_new_cmd;
2701 else
2702 cb->get_cmd_mbox = DAC960_LA_hw_mbox_new_cmd;
2703 cb->disable_intr = DAC960_LA_disable_intr;
2704 cb->reset = DAC960_LA_reset_ctrl;
2705
2706 return 0;
2707}
2708
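/*
 * DAC960_LA_intr_handler - walks the status mailbox ring
 *
 * Tags below 3 belong to the internal direct-command and monitoring
 * command blocks; all other tags map back to a SCSI command via the
 * host tag (offset by 3).
 */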
2709static irqreturn_t DAC960_LA_intr_handler(int irq, void *arg)
2710{
2711 struct myrb_hba *cb = arg;
2712 void __iomem *base = cb->io_base;
2713 struct myrb_stat_mbox *next_stat_mbox;
2714 unsigned long flags;
2715
2716 spin_lock_irqsave(&cb->queue_lock, flags);
2717 DAC960_LA_ack_intr(base);
2718 next_stat_mbox = cb->next_stat_mbox;
2719 while (next_stat_mbox->valid) {
2720 unsigned char id = next_stat_mbox->id;
2721 struct scsi_cmnd *scmd = NULL;
2722 struct myrb_cmdblk *cmd_blk = NULL;
2723
2724 if (id == MYRB_DCMD_TAG)
2725 cmd_blk = &cb->dcmd_blk;
2726 else if (id == MYRB_MCMD_TAG)
2727 cmd_blk = &cb->mcmd_blk;
2728 else {
2729 scmd = scsi_host_find_tag(cb->host, id - 3);
2730 if (scmd)
2731 cmd_blk = scsi_cmd_priv(scmd);
2732 }
2733 if (cmd_blk)
2734 cmd_blk->status = next_stat_mbox->status;
2735 else
2736 dev_err(&cb->pdev->dev,
2737 "Unhandled command completion %d\n", id);
2738
2739 memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
2740 if (++next_stat_mbox > cb->last_stat_mbox)
2741 next_stat_mbox = cb->first_stat_mbox;
2742
2743 if (cmd_blk) {
2744 if (id < 3)
2745 myrb_handle_cmdblk(cb, cmd_blk);
2746 else
2747 myrb_handle_scsi(cb, cmd_blk, scmd);
2748 }
2749 }
2750 cb->next_stat_mbox = next_stat_mbox;
2751 spin_unlock_irqrestore(&cb->queue_lock, flags);
2752 return IRQ_HANDLED;
2753}
2754
2755static struct myrb_privdata DAC960_LA_privdata = {
2756 .hw_init = DAC960_LA_hw_init,
2757 .irq_handler = DAC960_LA_intr_handler,
2758 .mmio_size = DAC960_LA_mmio_size,
2759};
2760
2761/*
2762 * DAC960 PG Series Controllers
2763 */
2764static inline void DAC960_PG_hw_mbox_new_cmd(void __iomem *base)
2765{
2766 writel(DAC960_PG_IDB_HWMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
2767}
2768
2769static inline void DAC960_PG_ack_hw_mbox_status(void __iomem *base)
2770{
2771 writel(DAC960_PG_IDB_HWMBOX_ACK_STS, base + DAC960_PG_IDB_OFFSET);
2772}
2773
2774static inline void DAC960_PG_reset_ctrl(void __iomem *base)
2775{
2776 writel(DAC960_PG_IDB_CTRL_RESET, base + DAC960_PG_IDB_OFFSET);
2777}
2778
2779static inline void DAC960_PG_mem_mbox_new_cmd(void __iomem *base)
2780{
2781 writel(DAC960_PG_IDB_MMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
2782}
2783
2784static inline bool DAC960_PG_hw_mbox_is_full(void __iomem *base)
2785{
	u32 idb = readl(base + DAC960_PG_IDB_OFFSET);
2787
2788 return idb & DAC960_PG_IDB_HWMBOX_FULL;
2789}
2790
2791static inline bool DAC960_PG_init_in_progress(void __iomem *base)
2792{
	u32 idb = readl(base + DAC960_PG_IDB_OFFSET);
2794
2795 return idb & DAC960_PG_IDB_INIT_IN_PROGRESS;
2796}
2797
2798static inline void DAC960_PG_ack_hw_mbox_intr(void __iomem *base)
2799{
2800 writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
2801}
2802
2803static inline void DAC960_PG_ack_intr(void __iomem *base)
2804{
2805 writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ | DAC960_PG_ODB_MMBOX_ACK_IRQ,
2806 base + DAC960_PG_ODB_OFFSET);
2807}
2808
2809static inline bool DAC960_PG_hw_mbox_status_available(void __iomem *base)
2810{
	u32 odb = readl(base + DAC960_PG_ODB_OFFSET);
2812
2813 return odb & DAC960_PG_ODB_HWMBOX_STS_AVAIL;
2814}
2815
2816static inline void DAC960_PG_enable_intr(void __iomem *base)
2817{
2818 unsigned int imask = (unsigned int)-1;
2819
2820 imask &= ~DAC960_PG_IRQMASK_DISABLE_IRQ;
2821 writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
2822}
2823
2824static inline void DAC960_PG_disable_intr(void __iomem *base)
2825{
2826 unsigned int imask = (unsigned int)-1;
2827
2828 writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
2829}
2830
2831static inline void DAC960_PG_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
2832 union myrb_cmd_mbox *mbox)
2833{
2834 mem_mbox->words[1] = mbox->words[1];
2835 mem_mbox->words[2] = mbox->words[2];
2836 mem_mbox->words[3] = mbox->words[3];
2837 /* Memory barrier to prevent reordering */
2838 wmb();
2839 mem_mbox->words[0] = mbox->words[0];
2840 /* Memory barrier to force PCI access */
2841 mb();
2842}
2843
2844static inline void DAC960_PG_write_hw_mbox(void __iomem *base,
2845 union myrb_cmd_mbox *mbox)
2846{
2847 writel(mbox->words[0], base + DAC960_PG_CMDOP_OFFSET);
2848 writel(mbox->words[1], base + DAC960_PG_MBOX4_OFFSET);
2849 writel(mbox->words[2], base + DAC960_PG_MBOX8_OFFSET);
2850 writeb(mbox->bytes[12], base + DAC960_PG_MBOX12_OFFSET);
2851}
2852
2853static inline unsigned short
2854DAC960_PG_read_status(void __iomem *base)
2855{
2856 return readw(base + DAC960_PG_STS_OFFSET);
2857}
2858
2859static inline bool
2860DAC960_PG_read_error_status(void __iomem *base, unsigned char *error,
2861 unsigned char *param0, unsigned char *param1)
2862{
2863 unsigned char errsts = readb(base + DAC960_PG_ERRSTS_OFFSET);
2864
2865 if (!(errsts & DAC960_PG_ERRSTS_PENDING))
2866 return false;
2867 errsts &= ~DAC960_PG_ERRSTS_PENDING;
2868 *error = errsts;
2869 *param0 = readb(base + DAC960_PG_CMDOP_OFFSET);
2870 *param1 = readb(base + DAC960_PG_CMDID_OFFSET);
2871 writeb(0, base + DAC960_PG_ERRSTS_OFFSET);
2872 return true;
2873}
2874
2875static inline unsigned short
2876DAC960_PG_mbox_init(struct pci_dev *pdev, void __iomem *base,
2877 union myrb_cmd_mbox *mbox)
2878{
2879 unsigned short status;
2880 int timeout = 0;
2881
2882 while (timeout < MYRB_MAILBOX_TIMEOUT) {
2883 if (!DAC960_PG_hw_mbox_is_full(base))
2884 break;
2885 udelay(10);
2886 timeout++;
2887 }
2888 if (DAC960_PG_hw_mbox_is_full(base)) {
2889 dev_err(&pdev->dev,
2890 "Timeout waiting for empty mailbox\n");
2891 return MYRB_STATUS_SUBSYS_TIMEOUT;
2892 }
2893 DAC960_PG_write_hw_mbox(base, mbox);
2894 DAC960_PG_hw_mbox_new_cmd(base);
2895
2896 timeout = 0;
2897 while (timeout < MYRB_MAILBOX_TIMEOUT) {
2898 if (DAC960_PG_hw_mbox_status_available(base))
2899 break;
2900 udelay(10);
2901 timeout++;
2902 }
2903 if (!DAC960_PG_hw_mbox_status_available(base)) {
2904 dev_err(&pdev->dev,
2905 "Timeout waiting for mailbox status\n");
2906 return MYRB_STATUS_SUBSYS_TIMEOUT;
2907 }
2908 status = DAC960_PG_read_status(base);
2909 DAC960_PG_ack_hw_mbox_intr(base);
2910 DAC960_PG_ack_hw_mbox_status(base);
2911
2912 return status;
2913}
2914
2915static int DAC960_PG_hw_init(struct pci_dev *pdev,
2916 struct myrb_hba *cb, void __iomem *base)
2917{
2918 int timeout = 0;
2919 unsigned char error, parm0, parm1;
2920
2921 DAC960_PG_disable_intr(base);
2922 DAC960_PG_ack_hw_mbox_status(base);
2923 udelay(1000);
2924 while (DAC960_PG_init_in_progress(base) &&
2925 timeout < MYRB_MAILBOX_TIMEOUT) {
2926 if (DAC960_PG_read_error_status(base, &error,
2927 &parm0, &parm1) &&
2928 myrb_err_status(cb, error, parm0, parm1))
2929 return -EIO;
2930 udelay(10);
2931 timeout++;
2932 }
2933 if (timeout == MYRB_MAILBOX_TIMEOUT) {
2934 dev_err(&pdev->dev,
2935 "Timeout waiting for Controller Initialisation\n");
2936 return -ETIMEDOUT;
2937 }
2938 if (!myrb_enable_mmio(cb, DAC960_PG_mbox_init)) {
2939 dev_err(&pdev->dev,
2940 "Unable to Enable Memory Mailbox Interface\n");
2941 DAC960_PG_reset_ctrl(base);
2942 return -ENODEV;
2943 }
2944 DAC960_PG_enable_intr(base);
2945 cb->qcmd = myrb_qcmd;
2946 cb->write_cmd_mbox = DAC960_PG_write_cmd_mbox;
2947 if (cb->dual_mode_interface)
2948 cb->get_cmd_mbox = DAC960_PG_mem_mbox_new_cmd;
2949 else
2950 cb->get_cmd_mbox = DAC960_PG_hw_mbox_new_cmd;
2951 cb->disable_intr = DAC960_PG_disable_intr;
2952 cb->reset = DAC960_PG_reset_ctrl;
2953
2954 return 0;
2955}
2956
2957static irqreturn_t DAC960_PG_intr_handler(int irq, void *arg)
2958{
2959 struct myrb_hba *cb = arg;
2960 void __iomem *base = cb->io_base;
2961 struct myrb_stat_mbox *next_stat_mbox;
2962 unsigned long flags;
2963
2964 spin_lock_irqsave(&cb->queue_lock, flags);
2965 DAC960_PG_ack_intr(base);
2966 next_stat_mbox = cb->next_stat_mbox;
2967 while (next_stat_mbox->valid) {
2968 unsigned char id = next_stat_mbox->id;
2969 struct scsi_cmnd *scmd = NULL;
2970 struct myrb_cmdblk *cmd_blk = NULL;
2971
2972 if (id == MYRB_DCMD_TAG)
2973 cmd_blk = &cb->dcmd_blk;
2974 else if (id == MYRB_MCMD_TAG)
2975 cmd_blk = &cb->mcmd_blk;
2976 else {
2977 scmd = scsi_host_find_tag(cb->host, id - 3);
2978 if (scmd)
2979 cmd_blk = scsi_cmd_priv(scmd);
2980 }
2981 if (cmd_blk)
2982 cmd_blk->status = next_stat_mbox->status;
2983 else
2984 dev_err(&cb->pdev->dev,
2985 "Unhandled command completion %d\n", id);
2986
2987 memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
2988 if (++next_stat_mbox > cb->last_stat_mbox)
2989 next_stat_mbox = cb->first_stat_mbox;
2990
2991 if (id < 3)
2992 myrb_handle_cmdblk(cb, cmd_blk);
2993 else
2994 myrb_handle_scsi(cb, cmd_blk, scmd);
2995 }
2996 cb->next_stat_mbox = next_stat_mbox;
2997 spin_unlock_irqrestore(&cb->queue_lock, flags);
2998 return IRQ_HANDLED;
2999}
3000
3001static struct myrb_privdata DAC960_PG_privdata = {
3002 .hw_init = DAC960_PG_hw_init,
3003 .irq_handler = DAC960_PG_intr_handler,
3004 .mmio_size = DAC960_PG_mmio_size,
3005};
3006
3008/*
3009 * DAC960 PD Series Controllers
3010 */
3011
3012static inline void DAC960_PD_hw_mbox_new_cmd(void __iomem *base)
3013{
3014 writeb(DAC960_PD_IDB_HWMBOX_NEW_CMD, base + DAC960_PD_IDB_OFFSET);
3015}
3016
3017static inline void DAC960_PD_ack_hw_mbox_status(void __iomem *base)
3018{
3019 writeb(DAC960_PD_IDB_HWMBOX_ACK_STS, base + DAC960_PD_IDB_OFFSET);
3020}
3021
3022static inline void DAC960_PD_reset_ctrl(void __iomem *base)
3023{
3024 writeb(DAC960_PD_IDB_CTRL_RESET, base + DAC960_PD_IDB_OFFSET);
3025}
3026
3027static inline bool DAC960_PD_hw_mbox_is_full(void __iomem *base)
3028{
3029 unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);
3030
3031 return idb & DAC960_PD_IDB_HWMBOX_FULL;
3032}
3033
3034static inline bool DAC960_PD_init_in_progress(void __iomem *base)
3035{
3036 unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);
3037
3038 return idb & DAC960_PD_IDB_INIT_IN_PROGRESS;
3039}
3040
3041static inline void DAC960_PD_ack_intr(void __iomem *base)
3042{
3043 writeb(DAC960_PD_ODB_HWMBOX_ACK_IRQ, base + DAC960_PD_ODB_OFFSET);
3044}
3045
3046static inline bool DAC960_PD_hw_mbox_status_available(void __iomem *base)
3047{
3048 unsigned char odb = readb(base + DAC960_PD_ODB_OFFSET);
3049
3050 return odb & DAC960_PD_ODB_HWMBOX_STS_AVAIL;
3051}
3052
3053static inline void DAC960_PD_enable_intr(void __iomem *base)
3054{
3055 writeb(DAC960_PD_IRQMASK_ENABLE_IRQ, base + DAC960_PD_IRQEN_OFFSET);
3056}
3057
3058static inline void DAC960_PD_disable_intr(void __iomem *base)
3059{
3060 writeb(0, base + DAC960_PD_IRQEN_OFFSET);
3061}
3062
3063static inline void DAC960_PD_write_cmd_mbox(void __iomem *base,
3064 union myrb_cmd_mbox *mbox)
3065{
3066 writel(mbox->words[0], base + DAC960_PD_CMDOP_OFFSET);
3067 writel(mbox->words[1], base + DAC960_PD_MBOX4_OFFSET);
3068 writel(mbox->words[2], base + DAC960_PD_MBOX8_OFFSET);
3069 writeb(mbox->bytes[12], base + DAC960_PD_MBOX12_OFFSET);
3070}
3071
3072static inline unsigned char
3073DAC960_PD_read_status_cmd_ident(void __iomem *base)
3074{
3075 return readb(base + DAC960_PD_STSID_OFFSET);
3076}
3077
3078static inline unsigned short
3079DAC960_PD_read_status(void __iomem *base)
3080{
3081 return readw(base + DAC960_PD_STS_OFFSET);
3082}
3083
3084static inline bool
3085DAC960_PD_read_error_status(void __iomem *base, unsigned char *error,
3086 unsigned char *param0, unsigned char *param1)
3087{
3088 unsigned char errsts = readb(base + DAC960_PD_ERRSTS_OFFSET);
3089
3090 if (!(errsts & DAC960_PD_ERRSTS_PENDING))
3091 return false;
3092 errsts &= ~DAC960_PD_ERRSTS_PENDING;
3093 *error = errsts;
3094 *param0 = readb(base + DAC960_PD_CMDOP_OFFSET);
3095 *param1 = readb(base + DAC960_PD_CMDID_OFFSET);
3096 writeb(0, base + DAC960_PD_ERRSTS_OFFSET);
3097 return true;
3098}
3099
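/*
 * DAC960_PD_qcmd - submits a command via the hardware mailbox
 *
 * The PD series has no memory mailbox, so commands are written
 * directly to the hardware mailbox registers, busy-waiting until
 * the mailbox is free.
 */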
3100static void DAC960_PD_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
3101{
3102 void __iomem *base = cb->io_base;
3103 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3104
3105 while (DAC960_PD_hw_mbox_is_full(base))
3106 udelay(1);
3107 DAC960_PD_write_cmd_mbox(base, mbox);
3108 DAC960_PD_hw_mbox_new_cmd(base);
3109}
3110
3111static int DAC960_PD_hw_init(struct pci_dev *pdev,
3112 struct myrb_hba *cb, void __iomem *base)
3113{
3114 int timeout = 0;
3115 unsigned char error, parm0, parm1;
3116
3117 if (!request_region(cb->io_addr, 0x80, "myrb")) {
3118 dev_err(&pdev->dev, "IO port 0x%lx busy\n",
3119 (unsigned long)cb->io_addr);
3120 return -EBUSY;
3121 }
3122 DAC960_PD_disable_intr(base);
3123 DAC960_PD_ack_hw_mbox_status(base);
3124 udelay(1000);
3125 while (DAC960_PD_init_in_progress(base) &&
3126 timeout < MYRB_MAILBOX_TIMEOUT) {
3127 if (DAC960_PD_read_error_status(base, &error,
3128 &parm0, &parm1) &&
3129 myrb_err_status(cb, error, parm0, parm1))
3130 return -EIO;
3131 udelay(10);
3132 timeout++;
3133 }
3134 if (timeout == MYRB_MAILBOX_TIMEOUT) {
3135 dev_err(&pdev->dev,
3136 "Timeout waiting for Controller Initialisation\n");
3137 return -ETIMEDOUT;
3138 }
3139 if (!myrb_enable_mmio(cb, NULL)) {
3140 dev_err(&pdev->dev,
3141 "Unable to Enable Memory Mailbox Interface\n");
3142 DAC960_PD_reset_ctrl(base);
3143 return -ENODEV;
3144 }
3145 DAC960_PD_enable_intr(base);
3146 cb->qcmd = DAC960_PD_qcmd;
3147 cb->disable_intr = DAC960_PD_disable_intr;
3148 cb->reset = DAC960_PD_reset_ctrl;
3149
3150 return 0;
3151}
3152
3153static irqreturn_t DAC960_PD_intr_handler(int irq, void *arg)
3154{
3155 struct myrb_hba *cb = arg;
3156 void __iomem *base = cb->io_base;
3157 unsigned long flags;
3158
3159 spin_lock_irqsave(&cb->queue_lock, flags);
3160 while (DAC960_PD_hw_mbox_status_available(base)) {
3161 unsigned char id = DAC960_PD_read_status_cmd_ident(base);
3162 struct scsi_cmnd *scmd = NULL;
3163 struct myrb_cmdblk *cmd_blk = NULL;
3164
3165 if (id == MYRB_DCMD_TAG)
3166 cmd_blk = &cb->dcmd_blk;
3167 else if (id == MYRB_MCMD_TAG)
3168 cmd_blk = &cb->mcmd_blk;
3169 else {
3170 scmd = scsi_host_find_tag(cb->host, id - 3);
3171 if (scmd)
3172 cmd_blk = scsi_cmd_priv(scmd);
3173 }
3174 if (cmd_blk)
3175 cmd_blk->status = DAC960_PD_read_status(base);
3176 else
3177 dev_err(&cb->pdev->dev,
3178 "Unhandled command completion %d\n", id);
3179
3180 DAC960_PD_ack_intr(base);
3181 DAC960_PD_ack_hw_mbox_status(base);
3182
3183 if (id < 3)
3184 myrb_handle_cmdblk(cb, cmd_blk);
3185 else
3186 myrb_handle_scsi(cb, cmd_blk, scmd);
3187 }
3188 spin_unlock_irqrestore(&cb->queue_lock, flags);
3189 return IRQ_HANDLED;
3190}
3191
3192static struct myrb_privdata DAC960_PD_privdata = {
3193 .hw_init = DAC960_PD_hw_init,
3194 .irq_handler = DAC960_PD_intr_handler,
3195 .mmio_size = DAC960_PD_mmio_size,
3196};
3197
3199/*
3200 * DAC960 P Series Controllers
3201 *
3202 * Similar to the DAC960 PD Series Controllers, but some commands have
3203 * to be translated.
3204 */
3205
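/*
 * The helpers below convert in place between the old P series layouts
 * (enquiry data, device state, logical drive number encoding in
 * read/write commands) and the formats used by the rest of the driver.
 */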
3206static inline void myrb_translate_enquiry(void *enq)
3207{
3208 memcpy(enq + 132, enq + 36, 64);
3209 memset(enq + 36, 0, 96);
3210}
3211
3212static inline void myrb_translate_devstate(void *state)
3213{
3214 memcpy(state + 2, state + 3, 1);
3215 memmove(state + 4, state + 5, 2);
3216 memmove(state + 6, state + 8, 4);
3217}
3218
3219static inline void myrb_translate_to_rw_command(struct myrb_cmdblk *cmd_blk)
3220{
3221 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3222 int ldev_num = mbox->type5.ld.ldev_num;
3223
3224 mbox->bytes[3] &= 0x7;
3225 mbox->bytes[3] |= mbox->bytes[7] << 6;
3226 mbox->bytes[7] = ldev_num;
3227}
3228
3229static inline void myrb_translate_from_rw_command(struct myrb_cmdblk *cmd_blk)
3230{
3231 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3232 int ldev_num = mbox->bytes[7];
3233
3234 mbox->bytes[7] = mbox->bytes[3] >> 6;
3235 mbox->bytes[3] &= 0x7;
3236 mbox->bytes[3] |= ldev_num << 3;
3237}
3238
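/*
 * DAC960_P_qcmd - submits a command, rewriting new-style opcodes and
 * read/write mailboxes into their old P series equivalents first.
 */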
3239static void DAC960_P_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
3240{
3241 void __iomem *base = cb->io_base;
3242 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3243
3244 switch (mbox->common.opcode) {
3245 case MYRB_CMD_ENQUIRY:
3246 mbox->common.opcode = MYRB_CMD_ENQUIRY_OLD;
3247 break;
3248 case MYRB_CMD_GET_DEVICE_STATE:
3249 mbox->common.opcode = MYRB_CMD_GET_DEVICE_STATE_OLD;
3250 break;
3251 case MYRB_CMD_READ:
3252 mbox->common.opcode = MYRB_CMD_READ_OLD;
3253 myrb_translate_to_rw_command(cmd_blk);
3254 break;
3255 case MYRB_CMD_WRITE:
3256 mbox->common.opcode = MYRB_CMD_WRITE_OLD;
3257 myrb_translate_to_rw_command(cmd_blk);
3258 break;
3259 case MYRB_CMD_READ_SG:
3260 mbox->common.opcode = MYRB_CMD_READ_SG_OLD;
3261 myrb_translate_to_rw_command(cmd_blk);
3262 break;
3263 case MYRB_CMD_WRITE_SG:
3264 mbox->common.opcode = MYRB_CMD_WRITE_SG_OLD;
3265 myrb_translate_to_rw_command(cmd_blk);
3266 break;
3267 default:
3268 break;
3269 }
3270 while (DAC960_PD_hw_mbox_is_full(base))
3271 udelay(1);
3272 DAC960_PD_write_cmd_mbox(base, mbox);
3273 DAC960_PD_hw_mbox_new_cmd(base);
3274}
3275
3277static int DAC960_P_hw_init(struct pci_dev *pdev,
3278 struct myrb_hba *cb, void __iomem *base)
3279{
3280 int timeout = 0;
3281 unsigned char error, parm0, parm1;
3282
3283 if (!request_region(cb->io_addr, 0x80, "myrb")) {
3284 dev_err(&pdev->dev, "IO port 0x%lx busy\n",
3285 (unsigned long)cb->io_addr);
3286 return -EBUSY;
3287 }
3288 DAC960_PD_disable_intr(base);
3289 DAC960_PD_ack_hw_mbox_status(base);
3290 udelay(1000);
3291 while (DAC960_PD_init_in_progress(base) &&
3292 timeout < MYRB_MAILBOX_TIMEOUT) {
3293 if (DAC960_PD_read_error_status(base, &error,
3294 &parm0, &parm1) &&
3295 myrb_err_status(cb, error, parm0, parm1))
3296 return -EAGAIN;
3297 udelay(10);
3298 timeout++;
3299 }
3300 if (timeout == MYRB_MAILBOX_TIMEOUT) {
3301 dev_err(&pdev->dev,
3302 "Timeout waiting for Controller Initialisation\n");
3303 return -ETIMEDOUT;
3304 }
3305 if (!myrb_enable_mmio(cb, NULL)) {
3306 dev_err(&pdev->dev,
3307 "Unable to allocate DMA mapped memory\n");
3308 DAC960_PD_reset_ctrl(base);
		return -ENODEV;
3310 }
3311 DAC960_PD_enable_intr(base);
3312 cb->qcmd = DAC960_P_qcmd;
3313 cb->disable_intr = DAC960_PD_disable_intr;
3314 cb->reset = DAC960_PD_reset_ctrl;
3315
3316 return 0;
3317}
3318
3319static irqreturn_t DAC960_P_intr_handler(int irq, void *arg)
3320{
3321 struct myrb_hba *cb = arg;
3322 void __iomem *base = cb->io_base;
3323 unsigned long flags;
3324
3325 spin_lock_irqsave(&cb->queue_lock, flags);
3326 while (DAC960_PD_hw_mbox_status_available(base)) {
3327 unsigned char id = DAC960_PD_read_status_cmd_ident(base);
3328 struct scsi_cmnd *scmd = NULL;
3329 struct myrb_cmdblk *cmd_blk = NULL;
3330 union myrb_cmd_mbox *mbox;
3331 enum myrb_cmd_opcode op;
3332
3334 if (id == MYRB_DCMD_TAG)
3335 cmd_blk = &cb->dcmd_blk;
3336 else if (id == MYRB_MCMD_TAG)
3337 cmd_blk = &cb->mcmd_blk;
3338 else {
3339 scmd = scsi_host_find_tag(cb->host, id - 3);
3340 if (scmd)
3341 cmd_blk = scsi_cmd_priv(scmd);
3342 }
3343 if (cmd_blk)
3344 cmd_blk->status = DAC960_PD_read_status(base);
3345 else
3346 dev_err(&cb->pdev->dev,
3347 "Unhandled command completion %d\n", id);
3348
3349 DAC960_PD_ack_intr(base);
3350 DAC960_PD_ack_hw_mbox_status(base);
3351
3352 if (!cmd_blk)
3353 continue;
3354
3355 mbox = &cmd_blk->mbox;
3356 op = mbox->common.opcode;
3357 switch (op) {
3358 case MYRB_CMD_ENQUIRY_OLD:
3359 mbox->common.opcode = MYRB_CMD_ENQUIRY;
3360 myrb_translate_enquiry(cb->enquiry);
3361 break;
3362 case MYRB_CMD_READ_OLD:
3363 mbox->common.opcode = MYRB_CMD_READ;
3364 myrb_translate_from_rw_command(cmd_blk);
3365 break;
3366 case MYRB_CMD_WRITE_OLD:
3367 mbox->common.opcode = MYRB_CMD_WRITE;
3368 myrb_translate_from_rw_command(cmd_blk);
3369 break;
3370 case MYRB_CMD_READ_SG_OLD:
3371 mbox->common.opcode = MYRB_CMD_READ_SG;
3372 myrb_translate_from_rw_command(cmd_blk);
3373 break;
3374 case MYRB_CMD_WRITE_SG_OLD:
3375 mbox->common.opcode = MYRB_CMD_WRITE_SG;
3376 myrb_translate_from_rw_command(cmd_blk);
3377 break;
3378 default:
3379 break;
3380 }
3381 if (id < 3)
3382 myrb_handle_cmdblk(cb, cmd_blk);
3383 else
3384 myrb_handle_scsi(cb, cmd_blk, scmd);
3385 }
3386 spin_unlock_irqrestore(&cb->queue_lock, flags);
3387 return IRQ_HANDLED;
3388}
3389
3390static struct myrb_privdata DAC960_P_privdata = {
3391 .hw_init = DAC960_P_hw_init,
3392 .irq_handler = DAC960_P_intr_handler,
3393 .mmio_size = DAC960_PD_mmio_size,
3394};
3395
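/*
 * myrb_detect - allocates the host structure and initializes the HBA
 *
 * Maps the controller registers, runs the model-specific hardware
 * initialization and installs the interrupt handler.
 */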
3396static struct myrb_hba *myrb_detect(struct pci_dev *pdev,
3397 const struct pci_device_id *entry)
3398{
3399 struct myrb_privdata *privdata =
3400 (struct myrb_privdata *)entry->driver_data;
3401 irq_handler_t irq_handler = privdata->irq_handler;
3402 unsigned int mmio_size = privdata->mmio_size;
3403 struct Scsi_Host *shost;
3404 struct myrb_hba *cb = NULL;
3405
3406 shost = scsi_host_alloc(&myrb_template, sizeof(struct myrb_hba));
3407 if (!shost) {
3408 dev_err(&pdev->dev, "Unable to allocate Controller\n");
3409 return NULL;
3410 }
3411 shost->max_cmd_len = 12;
3412 shost->max_lun = 256;
3413 cb = shost_priv(shost);
3414 mutex_init(&cb->dcmd_mutex);
3415 mutex_init(&cb->dma_mutex);
3416 cb->pdev = pdev;
3417 cb->host = shost;
3418
3419 if (pci_enable_device(pdev)) {
3420 dev_err(&pdev->dev, "Failed to enable PCI device\n");
3421 scsi_host_put(shost);
3422 return NULL;
3423 }
3424
3425 if (privdata->hw_init == DAC960_PD_hw_init ||
3426 privdata->hw_init == DAC960_P_hw_init) {
3427 cb->io_addr = pci_resource_start(pdev, 0);
3428 cb->pci_addr = pci_resource_start(pdev, 1);
3429 } else
3430 cb->pci_addr = pci_resource_start(pdev, 0);
3431
3432 pci_set_drvdata(pdev, cb);
3433 spin_lock_init(&cb->queue_lock);
3434 if (mmio_size < PAGE_SIZE)
3435 mmio_size = PAGE_SIZE;
3436 cb->mmio_base = ioremap(cb->pci_addr & PAGE_MASK, mmio_size);
3437 if (cb->mmio_base == NULL) {
3438 dev_err(&pdev->dev,
3439 "Unable to map Controller Register Window\n");
3440 goto failure;
3441 }
3442
3443 cb->io_base = cb->mmio_base + (cb->pci_addr & ~PAGE_MASK);
3444 if (privdata->hw_init(pdev, cb, cb->io_base))
3445 goto failure;
3446
3447 if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrb", cb) < 0) {
3448 dev_err(&pdev->dev,
3449 "Unable to acquire IRQ Channel %d\n", pdev->irq);
3450 goto failure;
3451 }
3452 cb->irq = pdev->irq;
3453 return cb;
3454
3455failure:
3456 dev_err(&pdev->dev,
3457 "Failed to initialize Controller\n");
3458 myrb_cleanup(cb);
3459 return NULL;
3460}
3461
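/*
 * myrb_probe - PCI probe callback
 *
 * Detects the controller, reads its configuration, sets up the
 * memory pools and registers the SCSI host.
 */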
3462static int myrb_probe(struct pci_dev *dev, const struct pci_device_id *entry)
3463{
3464 struct myrb_hba *cb;
3465 int ret;
3466
3467 cb = myrb_detect(dev, entry);
3468 if (!cb)
3469 return -ENODEV;
3470
3471 ret = myrb_get_hba_config(cb);
3472 if (ret < 0) {
3473 myrb_cleanup(cb);
3474 return ret;
3475 }
3476
3477 if (!myrb_create_mempools(dev, cb)) {
3478 ret = -ENOMEM;
3479 goto failed;
3480 }
3481
3482 ret = scsi_add_host(cb->host, &dev->dev);
3483 if (ret) {
3484 dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
3485 myrb_destroy_mempools(cb);
3486 goto failed;
3487 }
3488 scsi_scan_host(cb->host);
3489 return 0;
3490failed:
3491 myrb_cleanup(cb);
3492 return ret;
3493}
3494
3496static void myrb_remove(struct pci_dev *pdev)
3497{
3498 struct myrb_hba *cb = pci_get_drvdata(pdev);
3499
	shost_printk(KERN_NOTICE, cb->host, "Flushing Cache...\n");
3501 myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
3502 myrb_cleanup(cb);
3503 myrb_destroy_mempools(cb);
3504}
3505
3507static const struct pci_device_id myrb_id_table[] = {
3508 {
3509 PCI_DEVICE_SUB(PCI_VENDOR_ID_DEC,
3510 PCI_DEVICE_ID_DEC_21285,
3511 PCI_VENDOR_ID_MYLEX,
3512 PCI_DEVICE_ID_MYLEX_DAC960_LA),
3513 .driver_data = (unsigned long) &DAC960_LA_privdata,
3514 },
3515 {
3516 PCI_DEVICE_DATA(MYLEX, DAC960_PG, &DAC960_PG_privdata),
3517 },
3518 {
3519 PCI_DEVICE_DATA(MYLEX, DAC960_PD, &DAC960_PD_privdata),
3520 },
3521 {
3522 PCI_DEVICE_DATA(MYLEX, DAC960_P, &DAC960_P_privdata),
3523 },
3524 {0, },
3525};
3526
3527MODULE_DEVICE_TABLE(pci, myrb_id_table);
3528
3529static struct pci_driver myrb_pci_driver = {
3530 .name = "myrb",
3531 .id_table = myrb_id_table,
3532 .probe = myrb_probe,
3533 .remove = myrb_remove,
3534};
3535
3536static int __init myrb_init_module(void)
3537{
3538 int ret;
3539
3540 myrb_raid_template = raid_class_attach(&myrb_raid_functions);
3541 if (!myrb_raid_template)
3542 return -ENODEV;
3543
3544 ret = pci_register_driver(&myrb_pci_driver);
3545 if (ret)
3546 raid_class_release(myrb_raid_template);
3547
3548 return ret;
3549}
3550
3551static void __exit myrb_cleanup_module(void)
3552{
3553 pci_unregister_driver(&myrb_pci_driver);
3554 raid_class_release(myrb_raid_template);
3555}
3556
3557module_init(myrb_init_module);
3558module_exit(myrb_cleanup_module);
3559
3560MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (Block interface)");
3561MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
3562MODULE_LICENSE("GPL");
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
4 *
5 * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
6 *
7 * Based on the original DAC960 driver,
8 * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
9 * Portions Copyright 2002 by Mylex (An IBM Business Unit)
10 *
11 */
12
13#include <linux/module.h>
14#include <linux/types.h>
15#include <linux/delay.h>
16#include <linux/interrupt.h>
17#include <linux/pci.h>
18#include <linux/raid_class.h>
19#include <asm/unaligned.h>
20#include <scsi/scsi.h>
21#include <scsi/scsi_host.h>
22#include <scsi/scsi_device.h>
23#include <scsi/scsi_cmnd.h>
24#include <scsi/scsi_tcq.h>
25#include "myrb.h"
26
27static struct raid_template *myrb_raid_template;
28
29static void myrb_monitor(struct work_struct *work);
30static inline void myrb_translate_devstate(void *DeviceState);
31
32static inline int myrb_logical_channel(struct Scsi_Host *shost)
33{
34 return shost->max_channel - 1;
35}
36
37static struct myrb_devstate_name_entry {
38 enum myrb_devstate state;
39 const char *name;
40} myrb_devstate_name_list[] = {
41 { MYRB_DEVICE_DEAD, "Dead" },
42 { MYRB_DEVICE_WO, "WriteOnly" },
43 { MYRB_DEVICE_ONLINE, "Online" },
44 { MYRB_DEVICE_CRITICAL, "Critical" },
45 { MYRB_DEVICE_STANDBY, "Standby" },
46 { MYRB_DEVICE_OFFLINE, "Offline" },
47};
48
49static const char *myrb_devstate_name(enum myrb_devstate state)
50{
51 struct myrb_devstate_name_entry *entry = myrb_devstate_name_list;
52 int i;
53
54 for (i = 0; i < ARRAY_SIZE(myrb_devstate_name_list); i++) {
55 if (entry[i].state == state)
56 return entry[i].name;
57 }
58 return "Unknown";
59}
60
61static struct myrb_raidlevel_name_entry {
62 enum myrb_raidlevel level;
63 const char *name;
64} myrb_raidlevel_name_list[] = {
65 { MYRB_RAID_LEVEL0, "RAID0" },
66 { MYRB_RAID_LEVEL1, "RAID1" },
67 { MYRB_RAID_LEVEL3, "RAID3" },
68 { MYRB_RAID_LEVEL5, "RAID5" },
69 { MYRB_RAID_LEVEL6, "RAID6" },
70 { MYRB_RAID_JBOD, "JBOD" },
71};
72
73static const char *myrb_raidlevel_name(enum myrb_raidlevel level)
74{
75 struct myrb_raidlevel_name_entry *entry = myrb_raidlevel_name_list;
76 int i;
77
78 for (i = 0; i < ARRAY_SIZE(myrb_raidlevel_name_list); i++) {
79 if (entry[i].level == level)
80 return entry[i].name;
81 }
82 return NULL;
83}
84
85/**
86 * myrb_create_mempools - allocates auxiliary data structures
87 *
88 * Return: true on success, false otherwise.
89 */
90static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb)
91{
92 size_t elem_size, elem_align;
93
94 elem_align = sizeof(struct myrb_sge);
95 elem_size = cb->host->sg_tablesize * elem_align;
96 cb->sg_pool = dma_pool_create("myrb_sg", &pdev->dev,
97 elem_size, elem_align, 0);
98 if (cb->sg_pool == NULL) {
99 shost_printk(KERN_ERR, cb->host,
100 "Failed to allocate SG pool\n");
101 return false;
102 }
103
104 cb->dcdb_pool = dma_pool_create("myrb_dcdb", &pdev->dev,
105 sizeof(struct myrb_dcdb),
106 sizeof(unsigned int), 0);
107 if (!cb->dcdb_pool) {
108 dma_pool_destroy(cb->sg_pool);
109 cb->sg_pool = NULL;
110 shost_printk(KERN_ERR, cb->host,
111 "Failed to allocate DCDB pool\n");
112 return false;
113 }
114
115 snprintf(cb->work_q_name, sizeof(cb->work_q_name),
116 "myrb_wq_%d", cb->host->host_no);
117 cb->work_q = create_singlethread_workqueue(cb->work_q_name);
118 if (!cb->work_q) {
119 dma_pool_destroy(cb->dcdb_pool);
120 cb->dcdb_pool = NULL;
121 dma_pool_destroy(cb->sg_pool);
122 cb->sg_pool = NULL;
123 shost_printk(KERN_ERR, cb->host,
124 "Failed to create workqueue\n");
125 return false;
126 }
127
128 /*
129 * Initialize the Monitoring Timer.
130 */
131 INIT_DELAYED_WORK(&cb->monitor_work, myrb_monitor);
132 queue_delayed_work(cb->work_q, &cb->monitor_work, 1);
133
134 return true;
135}
136
137/**
138 * myrb_destroy_mempools - tears down the memory pools for the controller
139 */
140static void myrb_destroy_mempools(struct myrb_hba *cb)
141{
142 cancel_delayed_work_sync(&cb->monitor_work);
143 destroy_workqueue(cb->work_q);
144
145 dma_pool_destroy(cb->sg_pool);
146 dma_pool_destroy(cb->dcdb_pool);
147}
148
149/**
150 * myrb_reset_cmd - reset command block
151 */
152static inline void myrb_reset_cmd(struct myrb_cmdblk *cmd_blk)
153{
154 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
155
156 memset(mbox, 0, sizeof(union myrb_cmd_mbox));
157 cmd_blk->status = 0;
158}
159
160/**
161 * myrb_qcmd - queues command block for execution
162 */
163static void myrb_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
164{
165 void __iomem *base = cb->io_base;
166 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
167 union myrb_cmd_mbox *next_mbox = cb->next_cmd_mbox;
168
169 cb->write_cmd_mbox(next_mbox, mbox);
170 if (cb->prev_cmd_mbox1->words[0] == 0 ||
171 cb->prev_cmd_mbox2->words[0] == 0)
172 cb->get_cmd_mbox(base);
173 cb->prev_cmd_mbox2 = cb->prev_cmd_mbox1;
174 cb->prev_cmd_mbox1 = next_mbox;
175 if (++next_mbox > cb->last_cmd_mbox)
176 next_mbox = cb->first_cmd_mbox;
177 cb->next_cmd_mbox = next_mbox;
178}
179
180/**
181 * myrb_exec_cmd - executes command block and waits for completion.
182 *
183 * Return: command status
184 */
185static unsigned short myrb_exec_cmd(struct myrb_hba *cb,
186 struct myrb_cmdblk *cmd_blk)
187{
188 DECLARE_COMPLETION_ONSTACK(cmpl);
189 unsigned long flags;
190
191 cmd_blk->completion = &cmpl;
192
193 spin_lock_irqsave(&cb->queue_lock, flags);
194 cb->qcmd(cb, cmd_blk);
195 spin_unlock_irqrestore(&cb->queue_lock, flags);
196
197 WARN_ON(in_interrupt());
198 wait_for_completion(&cmpl);
199 return cmd_blk->status;
200}
201
202/**
203 * myrb_exec_type3 - executes a type 3 command and waits for completion.
204 *
205 * Return: command status
206 */
207static unsigned short myrb_exec_type3(struct myrb_hba *cb,
208 enum myrb_cmd_opcode op, dma_addr_t addr)
209{
210 struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
211 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
212 unsigned short status;
213
214 mutex_lock(&cb->dcmd_mutex);
215 myrb_reset_cmd(cmd_blk);
216 mbox->type3.id = MYRB_DCMD_TAG;
217 mbox->type3.opcode = op;
218 mbox->type3.addr = addr;
219 status = myrb_exec_cmd(cb, cmd_blk);
220 mutex_unlock(&cb->dcmd_mutex);
221 return status;
222}
223
224/**
225 * myrb_exec_type3D - executes a type 3D command and waits for completion.
226 *
227 * Return: command status
228 */
229static unsigned short myrb_exec_type3D(struct myrb_hba *cb,
230 enum myrb_cmd_opcode op, struct scsi_device *sdev,
231 struct myrb_pdev_state *pdev_info)
232{
233 struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
234 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
235 unsigned short status;
236 dma_addr_t pdev_info_addr;
237
238 pdev_info_addr = dma_map_single(&cb->pdev->dev, pdev_info,
239 sizeof(struct myrb_pdev_state),
240 DMA_FROM_DEVICE);
241 if (dma_mapping_error(&cb->pdev->dev, pdev_info_addr))
242 return MYRB_STATUS_SUBSYS_FAILED;
243
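	/*
	 * pdev_info stays mapped for the duration of the command; the CPU
	 * must not inspect the buffer until dma_unmap_single() below.
	 */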
244 mutex_lock(&cb->dcmd_mutex);
245 myrb_reset_cmd(cmd_blk);
246 mbox->type3D.id = MYRB_DCMD_TAG;
247 mbox->type3D.opcode = op;
248 mbox->type3D.channel = sdev->channel;
249 mbox->type3D.target = sdev->id;
250 mbox->type3D.addr = pdev_info_addr;
251 status = myrb_exec_cmd(cb, cmd_blk);
252 mutex_unlock(&cb->dcmd_mutex);
253 dma_unmap_single(&cb->pdev->dev, pdev_info_addr,
254 sizeof(struct myrb_pdev_state), DMA_FROM_DEVICE);
255 if (status == MYRB_STATUS_SUCCESS &&
256 mbox->type3D.opcode == MYRB_CMD_GET_DEVICE_STATE_OLD)
257 myrb_translate_devstate(pdev_info);
258
259 return status;
260}
261
static const char *myrb_event_msg[] = {
263 "killed because write recovery failed",
264 "killed because of SCSI bus reset failure",
265 "killed because of double check condition",
266 "killed because it was removed",
267 "killed because of gross error on SCSI chip",
268 "killed because of bad tag returned from drive",
269 "killed because of timeout on SCSI command",
270 "killed because of reset SCSI command issued from system",
271 "killed because busy or parity error count exceeded limit",
272 "killed because of 'kill drive' command from system",
273 "killed because of selection timeout",
274 "killed due to SCSI phase sequence error",
275 "killed due to unknown status",
276};
277
278/**
279 * myrb_get_event - get event log from HBA
280 * @cb: pointer to the hba structure
281 * @event: number of the event
282 *
 * Executes a type 3E command and logs the event message.
284 */
285static void myrb_get_event(struct myrb_hba *cb, unsigned int event)
286{
287 struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
288 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
289 struct myrb_log_entry *ev_buf;
290 dma_addr_t ev_addr;
291 unsigned short status;
292
293 ev_buf = dma_alloc_coherent(&cb->pdev->dev,
294 sizeof(struct myrb_log_entry),
295 &ev_addr, GFP_KERNEL);
296 if (!ev_buf)
297 return;
298
299 myrb_reset_cmd(cmd_blk);
300 mbox->type3E.id = MYRB_MCMD_TAG;
301 mbox->type3E.opcode = MYRB_CMD_EVENT_LOG_OPERATION;
302 mbox->type3E.optype = DAC960_V1_GetEventLogEntry;
303 mbox->type3E.opqual = 1;
304 mbox->type3E.ev_seq = event;
305 mbox->type3E.addr = ev_addr;
306 status = myrb_exec_cmd(cb, cmd_blk);
307 if (status != MYRB_STATUS_SUCCESS)
308 shost_printk(KERN_INFO, cb->host,
309 "Failed to get event log %d, status %04x\n",
310 event, status);
312 else if (ev_buf->seq_num == event) {
313 struct scsi_sense_hdr sshdr;
314
315 memset(&sshdr, 0, sizeof(sshdr));
316 scsi_normalize_sense(ev_buf->sense, 32, &sshdr);
317
318 if (sshdr.sense_key == VENDOR_SPECIFIC &&
319 sshdr.asc == 0x80 &&
320 sshdr.ascq < ARRAY_SIZE(myrb_event_msg))
321 shost_printk(KERN_CRIT, cb->host,
322 "Physical drive %d:%d: %s\n",
323 ev_buf->channel, ev_buf->target,
324 myrb_event_msg[sshdr.ascq]);
325 else
326 shost_printk(KERN_CRIT, cb->host,
327 "Physical drive %d:%d: Sense: %X/%02X/%02X\n",
328 ev_buf->channel, ev_buf->target,
329 sshdr.sense_key, sshdr.asc, sshdr.ascq);
330 }
331
332 dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_log_entry),
333 ev_buf, ev_addr);
334}
335
336/**
337 * myrb_get_errtable - retrieves the error table from the controller
338 *
339 * Executes a type 3 command and logs the error table from the controller.
340 */
341static void myrb_get_errtable(struct myrb_hba *cb)
342{
343 struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
344 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
345 unsigned short status;
346 struct myrb_error_entry old_table[MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS];
347
348 memcpy(&old_table, cb->err_table, sizeof(old_table));
349
350 myrb_reset_cmd(cmd_blk);
351 mbox->type3.id = MYRB_MCMD_TAG;
352 mbox->type3.opcode = MYRB_CMD_GET_ERROR_TABLE;
353 mbox->type3.addr = cb->err_table_addr;
354 status = myrb_exec_cmd(cb, cmd_blk);
355 if (status == MYRB_STATUS_SUCCESS) {
356 struct myrb_error_entry *table = cb->err_table;
357 struct myrb_error_entry *new, *old;
358 size_t err_table_offset;
359 struct scsi_device *sdev;
360
361 shost_for_each_device(sdev, cb->host) {
362 if (sdev->channel >= myrb_logical_channel(cb->host))
363 continue;
364 err_table_offset = sdev->channel * MYRB_MAX_TARGETS
365 + sdev->id;
366 new = table + err_table_offset;
367 old = &old_table[err_table_offset];
368 if (new->parity_err == old->parity_err &&
369 new->soft_err == old->soft_err &&
370 new->hard_err == old->hard_err &&
371 new->misc_err == old->misc_err)
372 continue;
373 sdev_printk(KERN_CRIT, sdev,
374 "Errors: Parity = %d, Soft = %d, Hard = %d, Misc = %d\n",
375 new->parity_err, new->soft_err,
376 new->hard_err, new->misc_err);
377 }
378 }
379}
380
381/**
382 * myrb_get_ldev_info - retrieves the logical device table from the controller
383 *
384 * Executes a type 3 command and updates the logical device table.
385 *
386 * Return: command status
387 */
388static unsigned short myrb_get_ldev_info(struct myrb_hba *cb)
389{
390 unsigned short status;
391 int ldev_num, ldev_cnt = cb->enquiry->ldev_count;
392 struct Scsi_Host *shost = cb->host;
393
394 status = myrb_exec_type3(cb, MYRB_CMD_GET_LDEV_INFO,
395 cb->ldev_info_addr);
396 if (status != MYRB_STATUS_SUCCESS)
397 return status;
398
399 for (ldev_num = 0; ldev_num < ldev_cnt; ldev_num++) {
400 struct myrb_ldev_info *old = NULL;
401 struct myrb_ldev_info *new = cb->ldev_info_buf + ldev_num;
402 struct scsi_device *sdev;
403
404 sdev = scsi_device_lookup(shost, myrb_logical_channel(shost),
405 ldev_num, 0);
406 if (!sdev) {
407 if (new->state == MYRB_DEVICE_OFFLINE)
408 continue;
409 shost_printk(KERN_INFO, shost,
410 "Adding Logical Drive %d in state %s\n",
411 ldev_num, myrb_devstate_name(new->state));
412 scsi_add_device(shost, myrb_logical_channel(shost),
413 ldev_num, 0);
414 continue;
415 }
416 old = sdev->hostdata;
417 if (new->state != old->state)
418 shost_printk(KERN_INFO, shost,
419 "Logical Drive %d is now %s\n",
420 ldev_num, myrb_devstate_name(new->state));
421 if (new->wb_enabled != old->wb_enabled)
422 sdev_printk(KERN_INFO, sdev,
423 "Logical Drive is now WRITE %s\n",
424 (new->wb_enabled ? "BACK" : "THRU"));
425 memcpy(old, new, sizeof(*new));
426 scsi_device_put(sdev);
427 }
428 return status;
429}
430
431/**
432 * myrb_get_rbld_progress - get rebuild progress information
433 *
434 * Executes a type 3 command and returns the rebuild progress
435 * information.
436 *
437 * Return: command status
438 */
439static unsigned short myrb_get_rbld_progress(struct myrb_hba *cb,
440 struct myrb_rbld_progress *rbld)
441{
442 struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
443 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
444 struct myrb_rbld_progress *rbld_buf;
445 dma_addr_t rbld_addr;
446 unsigned short status;
447
448 rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
449 sizeof(struct myrb_rbld_progress),
450 &rbld_addr, GFP_KERNEL);
451 if (!rbld_buf)
452 return MYRB_STATUS_RBLD_NOT_CHECKED;
453
454 myrb_reset_cmd(cmd_blk);
455 mbox->type3.id = MYRB_MCMD_TAG;
456 mbox->type3.opcode = MYRB_CMD_GET_REBUILD_PROGRESS;
457 mbox->type3.addr = rbld_addr;
458 status = myrb_exec_cmd(cb, cmd_blk);
459 if (rbld)
460 memcpy(rbld, rbld_buf, sizeof(struct myrb_rbld_progress));
461 dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
462 rbld_buf, rbld_addr);
463 return status;
464}
465
466/**
467 * myrb_update_rbld_progress - updates the rebuild status
468 *
469 * Updates the rebuild status for the attached logical devices.
471 */
472static void myrb_update_rbld_progress(struct myrb_hba *cb)
473{
474 struct myrb_rbld_progress rbld_buf;
475 unsigned short status;
476
477 status = myrb_get_rbld_progress(cb, &rbld_buf);
478 if (status == MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS &&
479 cb->last_rbld_status == MYRB_STATUS_SUCCESS)
480 status = MYRB_STATUS_RBLD_SUCCESS;
481 if (status != MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS) {
482 unsigned int blocks_done =
483 rbld_buf.ldev_size - rbld_buf.blocks_left;
484 struct scsi_device *sdev;
485
486 sdev = scsi_device_lookup(cb->host,
487 myrb_logical_channel(cb->host),
488 rbld_buf.ldev_num, 0);
489 if (!sdev)
490 return;
491
492 switch (status) {
493 case MYRB_STATUS_SUCCESS:
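			/*
			 * Both operands are pre-scaled by 128 (>> 7) so the
			 * 100 * blocks_done product cannot overflow 32 bits
			 * on large logical drives.
			 */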
494 sdev_printk(KERN_INFO, sdev,
495 "Rebuild in Progress, %d%% completed\n",
496 (100 * (blocks_done >> 7))
497 / (rbld_buf.ldev_size >> 7));
498 break;
499 case MYRB_STATUS_RBLD_FAILED_LDEV_FAILURE:
500 sdev_printk(KERN_INFO, sdev,
501 "Rebuild Failed due to Logical Drive Failure\n");
502 break;
503 case MYRB_STATUS_RBLD_FAILED_BADBLOCKS:
504 sdev_printk(KERN_INFO, sdev,
505 "Rebuild Failed due to Bad Blocks on Other Drives\n");
506 break;
507 case MYRB_STATUS_RBLD_FAILED_NEW_DRIVE_FAILED:
508 sdev_printk(KERN_INFO, sdev,
509 "Rebuild Failed due to Failure of Drive Being Rebuilt\n");
510 break;
511 case MYRB_STATUS_RBLD_SUCCESS:
512 sdev_printk(KERN_INFO, sdev,
513 "Rebuild Completed Successfully\n");
514 break;
515 case MYRB_STATUS_RBLD_SUCCESS_TERMINATED:
516 sdev_printk(KERN_INFO, sdev,
517 "Rebuild Successfully Terminated\n");
518 break;
519 default:
520 break;
521 }
522 scsi_device_put(sdev);
523 }
524 cb->last_rbld_status = status;
525}
526
527/**
 * myrb_get_cc_progress - retrieve the consistency check progress
 *
 * Executes a type 3 command and logs the consistency check progress.
532 */
533static void myrb_get_cc_progress(struct myrb_hba *cb)
534{
535 struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
536 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
537 struct myrb_rbld_progress *rbld_buf;
538 dma_addr_t rbld_addr;
539 unsigned short status;
540
541 rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
542 sizeof(struct myrb_rbld_progress),
543 &rbld_addr, GFP_KERNEL);
544 if (!rbld_buf) {
545 cb->need_cc_status = true;
546 return;
547 }
548 myrb_reset_cmd(cmd_blk);
549 mbox->type3.id = MYRB_MCMD_TAG;
550 mbox->type3.opcode = MYRB_CMD_REBUILD_STAT;
551 mbox->type3.addr = rbld_addr;
552 status = myrb_exec_cmd(cb, cmd_blk);
553 if (status == MYRB_STATUS_SUCCESS) {
554 unsigned int ldev_num = rbld_buf->ldev_num;
555 unsigned int ldev_size = rbld_buf->ldev_size;
556 unsigned int blocks_done =
557 ldev_size - rbld_buf->blocks_left;
558 struct scsi_device *sdev;
559
560 sdev = scsi_device_lookup(cb->host,
561 myrb_logical_channel(cb->host),
562 ldev_num, 0);
563 if (sdev) {
564 sdev_printk(KERN_INFO, sdev,
565 "Consistency Check in Progress: %d%% completed\n",
566 (100 * (blocks_done >> 7))
567 / (ldev_size >> 7));
568 scsi_device_put(sdev);
569 }
570 }
571 dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
572 rbld_buf, rbld_addr);
573}
574
575/**
576 * myrb_bgi_control - updates background initialisation status
577 *
578 * Executes a type 3B command and updates the background initialisation status
579 */
580static void myrb_bgi_control(struct myrb_hba *cb)
581{
582 struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
583 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
584 struct myrb_bgi_status *bgi, *last_bgi;
585 dma_addr_t bgi_addr;
586 struct scsi_device *sdev = NULL;
587 unsigned short status;
588
589 bgi = dma_alloc_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
590 &bgi_addr, GFP_KERNEL);
591 if (!bgi) {
592 shost_printk(KERN_ERR, cb->host,
593 "Failed to allocate bgi memory\n");
594 return;
595 }
596 myrb_reset_cmd(cmd_blk);
597 mbox->type3B.id = MYRB_DCMD_TAG;
598 mbox->type3B.opcode = MYRB_CMD_BGI_CONTROL;
599 mbox->type3B.optype = 0x20;
600 mbox->type3B.addr = bgi_addr;
601 status = myrb_exec_cmd(cb, cmd_blk);
602 last_bgi = &cb->bgi_status;
603 sdev = scsi_device_lookup(cb->host,
604 myrb_logical_channel(cb->host),
605 bgi->ldev_num, 0);
606 switch (status) {
607 case MYRB_STATUS_SUCCESS:
608 switch (bgi->status) {
609 case MYRB_BGI_INVALID:
610 break;
611 case MYRB_BGI_STARTED:
612 if (!sdev)
613 break;
614 sdev_printk(KERN_INFO, sdev,
615 "Background Initialization Started\n");
616 break;
617 case MYRB_BGI_INPROGRESS:
618 if (!sdev)
619 break;
620 if (bgi->blocks_done == last_bgi->blocks_done &&
621 bgi->ldev_num == last_bgi->ldev_num)
622 break;
623 sdev_printk(KERN_INFO, sdev,
624 "Background Initialization in Progress: %d%% completed\n",
625 (100 * (bgi->blocks_done >> 7))
626 / (bgi->ldev_size >> 7));
627 break;
628 case MYRB_BGI_SUSPENDED:
629 if (!sdev)
630 break;
631 sdev_printk(KERN_INFO, sdev,
632 "Background Initialization Suspended\n");
633 break;
634 case MYRB_BGI_CANCELLED:
635 if (!sdev)
636 break;
637 sdev_printk(KERN_INFO, sdev,
638 "Background Initialization Cancelled\n");
639 break;
640 }
641 memcpy(&cb->bgi_status, bgi, sizeof(struct myrb_bgi_status));
642 break;
643 case MYRB_STATUS_BGI_SUCCESS:
644 if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
645 sdev_printk(KERN_INFO, sdev,
646 "Background Initialization Completed Successfully\n");
647 cb->bgi_status.status = MYRB_BGI_INVALID;
648 break;
649 case MYRB_STATUS_BGI_ABORTED:
650 if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
651 sdev_printk(KERN_INFO, sdev,
652 "Background Initialization Aborted\n");
653 fallthrough;
654 case MYRB_STATUS_NO_BGI_INPROGRESS:
655 cb->bgi_status.status = MYRB_BGI_INVALID;
656 break;
657 }
658 if (sdev)
659 scsi_device_put(sdev);
660 dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
661 bgi, bgi_addr);
662}
663
664/**
665 * myrb_hba_enquiry - updates the controller status
666 *
 * Executes a MYRB_CMD_ENQUIRY command and updates the controller status.
668 *
669 * Return: command status
670 */
671static unsigned short myrb_hba_enquiry(struct myrb_hba *cb)
672{
673 struct myrb_enquiry old, *new;
674 unsigned short status;
675
676 memcpy(&old, cb->enquiry, sizeof(struct myrb_enquiry));
677
678 status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY, cb->enquiry_addr);
679 if (status != MYRB_STATUS_SUCCESS)
680 return status;
681
682 new = cb->enquiry;
683 if (new->ldev_count > old.ldev_count) {
684 int ldev_num = old.ldev_count - 1;
685
686 while (++ldev_num < new->ldev_count)
687 shost_printk(KERN_CRIT, cb->host,
688 "Logical Drive %d Now Exists\n",
689 ldev_num);
690 }
691 if (new->ldev_count < old.ldev_count) {
692 int ldev_num = new->ldev_count - 1;
693
694 while (++ldev_num < old.ldev_count)
695 shost_printk(KERN_CRIT, cb->host,
696 "Logical Drive %d No Longer Exists\n",
697 ldev_num);
698 }
699 if (new->status.deferred != old.status.deferred)
700 shost_printk(KERN_CRIT, cb->host,
701 "Deferred Write Error Flag is now %s\n",
702 (new->status.deferred ? "TRUE" : "FALSE"));
703 if (new->ev_seq != old.ev_seq) {
704 cb->new_ev_seq = new->ev_seq;
705 cb->need_err_info = true;
706 shost_printk(KERN_INFO, cb->host,
707 "Event log %d/%d (%d/%d) available\n",
708 cb->old_ev_seq, cb->new_ev_seq,
709 old.ev_seq, new->ev_seq);
710 }
711 if ((new->ldev_critical > 0 &&
712 new->ldev_critical != old.ldev_critical) ||
713 (new->ldev_offline > 0 &&
714 new->ldev_offline != old.ldev_offline) ||
715 (new->ldev_count != old.ldev_count)) {
716 shost_printk(KERN_INFO, cb->host,
717 "Logical drive count changed (%d/%d/%d)\n",
718 new->ldev_critical,
719 new->ldev_offline,
720 new->ldev_count);
721 cb->need_ldev_info = true;
722 }
723 if (new->pdev_dead > 0 ||
724 new->pdev_dead != old.pdev_dead ||
725 time_after_eq(jiffies, cb->secondary_monitor_time
726 + MYRB_SECONDARY_MONITOR_INTERVAL)) {
727 cb->need_bgi_status = cb->bgi_status_supported;
728 cb->secondary_monitor_time = jiffies;
729 }
730 if (new->rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
731 new->rbld == MYRB_BG_RBLD_IN_PROGRESS ||
732 old.rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
733 old.rbld == MYRB_BG_RBLD_IN_PROGRESS) {
734 cb->need_rbld = true;
735 cb->rbld_first = (new->ldev_critical < old.ldev_critical);
736 }
737 if (old.rbld == MYRB_BG_CHECK_IN_PROGRESS)
738 switch (new->rbld) {
739 case MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS:
740 shost_printk(KERN_INFO, cb->host,
741 "Consistency Check Completed Successfully\n");
742 break;
743 case MYRB_STDBY_RBLD_IN_PROGRESS:
744 case MYRB_BG_RBLD_IN_PROGRESS:
745 break;
746 case MYRB_BG_CHECK_IN_PROGRESS:
747 cb->need_cc_status = true;
748 break;
749 case MYRB_STDBY_RBLD_COMPLETED_WITH_ERROR:
750 shost_printk(KERN_INFO, cb->host,
751 "Consistency Check Completed with Error\n");
752 break;
753 case MYRB_BG_RBLD_OR_CHECK_FAILED_DRIVE_FAILED:
754 shost_printk(KERN_INFO, cb->host,
755 "Consistency Check Failed - Physical Device Failed\n");
756 break;
757 case MYRB_BG_RBLD_OR_CHECK_FAILED_LDEV_FAILED:
758 shost_printk(KERN_INFO, cb->host,
759 "Consistency Check Failed - Logical Drive Failed\n");
760 break;
761 case MYRB_BG_RBLD_OR_CHECK_FAILED_OTHER:
762 shost_printk(KERN_INFO, cb->host,
763 "Consistency Check Failed - Other Causes\n");
764 break;
765 case MYRB_BG_RBLD_OR_CHECK_SUCCESS_TERMINATED:
766 shost_printk(KERN_INFO, cb->host,
767 "Consistency Check Successfully Terminated\n");
768 break;
769 }
770 else if (new->rbld == MYRB_BG_CHECK_IN_PROGRESS)
771 cb->need_cc_status = true;
772
773 return MYRB_STATUS_SUCCESS;
774}
775
776/**
777 * myrb_set_pdev_state - sets the device state for a physical device
778 *
779 * Return: command status
780 */
781static unsigned short myrb_set_pdev_state(struct myrb_hba *cb,
782 struct scsi_device *sdev, enum myrb_devstate state)
783{
784 struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
785 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
786 unsigned short status;
787
	mutex_lock(&cb->dcmd_mutex);
	/* clear any state left over from a previous direct command */
	myrb_reset_cmd(cmd_blk);
	mbox->type3D.opcode = MYRB_CMD_START_DEVICE;
790 mbox->type3D.id = MYRB_DCMD_TAG;
791 mbox->type3D.channel = sdev->channel;
792 mbox->type3D.target = sdev->id;
793 mbox->type3D.state = state & 0x1F;
794 status = myrb_exec_cmd(cb, cmd_blk);
795 mutex_unlock(&cb->dcmd_mutex);
796
797 return status;
798}
799
800/**
801 * myrb_enable_mmio - enables the Memory Mailbox Interface
802 *
803 * PD and P controller types have no memory mailbox, but still need the
804 * other dma mapped memory.
805 *
806 * Return: true on success, false otherwise.
807 */
808static bool myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn)
809{
810 void __iomem *base = cb->io_base;
811 struct pci_dev *pdev = cb->pdev;
812 size_t err_table_size;
813 size_t ldev_info_size;
814 union myrb_cmd_mbox *cmd_mbox_mem;
815 struct myrb_stat_mbox *stat_mbox_mem;
816 union myrb_cmd_mbox mbox;
817 unsigned short status;
818
819 memset(&mbox, 0, sizeof(union myrb_cmd_mbox));
820
821 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
822 dev_err(&pdev->dev, "DMA mask out of range\n");
823 return false;
824 }
825
826 cb->enquiry = dma_alloc_coherent(&pdev->dev,
827 sizeof(struct myrb_enquiry),
828 &cb->enquiry_addr, GFP_KERNEL);
829 if (!cb->enquiry)
830 return false;
831
832 err_table_size = sizeof(struct myrb_error_entry) *
833 MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
834 cb->err_table = dma_alloc_coherent(&pdev->dev, err_table_size,
835 &cb->err_table_addr, GFP_KERNEL);
836 if (!cb->err_table)
837 return false;
838
839 ldev_info_size = sizeof(struct myrb_ldev_info) * MYRB_MAX_LDEVS;
840 cb->ldev_info_buf = dma_alloc_coherent(&pdev->dev, ldev_info_size,
841 &cb->ldev_info_addr, GFP_KERNEL);
842 if (!cb->ldev_info_buf)
843 return false;
844
845 /*
846 * Skip mailbox initialisation for PD and P Controllers
847 */
848 if (!mmio_init_fn)
849 return true;
850
851 /* These are the base addresses for the command memory mailbox array */
852 cb->cmd_mbox_size = MYRB_CMD_MBOX_COUNT * sizeof(union myrb_cmd_mbox);
853 cb->first_cmd_mbox = dma_alloc_coherent(&pdev->dev,
854 cb->cmd_mbox_size,
855 &cb->cmd_mbox_addr,
856 GFP_KERNEL);
857 if (!cb->first_cmd_mbox)
858 return false;
859
860 cmd_mbox_mem = cb->first_cmd_mbox;
861 cmd_mbox_mem += MYRB_CMD_MBOX_COUNT - 1;
862 cb->last_cmd_mbox = cmd_mbox_mem;
863 cb->next_cmd_mbox = cb->first_cmd_mbox;
864 cb->prev_cmd_mbox1 = cb->last_cmd_mbox;
865 cb->prev_cmd_mbox2 = cb->last_cmd_mbox - 1;
866
867 /* These are the base addresses for the status memory mailbox array */
868 cb->stat_mbox_size = MYRB_STAT_MBOX_COUNT *
869 sizeof(struct myrb_stat_mbox);
870 cb->first_stat_mbox = dma_alloc_coherent(&pdev->dev,
871 cb->stat_mbox_size,
872 &cb->stat_mbox_addr,
873 GFP_KERNEL);
874 if (!cb->first_stat_mbox)
875 return false;
876
877 stat_mbox_mem = cb->first_stat_mbox;
878 stat_mbox_mem += MYRB_STAT_MBOX_COUNT - 1;
879 cb->last_stat_mbox = stat_mbox_mem;
880 cb->next_stat_mbox = cb->first_stat_mbox;
881
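	/*
	 * Judging by the dual_mode_interface handling below, opcode2 0x14
	 * selects the dual-mode mailbox interface and 0x10 the single-mode
	 * fallback used when the firmware rejects the first attempt.
	 */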
882 /* Enable the Memory Mailbox Interface. */
883 cb->dual_mode_interface = true;
884 mbox.typeX.opcode = 0x2B;
885 mbox.typeX.id = 0;
886 mbox.typeX.opcode2 = 0x14;
887 mbox.typeX.cmd_mbox_addr = cb->cmd_mbox_addr;
888 mbox.typeX.stat_mbox_addr = cb->stat_mbox_addr;
889
890 status = mmio_init_fn(pdev, base, &mbox);
891 if (status != MYRB_STATUS_SUCCESS) {
892 cb->dual_mode_interface = false;
893 mbox.typeX.opcode2 = 0x10;
894 status = mmio_init_fn(pdev, base, &mbox);
895 if (status != MYRB_STATUS_SUCCESS) {
896 dev_err(&pdev->dev,
897 "Failed to enable mailbox, statux %02X\n",
898 status);
899 return false;
900 }
901 }
902 return true;
903}
904
905/**
906 * myrb_get_hba_config - reads the configuration information
907 *
908 * Reads the configuration information from the controller and
909 * initializes the controller structure.
910 *
911 * Return: 0 on success, errno otherwise
912 */
913static int myrb_get_hba_config(struct myrb_hba *cb)
914{
915 struct myrb_enquiry2 *enquiry2;
916 dma_addr_t enquiry2_addr;
917 struct myrb_config2 *config2;
918 dma_addr_t config2_addr;
919 struct Scsi_Host *shost = cb->host;
920 struct pci_dev *pdev = cb->pdev;
921 int pchan_max = 0, pchan_cur = 0;
922 unsigned short status;
923 int ret = -ENODEV, memsize = 0;
924
925 enquiry2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
926 &enquiry2_addr, GFP_KERNEL);
927 if (!enquiry2) {
928 shost_printk(KERN_ERR, cb->host,
929 "Failed to allocate V1 enquiry2 memory\n");
930 return -ENOMEM;
931 }
932 config2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_config2),
933 &config2_addr, GFP_KERNEL);
934 if (!config2) {
935 shost_printk(KERN_ERR, cb->host,
936 "Failed to allocate V1 config2 memory\n");
937 dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
938 enquiry2, enquiry2_addr);
939 return -ENOMEM;
940 }
941 mutex_lock(&cb->dma_mutex);
942 status = myrb_hba_enquiry(cb);
943 mutex_unlock(&cb->dma_mutex);
944 if (status != MYRB_STATUS_SUCCESS) {
945 shost_printk(KERN_WARNING, cb->host,
946 "Failed it issue V1 Enquiry\n");
947 goto out_free;
948 }
949
950 status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY2, enquiry2_addr);
951 if (status != MYRB_STATUS_SUCCESS) {
952 shost_printk(KERN_WARNING, cb->host,
953 "Failed to issue V1 Enquiry2\n");
954 goto out_free;
955 }
956
957 status = myrb_exec_type3(cb, MYRB_CMD_READ_CONFIG2, config2_addr);
958 if (status != MYRB_STATUS_SUCCESS) {
959 shost_printk(KERN_WARNING, cb->host,
960 "Failed to issue ReadConfig2\n");
961 goto out_free;
962 }
963
964 status = myrb_get_ldev_info(cb);
965 if (status != MYRB_STATUS_SUCCESS) {
966 shost_printk(KERN_WARNING, cb->host,
967 "Failed to get logical drive information\n");
968 goto out_free;
969 }
970
971 /*
972 * Initialize the Controller Model Name and Full Model Name fields.
973 */
974 switch (enquiry2->hw.sub_model) {
975 case DAC960_V1_P_PD_PU:
976 if (enquiry2->scsi_cap.bus_speed == MYRB_SCSI_SPEED_ULTRA)
977 strcpy(cb->model_name, "DAC960PU");
978 else
979 strcpy(cb->model_name, "DAC960PD");
980 break;
981 case DAC960_V1_PL:
982 strcpy(cb->model_name, "DAC960PL");
983 break;
984 case DAC960_V1_PG:
985 strcpy(cb->model_name, "DAC960PG");
986 break;
987 case DAC960_V1_PJ:
988 strcpy(cb->model_name, "DAC960PJ");
989 break;
990 case DAC960_V1_PR:
991 strcpy(cb->model_name, "DAC960PR");
992 break;
993 case DAC960_V1_PT:
994 strcpy(cb->model_name, "DAC960PT");
995 break;
996 case DAC960_V1_PTL0:
997 strcpy(cb->model_name, "DAC960PTL0");
998 break;
999 case DAC960_V1_PRL:
1000 strcpy(cb->model_name, "DAC960PRL");
1001 break;
1002 case DAC960_V1_PTL1:
1003 strcpy(cb->model_name, "DAC960PTL1");
1004 break;
1005 case DAC960_V1_1164P:
1006 strcpy(cb->model_name, "eXtremeRAID 1100");
1007 break;
1008 default:
1009 shost_printk(KERN_WARNING, cb->host,
1010 "Unknown Model %X\n",
1011 enquiry2->hw.sub_model);
1012 goto out;
1013 }
1014 /*
1015 * Initialize the Controller Firmware Version field and verify that it
1016 * is a supported firmware version.
1017 * The supported firmware versions are:
1018 *
1019 * DAC1164P 5.06 and above
1020 * DAC960PTL/PRL/PJ/PG 4.06 and above
1021 * DAC960PU/PD/PL 3.51 and above
1022 * DAC960PU/PD/PL/P 2.73 and above
1023 */
1024#if defined(CONFIG_ALPHA)
1025 /*
1026 * DEC Alpha machines were often equipped with DAC960 cards that were
1027 * OEMed from Mylex, and had their own custom firmware. Version 2.70,
1028 * the last custom FW revision to be released by DEC for these older
1029 * controllers, appears to work quite well with this driver.
1030 *
1031 * Cards tested successfully were several versions each of the PD and
1032 * PU, called by DEC the KZPSC and KZPAC, respectively, and having
1033 * the Manufacturer Numbers (from Mylex), usually on a sticker on the
1034 * back of the board, of:
1035 *
1036 * KZPSC: D040347 (1-channel) or D040348 (2-channel)
1037 * or D040349 (3-channel)
1038 * KZPAC: D040395 (1-channel) or D040396 (2-channel)
1039 * or D040397 (3-channel)
1040 */
1041# define FIRMWARE_27X "2.70"
1042#else
1043# define FIRMWARE_27X "2.73"
1044#endif
1045
1046 if (enquiry2->fw.major_version == 0) {
1047 enquiry2->fw.major_version = cb->enquiry->fw_major_version;
1048 enquiry2->fw.minor_version = cb->enquiry->fw_minor_version;
1049 enquiry2->fw.firmware_type = '0';
1050 enquiry2->fw.turn_id = 0;
1051 }
1052 snprintf(cb->fw_version, sizeof(cb->fw_version),
1053 "%d.%02d-%c-%02d",
1054 enquiry2->fw.major_version,
1055 enquiry2->fw.minor_version,
1056 enquiry2->fw.firmware_type,
1057 enquiry2->fw.turn_id);
1058 if (!((enquiry2->fw.major_version == 5 &&
1059 enquiry2->fw.minor_version >= 6) ||
1060 (enquiry2->fw.major_version == 4 &&
1061 enquiry2->fw.minor_version >= 6) ||
1062 (enquiry2->fw.major_version == 3 &&
1063 enquiry2->fw.minor_version >= 51) ||
1064 (enquiry2->fw.major_version == 2 &&
1065 strcmp(cb->fw_version, FIRMWARE_27X) >= 0))) {
1066 shost_printk(KERN_WARNING, cb->host,
1067 "Firmware Version '%s' unsupported\n",
1068 cb->fw_version);
1069 goto out;
1070 }
1071 /*
1072 * Initialize the Channels, Targets, Memory Size, and SAF-TE
1073 * Enclosure Management Enabled fields.
1074 */
1075 switch (enquiry2->hw.model) {
1076 case MYRB_5_CHANNEL_BOARD:
1077 pchan_max = 5;
1078 break;
1079 case MYRB_3_CHANNEL_BOARD:
1080 case MYRB_3_CHANNEL_ASIC_DAC:
1081 pchan_max = 3;
1082 break;
1083 case MYRB_2_CHANNEL_BOARD:
1084 pchan_max = 2;
1085 break;
1086 default:
1087 pchan_max = enquiry2->cfg_chan;
1088 break;
1089 }
1090 pchan_cur = enquiry2->cur_chan;
1091 if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_32BIT)
1092 cb->bus_width = 32;
1093 else if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_16BIT)
1094 cb->bus_width = 16;
1095 else
1096 cb->bus_width = 8;
1097 cb->ldev_block_size = enquiry2->ldev_block_size;
1098 shost->max_channel = pchan_cur;
1099 shost->max_id = enquiry2->max_targets;
1100 memsize = enquiry2->mem_size >> 20;
1101 cb->safte_enabled = (enquiry2->fault_mgmt == MYRB_FAULT_SAFTE);
1102 /*
1103 * Initialize the Controller Queue Depth, Driver Queue Depth,
1104 * Logical Drive Count, Maximum Blocks per Command, Controller
1105 * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
1106 * The Driver Queue Depth must be at most one less than the
1107 * Controller Queue Depth to allow for an automatic drive
1108 * rebuild operation.
1109 */
1110 shost->can_queue = cb->enquiry->max_tcq;
1111 if (shost->can_queue < 3)
1112 shost->can_queue = enquiry2->max_cmds;
1113 if (shost->can_queue < 3)
1114 /* Play safe and disable TCQ */
1115 shost->can_queue = 1;
1116
1117 if (shost->can_queue > MYRB_CMD_MBOX_COUNT - 2)
1118 shost->can_queue = MYRB_CMD_MBOX_COUNT - 2;
1119 shost->max_sectors = enquiry2->max_sectors;
1120 shost->sg_tablesize = enquiry2->max_sge;
1121 if (shost->sg_tablesize > MYRB_SCATTER_GATHER_LIMIT)
1122 shost->sg_tablesize = MYRB_SCATTER_GATHER_LIMIT;
1123 /*
1124 * Initialize the Stripe Size, Segment Size, and Geometry Translation.
1125 */
1126 cb->stripe_size = config2->blocks_per_stripe * config2->block_factor
1127 >> (10 - MYRB_BLKSIZE_BITS);
1128 cb->segment_size = config2->blocks_per_cacheline * config2->block_factor
1129 >> (10 - MYRB_BLKSIZE_BITS);
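	/*
	 * Worked example (hypothetical values, assuming 512-byte blocks,
	 * i.e. MYRB_BLKSIZE_BITS == 9): blocks_per_stripe = 128 and
	 * block_factor = 1 give 128 * 1 = 128 blocks of 512 bytes, and
	 * 128 >> (10 - 9) = 64, i.e. a 64KB stripe.
	 */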
1130 /* Assume 255/63 translation */
1131 cb->ldev_geom_heads = 255;
1132 cb->ldev_geom_sectors = 63;
1133 if (config2->drive_geometry) {
1134 cb->ldev_geom_heads = 128;
1135 cb->ldev_geom_sectors = 32;
1136 }
1137
1138 /*
1139 * Initialize the Background Initialization Status.
1140 */
1141 if ((cb->fw_version[0] == '4' &&
1142 strcmp(cb->fw_version, "4.08") >= 0) ||
1143 (cb->fw_version[0] == '5' &&
1144 strcmp(cb->fw_version, "5.08") >= 0)) {
1145 cb->bgi_status_supported = true;
1146 myrb_bgi_control(cb);
1147 }
1148 cb->last_rbld_status = MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS;
1149 ret = 0;
1150
1151out:
1152 shost_printk(KERN_INFO, cb->host,
1153 "Configuring %s PCI RAID Controller\n", cb->model_name);
1154 shost_printk(KERN_INFO, cb->host,
1155 " Firmware Version: %s, Memory Size: %dMB\n",
1156 cb->fw_version, memsize);
1157 if (cb->io_addr == 0)
1158 shost_printk(KERN_INFO, cb->host,
1159 " I/O Address: n/a, PCI Address: 0x%lX, IRQ Channel: %d\n",
1160 (unsigned long)cb->pci_addr, cb->irq);
1161 else
1162 shost_printk(KERN_INFO, cb->host,
1163 " I/O Address: 0x%lX, PCI Address: 0x%lX, IRQ Channel: %d\n",
1164 (unsigned long)cb->io_addr, (unsigned long)cb->pci_addr,
1165 cb->irq);
1166 shost_printk(KERN_INFO, cb->host,
1167 " Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
1168 cb->host->can_queue, cb->host->max_sectors);
1169 shost_printk(KERN_INFO, cb->host,
1170 " Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
1171 cb->host->can_queue, cb->host->sg_tablesize,
1172 MYRB_SCATTER_GATHER_LIMIT);
1173 shost_printk(KERN_INFO, cb->host,
1174 " Stripe Size: %dKB, Segment Size: %dKB, BIOS Geometry: %d/%d%s\n",
1175 cb->stripe_size, cb->segment_size,
1176 cb->ldev_geom_heads, cb->ldev_geom_sectors,
1177 cb->safte_enabled ?
1178 " SAF-TE Enclosure Management Enabled" : "");
1179 shost_printk(KERN_INFO, cb->host,
1180 " Physical: %d/%d channels %d/%d/%d devices\n",
1181 pchan_cur, pchan_max, 0, cb->enquiry->pdev_dead,
1182 cb->host->max_id);
1183
1184 shost_printk(KERN_INFO, cb->host,
1185 " Logical: 1/1 channels, %d/%d disks\n",
1186 cb->enquiry->ldev_count, MYRB_MAX_LDEVS);
1187
1188out_free:
1189 dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
1190 enquiry2, enquiry2_addr);
1191 dma_free_coherent(&pdev->dev, sizeof(struct myrb_config2),
1192 config2, config2_addr);
1193
1194 return ret;
1195}
1196
1197/**
1198 * myrb_unmap - unmaps controller structures
1199 */
1200static void myrb_unmap(struct myrb_hba *cb)
1201{
1202 if (cb->ldev_info_buf) {
1203 size_t ldev_info_size = sizeof(struct myrb_ldev_info) *
1204 MYRB_MAX_LDEVS;
1205 dma_free_coherent(&cb->pdev->dev, ldev_info_size,
1206 cb->ldev_info_buf, cb->ldev_info_addr);
1207 cb->ldev_info_buf = NULL;
1208 }
1209 if (cb->err_table) {
1210 size_t err_table_size = sizeof(struct myrb_error_entry) *
1211 MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
1212 dma_free_coherent(&cb->pdev->dev, err_table_size,
1213 cb->err_table, cb->err_table_addr);
1214 cb->err_table = NULL;
1215 }
1216 if (cb->enquiry) {
1217 dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_enquiry),
1218 cb->enquiry, cb->enquiry_addr);
1219 cb->enquiry = NULL;
1220 }
1221 if (cb->first_stat_mbox) {
1222 dma_free_coherent(&cb->pdev->dev, cb->stat_mbox_size,
1223 cb->first_stat_mbox, cb->stat_mbox_addr);
1224 cb->first_stat_mbox = NULL;
1225 }
1226 if (cb->first_cmd_mbox) {
1227 dma_free_coherent(&cb->pdev->dev, cb->cmd_mbox_size,
1228 cb->first_cmd_mbox, cb->cmd_mbox_addr);
1229 cb->first_cmd_mbox = NULL;
1230 }
1231}
1232
1233/**
1234 * myrb_cleanup - cleanup controller structures
1235 */
1236static void myrb_cleanup(struct myrb_hba *cb)
1237{
1238 struct pci_dev *pdev = cb->pdev;
1239
1240 /* Free the memory mailbox, status, and related structures */
1241 myrb_unmap(cb);
1242
1243 if (cb->mmio_base) {
1244 cb->disable_intr(cb->io_base);
1245 iounmap(cb->mmio_base);
1246 }
1247 if (cb->irq)
1248 free_irq(cb->irq, cb);
1249 if (cb->io_addr)
1250 release_region(cb->io_addr, 0x80);
1251 pci_set_drvdata(pdev, NULL);
1252 pci_disable_device(pdev);
1253 scsi_host_put(cb->host);
1254}
1255
1256static int myrb_host_reset(struct scsi_cmnd *scmd)
1257{
1258 struct Scsi_Host *shost = scmd->device->host;
1259 struct myrb_hba *cb = shost_priv(shost);
1260
1261 cb->reset(cb->io_base);
1262 return SUCCESS;
1263}
1264
1265static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
1266 struct scsi_cmnd *scmd)
1267{
1268 struct myrb_hba *cb = shost_priv(shost);
1269 struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
1270 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
1271 struct myrb_dcdb *dcdb;
1272 dma_addr_t dcdb_addr;
1273 struct scsi_device *sdev = scmd->device;
1274 struct scatterlist *sgl;
1275 unsigned long flags;
1276 int nsge;
1277
1278 myrb_reset_cmd(cmd_blk);
1279 dcdb = dma_pool_alloc(cb->dcdb_pool, GFP_ATOMIC, &dcdb_addr);
1280 if (!dcdb)
1281 return SCSI_MLQUEUE_HOST_BUSY;
1282 nsge = scsi_dma_map(scmd);
1283 if (nsge > 1) {
1284 dma_pool_free(cb->dcdb_pool, dcdb, dcdb_addr);
1285 scmd->result = (DID_ERROR << 16);
1286 scmd->scsi_done(scmd);
1287 return 0;
1288 }
1289
1290 mbox->type3.opcode = MYRB_CMD_DCDB;
1291 mbox->type3.id = scmd->request->tag + 3;
1292 mbox->type3.addr = dcdb_addr;
1293 dcdb->channel = sdev->channel;
1294 dcdb->target = sdev->id;
1295 switch (scmd->sc_data_direction) {
1296 case DMA_NONE:
1297 dcdb->data_xfer = MYRB_DCDB_XFER_NONE;
1298 break;
1299 case DMA_TO_DEVICE:
1300 dcdb->data_xfer = MYRB_DCDB_XFER_SYSTEM_TO_DEVICE;
1301 break;
1302 case DMA_FROM_DEVICE:
1303 dcdb->data_xfer = MYRB_DCDB_XFER_DEVICE_TO_SYSTEM;
1304 break;
1305 default:
1306 dcdb->data_xfer = MYRB_DCDB_XFER_ILLEGAL;
1307 break;
1308 }
1309 dcdb->early_status = false;
1310 if (scmd->request->timeout <= 10)
1311 dcdb->timeout = MYRB_DCDB_TMO_10_SECS;
1312 else if (scmd->request->timeout <= 60)
1313 dcdb->timeout = MYRB_DCDB_TMO_60_SECS;
1314 else if (scmd->request->timeout <= 600)
1315 dcdb->timeout = MYRB_DCDB_TMO_10_MINS;
1316 else
1317 dcdb->timeout = MYRB_DCDB_TMO_24_HRS;
1318 dcdb->no_autosense = false;
1319 dcdb->allow_disconnect = true;
1320 sgl = scsi_sglist(scmd);
1321 dcdb->dma_addr = sg_dma_address(sgl);
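	/* the DCDB transfer length is 20 bits: 16 low bits plus a high nibble */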
1322 if (sg_dma_len(sgl) > USHRT_MAX) {
1323 dcdb->xfer_len_lo = sg_dma_len(sgl) & 0xffff;
1324 dcdb->xfer_len_hi4 = sg_dma_len(sgl) >> 16;
1325 } else {
1326 dcdb->xfer_len_lo = sg_dma_len(sgl);
1327 dcdb->xfer_len_hi4 = 0;
1328 }
1329 dcdb->cdb_len = scmd->cmd_len;
1330 dcdb->sense_len = sizeof(dcdb->sense);
1331 memcpy(&dcdb->cdb, scmd->cmnd, scmd->cmd_len);
1332
1333 spin_lock_irqsave(&cb->queue_lock, flags);
1334 cb->qcmd(cb, cmd_blk);
1335 spin_unlock_irqrestore(&cb->queue_lock, flags);
1336 return 0;
1337}
1338
1339static void myrb_inquiry(struct myrb_hba *cb,
1340 struct scsi_cmnd *scmd)
1341{
1342 unsigned char inq[36] = {
1343 0x00, 0x00, 0x03, 0x02, 0x20, 0x00, 0x01, 0x00,
1344 0x4d, 0x59, 0x4c, 0x45, 0x58, 0x20, 0x20, 0x20,
1345 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
1346 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
1347 0x20, 0x20, 0x20, 0x20,
1348 };
1349
1350 if (cb->bus_width > 16)
1351 inq[7] |= 1 << 6;
1352 if (cb->bus_width > 8)
1353 inq[7] |= 1 << 5;
1354 memcpy(&inq[16], cb->model_name, 16);
1355 memcpy(&inq[32], cb->fw_version, 1);
1356 memcpy(&inq[33], &cb->fw_version[2], 2);
1357 memcpy(&inq[35], &cb->fw_version[7], 1);
1358
1359 scsi_sg_copy_from_buffer(scmd, (void *)inq, 36);
1360}
1361
1362static void
1363myrb_mode_sense(struct myrb_hba *cb, struct scsi_cmnd *scmd,
1364 struct myrb_ldev_info *ldev_info)
1365{
1366 unsigned char modes[32], *mode_pg;
1367 bool dbd;
1368 size_t mode_len;
1369
1370 dbd = (scmd->cmnd[1] & 0x08) == 0x08;
1371 if (dbd) {
1372 mode_len = 24;
1373 mode_pg = &modes[4];
1374 } else {
1375 mode_len = 32;
1376 mode_pg = &modes[12];
1377 }
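	/*
	 * The reply is a 4-byte mode parameter header, an optional 8-byte
	 * block descriptor (suppressed when DBD is set) and the 20-byte
	 * caching mode page, i.e. 32 or 24 bytes in total.
	 */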
1378 memset(modes, 0, sizeof(modes));
1379 modes[0] = mode_len - 1;
1380 if (!dbd) {
1381 unsigned char *block_desc = &modes[4];
1382
1383 modes[3] = 8;
1384 put_unaligned_be32(ldev_info->size, &block_desc[0]);
		/* the block length is a 24-bit field in bytes 5..7 */
		put_unaligned_be24(cb->ldev_block_size, &block_desc[5]);
1386 }
1387 mode_pg[0] = 0x08;
1388 mode_pg[1] = 0x12;
1389 if (ldev_info->wb_enabled)
1390 mode_pg[2] |= 0x04;
1391 if (cb->segment_size) {
1392 mode_pg[2] |= 0x08;
1393 put_unaligned_be16(cb->segment_size, &mode_pg[14]);
1394 }
1395
1396 scsi_sg_copy_from_buffer(scmd, modes, mode_len);
1397}
1398
1399static void myrb_request_sense(struct myrb_hba *cb,
1400 struct scsi_cmnd *scmd)
1401{
1402 scsi_build_sense_buffer(0, scmd->sense_buffer,
1403 NO_SENSE, 0, 0);
1404 scsi_sg_copy_from_buffer(scmd, scmd->sense_buffer,
1405 SCSI_SENSE_BUFFERSIZE);
1406}
1407
1408static void myrb_read_capacity(struct myrb_hba *cb, struct scsi_cmnd *scmd,
1409 struct myrb_ldev_info *ldev_info)
1410{
1411 unsigned char data[8];
1412
1413 dev_dbg(&scmd->device->sdev_gendev,
1414 "Capacity %u, blocksize %u\n",
1415 ldev_info->size, cb->ldev_block_size);
1416 put_unaligned_be32(ldev_info->size - 1, &data[0]);
1417 put_unaligned_be32(cb->ldev_block_size, &data[4]);
1418 scsi_sg_copy_from_buffer(scmd, data, 8);
1419}
1420
1421static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
1422 struct scsi_cmnd *scmd)
1423{
1424 struct myrb_hba *cb = shost_priv(shost);
1425 struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
1426 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
1427 struct myrb_ldev_info *ldev_info;
1428 struct scsi_device *sdev = scmd->device;
1429 struct scatterlist *sgl;
1430 unsigned long flags;
1431 u64 lba;
1432 u32 block_cnt;
1433 int nsge;
1434
	ldev_info = sdev->hostdata;
	if (!ldev_info ||
	    (ldev_info->state != MYRB_DEVICE_ONLINE &&
	     ldev_info->state != MYRB_DEVICE_WO)) {
		dev_dbg(&shost->shost_gendev, "ldev %u in state %x, skip\n",
			sdev->id, ldev_info ? ldev_info->state : 0xff);
1440 scmd->result = (DID_BAD_TARGET << 16);
1441 scmd->scsi_done(scmd);
1442 return 0;
1443 }
1444 switch (scmd->cmnd[0]) {
1445 case TEST_UNIT_READY:
1446 scmd->result = (DID_OK << 16);
1447 scmd->scsi_done(scmd);
1448 return 0;
1449 case INQUIRY:
1450 if (scmd->cmnd[1] & 1) {
1451 /* Illegal request, invalid field in CDB */
1452 scsi_build_sense_buffer(0, scmd->sense_buffer,
1453 ILLEGAL_REQUEST, 0x24, 0);
1454 scmd->result = (DRIVER_SENSE << 24) |
1455 SAM_STAT_CHECK_CONDITION;
1456 } else {
1457 myrb_inquiry(cb, scmd);
1458 scmd->result = (DID_OK << 16);
1459 }
1460 scmd->scsi_done(scmd);
1461 return 0;
1462 case SYNCHRONIZE_CACHE:
1463 scmd->result = (DID_OK << 16);
1464 scmd->scsi_done(scmd);
1465 return 0;
1466 case MODE_SENSE:
1467 if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
1468 (scmd->cmnd[2] & 0x3F) != 0x08) {
1469 /* Illegal request, invalid field in CDB */
1470 scsi_build_sense_buffer(0, scmd->sense_buffer,
1471 ILLEGAL_REQUEST, 0x24, 0);
1472 scmd->result = (DRIVER_SENSE << 24) |
1473 SAM_STAT_CHECK_CONDITION;
1474 } else {
1475 myrb_mode_sense(cb, scmd, ldev_info);
1476 scmd->result = (DID_OK << 16);
1477 }
1478 scmd->scsi_done(scmd);
1479 return 0;
1480 case READ_CAPACITY:
1481 if ((scmd->cmnd[1] & 1) ||
1482 (scmd->cmnd[8] & 1)) {
1483 /* Illegal request, invalid field in CDB */
1484 scsi_build_sense_buffer(0, scmd->sense_buffer,
1485 ILLEGAL_REQUEST, 0x24, 0);
1486 scmd->result = (DRIVER_SENSE << 24) |
1487 SAM_STAT_CHECK_CONDITION;
1488 scmd->scsi_done(scmd);
1489 return 0;
1490 }
1491 lba = get_unaligned_be32(&scmd->cmnd[2]);
1492 if (lba) {
1493 /* Illegal request, invalid field in CDB */
1494 scsi_build_sense_buffer(0, scmd->sense_buffer,
1495 ILLEGAL_REQUEST, 0x24, 0);
1496 scmd->result = (DRIVER_SENSE << 24) |
1497 SAM_STAT_CHECK_CONDITION;
1498 scmd->scsi_done(scmd);
1499 return 0;
1500 }
1501 myrb_read_capacity(cb, scmd, ldev_info);
1502 scmd->scsi_done(scmd);
1503 return 0;
	case REQUEST_SENSE:
		myrb_request_sense(cb, scmd);
		scmd->result = (DID_OK << 16);
		scmd->scsi_done(scmd);
		return 0;
1508 case SEND_DIAGNOSTIC:
1509 if (scmd->cmnd[1] != 0x04) {
1510 /* Illegal request, invalid field in CDB */
1511 scsi_build_sense_buffer(0, scmd->sense_buffer,
1512 ILLEGAL_REQUEST, 0x24, 0);
1513 scmd->result = (DRIVER_SENSE << 24) |
1514 SAM_STAT_CHECK_CONDITION;
1515 } else {
1516 /* Assume good status */
1517 scmd->result = (DID_OK << 16);
1518 }
1519 scmd->scsi_done(scmd);
1520 return 0;
1521 case READ_6:
1522 if (ldev_info->state == MYRB_DEVICE_WO) {
1523 /* Data protect, attempt to read invalid data */
1524 scsi_build_sense_buffer(0, scmd->sense_buffer,
1525 DATA_PROTECT, 0x21, 0x06);
1526 scmd->result = (DRIVER_SENSE << 24) |
1527 SAM_STAT_CHECK_CONDITION;
1528 scmd->scsi_done(scmd);
1529 return 0;
1530 }
1531 fallthrough;
1532 case WRITE_6:
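		/* 6-byte CDBs carry a 21-bit LBA in bytes 1..3 */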
1533 lba = (((scmd->cmnd[1] & 0x1F) << 16) |
1534 (scmd->cmnd[2] << 8) |
1535 scmd->cmnd[3]);
		block_cnt = scmd->cmnd[4];
		/* for 6-byte CDBs a transfer length of zero means 256 blocks */
		if (!block_cnt)
			block_cnt = 256;
1537 break;
1538 case READ_10:
1539 if (ldev_info->state == MYRB_DEVICE_WO) {
1540 /* Data protect, attempt to read invalid data */
1541 scsi_build_sense_buffer(0, scmd->sense_buffer,
1542 DATA_PROTECT, 0x21, 0x06);
1543 scmd->result = (DRIVER_SENSE << 24) |
1544 SAM_STAT_CHECK_CONDITION;
1545 scmd->scsi_done(scmd);
1546 return 0;
1547 }
1548 fallthrough;
1549 case WRITE_10:
1550 case VERIFY: /* 0x2F */
1551 case WRITE_VERIFY: /* 0x2E */
1552 lba = get_unaligned_be32(&scmd->cmnd[2]);
1553 block_cnt = get_unaligned_be16(&scmd->cmnd[7]);
1554 break;
1555 case READ_12:
1556 if (ldev_info->state == MYRB_DEVICE_WO) {
1557 /* Data protect, attempt to read invalid data */
1558 scsi_build_sense_buffer(0, scmd->sense_buffer,
1559 DATA_PROTECT, 0x21, 0x06);
1560 scmd->result = (DRIVER_SENSE << 24) |
1561 SAM_STAT_CHECK_CONDITION;
1562 scmd->scsi_done(scmd);
1563 return 0;
1564 }
1565 fallthrough;
1566 case WRITE_12:
1567 case VERIFY_12: /* 0xAF */
1568 case WRITE_VERIFY_12: /* 0xAE */
1569 lba = get_unaligned_be32(&scmd->cmnd[2]);
1570 block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
1571 break;
1572 default:
1573 /* Illegal request, invalid opcode */
1574 scsi_build_sense_buffer(0, scmd->sense_buffer,
1575 ILLEGAL_REQUEST, 0x20, 0);
1576 scmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
1577 scmd->scsi_done(scmd);
1578 return 0;
1579 }
1580
1581 myrb_reset_cmd(cmd_blk);
1582 mbox->type5.id = scmd->request->tag + 3;
1583 if (scmd->sc_data_direction == DMA_NONE)
1584 goto submit;
1585 nsge = scsi_dma_map(scmd);
1586 if (nsge == 1) {
1587 sgl = scsi_sglist(scmd);
1588 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
1589 mbox->type5.opcode = MYRB_CMD_READ;
1590 else
1591 mbox->type5.opcode = MYRB_CMD_WRITE;
1592
1593 mbox->type5.ld.xfer_len = block_cnt;
1594 mbox->type5.ld.ldev_num = sdev->id;
1595 mbox->type5.lba = lba;
1596 mbox->type5.addr = (u32)sg_dma_address(sgl);
1597 } else {
1598 struct myrb_sge *hw_sgl;
1599 dma_addr_t hw_sgl_addr;
1600 int i;
1601
1602 hw_sgl = dma_pool_alloc(cb->sg_pool, GFP_ATOMIC, &hw_sgl_addr);
		if (!hw_sgl) {
			/* undo the scatterlist mapping before requeueing */
			scsi_dma_unmap(scmd);
			return SCSI_MLQUEUE_HOST_BUSY;
		}
1605
1606 cmd_blk->sgl = hw_sgl;
1607 cmd_blk->sgl_addr = hw_sgl_addr;
1608
1609 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
1610 mbox->type5.opcode = MYRB_CMD_READ_SG;
1611 else
1612 mbox->type5.opcode = MYRB_CMD_WRITE_SG;
1613
1614 mbox->type5.ld.xfer_len = block_cnt;
1615 mbox->type5.ld.ldev_num = sdev->id;
1616 mbox->type5.lba = lba;
1617 mbox->type5.addr = hw_sgl_addr;
1618 mbox->type5.sg_count = nsge;
1619
1620 scsi_for_each_sg(scmd, sgl, nsge, i) {
1621 hw_sgl->sge_addr = (u32)sg_dma_address(sgl);
1622 hw_sgl->sge_count = (u32)sg_dma_len(sgl);
1623 hw_sgl++;
1624 }
1625 }
1626submit:
1627 spin_lock_irqsave(&cb->queue_lock, flags);
1628 cb->qcmd(cb, cmd_blk);
1629 spin_unlock_irqrestore(&cb->queue_lock, flags);
1630
1631 return 0;
1632}
1633
1634static int myrb_queuecommand(struct Scsi_Host *shost,
1635 struct scsi_cmnd *scmd)
1636{
1637 struct scsi_device *sdev = scmd->device;
1638
1639 if (sdev->channel > myrb_logical_channel(shost)) {
1640 scmd->result = (DID_BAD_TARGET << 16);
1641 scmd->scsi_done(scmd);
1642 return 0;
1643 }
1644 if (sdev->channel == myrb_logical_channel(shost))
1645 return myrb_ldev_queuecommand(shost, scmd);
1646
1647 return myrb_pthru_queuecommand(shost, scmd);
1648}
1649
1650static int myrb_ldev_slave_alloc(struct scsi_device *sdev)
1651{
1652 struct myrb_hba *cb = shost_priv(sdev->host);
1653 struct myrb_ldev_info *ldev_info;
1654 unsigned short ldev_num = sdev->id;
1655 enum raid_level level;
1656
	if (!cb->ldev_info_buf || ldev_num >= MYRB_MAX_LDEVS)
		return -ENXIO;

	ldev_info = cb->ldev_info_buf + ldev_num;
1660
1661 sdev->hostdata = kzalloc(sizeof(*ldev_info), GFP_KERNEL);
1662 if (!sdev->hostdata)
1663 return -ENOMEM;
1664 dev_dbg(&sdev->sdev_gendev,
1665 "slave alloc ldev %d state %x\n",
1666 ldev_num, ldev_info->state);
1667 memcpy(sdev->hostdata, ldev_info,
1668 sizeof(*ldev_info));
1669 switch (ldev_info->raid_level) {
1670 case MYRB_RAID_LEVEL0:
1671 level = RAID_LEVEL_LINEAR;
1672 break;
1673 case MYRB_RAID_LEVEL1:
1674 level = RAID_LEVEL_1;
1675 break;
1676 case MYRB_RAID_LEVEL3:
1677 level = RAID_LEVEL_3;
1678 break;
1679 case MYRB_RAID_LEVEL5:
1680 level = RAID_LEVEL_5;
1681 break;
1682 case MYRB_RAID_LEVEL6:
1683 level = RAID_LEVEL_6;
1684 break;
1685 case MYRB_RAID_JBOD:
1686 level = RAID_LEVEL_JBOD;
1687 break;
1688 default:
1689 level = RAID_LEVEL_UNKNOWN;
1690 break;
1691 }
1692 raid_set_level(myrb_raid_template, &sdev->sdev_gendev, level);
1693 return 0;
1694}
1695
1696static int myrb_pdev_slave_alloc(struct scsi_device *sdev)
1697{
1698 struct myrb_hba *cb = shost_priv(sdev->host);
1699 struct myrb_pdev_state *pdev_info;
1700 unsigned short status;
1701
1702 if (sdev->id > MYRB_MAX_TARGETS)
1703 return -ENXIO;
1704
1705 pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
1706 if (!pdev_info)
1707 return -ENOMEM;
1708
1709 status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
1710 sdev, pdev_info);
1711 if (status != MYRB_STATUS_SUCCESS) {
1712 dev_dbg(&sdev->sdev_gendev,
1713 "Failed to get device state, status %x\n",
1714 status);
1715 kfree(pdev_info);
1716 return -ENXIO;
1717 }
1718 if (!pdev_info->present) {
1719 dev_dbg(&sdev->sdev_gendev,
1720 "device not present, skip\n");
1721 kfree(pdev_info);
1722 return -ENXIO;
1723 }
1724 dev_dbg(&sdev->sdev_gendev,
1725 "slave alloc pdev %d:%d state %x\n",
1726 sdev->channel, sdev->id, pdev_info->state);
1727 sdev->hostdata = pdev_info;
1728
1729 return 0;
1730}
1731
1732static int myrb_slave_alloc(struct scsi_device *sdev)
1733{
1734 if (sdev->channel > myrb_logical_channel(sdev->host))
1735 return -ENXIO;
1736
1737 if (sdev->lun > 0)
1738 return -ENXIO;
1739
1740 if (sdev->channel == myrb_logical_channel(sdev->host))
1741 return myrb_ldev_slave_alloc(sdev);
1742
1743 return myrb_pdev_slave_alloc(sdev);
1744}
1745
1746static int myrb_slave_configure(struct scsi_device *sdev)
1747{
1748 struct myrb_ldev_info *ldev_info;
1749
1750 if (sdev->channel > myrb_logical_channel(sdev->host))
1751 return -ENXIO;
1752
1753 if (sdev->channel < myrb_logical_channel(sdev->host)) {
1754 sdev->no_uld_attach = 1;
1755 return 0;
1756 }
1757 if (sdev->lun != 0)
1758 return -ENXIO;
1759
1760 ldev_info = sdev->hostdata;
1761 if (!ldev_info)
1762 return -ENXIO;
1763 if (ldev_info->state != MYRB_DEVICE_ONLINE)
1764 sdev_printk(KERN_INFO, sdev,
1765 "Logical drive is %s\n",
1766 myrb_devstate_name(ldev_info->state));
1767
1768 sdev->tagged_supported = 1;
1769 return 0;
1770}
1771
1772static void myrb_slave_destroy(struct scsi_device *sdev)
1773{
1774 kfree(sdev->hostdata);
1775}
1776
1777static int myrb_biosparam(struct scsi_device *sdev, struct block_device *bdev,
1778 sector_t capacity, int geom[])
1779{
1780 struct myrb_hba *cb = shost_priv(sdev->host);
1781
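	/*
	 * Report the geometry assumed for the BIOS mapping; with the
	 * default 255 heads and 63 sectors, a hypothetical 4194304-sector
	 * (2GB) drive yields 4194304 / (255 * 63) = 261 cylinders.
	 */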
1782 geom[0] = cb->ldev_geom_heads;
1783 geom[1] = cb->ldev_geom_sectors;
	/* sector_div() divides in place; the quotient is the cylinder count */
	sector_div(capacity, geom[0] * geom[1]);
	geom[2] = capacity;
1785
1786 return 0;
1787}
1788
1789static ssize_t raid_state_show(struct device *dev,
1790 struct device_attribute *attr, char *buf)
1791{
1792 struct scsi_device *sdev = to_scsi_device(dev);
1793 struct myrb_hba *cb = shost_priv(sdev->host);
1794 int ret;
1795
1796 if (!sdev->hostdata)
1797 return snprintf(buf, 16, "Unknown\n");
1798
1799 if (sdev->channel == myrb_logical_channel(sdev->host)) {
1800 struct myrb_ldev_info *ldev_info = sdev->hostdata;
1801 const char *name;
1802
1803 name = myrb_devstate_name(ldev_info->state);
1804 if (name)
1805 ret = snprintf(buf, 32, "%s\n", name);
1806 else
1807 ret = snprintf(buf, 32, "Invalid (%02X)\n",
1808 ldev_info->state);
1809 } else {
1810 struct myrb_pdev_state *pdev_info = sdev->hostdata;
1811 unsigned short status;
1812 const char *name;
1813
1814 status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
1815 sdev, pdev_info);
1816 if (status != MYRB_STATUS_SUCCESS)
1817 sdev_printk(KERN_INFO, sdev,
1818 "Failed to get device state, status %x\n",
1819 status);
1820
1821 if (!pdev_info->present)
1822 name = "Removed";
1823 else
1824 name = myrb_devstate_name(pdev_info->state);
1825 if (name)
1826 ret = snprintf(buf, 32, "%s\n", name);
1827 else
1828 ret = snprintf(buf, 32, "Invalid (%02X)\n",
1829 pdev_info->state);
1830 }
1831 return ret;
1832}
1833
1834static ssize_t raid_state_store(struct device *dev,
1835 struct device_attribute *attr, const char *buf, size_t count)
1836{
1837 struct scsi_device *sdev = to_scsi_device(dev);
1838 struct myrb_hba *cb = shost_priv(sdev->host);
1839 struct myrb_pdev_state *pdev_info;
1840 enum myrb_devstate new_state;
1841 unsigned short status;
1842
1843 if (!strncmp(buf, "kill", 4) ||
1844 !strncmp(buf, "offline", 7))
1845 new_state = MYRB_DEVICE_DEAD;
1846 else if (!strncmp(buf, "online", 6))
1847 new_state = MYRB_DEVICE_ONLINE;
1848 else if (!strncmp(buf, "standby", 7))
1849 new_state = MYRB_DEVICE_STANDBY;
1850 else
1851 return -EINVAL;
1852
1853 pdev_info = sdev->hostdata;
1854 if (!pdev_info) {
1855 sdev_printk(KERN_INFO, sdev,
1856 "Failed - no physical device information\n");
1857 return -ENXIO;
1858 }
1859 if (!pdev_info->present) {
1860 sdev_printk(KERN_INFO, sdev,
1861 "Failed - device not present\n");
1862 return -ENXIO;
1863 }
1864
1865 if (pdev_info->state == new_state)
1866 return count;
1867
1868 status = myrb_set_pdev_state(cb, sdev, new_state);
1869 switch (status) {
1870 case MYRB_STATUS_SUCCESS:
1871 break;
1872 case MYRB_STATUS_START_DEVICE_FAILED:
1873 sdev_printk(KERN_INFO, sdev,
1874 "Failed - Unable to Start Device\n");
1875 count = -EAGAIN;
1876 break;
1877 case MYRB_STATUS_NO_DEVICE:
1878 sdev_printk(KERN_INFO, sdev,
1879 "Failed - No Device at Address\n");
1880 count = -ENODEV;
1881 break;
1882 case MYRB_STATUS_INVALID_CHANNEL_OR_TARGET:
1883 sdev_printk(KERN_INFO, sdev,
1884 "Failed - Invalid Channel or Target or Modifier\n");
1885 count = -EINVAL;
1886 break;
1887 case MYRB_STATUS_CHANNEL_BUSY:
1888 sdev_printk(KERN_INFO, sdev,
1889 "Failed - Channel Busy\n");
1890 count = -EBUSY;
1891 break;
1892 default:
1893 sdev_printk(KERN_INFO, sdev,
1894 "Failed - Unexpected Status %04X\n", status);
1895 count = -EIO;
1896 break;
1897 }
1898 return count;
1899}
1900static DEVICE_ATTR_RW(raid_state);
1901
1902static ssize_t raid_level_show(struct device *dev,
1903 struct device_attribute *attr, char *buf)
1904{
1905 struct scsi_device *sdev = to_scsi_device(dev);
1906
1907 if (sdev->channel == myrb_logical_channel(sdev->host)) {
1908 struct myrb_ldev_info *ldev_info = sdev->hostdata;
1909 const char *name;
1910
1911 if (!ldev_info)
1912 return -ENXIO;
1913
1914 name = myrb_raidlevel_name(ldev_info->raid_level);
1915 if (!name)
1916 return snprintf(buf, 32, "Invalid (%02X)\n",
1917 ldev_info->state);
1918 return snprintf(buf, 32, "%s\n", name);
1919 }
1920 return snprintf(buf, 32, "Physical Drive\n");
1921}
1922static DEVICE_ATTR_RO(raid_level);
1923
1924static ssize_t rebuild_show(struct device *dev,
1925 struct device_attribute *attr, char *buf)
1926{
1927 struct scsi_device *sdev = to_scsi_device(dev);
1928 struct myrb_hba *cb = shost_priv(sdev->host);
1929 struct myrb_rbld_progress rbld_buf;
	unsigned short status;
1931
1932 if (sdev->channel < myrb_logical_channel(sdev->host))
1933 return snprintf(buf, 32, "physical device - not rebuilding\n");
1934
1935 status = myrb_get_rbld_progress(cb, &rbld_buf);
1936
1937 if (rbld_buf.ldev_num != sdev->id ||
1938 status != MYRB_STATUS_SUCCESS)
1939 return snprintf(buf, 32, "not rebuilding\n");
1940
1941 return snprintf(buf, 32, "rebuilding block %u of %u\n",
1942 rbld_buf.ldev_size - rbld_buf.blocks_left,
1943 rbld_buf.ldev_size);
1944}
1945
1946static ssize_t rebuild_store(struct device *dev,
1947 struct device_attribute *attr, const char *buf, size_t count)
1948{
1949 struct scsi_device *sdev = to_scsi_device(dev);
1950 struct myrb_hba *cb = shost_priv(sdev->host);
1951 struct myrb_cmdblk *cmd_blk;
1952 union myrb_cmd_mbox *mbox;
1953 unsigned short status;
1954 int rc, start;
1955 const char *msg;
1956
1957 rc = kstrtoint(buf, 0, &start);
1958 if (rc)
1959 return rc;
1960
1961 if (sdev->channel >= myrb_logical_channel(sdev->host))
1962 return -ENXIO;
1963
1964 status = myrb_get_rbld_progress(cb, NULL);
1965 if (start) {
1966 if (status == MYRB_STATUS_SUCCESS) {
1967 sdev_printk(KERN_INFO, sdev,
1968 "Rebuild Not Initiated; already in progress\n");
1969 return -EALREADY;
1970 }
1971 mutex_lock(&cb->dcmd_mutex);
1972 cmd_blk = &cb->dcmd_blk;
1973 myrb_reset_cmd(cmd_blk);
1974 mbox = &cmd_blk->mbox;
1975 mbox->type3D.opcode = MYRB_CMD_REBUILD_ASYNC;
1976 mbox->type3D.id = MYRB_DCMD_TAG;
1977 mbox->type3D.channel = sdev->channel;
1978 mbox->type3D.target = sdev->id;
1979 status = myrb_exec_cmd(cb, cmd_blk);
1980 mutex_unlock(&cb->dcmd_mutex);
1981 } else {
1982 struct pci_dev *pdev = cb->pdev;
1983 unsigned char *rate;
1984 dma_addr_t rate_addr;
1985
1986 if (status != MYRB_STATUS_SUCCESS) {
1987 sdev_printk(KERN_INFO, sdev,
1988 "Rebuild Not Cancelled; not in progress\n");
1989 return 0;
1990 }
1991
1992 rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
1993 &rate_addr, GFP_KERNEL);
1994 if (rate == NULL) {
1995 sdev_printk(KERN_INFO, sdev,
1996 "Cancellation of Rebuild Failed - Out of Memory\n");
1997 return -ENOMEM;
1998 }
1999 mutex_lock(&cb->dcmd_mutex);
2000 cmd_blk = &cb->dcmd_blk;
2001 myrb_reset_cmd(cmd_blk);
2002 mbox = &cmd_blk->mbox;
2003 mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
2004 mbox->type3R.id = MYRB_DCMD_TAG;
2005 mbox->type3R.rbld_rate = 0xFF;
2006 mbox->type3R.addr = rate_addr;
2007 status = myrb_exec_cmd(cb, cmd_blk);
2008 dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
2009 mutex_unlock(&cb->dcmd_mutex);
2010 }
2011 if (status == MYRB_STATUS_SUCCESS) {
2012 sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
2013 start ? "Initiated" : "Cancelled");
2014 return count;
2015 }
2016 if (!start) {
2017 sdev_printk(KERN_INFO, sdev,
2018 "Rebuild Not Cancelled, status 0x%x\n",
2019 status);
2020 return -EIO;
2021 }
2022
2023 switch (status) {
2024 case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
2025 msg = "Attempt to Rebuild Online or Unresponsive Drive";
2026 break;
2027 case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
2028 msg = "New Disk Failed During Rebuild";
2029 break;
2030 case MYRB_STATUS_INVALID_ADDRESS:
2031 msg = "Invalid Device Address";
2032 break;
2033 case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
2034 msg = "Already in Progress";
2035 break;
2036 default:
2037 msg = NULL;
2038 break;
2039 }
2040 if (msg)
2041 sdev_printk(KERN_INFO, sdev,
2042 "Rebuild Failed - %s\n", msg);
2043 else
2044 sdev_printk(KERN_INFO, sdev,
2045 "Rebuild Failed, status 0x%x\n", status);
2046
2047 return -EIO;
2048}
2049static DEVICE_ATTR_RW(rebuild);
2050
2051static ssize_t consistency_check_store(struct device *dev,
2052 struct device_attribute *attr, const char *buf, size_t count)
2053{
2054 struct scsi_device *sdev = to_scsi_device(dev);
2055 struct myrb_hba *cb = shost_priv(sdev->host);
2056 struct myrb_rbld_progress rbld_buf;
2057 struct myrb_cmdblk *cmd_blk;
2058 union myrb_cmd_mbox *mbox;
2059 unsigned short ldev_num = 0xFFFF;
2060 unsigned short status;
2061 int rc, start;
2062 const char *msg;
2063
2064 rc = kstrtoint(buf, 0, &start);
2065 if (rc)
2066 return rc;
2067
2068 if (sdev->channel < myrb_logical_channel(sdev->host))
2069 return -ENXIO;
2070
	status = myrb_get_rbld_progress(cb, &rbld_buf);
	if (status == MYRB_STATUS_SUCCESS)
		ldev_num = rbld_buf.ldev_num;
	if (start) {
		if (status == MYRB_STATUS_SUCCESS) {
			sdev_printk(KERN_INFO, sdev,
				    "Check Consistency Not Initiated; already in progress\n");
			return -EALREADY;
		}
		mutex_lock(&cb->dcmd_mutex);
		cmd_blk = &cb->dcmd_blk;
		myrb_reset_cmd(cmd_blk);
		mbox = &cmd_blk->mbox;
		mbox->type3C.opcode = MYRB_CMD_CHECK_CONSISTENCY_ASYNC;
		mbox->type3C.id = MYRB_DCMD_TAG;
		mbox->type3C.ldev_num = sdev->id;
		mbox->type3C.auto_restore = true;

		status = myrb_exec_cmd(cb, cmd_blk);
		mutex_unlock(&cb->dcmd_mutex);
	} else {
		struct pci_dev *pdev = cb->pdev;
		unsigned char *rate;
		dma_addr_t rate_addr;

		if (ldev_num != sdev->id) {
			sdev_printk(KERN_INFO, sdev,
				    "Check Consistency Not Cancelled; not in progress\n");
			return 0;
		}
		rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
					  &rate_addr, GFP_KERNEL);
		if (rate == NULL) {
			sdev_printk(KERN_INFO, sdev,
				    "Cancellation of Check Consistency Failed - Out of Memory\n");
			return -ENOMEM;
		}
		mutex_lock(&cb->dcmd_mutex);
		cmd_blk = &cb->dcmd_blk;
		myrb_reset_cmd(cmd_blk);
		mbox = &cmd_blk->mbox;
		mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
		mbox->type3R.id = MYRB_DCMD_TAG;
		mbox->type3R.rbld_rate = 0xFF;
		mbox->type3R.addr = rate_addr;
		status = myrb_exec_cmd(cb, cmd_blk);
		dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
		mutex_unlock(&cb->dcmd_mutex);
	}
	if (status == MYRB_STATUS_SUCCESS) {
		sdev_printk(KERN_INFO, sdev, "Check Consistency %s\n",
			    start ? "Initiated" : "Cancelled");
		return count;
	}
	if (!start) {
		sdev_printk(KERN_INFO, sdev,
			    "Check Consistency Not Cancelled, status 0x%x\n",
			    status);
		return -EIO;
	}

	switch (status) {
	case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
		msg = "Dependent Physical Device is DEAD";
		break;
	case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
		msg = "New Disk Failed During Rebuild";
		break;
	case MYRB_STATUS_INVALID_ADDRESS:
		msg = "Invalid or Nonredundant Logical Drive";
		break;
	case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
		msg = "Already in Progress";
		break;
	default:
		msg = NULL;
		break;
	}
	if (msg)
		sdev_printk(KERN_INFO, sdev,
			    "Check Consistency Failed - %s\n", msg);
	else
		sdev_printk(KERN_INFO, sdev,
			    "Check Consistency Failed, status 0x%x\n", status);

	return -EIO;
}

static ssize_t consistency_check_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return rebuild_show(dev, attr, buf);
}
static DEVICE_ATTR_RW(consistency_check);
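
/*
 * Reads of "consistency_check" are simply aliased to rebuild_show(): the
 * controller reports rebuild and consistency-check progress through the
 * same query, so both attributes display the same progress figure.
 */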

static ssize_t ctlr_num_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrb_hba *cb = shost_priv(shost);

	return snprintf(buf, 20, "%d\n", cb->ctlr_num);
}
static DEVICE_ATTR_RO(ctlr_num);

static ssize_t firmware_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrb_hba *cb = shost_priv(shost);

	return snprintf(buf, 16, "%s\n", cb->fw_version);
}
static DEVICE_ATTR_RO(firmware);

static ssize_t model_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrb_hba *cb = shost_priv(shost);

	return snprintf(buf, 16, "%s\n", cb->model_name);
}
static DEVICE_ATTR_RO(model);

static ssize_t flush_cache_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrb_hba *cb = shost_priv(shost);
	unsigned short status;

	status = myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
	if (status == MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_INFO, shost,
			     "Cache Flush Completed\n");
		return count;
	}
	shost_printk(KERN_INFO, shost,
		     "Cache Flush Failed, status %x\n", status);
	return -EIO;
}
static DEVICE_ATTR_WO(flush_cache);
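
/*
 * flush_cache is write-only and ignores the written value: any write
 * triggers a MYRB_CMD_FLUSH.  A sketch of the usage, assuming the
 * controller is host1 (the host number is illustrative):
 *
 *	echo 1 > /sys/class/scsi_host/host1/flush_cache
 */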

static struct device_attribute *myrb_sdev_attrs[] = {
	&dev_attr_rebuild,
	&dev_attr_consistency_check,
	&dev_attr_raid_state,
	&dev_attr_raid_level,
	NULL,
};

static struct device_attribute *myrb_shost_attrs[] = {
	&dev_attr_ctlr_num,
	&dev_attr_model,
	&dev_attr_firmware,
	&dev_attr_flush_cache,
	NULL,
};

struct scsi_host_template myrb_template = {
	.module			= THIS_MODULE,
	.name			= "DAC960",
	.proc_name		= "myrb",
	.queuecommand		= myrb_queuecommand,
	.eh_host_reset_handler	= myrb_host_reset,
	.slave_alloc		= myrb_slave_alloc,
	.slave_configure	= myrb_slave_configure,
	.slave_destroy		= myrb_slave_destroy,
	.bios_param		= myrb_biosparam,
	.cmd_size		= sizeof(struct myrb_cmdblk),
	.shost_attrs		= myrb_shost_attrs,
	.sdev_attrs		= myrb_sdev_attrs,
	.this_id		= -1,
};
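
/*
 * .cmd_size makes the SCSI midlayer reserve a struct myrb_cmdblk alongside
 * every struct scsi_cmnd; the interrupt handlers below retrieve it with
 * scsi_cmd_priv(scmd), so no separate per-command allocation is needed.
 */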

/**
 * myrb_is_raid - return boolean indicating device is raid volume
 * @dev: the device struct object
 */
static int myrb_is_raid(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	return sdev->channel == myrb_logical_channel(sdev->host);
}

/**
 * myrb_get_resync - get raid volume resync percent complete
 * @dev: the device struct object
 */
static void myrb_get_resync(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_rbld_progress rbld_buf;
	unsigned int percent_complete = 0;
	unsigned short status;
	unsigned int ldev_size = 0, remaining = 0;

	if (sdev->channel < myrb_logical_channel(sdev->host))
		return;
	status = myrb_get_rbld_progress(cb, &rbld_buf);
	if (status == MYRB_STATUS_SUCCESS) {
		if (rbld_buf.ldev_num == sdev->id) {
			ldev_size = rbld_buf.ldev_size;
			remaining = rbld_buf.blocks_left;
		}
	}
	if (remaining && ldev_size)
		percent_complete = (ldev_size - remaining) * 100 / ldev_size;
	raid_set_resync(myrb_raid_template, dev, percent_complete);
}

/**
 * myrb_get_state - get raid volume status
 * @dev: the device struct object
 */
static void myrb_get_state(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_ldev_info *ldev_info = sdev->hostdata;
	enum raid_state state = RAID_STATE_UNKNOWN;
	unsigned short status;

	if (sdev->channel < myrb_logical_channel(sdev->host) || !ldev_info)
		state = RAID_STATE_UNKNOWN;
	else {
		status = myrb_get_rbld_progress(cb, NULL);
		if (status == MYRB_STATUS_SUCCESS)
			state = RAID_STATE_RESYNCING;
		else {
			switch (ldev_info->state) {
			case MYRB_DEVICE_ONLINE:
				state = RAID_STATE_ACTIVE;
				break;
			case MYRB_DEVICE_WO:
			case MYRB_DEVICE_CRITICAL:
				state = RAID_STATE_DEGRADED;
				break;
			default:
				state = RAID_STATE_OFFLINE;
			}
		}
	}
	raid_set_state(myrb_raid_template, dev, state);
}

struct raid_function_template myrb_raid_functions = {
	.cookie		= &myrb_template,
	.is_raid	= myrb_is_raid,
	.get_resync	= myrb_get_resync,
	.get_state	= myrb_get_state,
};
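
/*
 * These hooks plug into the generic raid_class (attached via
 * raid_class_attach() in myrb_init_module() below); the .cookie
 * back-pointer ties the raid class instance to this driver's host template
 * so that only devices belonging to this driver are matched.
 */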

static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk,
		struct scsi_cmnd *scmd)
{
	unsigned short status;

	if (!cmd_blk)
		return;

	scsi_dma_unmap(scmd);

	if (cmd_blk->dcdb) {
		memcpy(scmd->sense_buffer, &cmd_blk->dcdb->sense, 64);
		dma_pool_free(cb->dcdb_pool, cmd_blk->dcdb,
			      cmd_blk->dcdb_addr);
		cmd_blk->dcdb = NULL;
	}
	if (cmd_blk->sgl) {
		dma_pool_free(cb->sg_pool, cmd_blk->sgl, cmd_blk->sgl_addr);
		cmd_blk->sgl = NULL;
		cmd_blk->sgl_addr = 0;
	}
	status = cmd_blk->status;
	switch (status) {
	case MYRB_STATUS_SUCCESS:
	case MYRB_STATUS_DEVICE_BUSY:
		scmd->result = (DID_OK << 16) | status;
		break;
	case MYRB_STATUS_BAD_DATA:
		dev_dbg(&scmd->device->sdev_gendev,
			"Bad Data Encountered\n");
		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
			/* Unrecovered read error */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
						MEDIUM_ERROR, 0x11, 0);
		else
			/* Write error */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
						MEDIUM_ERROR, 0x0C, 0);
		scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
		break;
	case MYRB_STATUS_IRRECOVERABLE_DATA_ERROR:
		scmd_printk(KERN_ERR, scmd, "Irrecoverable Data Error\n");
		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
			/* Unrecovered read error, auto-reallocation failed */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
						MEDIUM_ERROR, 0x11, 0x04);
		else
			/* Write error, auto-reallocation failed */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
						MEDIUM_ERROR, 0x0C, 0x02);
		scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
		break;
	case MYRB_STATUS_LDRV_NONEXISTENT_OR_OFFLINE:
		dev_dbg(&scmd->device->sdev_gendev,
			"Logical Drive Nonexistent or Offline\n");
		scmd->result = (DID_BAD_TARGET << 16);
		break;
	case MYRB_STATUS_ACCESS_BEYOND_END_OF_LDRV:
		dev_dbg(&scmd->device->sdev_gendev,
			"Attempt to Access Beyond End of Logical Drive\n");
		/* Logical block address out of range */
		scsi_build_sense_buffer(0, scmd->sense_buffer,
					NOT_READY, 0x21, 0);
		break;
	case MYRB_STATUS_DEVICE_NONRESPONSIVE:
		dev_dbg(&scmd->device->sdev_gendev, "Device nonresponsive\n");
		scmd->result = (DID_BAD_TARGET << 16);
		break;
	default:
		scmd_printk(KERN_ERR, scmd,
			    "Unexpected Error Status %04X\n", status);
		scmd->result = (DID_ERROR << 16);
		break;
	}
	scmd->scsi_done(scmd);
}

static void myrb_handle_cmdblk(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
	if (!cmd_blk)
		return;

	if (cmd_blk->completion) {
		complete(cmd_blk->completion);
		cmd_blk->completion = NULL;
	}
}

static void myrb_monitor(struct work_struct *work)
{
	struct myrb_hba *cb = container_of(work,
			struct myrb_hba, monitor_work.work);
	struct Scsi_Host *shost = cb->host;
	unsigned long interval = MYRB_PRIMARY_MONITOR_INTERVAL;

	dev_dbg(&shost->shost_gendev, "monitor tick\n");

	if (cb->new_ev_seq > cb->old_ev_seq) {
		int event = cb->old_ev_seq;

		dev_dbg(&shost->shost_gendev,
			"get event log no %d/%d\n",
			cb->new_ev_seq, event);
		myrb_get_event(cb, event);
		cb->old_ev_seq = event + 1;
		interval = 10;
	} else if (cb->need_err_info) {
		cb->need_err_info = false;
		dev_dbg(&shost->shost_gendev, "get error table\n");
		myrb_get_errtable(cb);
		interval = 10;
	} else if (cb->need_rbld && cb->rbld_first) {
		cb->need_rbld = false;
		dev_dbg(&shost->shost_gendev,
			"get rebuild progress\n");
		myrb_update_rbld_progress(cb);
		interval = 10;
	} else if (cb->need_ldev_info) {
		cb->need_ldev_info = false;
		dev_dbg(&shost->shost_gendev,
			"get logical drive info\n");
		myrb_get_ldev_info(cb);
		interval = 10;
	} else if (cb->need_rbld) {
		cb->need_rbld = false;
		dev_dbg(&shost->shost_gendev,
			"get rebuild progress\n");
		myrb_update_rbld_progress(cb);
		interval = 10;
	} else if (cb->need_cc_status) {
		cb->need_cc_status = false;
		dev_dbg(&shost->shost_gendev,
			"get consistency check progress\n");
		myrb_get_cc_progress(cb);
		interval = 10;
	} else if (cb->need_bgi_status) {
		cb->need_bgi_status = false;
		dev_dbg(&shost->shost_gendev, "get background init status\n");
		myrb_bgi_control(cb);
		interval = 10;
	} else {
		dev_dbg(&shost->shost_gendev, "new enquiry\n");
		mutex_lock(&cb->dma_mutex);
		myrb_hba_enquiry(cb);
		mutex_unlock(&cb->dma_mutex);
		if ((cb->new_ev_seq - cb->old_ev_seq > 0) ||
		    cb->need_err_info || cb->need_rbld ||
		    cb->need_ldev_info || cb->need_cc_status ||
		    cb->need_bgi_status) {
			dev_dbg(&shost->shost_gendev,
				"reschedule monitor\n");
			interval = 0;
		}
	}
	if (interval > 1)
		cb->primary_monitor_time = jiffies;
	queue_delayed_work(cb->work_q, &cb->monitor_work, interval);
}
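
/*
 * The monitor drains at most one piece of pending work per tick: whenever
 * something was done (or a fresh enquiry flagged more work), it reschedules
 * itself after a short delay of a few jiffies, or immediately, instead of
 * waiting out a full MYRB_PRIMARY_MONITOR_INTERVAL.
 */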

/**
 * myrb_err_status - reports controller BIOS messages
 * @cb: pointer to the hba structure
 * @error: error status byte read from the Error Status Register
 * @parm0: first parameter byte
 * @parm1: second parameter byte
 *
 * Controller BIOS messages are passed through the Error Status Register
 * when the driver performs the BIOS handshaking.
 *
 * Return: true for fatal errors and false otherwise.
 */
bool myrb_err_status(struct myrb_hba *cb, unsigned char error,
		unsigned char parm0, unsigned char parm1)
{
	struct pci_dev *pdev = cb->pdev;

	switch (error) {
	case 0x00:
		dev_info(&pdev->dev,
			 "Physical Device %d:%d Not Responding\n",
			 parm1, parm0);
		break;
	case 0x08:
		dev_notice(&pdev->dev, "Spinning Up Drives\n");
		break;
	case 0x30:
		dev_notice(&pdev->dev, "Configuration Checksum Error\n");
		break;
	case 0x60:
		dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
		break;
	case 0x70:
		dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
		break;
	case 0x90:
		dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
			   parm1, parm0);
		break;
	case 0xA0:
		dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
		break;
	case 0xB0:
		dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
		break;
	case 0xD0:
		dev_notice(&pdev->dev, "New Controller Configuration Found\n");
		break;
	case 0xF0:
		dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
		return true;
	default:
		dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
			error);
		return true;
	}
	return false;
}

/*
 * Hardware-specific functions
 */

/*
 * DAC960 LA Series Controllers
 */

static inline void DAC960_LA_hw_mbox_new_cmd(void __iomem *base)
{
	writeb(DAC960_LA_IDB_HWMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
}

static inline void DAC960_LA_ack_hw_mbox_status(void __iomem *base)
{
	writeb(DAC960_LA_IDB_HWMBOX_ACK_STS, base + DAC960_LA_IDB_OFFSET);
}

static inline void DAC960_LA_gen_intr(void __iomem *base)
{
	writeb(DAC960_LA_IDB_GEN_IRQ, base + DAC960_LA_IDB_OFFSET);
}

static inline void DAC960_LA_reset_ctrl(void __iomem *base)
{
	writeb(DAC960_LA_IDB_CTRL_RESET, base + DAC960_LA_IDB_OFFSET);
}

static inline void DAC960_LA_mem_mbox_new_cmd(void __iomem *base)
{
	writeb(DAC960_LA_IDB_MMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
}

static inline bool DAC960_LA_hw_mbox_is_full(void __iomem *base)
{
	unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);

	return !(idb & DAC960_LA_IDB_HWMBOX_EMPTY);
}

static inline bool DAC960_LA_init_in_progress(void __iomem *base)
{
	unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);

	return !(idb & DAC960_LA_IDB_INIT_DONE);
}

static inline void DAC960_LA_ack_hw_mbox_intr(void __iomem *base)
{
	writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
}

static inline void DAC960_LA_ack_mem_mbox_intr(void __iomem *base)
{
	writeb(DAC960_LA_ODB_MMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
}

static inline void DAC960_LA_ack_intr(void __iomem *base)
{
	writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ | DAC960_LA_ODB_MMBOX_ACK_IRQ,
	       base + DAC960_LA_ODB_OFFSET);
}

static inline bool DAC960_LA_hw_mbox_status_available(void __iomem *base)
{
	unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);

	return odb & DAC960_LA_ODB_HWMBOX_STS_AVAIL;
}

static inline bool DAC960_LA_mem_mbox_status_available(void __iomem *base)
{
	unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);

	return odb & DAC960_LA_ODB_MMBOX_STS_AVAIL;
}

static inline void DAC960_LA_enable_intr(void __iomem *base)
{
	unsigned char odb = 0xFF;

	odb &= ~DAC960_LA_IRQMASK_DISABLE_IRQ;
	writeb(odb, base + DAC960_LA_IRQMASK_OFFSET);
}

static inline void DAC960_LA_disable_intr(void __iomem *base)
{
	unsigned char odb = 0xFF;

	odb |= DAC960_LA_IRQMASK_DISABLE_IRQ;
	writeb(odb, base + DAC960_LA_IRQMASK_OFFSET);
}

static inline bool DAC960_LA_intr_enabled(void __iomem *base)
{
	unsigned char imask = readb(base + DAC960_LA_IRQMASK_OFFSET);

	return !(imask & DAC960_LA_IRQMASK_DISABLE_IRQ);
}

static inline void DAC960_LA_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
		union myrb_cmd_mbox *mbox)
{
	mem_mbox->words[1] = mbox->words[1];
	mem_mbox->words[2] = mbox->words[2];
	mem_mbox->words[3] = mbox->words[3];
	/* Memory barrier to prevent reordering */
	wmb();
	mem_mbox->words[0] = mbox->words[0];
	/* Memory barrier to force PCI access */
	mb();
}
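
/*
 * Word 0 of the mailbox carries the opcode and command identifier; it is
 * written last, behind the wmb(), so the controller never observes a
 * partially filled memory mailbox entry.
 */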

static inline void DAC960_LA_write_hw_mbox(void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	writel(mbox->words[0], base + DAC960_LA_CMDOP_OFFSET);
	writel(mbox->words[1], base + DAC960_LA_MBOX4_OFFSET);
	writel(mbox->words[2], base + DAC960_LA_MBOX8_OFFSET);
	writeb(mbox->bytes[12], base + DAC960_LA_MBOX12_OFFSET);
}

static inline unsigned char DAC960_LA_read_status_cmd_ident(void __iomem *base)
{
	return readb(base + DAC960_LA_STSID_OFFSET);
}

static inline unsigned short DAC960_LA_read_status(void __iomem *base)
{
	return readw(base + DAC960_LA_STS_OFFSET);
}

static inline bool
DAC960_LA_read_error_status(void __iomem *base, unsigned char *error,
		unsigned char *param0, unsigned char *param1)
{
	unsigned char errsts = readb(base + DAC960_LA_ERRSTS_OFFSET);

	if (!(errsts & DAC960_LA_ERRSTS_PENDING))
		return false;
	errsts &= ~DAC960_LA_ERRSTS_PENDING;

	*error = errsts;
	*param0 = readb(base + DAC960_LA_CMDOP_OFFSET);
	*param1 = readb(base + DAC960_LA_CMDID_OFFSET);
	writeb(0xFF, base + DAC960_LA_ERRSTS_OFFSET);
	return true;
}

static inline unsigned short
DAC960_LA_mbox_init(struct pci_dev *pdev, void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	unsigned short status;
	int timeout = 0;

	while (timeout < MYRB_MAILBOX_TIMEOUT) {
		if (!DAC960_LA_hw_mbox_is_full(base))
			break;
		udelay(10);
		timeout++;
	}
	if (DAC960_LA_hw_mbox_is_full(base)) {
		dev_err(&pdev->dev,
			"Timeout waiting for empty mailbox\n");
		return MYRB_STATUS_SUBSYS_TIMEOUT;
	}
	DAC960_LA_write_hw_mbox(base, mbox);
	DAC960_LA_hw_mbox_new_cmd(base);
	timeout = 0;
	while (timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_LA_hw_mbox_status_available(base))
			break;
		udelay(10);
		timeout++;
	}
	if (!DAC960_LA_hw_mbox_status_available(base)) {
		dev_err(&pdev->dev, "Timeout waiting for mailbox status\n");
		return MYRB_STATUS_SUBSYS_TIMEOUT;
	}
	status = DAC960_LA_read_status(base);
	DAC960_LA_ack_hw_mbox_intr(base);
	DAC960_LA_ack_hw_mbox_status(base);

	return status;
}
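
/*
 * Both polling loops above give the controller MYRB_MAILBOX_TIMEOUT
 * iterations at 10us apiece before declaring MYRB_STATUS_SUBSYS_TIMEOUT,
 * so each handshake step has a budget of MYRB_MAILBOX_TIMEOUT * 10us.
 */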

static int DAC960_LA_hw_init(struct pci_dev *pdev,
		struct myrb_hba *cb, void __iomem *base)
{
	int timeout = 0;
	unsigned char error, parm0, parm1;

	DAC960_LA_disable_intr(base);
	DAC960_LA_ack_hw_mbox_status(base);
	udelay(1000);
	while (DAC960_LA_init_in_progress(base) &&
	       timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_LA_read_error_status(base, &error,
						&parm0, &parm1) &&
		    myrb_err_status(cb, error, parm0, parm1))
			return -ENODEV;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRB_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrb_enable_mmio(cb, DAC960_LA_mbox_init)) {
		dev_err(&pdev->dev,
			"Unable to Enable Memory Mailbox Interface\n");
		DAC960_LA_reset_ctrl(base);
		return -ENODEV;
	}
	DAC960_LA_enable_intr(base);
	cb->qcmd = myrb_qcmd;
	cb->write_cmd_mbox = DAC960_LA_write_cmd_mbox;
	if (cb->dual_mode_interface)
		cb->get_cmd_mbox = DAC960_LA_mem_mbox_new_cmd;
	else
		cb->get_cmd_mbox = DAC960_LA_hw_mbox_new_cmd;
	cb->disable_intr = DAC960_LA_disable_intr;
	cb->reset = DAC960_LA_reset_ctrl;

	return 0;
}

static irqreturn_t DAC960_LA_intr_handler(int irq, void *arg)
{
	struct myrb_hba *cb = arg;
	void __iomem *base = cb->io_base;
	struct myrb_stat_mbox *next_stat_mbox;
	unsigned long flags;

	spin_lock_irqsave(&cb->queue_lock, flags);
	DAC960_LA_ack_intr(base);
	next_stat_mbox = cb->next_stat_mbox;
	while (next_stat_mbox->valid) {
		unsigned char id = next_stat_mbox->id;
		struct scsi_cmnd *scmd = NULL;
		struct myrb_cmdblk *cmd_blk = NULL;

		if (id == MYRB_DCMD_TAG)
			cmd_blk = &cb->dcmd_blk;
		else if (id == MYRB_MCMD_TAG)
			cmd_blk = &cb->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cb->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk)
			cmd_blk->status = next_stat_mbox->status;
		else
			dev_err(&cb->pdev->dev,
				"Unhandled command completion %d\n", id);

		memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
		if (++next_stat_mbox > cb->last_stat_mbox)
			next_stat_mbox = cb->first_stat_mbox;

		if (cmd_blk) {
			if (id < 3)
				myrb_handle_cmdblk(cb, cmd_blk);
			else
				myrb_handle_scsi(cb, cmd_blk, scmd);
		}
	}
	cb->next_stat_mbox = next_stat_mbox;
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return IRQ_HANDLED;
}
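
/*
 * Status mailboxes form a ring from first_stat_mbox to last_stat_mbox:
 * each consumed entry is zeroed (clearing its valid flag for the next lap)
 * and the cursor wraps back to the start once it walks past the end.
 */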

struct myrb_privdata DAC960_LA_privdata = {
	.hw_init =	DAC960_LA_hw_init,
	.irq_handler =	DAC960_LA_intr_handler,
	.mmio_size =	DAC960_LA_mmio_size,
};

/*
 * DAC960 PG Series Controllers
 */
static inline void DAC960_PG_hw_mbox_new_cmd(void __iomem *base)
{
	writel(DAC960_PG_IDB_HWMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
}

static inline void DAC960_PG_ack_hw_mbox_status(void __iomem *base)
{
	writel(DAC960_PG_IDB_HWMBOX_ACK_STS, base + DAC960_PG_IDB_OFFSET);
}

static inline void DAC960_PG_gen_intr(void __iomem *base)
{
	writel(DAC960_PG_IDB_GEN_IRQ, base + DAC960_PG_IDB_OFFSET);
}

static inline void DAC960_PG_reset_ctrl(void __iomem *base)
{
	writel(DAC960_PG_IDB_CTRL_RESET, base + DAC960_PG_IDB_OFFSET);
}

static inline void DAC960_PG_mem_mbox_new_cmd(void __iomem *base)
{
	writel(DAC960_PG_IDB_MMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
}

static inline bool DAC960_PG_hw_mbox_is_full(void __iomem *base)
{
	unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);

	return idb & DAC960_PG_IDB_HWMBOX_FULL;
}

static inline bool DAC960_PG_init_in_progress(void __iomem *base)
{
	unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);

	return idb & DAC960_PG_IDB_INIT_IN_PROGRESS;
}

static inline void DAC960_PG_ack_hw_mbox_intr(void __iomem *base)
{
	writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
}

static inline void DAC960_PG_ack_mem_mbox_intr(void __iomem *base)
{
	writel(DAC960_PG_ODB_MMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
}

static inline void DAC960_PG_ack_intr(void __iomem *base)
{
	writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ | DAC960_PG_ODB_MMBOX_ACK_IRQ,
	       base + DAC960_PG_ODB_OFFSET);
}

static inline bool DAC960_PG_hw_mbox_status_available(void __iomem *base)
{
	unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET);

	return odb & DAC960_PG_ODB_HWMBOX_STS_AVAIL;
}

static inline bool DAC960_PG_mem_mbox_status_available(void __iomem *base)
{
	unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET);

	return odb & DAC960_PG_ODB_MMBOX_STS_AVAIL;
}

static inline void DAC960_PG_enable_intr(void __iomem *base)
{
	unsigned int imask = (unsigned int)-1;

	imask &= ~DAC960_PG_IRQMASK_DISABLE_IRQ;
	writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
}

static inline void DAC960_PG_disable_intr(void __iomem *base)
{
	unsigned int imask = (unsigned int)-1;

	writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
}

static inline bool DAC960_PG_intr_enabled(void __iomem *base)
{
	unsigned int imask = readl(base + DAC960_PG_IRQMASK_OFFSET);

	return !(imask & DAC960_PG_IRQMASK_DISABLE_IRQ);
}

static inline void DAC960_PG_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
		union myrb_cmd_mbox *mbox)
{
	mem_mbox->words[1] = mbox->words[1];
	mem_mbox->words[2] = mbox->words[2];
	mem_mbox->words[3] = mbox->words[3];
	/* Memory barrier to prevent reordering */
	wmb();
	mem_mbox->words[0] = mbox->words[0];
	/* Memory barrier to force PCI access */
	mb();
}

static inline void DAC960_PG_write_hw_mbox(void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	writel(mbox->words[0], base + DAC960_PG_CMDOP_OFFSET);
	writel(mbox->words[1], base + DAC960_PG_MBOX4_OFFSET);
	writel(mbox->words[2], base + DAC960_PG_MBOX8_OFFSET);
	writeb(mbox->bytes[12], base + DAC960_PG_MBOX12_OFFSET);
}

static inline unsigned char
DAC960_PG_read_status_cmd_ident(void __iomem *base)
{
	return readb(base + DAC960_PG_STSID_OFFSET);
}

static inline unsigned short
DAC960_PG_read_status(void __iomem *base)
{
	return readw(base + DAC960_PG_STS_OFFSET);
}

static inline bool
DAC960_PG_read_error_status(void __iomem *base, unsigned char *error,
		unsigned char *param0, unsigned char *param1)
{
	unsigned char errsts = readb(base + DAC960_PG_ERRSTS_OFFSET);

	if (!(errsts & DAC960_PG_ERRSTS_PENDING))
		return false;
	errsts &= ~DAC960_PG_ERRSTS_PENDING;
	*error = errsts;
	*param0 = readb(base + DAC960_PG_CMDOP_OFFSET);
	*param1 = readb(base + DAC960_PG_CMDID_OFFSET);
	writeb(0, base + DAC960_PG_ERRSTS_OFFSET);
	return true;
}

static inline unsigned short
DAC960_PG_mbox_init(struct pci_dev *pdev, void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	unsigned short status;
	int timeout = 0;

	while (timeout < MYRB_MAILBOX_TIMEOUT) {
		if (!DAC960_PG_hw_mbox_is_full(base))
			break;
		udelay(10);
		timeout++;
	}
	if (DAC960_PG_hw_mbox_is_full(base)) {
		dev_err(&pdev->dev,
			"Timeout waiting for empty mailbox\n");
		return MYRB_STATUS_SUBSYS_TIMEOUT;
	}
	DAC960_PG_write_hw_mbox(base, mbox);
	DAC960_PG_hw_mbox_new_cmd(base);

	timeout = 0;
	while (timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_PG_hw_mbox_status_available(base))
			break;
		udelay(10);
		timeout++;
	}
	if (!DAC960_PG_hw_mbox_status_available(base)) {
		dev_err(&pdev->dev,
			"Timeout waiting for mailbox status\n");
		return MYRB_STATUS_SUBSYS_TIMEOUT;
	}
	status = DAC960_PG_read_status(base);
	DAC960_PG_ack_hw_mbox_intr(base);
	DAC960_PG_ack_hw_mbox_status(base);

	return status;
}

static int DAC960_PG_hw_init(struct pci_dev *pdev,
		struct myrb_hba *cb, void __iomem *base)
{
	int timeout = 0;
	unsigned char error, parm0, parm1;

	DAC960_PG_disable_intr(base);
	DAC960_PG_ack_hw_mbox_status(base);
	udelay(1000);
	while (DAC960_PG_init_in_progress(base) &&
	       timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_PG_read_error_status(base, &error,
						&parm0, &parm1) &&
		    myrb_err_status(cb, error, parm0, parm1))
			return -EIO;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRB_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrb_enable_mmio(cb, DAC960_PG_mbox_init)) {
		dev_err(&pdev->dev,
			"Unable to Enable Memory Mailbox Interface\n");
		DAC960_PG_reset_ctrl(base);
		return -ENODEV;
	}
	DAC960_PG_enable_intr(base);
	cb->qcmd = myrb_qcmd;
	cb->write_cmd_mbox = DAC960_PG_write_cmd_mbox;
	if (cb->dual_mode_interface)
		cb->get_cmd_mbox = DAC960_PG_mem_mbox_new_cmd;
	else
		cb->get_cmd_mbox = DAC960_PG_hw_mbox_new_cmd;
	cb->disable_intr = DAC960_PG_disable_intr;
	cb->reset = DAC960_PG_reset_ctrl;

	return 0;
}

static irqreturn_t DAC960_PG_intr_handler(int irq, void *arg)
{
	struct myrb_hba *cb = arg;
	void __iomem *base = cb->io_base;
	struct myrb_stat_mbox *next_stat_mbox;
	unsigned long flags;

	spin_lock_irqsave(&cb->queue_lock, flags);
	DAC960_PG_ack_intr(base);
	next_stat_mbox = cb->next_stat_mbox;
	while (next_stat_mbox->valid) {
		unsigned char id = next_stat_mbox->id;
		struct scsi_cmnd *scmd = NULL;
		struct myrb_cmdblk *cmd_blk = NULL;

		if (id == MYRB_DCMD_TAG)
			cmd_blk = &cb->dcmd_blk;
		else if (id == MYRB_MCMD_TAG)
			cmd_blk = &cb->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cb->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk)
			cmd_blk->status = next_stat_mbox->status;
		else
			dev_err(&cb->pdev->dev,
				"Unhandled command completion %d\n", id);

		memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
		if (++next_stat_mbox > cb->last_stat_mbox)
			next_stat_mbox = cb->first_stat_mbox;

		if (id < 3)
			myrb_handle_cmdblk(cb, cmd_blk);
		else
			myrb_handle_scsi(cb, cmd_blk, scmd);
	}
	cb->next_stat_mbox = next_stat_mbox;
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return IRQ_HANDLED;
}

struct myrb_privdata DAC960_PG_privdata = {
	.hw_init =	DAC960_PG_hw_init,
	.irq_handler =	DAC960_PG_intr_handler,
	.mmio_size =	DAC960_PG_mmio_size,
};

/*
 * DAC960 PD Series Controllers
 */

static inline void DAC960_PD_hw_mbox_new_cmd(void __iomem *base)
{
	writeb(DAC960_PD_IDB_HWMBOX_NEW_CMD, base + DAC960_PD_IDB_OFFSET);
}

static inline void DAC960_PD_ack_hw_mbox_status(void __iomem *base)
{
	writeb(DAC960_PD_IDB_HWMBOX_ACK_STS, base + DAC960_PD_IDB_OFFSET);
}

static inline void DAC960_PD_gen_intr(void __iomem *base)
{
	writeb(DAC960_PD_IDB_GEN_IRQ, base + DAC960_PD_IDB_OFFSET);
}

static inline void DAC960_PD_reset_ctrl(void __iomem *base)
{
	writeb(DAC960_PD_IDB_CTRL_RESET, base + DAC960_PD_IDB_OFFSET);
}

static inline bool DAC960_PD_hw_mbox_is_full(void __iomem *base)
{
	unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);

	return idb & DAC960_PD_IDB_HWMBOX_FULL;
}

static inline bool DAC960_PD_init_in_progress(void __iomem *base)
{
	unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);

	return idb & DAC960_PD_IDB_INIT_IN_PROGRESS;
}

static inline void DAC960_PD_ack_intr(void __iomem *base)
{
	writeb(DAC960_PD_ODB_HWMBOX_ACK_IRQ, base + DAC960_PD_ODB_OFFSET);
}

static inline bool DAC960_PD_hw_mbox_status_available(void __iomem *base)
{
	unsigned char odb = readb(base + DAC960_PD_ODB_OFFSET);

	return odb & DAC960_PD_ODB_HWMBOX_STS_AVAIL;
}

static inline void DAC960_PD_enable_intr(void __iomem *base)
{
	writeb(DAC960_PD_IRQMASK_ENABLE_IRQ, base + DAC960_PD_IRQEN_OFFSET);
}

static inline void DAC960_PD_disable_intr(void __iomem *base)
{
	writeb(0, base + DAC960_PD_IRQEN_OFFSET);
}

static inline bool DAC960_PD_intr_enabled(void __iomem *base)
{
	unsigned char imask = readb(base + DAC960_PD_IRQEN_OFFSET);

	return imask & DAC960_PD_IRQMASK_ENABLE_IRQ;
}

static inline void DAC960_PD_write_cmd_mbox(void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	writel(mbox->words[0], base + DAC960_PD_CMDOP_OFFSET);
	writel(mbox->words[1], base + DAC960_PD_MBOX4_OFFSET);
	writel(mbox->words[2], base + DAC960_PD_MBOX8_OFFSET);
	writeb(mbox->bytes[12], base + DAC960_PD_MBOX12_OFFSET);
}

static inline unsigned char
DAC960_PD_read_status_cmd_ident(void __iomem *base)
{
	return readb(base + DAC960_PD_STSID_OFFSET);
}

static inline unsigned short
DAC960_PD_read_status(void __iomem *base)
{
	return readw(base + DAC960_PD_STS_OFFSET);
}

static inline bool
DAC960_PD_read_error_status(void __iomem *base, unsigned char *error,
		unsigned char *param0, unsigned char *param1)
{
	unsigned char errsts = readb(base + DAC960_PD_ERRSTS_OFFSET);

	if (!(errsts & DAC960_PD_ERRSTS_PENDING))
		return false;
	errsts &= ~DAC960_PD_ERRSTS_PENDING;
	*error = errsts;
	*param0 = readb(base + DAC960_PD_CMDOP_OFFSET);
	*param1 = readb(base + DAC960_PD_CMDID_OFFSET);
	writeb(0, base + DAC960_PD_ERRSTS_OFFSET);
	return true;
}

static void DAC960_PD_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
	void __iomem *base = cb->io_base;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;

	while (DAC960_PD_hw_mbox_is_full(base))
		udelay(1);
	DAC960_PD_write_cmd_mbox(base, mbox);
	DAC960_PD_hw_mbox_new_cmd(base);
}
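
/*
 * The PD (and P) controllers have no memory-mailbox mode, which is why
 * DAC960_PD_hw_init() below passes a NULL mbox_init function to
 * myrb_enable_mmio(): every command is written straight into the hardware
 * mailbox, busy-waiting while it is still full.
 */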

static int DAC960_PD_hw_init(struct pci_dev *pdev,
		struct myrb_hba *cb, void __iomem *base)
{
	int timeout = 0;
	unsigned char error, parm0, parm1;

	if (!request_region(cb->io_addr, 0x80, "myrb")) {
		dev_err(&pdev->dev, "IO port 0x%lx busy\n",
			(unsigned long)cb->io_addr);
		return -EBUSY;
	}
	DAC960_PD_disable_intr(base);
	DAC960_PD_ack_hw_mbox_status(base);
	udelay(1000);
	while (DAC960_PD_init_in_progress(base) &&
	       timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_PD_read_error_status(base, &error,
						&parm0, &parm1) &&
		    myrb_err_status(cb, error, parm0, parm1))
			return -EIO;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRB_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrb_enable_mmio(cb, NULL)) {
		dev_err(&pdev->dev,
			"Unable to Enable Memory Mailbox Interface\n");
		DAC960_PD_reset_ctrl(base);
		return -ENODEV;
	}
	DAC960_PD_enable_intr(base);
	cb->qcmd = DAC960_PD_qcmd;
	cb->disable_intr = DAC960_PD_disable_intr;
	cb->reset = DAC960_PD_reset_ctrl;

	return 0;
}

static irqreturn_t DAC960_PD_intr_handler(int irq, void *arg)
{
	struct myrb_hba *cb = arg;
	void __iomem *base = cb->io_base;
	unsigned long flags;

	spin_lock_irqsave(&cb->queue_lock, flags);
	while (DAC960_PD_hw_mbox_status_available(base)) {
		unsigned char id = DAC960_PD_read_status_cmd_ident(base);
		struct scsi_cmnd *scmd = NULL;
		struct myrb_cmdblk *cmd_blk = NULL;

		if (id == MYRB_DCMD_TAG)
			cmd_blk = &cb->dcmd_blk;
		else if (id == MYRB_MCMD_TAG)
			cmd_blk = &cb->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cb->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk)
			cmd_blk->status = DAC960_PD_read_status(base);
		else
			dev_err(&cb->pdev->dev,
				"Unhandled command completion %d\n", id);

		DAC960_PD_ack_intr(base);
		DAC960_PD_ack_hw_mbox_status(base);

		if (id < 3)
			myrb_handle_cmdblk(cb, cmd_blk);
		else
			myrb_handle_scsi(cb, cmd_blk, scmd);
	}
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return IRQ_HANDLED;
}

struct myrb_privdata DAC960_PD_privdata = {
	.hw_init =	DAC960_PD_hw_init,
	.irq_handler =	DAC960_PD_intr_handler,
	.mmio_size =	DAC960_PD_mmio_size,
};

/*
 * DAC960 P Series Controllers
 *
 * Similar to the DAC960 PD Series Controllers, but some commands have
 * to be translated.
 */

static inline void myrb_translate_enquiry(void *enq)
{
	memcpy(enq + 132, enq + 36, 64);
	memset(enq + 36, 0, 96);
}

static inline void myrb_translate_devstate(void *state)
{
	memcpy(state + 2, state + 3, 1);
	memmove(state + 4, state + 5, 2);
	memmove(state + 6, state + 8, 4);
}

static inline void myrb_translate_to_rw_command(struct myrb_cmdblk *cmd_blk)
{
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	int ldev_num = mbox->type5.ld.ldev_num;

	mbox->bytes[3] &= 0x7;
	mbox->bytes[3] |= mbox->bytes[7] << 6;
	mbox->bytes[7] = ldev_num;
}

static inline void myrb_translate_from_rw_command(struct myrb_cmdblk *cmd_blk)
{
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	int ldev_num = mbox->bytes[7];

	mbox->bytes[7] = mbox->bytes[3] >> 6;
	mbox->bytes[3] &= 0x7;
	mbox->bytes[3] |= ldev_num << 3;
}
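
/*
 * In the current mailbox layout the logical drive number lives in the
 * upper bits of byte 3, while the old-style commands expect it in byte 7
 * (with the bits it displaces moved the other way); the two helpers above
 * swap those fields before submission and after completion of each
 * read/write command.
 */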

static void DAC960_P_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
	void __iomem *base = cb->io_base;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;

	switch (mbox->common.opcode) {
	case MYRB_CMD_ENQUIRY:
		mbox->common.opcode = MYRB_CMD_ENQUIRY_OLD;
		break;
	case MYRB_CMD_GET_DEVICE_STATE:
		mbox->common.opcode = MYRB_CMD_GET_DEVICE_STATE_OLD;
		break;
	case MYRB_CMD_READ:
		mbox->common.opcode = MYRB_CMD_READ_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	case MYRB_CMD_WRITE:
		mbox->common.opcode = MYRB_CMD_WRITE_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	case MYRB_CMD_READ_SG:
		mbox->common.opcode = MYRB_CMD_READ_SG_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	case MYRB_CMD_WRITE_SG:
		mbox->common.opcode = MYRB_CMD_WRITE_SG_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	default:
		break;
	}
	while (DAC960_PD_hw_mbox_is_full(base))
		udelay(1);
	DAC960_PD_write_cmd_mbox(base, mbox);
	DAC960_PD_hw_mbox_new_cmd(base);
}

static int DAC960_P_hw_init(struct pci_dev *pdev,
		struct myrb_hba *cb, void __iomem *base)
{
	int timeout = 0;
	unsigned char error, parm0, parm1;

	if (!request_region(cb->io_addr, 0x80, "myrb")) {
		dev_err(&pdev->dev, "IO port 0x%lx busy\n",
			(unsigned long)cb->io_addr);
		return -EBUSY;
	}
	DAC960_PD_disable_intr(base);
	DAC960_PD_ack_hw_mbox_status(base);
	udelay(1000);
	while (DAC960_PD_init_in_progress(base) &&
	       timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_PD_read_error_status(base, &error,
						&parm0, &parm1) &&
		    myrb_err_status(cb, error, parm0, parm1))
			return -EAGAIN;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRB_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrb_enable_mmio(cb, NULL)) {
		dev_err(&pdev->dev,
			"Unable to allocate DMA mapped memory\n");
		DAC960_PD_reset_ctrl(base);
		return -ETIMEDOUT;
	}
	DAC960_PD_enable_intr(base);
	cb->qcmd = DAC960_P_qcmd;
	cb->disable_intr = DAC960_PD_disable_intr;
	cb->reset = DAC960_PD_reset_ctrl;

	return 0;
}

static irqreturn_t DAC960_P_intr_handler(int irq, void *arg)
{
	struct myrb_hba *cb = arg;
	void __iomem *base = cb->io_base;
	unsigned long flags;

	spin_lock_irqsave(&cb->queue_lock, flags);
	while (DAC960_PD_hw_mbox_status_available(base)) {
		unsigned char id = DAC960_PD_read_status_cmd_ident(base);
		struct scsi_cmnd *scmd = NULL;
		struct myrb_cmdblk *cmd_blk = NULL;
		union myrb_cmd_mbox *mbox;
		enum myrb_cmd_opcode op;

		if (id == MYRB_DCMD_TAG)
			cmd_blk = &cb->dcmd_blk;
		else if (id == MYRB_MCMD_TAG)
			cmd_blk = &cb->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cb->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk)
			cmd_blk->status = DAC960_PD_read_status(base);
		else
			dev_err(&cb->pdev->dev,
				"Unhandled command completion %d\n", id);

		DAC960_PD_ack_intr(base);
		DAC960_PD_ack_hw_mbox_status(base);

		if (!cmd_blk)
			continue;

		mbox = &cmd_blk->mbox;
		op = mbox->common.opcode;
		switch (op) {
		case MYRB_CMD_ENQUIRY_OLD:
			mbox->common.opcode = MYRB_CMD_ENQUIRY;
			myrb_translate_enquiry(cb->enquiry);
			break;
		case MYRB_CMD_READ_OLD:
			mbox->common.opcode = MYRB_CMD_READ;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		case MYRB_CMD_WRITE_OLD:
			mbox->common.opcode = MYRB_CMD_WRITE;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		case MYRB_CMD_READ_SG_OLD:
			mbox->common.opcode = MYRB_CMD_READ_SG;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		case MYRB_CMD_WRITE_SG_OLD:
			mbox->common.opcode = MYRB_CMD_WRITE_SG;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		default:
			break;
		}
		if (id < 3)
			myrb_handle_cmdblk(cb, cmd_blk);
		else
			myrb_handle_scsi(cb, cmd_blk, scmd);
	}
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return IRQ_HANDLED;
}

struct myrb_privdata DAC960_P_privdata = {
	.hw_init =	DAC960_P_hw_init,
	.irq_handler =	DAC960_P_intr_handler,
	.mmio_size =	DAC960_PD_mmio_size,
};

static struct myrb_hba *myrb_detect(struct pci_dev *pdev,
		const struct pci_device_id *entry)
{
	struct myrb_privdata *privdata =
		(struct myrb_privdata *)entry->driver_data;
	irq_handler_t irq_handler = privdata->irq_handler;
	unsigned int mmio_size = privdata->mmio_size;
	struct Scsi_Host *shost;
	struct myrb_hba *cb = NULL;

	shost = scsi_host_alloc(&myrb_template, sizeof(struct myrb_hba));
	if (!shost) {
		dev_err(&pdev->dev, "Unable to allocate Controller\n");
		return NULL;
	}
	shost->max_cmd_len = 12;
	shost->max_lun = 256;
	cb = shost_priv(shost);
	mutex_init(&cb->dcmd_mutex);
	mutex_init(&cb->dma_mutex);
	cb->pdev = pdev;

	if (pci_enable_device(pdev))
		goto failure;

	if (privdata->hw_init == DAC960_PD_hw_init ||
	    privdata->hw_init == DAC960_P_hw_init) {
		cb->io_addr = pci_resource_start(pdev, 0);
		cb->pci_addr = pci_resource_start(pdev, 1);
	} else
		cb->pci_addr = pci_resource_start(pdev, 0);

	pci_set_drvdata(pdev, cb);
	spin_lock_init(&cb->queue_lock);
	if (mmio_size < PAGE_SIZE)
		mmio_size = PAGE_SIZE;
	cb->mmio_base = ioremap(cb->pci_addr & PAGE_MASK, mmio_size);
	if (cb->mmio_base == NULL) {
		dev_err(&pdev->dev,
			"Unable to map Controller Register Window\n");
		goto failure;
	}

	cb->io_base = cb->mmio_base + (cb->pci_addr & ~PAGE_MASK);
	if (privdata->hw_init(pdev, cb, cb->io_base))
		goto failure;

	if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrb", cb) < 0) {
		dev_err(&pdev->dev,
			"Unable to acquire IRQ Channel %d\n", pdev->irq);
		goto failure;
	}
	cb->irq = pdev->irq;
	return cb;

failure:
	dev_err(&pdev->dev,
		"Failed to initialize Controller\n");
	myrb_cleanup(cb);
	return NULL;
}

static int myrb_probe(struct pci_dev *dev, const struct pci_device_id *entry)
{
	struct myrb_hba *cb;
	int ret;

	cb = myrb_detect(dev, entry);
	if (!cb)
		return -ENODEV;

	ret = myrb_get_hba_config(cb);
	if (ret < 0) {
		myrb_cleanup(cb);
		return ret;
	}

	if (!myrb_create_mempools(dev, cb)) {
		ret = -ENOMEM;
		goto failed;
	}

	ret = scsi_add_host(cb->host, &dev->dev);
	if (ret) {
		dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
		myrb_destroy_mempools(cb);
		goto failed;
	}
	scsi_scan_host(cb->host);
	return 0;
failed:
	myrb_cleanup(cb);
	return ret;
}

static void myrb_remove(struct pci_dev *pdev)
{
	struct myrb_hba *cb = pci_get_drvdata(pdev);

	shost_printk(KERN_NOTICE, cb->host, "Flushing Cache...\n");
	myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
	myrb_cleanup(cb);
	myrb_destroy_mempools(cb);
}

static const struct pci_device_id myrb_id_table[] = {
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_DEC,
			       PCI_DEVICE_ID_DEC_21285,
			       PCI_VENDOR_ID_MYLEX,
			       PCI_DEVICE_ID_MYLEX_DAC960_LA),
		.driver_data	= (unsigned long) &DAC960_LA_privdata,
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_PG, &DAC960_PG_privdata),
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_PD, &DAC960_PD_privdata),
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_P, &DAC960_P_privdata),
	},
	{0, },
};

MODULE_DEVICE_TABLE(pci, myrb_id_table);
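
/*
 * The LA entry matches on the DEC 21285 bridge ID and relies on the Mylex
 * subsystem vendor/device IDs to identify the board, hence the
 * PCI_DEVICE_SUB form rather than the plain PCI_DEVICE_DATA form used for
 * the other models.
 */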

static struct pci_driver myrb_pci_driver = {
	.name		= "myrb",
	.id_table	= myrb_id_table,
	.probe		= myrb_probe,
	.remove		= myrb_remove,
};

static int __init myrb_init_module(void)
{
	int ret;

	myrb_raid_template = raid_class_attach(&myrb_raid_functions);
	if (!myrb_raid_template)
		return -ENODEV;

	ret = pci_register_driver(&myrb_pci_driver);
	if (ret)
		raid_class_release(myrb_raid_template);

	return ret;
}

static void __exit myrb_cleanup_module(void)
{
	pci_unregister_driver(&myrb_pci_driver);
	raid_class_release(myrb_raid_template);
}

module_init(myrb_init_module);
module_exit(myrb_cleanup_module);

MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (Block interface)");
MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
MODULE_LICENSE("GPL");