1/******************************************************************************
2* QLOGIC LINUX SOFTWARE
3*
4* QLogic QLA1280 (Ultra2) and QLA12160 (Ultra3) SCSI driver
5* Copyright (C) 2000 Qlogic Corporation (www.qlogic.com)
6* Copyright (C) 2001-2004 Jes Sorensen, Wild Open Source Inc.
7* Copyright (C) 2003-2004 Christoph Hellwig
8*
9* This program is free software; you can redistribute it and/or modify it
10* under the terms of the GNU General Public License as published by the
11* Free Software Foundation; either version 2, or (at your option) any
12* later version.
13*
14* This program is distributed in the hope that it will be useful, but
15* WITHOUT ANY WARRANTY; without even the implied warranty of
16* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17* General Public License for more details.
18*
19******************************************************************************/
20#define QLA1280_VERSION "3.27.1"
21/*****************************************************************************
22 Revision History:
23 Rev 3.27.1, February 8, 2010, Michael Reed
24 - Retain firmware image for error recovery.
25 Rev 3.27, February 10, 2009, Michael Reed
26 - General code cleanup.
27 - Improve error recovery.
28 Rev 3.26, January 16, 2006 Jes Sorensen
29 - Ditch all < 2.6 support
30 Rev 3.25.1, February 10, 2005 Christoph Hellwig
31 - use pci_map_single to map non-S/G requests
32 - remove qla1280_proc_info
33 Rev 3.25, September 28, 2004, Christoph Hellwig
34 - add support for ISP1020/1040
35 - don't include "scsi.h" anymore for 2.6.x
36 Rev 3.24.4 June 7, 2004 Christoph Hellwig
37 - restructure firmware loading, cleanup initialization code
38 - prepare support for ISP1020/1040 chips
39 Rev 3.24.3 January 19, 2004, Jes Sorensen
40 - Handle PCI DMA mask settings correctly
41 - Correct order of error handling in probe_one, free_irq should not
42 be called if request_irq failed
43 Rev 3.24.2 January 19, 2004, James Bottomley & Andrew Vasquez
44 - Big endian fixes (James)
45 - Remove bogus IOCB content on zero data transfer commands (Andrew)
46 Rev 3.24.1 January 5, 2004, Jes Sorensen
47 - Initialize completion queue to avoid OOPS on probe
48 - Handle interrupts during mailbox testing
49 Rev 3.24 November 17, 2003, Christoph Hellwig
50 - use struct list_head for completion queue
51 - avoid old Scsi_FOO typedefs
52 - cleanup 2.4 compat glue a bit
53 - use <scsi/scsi_*.h> headers on 2.6 instead of "scsi.h"
54 - make initialization for memory mapped vs port I/O more similar
55 - remove broken pci config space manipulation
56 - kill more cruft
57 - this is an almost perfect 2.6 scsi driver now! ;)
58 Rev 3.23.39 December 17, 2003, Jes Sorensen
59 - Delete completion queue from srb if mailbox command failed,
60 to avoid qla1280_done completing qla1280_error_action's
61 obsolete context
62 - Reduce arguments for qla1280_done
63 Rev 3.23.38 October 18, 2003, Christoph Hellwig
64 - Convert to new-style hotpluggable driver for 2.6
65 - Fix missing scsi_unregister/scsi_host_put on HBA removal
66 - Kill some more cruft
67 Rev 3.23.37 October 1, 2003, Jes Sorensen
68 - Make MMIO depend on CONFIG_X86_VISWS instead of yet another
69 random CONFIG option
70 - Clean up locking in probe path
71 Rev 3.23.36 October 1, 2003, Christoph Hellwig
72 - queuecommand only ever receives new commands - clear flags
73 - Reintegrate lost fixes from Linux 2.5
74 Rev 3.23.35 August 14, 2003, Jes Sorensen
75 - Build against 2.6
76 Rev 3.23.34 July 23, 2003, Jes Sorensen
77 - Remove pointless TRUE/FALSE macros
78 - Clean up vchan handling
79 Rev 3.23.33 July 3, 2003, Jes Sorensen
80 - Don't define register access macros before the define determining MMIO.
81 This just happened to work out on ia64 but not elsewhere.
82 - Don't try to read from the card while it is in reset as
83 it won't respond and this causes an MCA
84 Rev 3.23.32 June 23, 2003, Jes Sorensen
85 - Basic support for boot time arguments
86 Rev 3.23.31 June 8, 2003, Jes Sorensen
87 - Reduce boot time messages
88 Rev 3.23.30 June 6, 2003, Jes Sorensen
89 - Do not enable sync/wide/ppr before it has been determined
90 that the target device actually supports it
91 - Enable DMA arbitration for multi channel controllers
92 Rev 3.23.29 June 3, 2003, Jes Sorensen
93 - Port to 2.5.69
94 Rev 3.23.28 June 3, 2003, Jes Sorensen
95 - Eliminate duplicate marker commands on bus resets
96 - Handle outstanding commands appropriately on bus/device resets
97 Rev 3.23.27 May 28, 2003, Jes Sorensen
98 - Remove bogus input queue code, let the Linux SCSI layer do the work
99 - Clean up NVRAM handling, only read it once from the card
100 - Add a number of missing default nvram parameters
101 Rev 3.23.26 Beta May 28, 2003, Jes Sorensen
102 - Use completion queue for mailbox commands instead of busy wait
103 Rev 3.23.25 Beta May 27, 2003, James Bottomley
104 - Migrate to use new error handling code
105 Rev 3.23.24 Beta May 21, 2003, James Bottomley
106 - Big endian support
107 - Cleanup data direction code
108 Rev 3.23.23 Beta May 12, 2003, Jes Sorensen
109 - Switch to using MMIO instead of PIO
110 Rev 3.23.22 Beta April 15, 2003, Jes Sorensen
111 - Fix PCI parity problem with 12160 during reset.
112 Rev 3.23.21 Beta April 14, 2003, Jes Sorensen
113 - Use pci_map_page()/pci_unmap_page() instead of map_single version.
114 Rev 3.23.20 Beta April 9, 2003, Jes Sorensen
115 - Remove < 2.4.x support
116 - Introduce HOST_LOCK to make the spin lock changes portable.
117 - Remove a bunch of idiotic and unnecessary typedef's
118 - Kill all leftovers of target-mode support which never worked anyway
119 Rev 3.23.19 Beta April 11, 2002, Linus Torvalds
120 - Do qla1280_pci_config() before calling request_irq() and
121 request_region()
122 - Use pci_dma_hi32() to handle upper word of DMA addresses instead
123 of large shifts
124 - Hand correct arguments to free_irq() in case of failure
125 Rev 3.23.18 Beta April 11, 2002, Jes Sorensen
126 - Run source through Lindent and clean up the output
127 Rev 3.23.17 Beta April 11, 2002, Jes Sorensen
128 - Update SCSI firmware to qla1280 v8.15.00 and qla12160 v10.04.32
129 Rev 3.23.16 Beta March 19, 2002, Jes Sorensen
130 - Rely on mailbox commands generating interrupts - do not
131 run qla1280_isr() from ql1280_mailbox_command()
132 - Remove device_reg_t
133 - Integrate ql12160_set_target_parameters() with 1280 version
134 - Make qla1280_setup() non static
135 - Do not call qla1280_check_for_dead_scsi_bus() on every I/O request
136 sent to the card - this command pauses the firmware!!!
137 Rev 3.23.15 Beta March 19, 2002, Jes Sorensen
138 - Clean up qla1280.h - remove obsolete QL_DEBUG_LEVEL_x definitions
139 - Remove a pile of pointless and confusing (srb_t **) and
140 (scsi_lu_t *) typecasts
141 - Explicit mark that we do not use the new error handling (for now)
142 - Remove scsi_qla_host_t and use 'struct' instead
143 - Remove in_abort, watchdog_enabled, dpc, dpc_sched, bios_enabled,
144 pci_64bit_slot flags which weren't used for anything anyway
145 - Grab host->host_lock while calling qla1280_isr() from abort()
146 - Use spin_lock()/spin_unlock() in qla1280_intr_handler() - we
147 do not need to save/restore flags in the interrupt handler
148 - Enable interrupts early (before any mailbox access) in preparation
149 for cleaning up the mailbox handling
150 Rev 3.23.14 Beta March 14, 2002, Jes Sorensen
151 - Further cleanups. Remove all trace of QL_DEBUG_LEVEL_x and replace
152 it with proper use of dprintk().
153 - Make qla1280_print_scsi_cmd() and qla1280_dump_buffer() both take
154 a debug level argument to determine if data is to be printed
155 - Add KERN_* info to printk()
156 Rev 3.23.13 Beta March 14, 2002, Jes Sorensen
157 - Significant cosmetic cleanups
158 - Change debug code to use dprintk() and remove #if mess
159 Rev 3.23.12 Beta March 13, 2002, Jes Sorensen
160 - More cosmetic cleanups, fix places treating return as function
161 - use cpu_relax() in qla1280_debounce_register()
162 Rev 3.23.11 Beta March 13, 2002, Jes Sorensen
163 - Make it compile under 2.5.5
164 Rev 3.23.10 Beta October 1, 2001, Jes Sorensen
165 - Do not typecast short * to long * in QL1280BoardTbl, this
166 broke miserably on big endian boxes
167 Rev 3.23.9 Beta September 30, 2001, Jes Sorensen
168 - Remove pre 2.2 hack for checking for reentrance in interrupt handler
169 - Make data types used to receive from SCSI_{BUS,TCN,LUN}_32
170 unsigned int to match the types from struct scsi_cmnd
171 Rev 3.23.8 Beta September 29, 2001, Jes Sorensen
172 - Remove bogus timer_t typedef from qla1280.h
173 - Remove obsolete pre 2.2 PCI setup code, use proper #define's
174 for PCI_ values, call pci_set_master()
175 - Fix memleak of qla1280_buffer on module unload
176 - Only compile module parsing code #ifdef MODULE - should be
177 changed to use individual MODULE_PARM's later
178 - Remove dummy_buffer that was never modified nor printed
179 - ENTER()/LEAVE() are noops unless QL_DEBUG_LEVEL_3, hence remove
180 #ifdef QL_DEBUG_LEVEL_3/#endif around ENTER()/LEAVE() calls
181 - Remove \r from print statements, this is Linux, not DOS
182 - Remove obsolete QLA1280_{SCSILU,INTR,RING}_{LOCK,UNLOCK}
183 dummy macros
184 - Remove C++ compile hack in header file as Linux drivers are not
185 supposed to be compiled as C++
186 - Kill MS_64BITS macro as removing it makes the code more readable
187 - Remove unnecessary flags.in_interrupts bit
188 Rev 3.23.7 Beta August 20, 2001, Jes Sorensen
189 - Don't check for set flags on q->q_flag one by one in qla1280_next()
190 - Check whether the interrupt was generated by the QLA1280 before
191 doing any processing
192 - qla1280_status_entry(): Only zero out part of sense_buffer that
193 is not being copied into
194 - Remove more superfluous typecasts
195 - qla1280_32bit_start_scsi() replace home-brew memcpy() with memcpy()
196 Rev 3.23.6 Beta August 20, 2001, Tony Luck, Intel
197 - Don't walk the entire list in qla1280_putq_t() just to directly
198 grab the pointer to the last element afterwards
199 Rev 3.23.5 Beta August 9, 2001, Jes Sorensen
200 - Don't use IRQF_DISABLED, its use is deprecated for this kind of driver
201 Rev 3.23.4 Beta August 8, 2001, Jes Sorensen
202 - Set dev->max_sectors to 1024
203 Rev 3.23.3 Beta August 6, 2001, Jes Sorensen
204 - Provide compat macros for pci_enable_device(), pci_find_subsys()
205 and scsi_set_pci_device()
206 - Call scsi_set_pci_device() for all devices
207 - Reduce size of kernel version dependent device probe code
208 - Move duplicate probe/init code to separate function
209 - Handle error if qla1280_mem_alloc() fails
210 - Kill OFFSET() macro and use Linux's PCI definitions instead
211 - Kill private structure defining PCI config space (struct config_reg)
212 - Only allocate I/O port region if not in MMIO mode
213 - Remove duplicate (unused) sanity check of size of srb_t
214 Rev 3.23.2 Beta August 6, 2001, Jes Sorensen
215 - Change home-brew memset() implementations to use memset()
216 - Remove all references to COMTRACE() - accessing a PC's COM2 serial
217 port directly is not legal under Linux.
218 Rev 3.23.1 Beta April 24, 2001, Jes Sorensen
219 - Remove pre 2.2 kernel support
220 - clean up 64 bit DMA setting to use 2.4 API (provide backwards compat)
221 - Fix MMIO access to use readl/writel instead of directly
222 dereferencing pointers
223 - Nuke MSDOS debugging code
224 - Change true/false data types to int from uint8_t
225 - Use int for counters instead of uint8_t etc.
226 - Clean up size & byte order conversion macro usage
227 Rev 3.23 Beta January 11, 2001 BN Qlogic
228 - Added check of device_id when handling non
229 QLA12160s during detect().
230 Rev 3.22 Beta January 5, 2001 BN Qlogic
231 - Changed queue_task() to schedule_task()
232 for kernels 2.4.0 and higher.
233 Note: 2.4.0-testxx kernels released prior to
234 the actual 2.4.0 kernel release in January 2001
235 will get compile/link errors with schedule_task().
236 Please update your kernel to released 2.4.0 level,
237 or comment lines in this file flagged with 3.22
238 to resolve compile/link error of schedule_task().
239 - Added -DCONFIG_SMP in addition to -D__SMP__
240 in Makefile for 2.4.0 builds of driver as module.
241 Rev 3.21 Beta January 4, 2001 BN Qlogic
242 - Changed criteria of 64/32 Bit mode of HBA
243 operation according to BITS_PER_LONG rather
244 than HBA's NVRAM setting of >4Gig memory bit;
245 so that the HBA auto-configures without the need
246 to setup each system individually.
247 Rev 3.20 Beta December 5, 2000 BN Qlogic
248 - Added priority handling to IA-64 onboard SCSI
249 ISP12160 chip for kernels greater than 2.3.18.
250 - Added irqrestore for qla1280_intr_handler.
251 - Enabled /proc/scsi/qla1280 interface.
252 - Clear /proc/scsi/qla1280 counters in detect().
253 Rev 3.19 Beta October 13, 2000 BN Qlogic
254 - Declare driver_template for new kernel
255 (2.4.0 and greater) scsi initialization scheme.
256 - Update /proc/scsi entry for 2.3.18 kernels and
257 above as qla1280
258 Rev 3.18 Beta October 10, 2000 BN Qlogic
259 - Changed scan order of adapters to map
260 the QLA12160 followed by the QLA1280.
261 Rev 3.17 Beta September 18, 2000 BN Qlogic
262 - Removed warnings for 32 bit 2.4.x compiles
263 - Corrected declared size for request and response
264 DMA addresses that are kept in each ha
265 Rev. 3.16 Beta August 25, 2000 BN Qlogic
266 - Corrected 64 bit addressing issue on IA-64
267 where the upper 32 bits were not properly
268 passed to the RISC engine.
269 Rev. 3.15 Beta August 22, 2000 BN Qlogic
270 - Modified qla1280_setup_chip to properly load
271 ISP firmware for greater than 4 Gig memory on IA-64
272 Rev. 3.14 Beta August 16, 2000 BN Qlogic
273 - Added setting of dma_mask to full 64 bit
274 if flags.enable_64bit_addressing is set in NVRAM
275 Rev. 3.13 Beta August 16, 2000 BN Qlogic
276 - Use new PCI DMA mapping APIs for 2.4.x kernel
277 Rev. 3.12 July 18, 2000 Redhat & BN Qlogic
278 - Added check of pci_enable_device to detect() for 2.3.x
279 - Use pci_resource_start() instead of
280 pdev->resource[0].start in detect() for 2.3.x
281 - Updated driver version
282 Rev. 3.11 July 14, 2000 BN Qlogic
283 - Updated SCSI Firmware to following versions:
284 qla1x80: 8.13.08
285 qla1x160: 10.04.08
286 - Updated driver version to 3.11
287 Rev. 3.10 June 23, 2000 BN Qlogic
288 - Added filtering of AMI SubSys Vendor ID devices
289 Rev. 3.9
290 - DEBUG_QLA1280 undefined and new version BN Qlogic
291 Rev. 3.08b May 9, 2000 MD Dell
292 - Added logic to check against AMI subsystem vendor ID
293 Rev. 3.08 May 4, 2000 DG Qlogic
294 - Added logic to check for PCI subsystem ID.
295 Rev. 3.07 Apr 24, 2000 DG & BN Qlogic
296 - Updated SCSI Firmware to following versions:
297 qla12160: 10.01.19
298 qla1280: 8.09.00
299 Rev. 3.06 Apr 12, 2000 DG & BN Qlogic
300 - Internal revision; not released
301 Rev. 3.05 Mar 28, 2000 DG & BN Qlogic
302 - Edit correction for virt_to_bus and PROC.
303 Rev. 3.04 Mar 28, 2000 DG & BN Qlogic
304 - Merge changes from ia64 port.
305 Rev. 3.03 Mar 28, 2000 BN Qlogic
306 - Increase version to reflect new code drop with compile fix
307 of issue with inclusion of linux/spinlock for 2.3 kernels
308 Rev. 3.02 Mar 15, 2000 BN Qlogic
309 - Merge qla1280_proc_info from 2.10 code base
310 Rev. 3.01 Feb 10, 2000 BN Qlogic
311 - Corrected code to compile on a 2.2.x kernel.
312 Rev. 3.00 Jan 17, 2000 DG Qlogic
313 - Added 64-bit support.
314 Rev. 2.07 Nov 9, 1999 DG Qlogic
315 - Added new routine to set target parameters for ISP12160.
316 Rev. 2.06 Sept 10, 1999 DG Qlogic
317 - Added support for ISP12160 Ultra 3 chip.
318 Rev. 2.03 August 3, 1999 Fred Lewis, Intel DuPont
319 - Modified code to remove errors generated when compiling with
320 Cygnus IA64 Compiler.
321 - Changed conversion of pointers to unsigned longs instead of integers.
322 - Changed type of I/O port variables from uint32_t to unsigned long.
323 - Modified OFFSET macro to work with 64-bit as well as 32-bit.
324 - Changed sprintf and printk format specifiers for pointers to %p.
325 - Changed some int to long type casts where needed in sprintf & printk.
326 - Added l modifiers to sprintf and printk format specifiers for longs.
327 - Removed unused local variables.
328 Rev. 1.20 June 8, 1999 DG, Qlogic
329 Changes to support RedHat release 6.0 (kernel 2.2.5).
330 - Added SCSI exclusive access lock (io_request_lock) when accessing
331 the adapter.
332 - Added changes for the new LINUX interface template. Some new error
333 handling routines have been added to the template, but for now we
334 will use the old ones.
335 - Initial Beta Release.
336*****************************************************************************/
337
338
339#include <linux/module.h>
340
341#include <linux/types.h>
342#include <linux/string.h>
343#include <linux/errno.h>
344#include <linux/kernel.h>
345#include <linux/ioport.h>
346#include <linux/delay.h>
347#include <linux/timer.h>
348#include <linux/pci.h>
349#include <linux/proc_fs.h>
350#include <linux/stat.h>
351#include <linux/pci_ids.h>
352#include <linux/interrupt.h>
353#include <linux/init.h>
354#include <linux/dma-mapping.h>
355#include <linux/firmware.h>
356
357#include <asm/io.h>
358#include <asm/irq.h>
359#include <asm/byteorder.h>
360#include <asm/processor.h>
361#include <asm/types.h>
362#include <asm/system.h>
363
364#include <scsi/scsi.h>
365#include <scsi/scsi_cmnd.h>
366#include <scsi/scsi_device.h>
367#include <scsi/scsi_host.h>
368#include <scsi/scsi_tcq.h>
369
370#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
371#include <asm/sn/io.h>
372#endif
373
374
375/*
376 * Compile time Options:
377 * 0 - Disable and 1 - Enable
378 */
379#define DEBUG_QLA1280_INTR 0
380#define DEBUG_PRINT_NVRAM 0
381#define DEBUG_QLA1280 0
382
383/*
384 * The SGI VISWS is broken and doesn't support MMIO ;-(
385 */
386#ifdef CONFIG_X86_VISWS
387#define MEMORY_MAPPED_IO 0
388#else
389#define MEMORY_MAPPED_IO 1
390#endif
391
392#include "qla1280.h"
393
394#ifndef BITS_PER_LONG
395#error "BITS_PER_LONG not defined!"
396#endif
397#if (BITS_PER_LONG == 64) || defined CONFIG_HIGHMEM
398#define QLA_64BIT_PTR 1
399#endif
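/*
 * On 64-bit kernels, or 32-bit kernels with highmem, DMA addresses handed
 * to the driver are not assumed to fit in 32 bits, so the 64-bit command
 * IOCB path (qla1280_64bit_start_scsi()) is selected below.
 */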
400
401#ifdef QLA_64BIT_PTR
402#define pci_dma_hi32(a) ((a >> 16) >> 16)
403#else
404#define pci_dma_hi32(a) 0
405#endif
406#define pci_dma_lo32(a) (a & 0xffffffff)
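/*
 * These split a dma_addr_t into the two 32-bit halves placed in the IOCBs.
 * Illustrative only:
 *
 *	dma_addr_t dma = 0x00000001fffff000ULL;
 *	pci_dma_hi32(dma);	// 0x00000001
 *	pci_dma_lo32(dma);	// 0xfffff000
 *
 * The double 16-bit shift presumably avoids an out-of-range shift warning
 * when dma_addr_t is only 32 bits wide.
 */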
407
408#define NVRAM_DELAY() udelay(500) /* 500 microseconds */
409
410#if defined(__ia64__) && !defined(ia64_platform_is)
411#define ia64_platform_is(foo) (!strcmp(foo, platform_name))
412#endif
413
414
415#define IS_ISP1040(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020)
416#define IS_ISP1x40(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020 || \
417 ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1240)
418#define IS_ISP1x160(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP10160 || \
419 ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP12160)
420
421
422static int qla1280_probe_one(struct pci_dev *, const struct pci_device_id *);
423static void qla1280_remove_one(struct pci_dev *);
424
425/*
426 * QLogic Driver Support Function Prototypes.
427 */
428static void qla1280_done(struct scsi_qla_host *);
429static int qla1280_get_token(char *);
430static int qla1280_setup(char *s) __init;
431
432/*
433 * QLogic ISP1280 Hardware Support Function Prototypes.
434 */
435static int qla1280_load_firmware(struct scsi_qla_host *);
436static int qla1280_init_rings(struct scsi_qla_host *);
437static int qla1280_nvram_config(struct scsi_qla_host *);
438static int qla1280_mailbox_command(struct scsi_qla_host *,
439 uint8_t, uint16_t *);
440static int qla1280_bus_reset(struct scsi_qla_host *, int);
441static int qla1280_device_reset(struct scsi_qla_host *, int, int);
442static int qla1280_abort_command(struct scsi_qla_host *, struct srb *, int);
443static int qla1280_abort_isp(struct scsi_qla_host *);
444#ifdef QLA_64BIT_PTR
445static int qla1280_64bit_start_scsi(struct scsi_qla_host *, struct srb *);
446#else
447static int qla1280_32bit_start_scsi(struct scsi_qla_host *, struct srb *);
448#endif
449static void qla1280_nv_write(struct scsi_qla_host *, uint16_t);
450static void qla1280_poll(struct scsi_qla_host *);
451static void qla1280_reset_adapter(struct scsi_qla_host *);
452static void qla1280_marker(struct scsi_qla_host *, int, int, int, u8);
453static void qla1280_isp_cmd(struct scsi_qla_host *);
454static void qla1280_isr(struct scsi_qla_host *, struct list_head *);
455static void qla1280_rst_aen(struct scsi_qla_host *);
456static void qla1280_status_entry(struct scsi_qla_host *, struct response *,
457 struct list_head *);
458static void qla1280_error_entry(struct scsi_qla_host *, struct response *,
459 struct list_head *);
460static uint16_t qla1280_get_nvram_word(struct scsi_qla_host *, uint32_t);
461static uint16_t qla1280_nvram_request(struct scsi_qla_host *, uint32_t);
462static uint16_t qla1280_debounce_register(volatile uint16_t __iomem *);
463static request_t *qla1280_req_pkt(struct scsi_qla_host *);
464static int qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *,
465 unsigned int);
466static void qla1280_get_target_parameters(struct scsi_qla_host *,
467 struct scsi_device *);
468static int qla1280_set_target_parameters(struct scsi_qla_host *, int, int);
469
470
471static struct qla_driver_setup driver_setup;
472
473/*
474 * convert scsi data direction to request_t control flags
475 */
476static inline uint16_t
477qla1280_data_direction(struct scsi_cmnd *cmnd)
478{
479 switch(cmnd->sc_data_direction) {
480 case DMA_FROM_DEVICE:
481 return BIT_5;
482 case DMA_TO_DEVICE:
483 return BIT_6;
484 case DMA_BIDIRECTIONAL:
485 return BIT_5 | BIT_6;
486 /*
487 * We could BUG() on default here if one of the four cases isn't
488 * met, but then again if we receive something like that from the
489 * SCSI layer we have more serious problems. This shuts up GCC.
490 */
491 case DMA_NONE:
492 default:
493 return 0;
494 }
495}
496
497#if DEBUG_QLA1280
498static void __qla1280_print_scsi_cmd(struct scsi_cmnd * cmd);
499static void __qla1280_dump_buffer(char *, int);
500#endif
501
502
503/*
504 * insmod needs to find the variable and make it point to something
505 */
506#ifdef MODULE
507static char *qla1280;
508
509/* insmod qla1280 qla1280=verbose */
510module_param(qla1280, charp, 0);
511#else
512__setup("qla1280=", qla1280_setup);
513#endif
514
515
516/*
517 * We use the scsi_pointer structure that's included with each scsi_command
518 * to overlay our struct srb over it. qla1280_init() checks that a srb is not
519 * bigger than a scsi_pointer.
520 */
521
522#define CMD_SP(Cmnd) &Cmnd->SCp
523#define CMD_CDBLEN(Cmnd) Cmnd->cmd_len
524#define CMD_CDBP(Cmnd) Cmnd->cmnd
525#define CMD_SNSP(Cmnd) Cmnd->sense_buffer
526#define CMD_SNSLEN(Cmnd) SCSI_SENSE_BUFFERSIZE
527#define CMD_RESULT(Cmnd) Cmnd->result
528#define CMD_HANDLE(Cmnd) Cmnd->host_scribble
529#define CMD_REQUEST(Cmnd) Cmnd->request->cmd
530
531#define CMD_HOST(Cmnd) Cmnd->device->host
532#define SCSI_BUS_32(Cmnd) Cmnd->device->channel
533#define SCSI_TCN_32(Cmnd) Cmnd->device->id
534#define SCSI_LUN_32(Cmnd) Cmnd->device->lun
535
536
537/*****************************************/
538/* ISP Boards supported by this driver */
539/*****************************************/
540
541struct qla_boards {
542 char *name; /* Board ID String */
543 int numPorts; /* Number of SCSI ports */
544 int fw_index; /* index into qla1280_fw_tbl for firmware */
545};
546
547/* NOTE: the last argument in each entry is used to index ql1280_board_tbl */
548static struct pci_device_id qla1280_pci_tbl[] = {
549 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP12160,
550 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
551 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1020,
552 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
553 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1080,
554 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
555 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1240,
556 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3},
557 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1280,
558 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
559 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP10160,
560 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
561 {0,}
562};
563MODULE_DEVICE_TABLE(pci, qla1280_pci_tbl);
564
565DEFINE_MUTEX(qla1280_firmware_mutex);
566
567struct qla_fw {
568 char *fwname;
569 const struct firmware *fw;
570};
571
572#define QL_NUM_FW_IMAGES 3
573
574struct qla_fw qla1280_fw_tbl[QL_NUM_FW_IMAGES] = {
575 {"qlogic/1040.bin", NULL}, /* image 0 */
576 {"qlogic/1280.bin", NULL}, /* image 1 */
577 {"qlogic/12160.bin", NULL}, /* image 2 */
578};
579
580/* NOTE: Order of boards in this table must match order in qla1280_pci_tbl */
581static struct qla_boards ql1280_board_tbl[] = {
582 {.name = "QLA12160", .numPorts = 2, .fw_index = 2},
583 {.name = "QLA1040" , .numPorts = 1, .fw_index = 0},
584 {.name = "QLA1080" , .numPorts = 1, .fw_index = 1},
585 {.name = "QLA1240" , .numPorts = 2, .fw_index = 1},
586 {.name = "QLA1280" , .numPorts = 2, .fw_index = 1},
587 {.name = "QLA10160", .numPorts = 1, .fw_index = 2},
588 {.name = " ", .numPorts = 0, .fw_index = -1},
589};
590
591static int qla1280_verbose = 1;
592
593#if DEBUG_QLA1280
594static int ql_debug_level = 1;
595#define dprintk(level, format, a...) \
596 do { if (ql_debug_level >= level) printk(KERN_ERR format, ##a); } while(0)
597#define qla1280_dump_buffer(level, buf, size) \
598 if (ql_debug_level >= level) __qla1280_dump_buffer(buf, size)
599#define qla1280_print_scsi_cmd(level, cmd) \
600 if (ql_debug_level >= level) __qla1280_print_scsi_cmd(cmd)
601#else
602#define ql_debug_level 0
603#define dprintk(level, format, a...) do{}while(0)
604#define qla1280_dump_buffer(a, b, c) do{}while(0)
605#define qla1280_print_scsi_cmd(a, b) do{}while(0)
606#endif
607
608#define ENTER(x) dprintk(3, "qla1280 : Entering %s()\n", x);
609#define LEAVE(x) dprintk(3, "qla1280 : Leaving %s()\n", x);
610#define ENTER_INTR(x) dprintk(4, "qla1280 : Entering %s()\n", x);
611#define LEAVE_INTR(x) dprintk(4, "qla1280 : Leaving %s()\n", x);
612
613
614static int qla1280_read_nvram(struct scsi_qla_host *ha)
615{
616 uint16_t *wptr;
617 uint8_t chksum;
618 int cnt, i;
619 struct nvram *nv;
620
621 ENTER("qla1280_read_nvram");
622
623 if (driver_setup.no_nvram)
624 return 1;
625
626 printk(KERN_INFO "scsi(%ld): Reading NVRAM\n", ha->host_no);
627
628 wptr = (uint16_t *)&ha->nvram;
629 nv = &ha->nvram;
630 chksum = 0;
631 for (cnt = 0; cnt < 3; cnt++) {
632 *wptr = qla1280_get_nvram_word(ha, cnt);
633 chksum += *wptr & 0xff;
634 chksum += (*wptr >> 8) & 0xff;
635 wptr++;
636 }
637
638 if (nv->id0 != 'I' || nv->id1 != 'S' ||
639 nv->id2 != 'P' || nv->id3 != ' ' || nv->version < 1) {
640 dprintk(2, "Invalid nvram ID or version!\n");
641 chksum = 1;
642 } else {
643 for (; cnt < sizeof(struct nvram); cnt++) {
644 *wptr = qla1280_get_nvram_word(ha, cnt);
645 chksum += *wptr & 0xff;
646 chksum += (*wptr >> 8) & 0xff;
647 wptr++;
648 }
649 }
650
651 dprintk(3, "qla1280_read_nvram: NVRAM Magic ID= %c %c %c %02x"
652 " version %i\n", nv->id0, nv->id1, nv->id2, nv->id3,
653 nv->version);
654
655
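	/*
	 * A good NVRAM image sums to zero over all bytes, so any non-zero
	 * chksum here (including the value forced above for a bad header)
	 * makes us fall back to the default settings.
	 */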
656 if (chksum) {
657 if (!driver_setup.no_nvram)
658 printk(KERN_WARNING "scsi(%ld): Unable to identify or "
659 "validate NVRAM checksum, using default "
660 "settings\n", ha->host_no);
661 ha->nvram_valid = 0;
662 } else
663 ha->nvram_valid = 1;
664
665 /* The firmware interface is, um, interesting, in that the
666 * actual firmware image on the chip is little endian, thus,
667 * the process of taking that image to the CPU would end up
668 * little endian. However, the firmware interface requires it
669 * to be read a word (two bytes) at a time.
670 *
671 * The net result of this would be that the word (and
672 * doubleword) quantities in the firmware would be correct, but
673 * the bytes would be pairwise reversed. Since most of the
674 * firmware quantities are, in fact, bytes, we do an extra
675 * le16_to_cpu() in the firmware read routine.
676 *
677 * The upshot of all this is that the bytes in the firmware
678 * are in the correct places, but the 16 and 32 bit quantities
679 * are still in little endian format. We fix that up below by
680 * doing extra reverses on them */
681 nv->isp_parameter = cpu_to_le16(nv->isp_parameter);
682 nv->firmware_feature.w = cpu_to_le16(nv->firmware_feature.w);
683 for(i = 0; i < MAX_BUSES; i++) {
684 nv->bus[i].selection_timeout = cpu_to_le16(nv->bus[i].selection_timeout);
685 nv->bus[i].max_queue_depth = cpu_to_le16(nv->bus[i].max_queue_depth);
686 }
687 dprintk(1, "qla1280_read_nvram: Completed Reading NVRAM\n");
688 LEAVE("qla1280_read_nvram");
689
690 return chksum;
691}
692
693/**************************************************************************
694 * qla1280_info
695 * Return a string describing the driver.
696 **************************************************************************/
697static const char *
698qla1280_info(struct Scsi_Host *host)
699{
700 static char qla1280_scsi_name_buffer[125];
701 char *bp;
702 struct scsi_qla_host *ha;
703 struct qla_boards *bdp;
704
705 bp = &qla1280_scsi_name_buffer[0];
706 ha = (struct scsi_qla_host *)host->hostdata;
707 bdp = &ql1280_board_tbl[ha->devnum];
708 memset(bp, 0, sizeof(qla1280_scsi_name_buffer));
709
710 sprintf (bp,
711 "QLogic %s PCI to SCSI Host Adapter\n"
712 " Firmware version: %2d.%02d.%02d, Driver version %s",
713 &bdp->name[0], ha->fwver1, ha->fwver2, ha->fwver3,
714 QLA1280_VERSION);
715 return bp;
716}
717
718/**************************************************************************
719 * qla1280_queuecommand
720 * Queue a command to the controller.
721 *
722 * Note:
723 * The mid-level driver tries to ensure that queuecommand never gets invoked
724 * concurrently with itself or the interrupt handler (although the
725 * interrupt handler may call this routine as part of request-completion
726 * handling). Unfortunately, it sometimes calls the scheduler in interrupt
727 * context which is a big NO! NO!.
728 **************************************************************************/
729static int
730qla1280_queuecommand_lck(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
731{
732 struct Scsi_Host *host = cmd->device->host;
733 struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
734 struct srb *sp = (struct srb *)CMD_SP(cmd);
735 int status;
736
737 cmd->scsi_done = fn;
738 sp->cmd = cmd;
739 sp->flags = 0;
740 sp->wait = NULL;
741 CMD_HANDLE(cmd) = (unsigned char *)NULL;
742
743 qla1280_print_scsi_cmd(5, cmd);
744
745#ifdef QLA_64BIT_PTR
746 /*
747 * Using 64 bit commands if the PCI bridge doesn't support it is a
748 * bit wasteful, however this should really only happen if one's
749 * PCI controller is completely broken, like the BCM1250. For
750 * sane hardware this is not an issue.
751 */
752 status = qla1280_64bit_start_scsi(ha, sp);
753#else
754 status = qla1280_32bit_start_scsi(ha, sp);
755#endif
756 return status;
757}
758
759static DEF_SCSI_QCMD(qla1280_queuecommand)
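/*
 * DEF_SCSI_QCMD() generates the actual qla1280_queuecommand() entry point,
 * which takes the host lock with interrupts disabled and then calls the
 * _lck variant above, so the start_scsi routines always run under the lock.
 */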
760
761enum action {
762 ABORT_COMMAND,
763 DEVICE_RESET,
764 BUS_RESET,
765 ADAPTER_RESET,
766};
767
768
769static void qla1280_mailbox_timeout(unsigned long __data)
770{
771 struct scsi_qla_host *ha = (struct scsi_qla_host *)__data;
772 struct device_reg __iomem *reg;
773 reg = ha->iobase;
774
775 ha->mailbox_out[0] = RD_REG_WORD(&reg->mailbox0);
776 printk(KERN_ERR "scsi(%ld): mailbox timed out, mailbox0 %04x, "
777 "ictrl %04x, istatus %04x\n", ha->host_no, ha->mailbox_out[0],
778 RD_REG_WORD(&reg->ictrl), RD_REG_WORD(&reg->istatus));
779 complete(ha->mailbox_wait);
780}
781
782static int
783_qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp,
784 struct completion *wait)
785{
786 int status = FAILED;
787 struct scsi_cmnd *cmd = sp->cmd;
788
789 spin_unlock_irq(ha->host->host_lock);
790 wait_for_completion_timeout(wait, 4*HZ);
791 spin_lock_irq(ha->host->host_lock);
792 sp->wait = NULL;
793 if(CMD_HANDLE(cmd) == COMPLETED_HANDLE) {
794 status = SUCCESS;
795 (*cmd->scsi_done)(cmd);
796 }
797 return status;
798}
799
800static int
801qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp)
802{
803 DECLARE_COMPLETION_ONSTACK(wait);
804
805 sp->wait = &wait;
806 return _qla1280_wait_for_single_command(ha, sp, &wait);
807}
808
809static int
810qla1280_wait_for_pending_commands(struct scsi_qla_host *ha, int bus, int target)
811{
812 int cnt;
813 int status;
814 struct srb *sp;
815 struct scsi_cmnd *cmd;
816
817 status = SUCCESS;
818
819 /*
820 * Wait for all commands with the designated bus/target
821 * to be completed by the firmware
822 */
823 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
824 sp = ha->outstanding_cmds[cnt];
825 if (sp) {
826 cmd = sp->cmd;
827
828 if (bus >= 0 && SCSI_BUS_32(cmd) != bus)
829 continue;
830 if (target >= 0 && SCSI_TCN_32(cmd) != target)
831 continue;
832
833 status = qla1280_wait_for_single_command(ha, sp);
834 if (status == FAILED)
835 break;
836 }
837 }
838 return status;
839}
840
841/**************************************************************************
842 * qla1280_error_action
843 * The function will attempt to perform a specified error action and
844 * wait for the results (or time out).
845 *
846 * Input:
847 * cmd = Linux SCSI command packet of the command that caused the
848 * bus reset.
849 * action = error action to take (see enum action)
850 *
851 * Returns:
852 * SUCCESS or FAILED
853 *
854 **************************************************************************/
855static int
856qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
857{
858 struct scsi_qla_host *ha;
859 int bus, target, lun;
860 struct srb *sp;
861 int i, found;
862 int result=FAILED;
863 int wait_for_bus=-1;
864 int wait_for_target = -1;
865 DECLARE_COMPLETION_ONSTACK(wait);
866
867 ENTER("qla1280_error_action");
868
869 ha = (struct scsi_qla_host *)(CMD_HOST(cmd)->hostdata);
870 sp = (struct srb *)CMD_SP(cmd);
871 bus = SCSI_BUS_32(cmd);
872 target = SCSI_TCN_32(cmd);
873 lun = SCSI_LUN_32(cmd);
874
875 dprintk(4, "error_action %i, istatus 0x%04x\n", action,
876 RD_REG_WORD(&ha->iobase->istatus));
877
878 dprintk(4, "host_cmd 0x%04x, ictrl 0x%04x, jiffies %li\n",
879 RD_REG_WORD(&ha->iobase->host_cmd),
880 RD_REG_WORD(&ha->iobase->ictrl), jiffies);
881
882 if (qla1280_verbose)
883 printk(KERN_INFO "scsi(%li): Resetting Cmnd=0x%p, "
884 "Handle=0x%p, action=0x%x\n",
885 ha->host_no, cmd, CMD_HANDLE(cmd), action);
886
887 /*
888 * Check to see if we have the command in the outstanding_cmds[]
889 * array. If not then it must have completed before this error
890 * action was initiated. If the error_action isn't ABORT_COMMAND
891 * then the driver must proceed with the requested action.
892 */
893 found = -1;
894 for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
895 if (sp == ha->outstanding_cmds[i]) {
896 found = i;
897 sp->wait = &wait; /* we'll wait for it to complete */
898 break;
899 }
900 }
901
902 if (found < 0) { /* driver doesn't have command */
903 result = SUCCESS;
904 if (qla1280_verbose) {
905 printk(KERN_INFO
906 "scsi(%ld:%d:%d:%d): specified command has "
907 "already completed.\n", ha->host_no, bus,
908 target, lun);
909 }
910 }
911
912 switch (action) {
913
914 case ABORT_COMMAND:
915 dprintk(1, "qla1280: RISC aborting command\n");
916 /*
917 * The abort might fail due to race when the host_lock
918 * is released to issue the abort. As such, we
919 * don't bother to check the return status.
920 */
921 if (found >= 0)
922 qla1280_abort_command(ha, sp, found);
923 break;
924
925 case DEVICE_RESET:
926 if (qla1280_verbose)
927 printk(KERN_INFO
928 "scsi(%ld:%d:%d:%d): Queueing device reset "
929 "command.\n", ha->host_no, bus, target, lun);
930 if (qla1280_device_reset(ha, bus, target) == 0) {
931 /* issued device reset, set wait conditions */
932 wait_for_bus = bus;
933 wait_for_target = target;
934 }
935 break;
936
937 case BUS_RESET:
938 if (qla1280_verbose)
939 printk(KERN_INFO "qla1280(%ld:%d): Issued bus "
940 "reset.\n", ha->host_no, bus);
941 if (qla1280_bus_reset(ha, bus) == 0) {
942 /* issued bus reset, set wait conditions */
943 wait_for_bus = bus;
944 }
945 break;
946
947 case ADAPTER_RESET:
948 default:
949 if (qla1280_verbose) {
950 printk(KERN_INFO
951 "scsi(%ld): Issued ADAPTER RESET\n",
952 ha->host_no);
953 printk(KERN_INFO "scsi(%ld): I/O processing will "
954 "continue automatically\n", ha->host_no);
955 }
956 ha->flags.reset_active = 1;
957
958 if (qla1280_abort_isp(ha) != 0) { /* it's dead */
959 result = FAILED;
960 }
961
962 ha->flags.reset_active = 0;
963 }
964
965 /*
966 * At this point, the host_lock has been released and retaken
967 * by the issuance of the mailbox command.
968 * Wait for the command passed in by the mid-layer if it
969 * was found by the driver. It might have been returned
970 * between eh recovery steps, hence the check of the "found"
971 * variable.
972 */
973
974 if (found >= 0)
975 result = _qla1280_wait_for_single_command(ha, sp, &wait);
976
977 if (action == ABORT_COMMAND && result != SUCCESS) {
978 printk(KERN_WARNING
979 "scsi(%li:%i:%i:%i): "
980 "Unable to abort command!\n",
981 ha->host_no, bus, target, lun);
982 }
983
984 /*
985 * If the command passed in by the mid-layer has been
986 * returned by the board, then wait for any additional
987 * commands which are supposed to complete based upon
988 * the error action.
989 *
990 * All commands are unconditionally returned during a
991 * call to qla1280_abort_isp(), ADAPTER_RESET. No need
992 * to wait for them.
993 */
994 if (result == SUCCESS && wait_for_bus >= 0) {
995 result = qla1280_wait_for_pending_commands(ha,
996 wait_for_bus, wait_for_target);
997 }
998
999 dprintk(1, "RESET returning %d\n", result);
1000
1001 LEAVE("qla1280_error_action");
1002 return result;
1003}
1004
1005/**************************************************************************
1006 * qla1280_abort
1007 * Abort the specified SCSI command(s).
1008 **************************************************************************/
1009static int
1010qla1280_eh_abort(struct scsi_cmnd * cmd)
1011{
1012 int rc;
1013
1014 spin_lock_irq(cmd->device->host->host_lock);
1015 rc = qla1280_error_action(cmd, ABORT_COMMAND);
1016 spin_unlock_irq(cmd->device->host->host_lock);
1017
1018 return rc;
1019}
1020
1021/**************************************************************************
1022 * qla1280_device_reset
1023 * Reset the specified SCSI device
1024 **************************************************************************/
1025static int
1026qla1280_eh_device_reset(struct scsi_cmnd *cmd)
1027{
1028 int rc;
1029
1030 spin_lock_irq(cmd->device->host->host_lock);
1031 rc = qla1280_error_action(cmd, DEVICE_RESET);
1032 spin_unlock_irq(cmd->device->host->host_lock);
1033
1034 return rc;
1035}
1036
1037/**************************************************************************
1038 * qla1280_bus_reset
1039 * Reset the specified bus.
1040 **************************************************************************/
1041static int
1042qla1280_eh_bus_reset(struct scsi_cmnd *cmd)
1043{
1044 int rc;
1045
1046 spin_lock_irq(cmd->device->host->host_lock);
1047 rc = qla1280_error_action(cmd, BUS_RESET);
1048 spin_unlock_irq(cmd->device->host->host_lock);
1049
1050 return rc;
1051}
1052
1053/**************************************************************************
1054 * qla1280_adapter_reset
1055 * Reset the specified adapter (both channels)
1056 **************************************************************************/
1057static int
1058qla1280_eh_adapter_reset(struct scsi_cmnd *cmd)
1059{
1060 int rc;
1061
1062 spin_lock_irq(cmd->device->host->host_lock);
1063 rc = qla1280_error_action(cmd, ADAPTER_RESET);
1064 spin_unlock_irq(cmd->device->host->host_lock);
1065
1066 return rc;
1067}
1068
1069static int
1070qla1280_biosparam(struct scsi_device *sdev, struct block_device *bdev,
1071 sector_t capacity, int geom[])
1072{
1073 int heads, sectors, cylinders;
1074
1075 heads = 64;
1076 sectors = 32;
1077 cylinders = (unsigned long)capacity / (heads * sectors);
1078 if (cylinders > 1024) {
1079 heads = 255;
1080 sectors = 63;
1081 cylinders = (unsigned long)capacity / (heads * sectors);
1082 /* if (cylinders > 1023)
1083 cylinders = 1023; */
1084 }
1085
1086 geom[0] = heads;
1087 geom[1] = sectors;
1088 geom[2] = cylinders;
1089
1090 return 0;
1091}
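/*
 * Illustrative example (not from the original source): a disk with about
 * 70,000,000 512-byte sectors exceeds 1024 cylinders at 64 heads and 32
 * sectors per track, so the 255/63 mapping is used instead, giving
 * 70000000 / (255 * 63), i.e. roughly 4357 cylinders.
 */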
1092
1093
1094/* disable risc and host interrupts */
1095static inline void
1096qla1280_disable_intrs(struct scsi_qla_host *ha)
1097{
1098 WRT_REG_WORD(&ha->iobase->ictrl, 0);
1099 RD_REG_WORD(&ha->iobase->ictrl); /* PCI Posted Write flush */
1100}
1101
1102/* enable risc and host interrupts */
1103static inline void
1104qla1280_enable_intrs(struct scsi_qla_host *ha)
1105{
1106 WRT_REG_WORD(&ha->iobase->ictrl, (ISP_EN_INT | ISP_EN_RISC));
1107 RD_REG_WORD(&ha->iobase->ictrl); /* PCI Posted Write flush */
1108}
1109
1110/**************************************************************************
1111 * qla1280_intr_handler
1112 * Handles the H/W interrupt
1113 **************************************************************************/
1114static irqreturn_t
1115qla1280_intr_handler(int irq, void *dev_id)
1116{
1117 struct scsi_qla_host *ha;
1118 struct device_reg __iomem *reg;
1119 u16 data;
1120 int handled = 0;
1121
1122 ENTER_INTR ("qla1280_intr_handler");
1123 ha = (struct scsi_qla_host *)dev_id;
1124
1125 spin_lock(ha->host->host_lock);
1126
1127 ha->isr_count++;
1128 reg = ha->iobase;
1129
1130 qla1280_disable_intrs(ha);
1131
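	/*
	 * qla1280_debounce_register() re-reads the ISP register until
	 * consecutive reads agree, so we don't act on a value caught
	 * mid-update.
	 */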
1132 data = qla1280_debounce_register(&reg->istatus);
1133 /* Check for pending interrupts. */
1134 if (data & RISC_INT) {
1135 qla1280_isr(ha, &ha->done_q);
1136 handled = 1;
1137 }
1138 if (!list_empty(&ha->done_q))
1139 qla1280_done(ha);
1140
1141 spin_unlock(ha->host->host_lock);
1142
1143 qla1280_enable_intrs(ha);
1144
1145 LEAVE_INTR("qla1280_intr_handler");
1146 return IRQ_RETVAL(handled);
1147}
1148
1149
1150static int
1151qla1280_set_target_parameters(struct scsi_qla_host *ha, int bus, int target)
1152{
1153 uint8_t mr;
1154 uint16_t mb[MAILBOX_REGISTER_COUNT];
1155 struct nvram *nv;
1156 int status, lun;
1157
1158 nv = &ha->nvram;
1159
1160 mr = BIT_3 | BIT_2 | BIT_1 | BIT_0;
1161
1162 /* Set Target Parameters. */
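	/*
	 * mb[1]: target ID in the high byte, with BIT_7 set to select bus 1.
	 * mb[2]: per-target NVRAM option flags packed into bits 8-15 (plus
	 * the PPR enable in bit 5 on 1x160 chips).
	 * mb[3]: sync offset in the high byte, sync period in the low byte.
	 */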
1163 mb[0] = MBC_SET_TARGET_PARAMETERS;
1164 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
1165 mb[2] = nv->bus[bus].target[target].parameter.renegotiate_on_error << 8;
1166 mb[2] |= nv->bus[bus].target[target].parameter.stop_queue_on_check << 9;
1167 mb[2] |= nv->bus[bus].target[target].parameter.auto_request_sense << 10;
1168 mb[2] |= nv->bus[bus].target[target].parameter.tag_queuing << 11;
1169 mb[2] |= nv->bus[bus].target[target].parameter.enable_sync << 12;
1170 mb[2] |= nv->bus[bus].target[target].parameter.enable_wide << 13;
1171 mb[2] |= nv->bus[bus].target[target].parameter.parity_checking << 14;
1172 mb[2] |= nv->bus[bus].target[target].parameter.disconnect_allowed << 15;
1173
1174 if (IS_ISP1x160(ha)) {
1175 mb[2] |= nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr << 5;
1176 mb[3] = (nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8);
1177 mb[6] = (nv->bus[bus].target[target].ppr_1x160.flags.ppr_options << 8) |
1178 nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width;
1179 mr |= BIT_6;
1180 } else {
1181 mb[3] = (nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8);
1182 }
1183 mb[3] |= nv->bus[bus].target[target].sync_period;
1184
1185 status = qla1280_mailbox_command(ha, mr, mb);
1186
1187 /* Set Device Queue Parameters. */
1188 for (lun = 0; lun < MAX_LUNS; lun++) {
1189 mb[0] = MBC_SET_DEVICE_QUEUE;
1190 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
1191 mb[1] |= lun;
1192 mb[2] = nv->bus[bus].max_queue_depth;
1193 mb[3] = nv->bus[bus].target[target].execution_throttle;
1194 status |= qla1280_mailbox_command(ha, 0x0f, mb);
1195 }
1196
1197 if (status)
1198 printk(KERN_WARNING "scsi(%ld:%i:%i): "
1199 "qla1280_set_target_parameters() failed\n",
1200 ha->host_no, bus, target);
1201 return status;
1202}
1203
1204
1205/**************************************************************************
1206 * qla1280_slave_configure
1207 *
1208 * Description:
1209 * Determines the queue depth for a given device. There are two cases:
1210 * if the device supports tagged queueing and tagged queueing is
1211 * enabled for the target in NVRAM, the queue depth is set to the
1212 * bus high-water mark read from NVRAM;
1213 * otherwise the device is limited to a small default depth
1214 * suitable for untagged operation.
1215 **************************************************************************/
1216static int
1217qla1280_slave_configure(struct scsi_device *device)
1218{
1219 struct scsi_qla_host *ha;
1220 int default_depth = 3;
1221 int bus = device->channel;
1222 int target = device->id;
1223 int status = 0;
1224 struct nvram *nv;
1225 unsigned long flags;
1226
1227 ha = (struct scsi_qla_host *)device->host->hostdata;
1228 nv = &ha->nvram;
1229
1230 if (qla1280_check_for_dead_scsi_bus(ha, bus))
1231 return 1;
1232
1233 if (device->tagged_supported &&
1234 (ha->bus_settings[bus].qtag_enables & (BIT_0 << target))) {
1235 scsi_adjust_queue_depth(device, MSG_ORDERED_TAG,
1236 ha->bus_settings[bus].hiwat);
1237 } else {
1238 scsi_adjust_queue_depth(device, 0, default_depth);
1239 }
1240
1241 nv->bus[bus].target[target].parameter.enable_sync = device->sdtr;
1242 nv->bus[bus].target[target].parameter.enable_wide = device->wdtr;
1243 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = device->ppr;
1244
1245 if (driver_setup.no_sync ||
1246 (driver_setup.sync_mask &&
1247 (~driver_setup.sync_mask & (1 << target))))
1248 nv->bus[bus].target[target].parameter.enable_sync = 0;
1249 if (driver_setup.no_wide ||
1250 (driver_setup.wide_mask &&
1251 (~driver_setup.wide_mask & (1 << target))))
1252 nv->bus[bus].target[target].parameter.enable_wide = 0;
1253 if (IS_ISP1x160(ha)) {
1254 if (driver_setup.no_ppr ||
1255 (driver_setup.ppr_mask &&
1256 (~driver_setup.ppr_mask & (1 << target))))
1257 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 0;
1258 }
1259
1260 spin_lock_irqsave(ha->host->host_lock, flags);
1261 if (nv->bus[bus].target[target].parameter.enable_sync)
1262 status = qla1280_set_target_parameters(ha, bus, target);
1263 qla1280_get_target_parameters(ha, device);
1264 spin_unlock_irqrestore(ha->host->host_lock, flags);
1265 return status;
1266}
1267
1268
1269/*
1270 * qla1280_done
1271 * Process completed commands.
1272 *
1273 * Input:
1274 * ha = adapter block pointer.
1275 */
1276static void
1277qla1280_done(struct scsi_qla_host *ha)
1278{
1279 struct srb *sp;
1280 struct list_head *done_q;
1281 int bus, target, lun;
1282 struct scsi_cmnd *cmd;
1283
1284 ENTER("qla1280_done");
1285
1286 done_q = &ha->done_q;
1287
1288 while (!list_empty(done_q)) {
1289 sp = list_entry(done_q->next, struct srb, list);
1290
1291 list_del(&sp->list);
1292
1293 cmd = sp->cmd;
1294 bus = SCSI_BUS_32(cmd);
1295 target = SCSI_TCN_32(cmd);
1296 lun = SCSI_LUN_32(cmd);
1297
1298 switch ((CMD_RESULT(cmd) >> 16)) {
1299 case DID_RESET:
1300 /* Issue marker command. */
1301 if (!ha->flags.abort_isp_active)
1302 qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
1303 break;
1304 case DID_ABORT:
1305 sp->flags &= ~SRB_ABORT_PENDING;
1306 sp->flags |= SRB_ABORTED;
1307 break;
1308 default:
1309 break;
1310 }
1311
1312 /* Release memory used for this I/O */
1313 scsi_dma_unmap(cmd);
1314
1315 /* Call the mid-level driver interrupt handler */
1316 ha->actthreads--;
1317
1318 if (sp->wait == NULL)
1319 (*(cmd)->scsi_done)(cmd);
1320 else
1321 complete(sp->wait);
1322 }
1323 LEAVE("qla1280_done");
1324}
1325
1326/*
1327 * Translates an ISP error to a Linux SCSI error
1328 */
1329static int
1330qla1280_return_status(struct response * sts, struct scsi_cmnd *cp)
1331{
1332 int host_status = DID_ERROR;
1333 uint16_t comp_status = le16_to_cpu(sts->comp_status);
1334 uint16_t state_flags = le16_to_cpu(sts->state_flags);
1335 uint32_t residual_length = le32_to_cpu(sts->residual_length);
1336 uint16_t scsi_status = le16_to_cpu(sts->scsi_status);
1337#if DEBUG_QLA1280_INTR
1338 static char *reason[] = {
1339 "DID_OK",
1340 "DID_NO_CONNECT",
1341 "DID_BUS_BUSY",
1342 "DID_TIME_OUT",
1343 "DID_BAD_TARGET",
1344 "DID_ABORT",
1345 "DID_PARITY",
1346 "DID_ERROR",
1347 "DID_RESET",
1348 "DID_BAD_INTR"
1349 };
1350#endif /* DEBUG_QLA1280_INTR */
1351
1352 ENTER("qla1280_return_status");
1353
1354#if DEBUG_QLA1280_INTR
1355 /*
1356 dprintk(1, "qla1280_return_status: compl status = 0x%04x\n",
1357 comp_status);
1358 */
1359#endif
1360
1361 switch (comp_status) {
1362 case CS_COMPLETE:
1363 host_status = DID_OK;
1364 break;
1365
1366 case CS_INCOMPLETE:
1367 if (!(state_flags & SF_GOT_BUS))
1368 host_status = DID_NO_CONNECT;
1369 else if (!(state_flags & SF_GOT_TARGET))
1370 host_status = DID_BAD_TARGET;
1371 else if (!(state_flags & SF_SENT_CDB))
1372 host_status = DID_ERROR;
1373 else if (!(state_flags & SF_TRANSFERRED_DATA))
1374 host_status = DID_ERROR;
1375 else if (!(state_flags & SF_GOT_STATUS))
1376 host_status = DID_ERROR;
1377 else if (!(state_flags & SF_GOT_SENSE))
1378 host_status = DID_ERROR;
1379 break;
1380
1381 case CS_RESET:
1382 host_status = DID_RESET;
1383 break;
1384
1385 case CS_ABORTED:
1386 host_status = DID_ABORT;
1387 break;
1388
1389 case CS_TIMEOUT:
1390 host_status = DID_TIME_OUT;
1391 break;
1392
1393 case CS_DATA_OVERRUN:
1394 dprintk(2, "Data overrun 0x%x\n", residual_length);
1395 dprintk(2, "qla1280_return_status: response packet data\n");
1396 qla1280_dump_buffer(2, (char *)sts, RESPONSE_ENTRY_SIZE);
1397 host_status = DID_ERROR;
1398 break;
1399
1400 case CS_DATA_UNDERRUN:
1401 if ((scsi_bufflen(cp) - residual_length) <
1402 cp->underflow) {
1403 printk(KERN_WARNING
1404 "scsi: Underflow detected - retrying "
1405 "command.\n");
1406 host_status = DID_ERROR;
1407 } else {
1408 scsi_set_resid(cp, residual_length);
1409 host_status = DID_OK;
1410 }
1411 break;
1412
1413 default:
1414 host_status = DID_ERROR;
1415 break;
1416 }
1417
1418#if DEBUG_QLA1280_INTR
1419 dprintk(1, "qla1280 ISP status: host status (%s) scsi status %x\n",
1420 reason[host_status], scsi_status);
1421#endif
1422
1423 LEAVE("qla1280_return_status");
1424
1425 return (scsi_status & 0xff) | (host_status << 16);
1426}
1427
1428/****************************************************************************/
1429/* QLogic ISP1280 Hardware Support Functions. */
1430/****************************************************************************/
1431
1432/*
1433 * qla1280_initialize_adapter
1434 * Initialize board.
1435 *
1436 * Input:
1437 * ha = adapter block pointer.
1438 *
1439 * Returns:
1440 * 0 = success
1441 */
1442static int __devinit
1443qla1280_initialize_adapter(struct scsi_qla_host *ha)
1444{
1445 struct device_reg __iomem *reg;
1446 int status;
1447 int bus;
1448 unsigned long flags;
1449
1450 ENTER("qla1280_initialize_adapter");
1451
1452 /* Clear adapter flags. */
1453 ha->flags.online = 0;
1454 ha->flags.disable_host_adapter = 0;
1455 ha->flags.reset_active = 0;
1456 ha->flags.abort_isp_active = 0;
1457
1458#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
1459 if (ia64_platform_is("sn2")) {
1460 printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA "
1461 "dual channel lockup workaround\n", ha->host_no);
1462 ha->flags.use_pci_vchannel = 1;
1463 driver_setup.no_nvram = 1;
1464 }
1465#endif
1466
1467 /* TODO: implement support for the 1040 nvram format */
1468 if (IS_ISP1040(ha))
1469 driver_setup.no_nvram = 1;
1470
1471 dprintk(1, "Configure PCI space for adapter...\n");
1472
1473 reg = ha->iobase;
1474
1475 /* Ensure mailbox registers are free. */
1476 WRT_REG_WORD(&reg->semaphore, 0);
1477 WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
1478 WRT_REG_WORD(&reg->host_cmd, HC_CLR_HOST_INT);
1479 RD_REG_WORD(&reg->host_cmd);
1480
1481 if (qla1280_read_nvram(ha)) {
1482 dprintk(2, "qla1280_initialize_adapter: failed to read "
1483 "NVRAM\n");
1484 }
1485
1486 /*
1487 * It's necessary to grab the spinlock here as qla1280_mailbox_command
1488 * needs to be able to drop the lock unconditionally to wait
1489 * for completion.
1490 */
1491 spin_lock_irqsave(ha->host->host_lock, flags);
1492
1493 status = qla1280_load_firmware(ha);
1494 if (status) {
1495 printk(KERN_ERR "scsi(%li): initialize: pci probe failed!\n",
1496 ha->host_no);
1497 goto out;
1498 }
1499
1500 /* Setup adapter based on NVRAM parameters. */
1501 dprintk(1, "scsi(%ld): Configure NVRAM parameters\n", ha->host_no);
1502 qla1280_nvram_config(ha);
1503
1504 if (ha->flags.disable_host_adapter) {
1505 status = 1;
1506 goto out;
1507 }
1508
1509 status = qla1280_init_rings(ha);
1510 if (status)
1511 goto out;
1512
1513 /* Issue SCSI reset, if we can't reset twice then bus is dead */
1514 for (bus = 0; bus < ha->ports; bus++) {
1515 if (!ha->bus_settings[bus].disable_scsi_reset &&
1516 qla1280_bus_reset(ha, bus) &&
1517 qla1280_bus_reset(ha, bus))
1518 ha->bus_settings[bus].scsi_bus_dead = 1;
1519 }
1520
1521 ha->flags.online = 1;
1522 out:
1523 spin_unlock_irqrestore(ha->host->host_lock, flags);
1524
1525 if (status)
1526 dprintk(2, "qla1280_initialize_adapter: **** FAILED ****\n");
1527
1528 LEAVE("qla1280_initialize_adapter");
1529 return status;
1530}
1531
1532/*
1533 * qla1280_request_firmware
1534 * Acquire firmware for chip. Retain in memory
1535 * for error recovery.
1536 *
1537 * Input:
1538 * ha = adapter block pointer.
1539 *
1540 * Returns:
1541 * Pointer to firmware image or an error code
1542 * cast to pointer via ERR_PTR().
1543 */
1544static const struct firmware *
1545qla1280_request_firmware(struct scsi_qla_host *ha)
1546{
1547 const struct firmware *fw;
1548 int err;
1549 int index;
1550 char *fwname;
1551
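	/*
	 * request_firmware() may sleep, so drop the host lock here (it is
	 * re-taken before returning) and serialize concurrent loads with a
	 * mutex so the image cached in qla1280_fw_tbl[] is fetched only once.
	 */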
1552 spin_unlock_irq(ha->host->host_lock);
1553 mutex_lock(&qla1280_firmware_mutex);
1554
1555 index = ql1280_board_tbl[ha->devnum].fw_index;
1556 fw = qla1280_fw_tbl[index].fw;
1557 if (fw)
1558 goto out;
1559
1560 fwname = qla1280_fw_tbl[index].fwname;
1561 err = request_firmware(&fw, fwname, &ha->pdev->dev);
1562
1563 if (err) {
1564 printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
1565 fwname, err);
1566 fw = ERR_PTR(err);
1567 goto unlock;
1568 }
1569 if ((fw->size % 2) || (fw->size < 6)) {
1570 printk(KERN_ERR "Invalid firmware length %zu in image \"%s\"\n",
1571 fw->size, fwname);
1572 release_firmware(fw);
1573 fw = ERR_PTR(-EINVAL);
1574 goto unlock;
1575 }
1576
1577 qla1280_fw_tbl[index].fw = fw;
1578
1579 out:
1580 ha->fwver1 = fw->data[0];
1581 ha->fwver2 = fw->data[1];
1582 ha->fwver3 = fw->data[2];
1583 unlock:
1584 mutex_unlock(&qla1280_firmware_mutex);
1585 spin_lock_irq(ha->host->host_lock);
1586 return fw;
1587}
1588
1589/*
1590 * Chip diagnostics
1591 * Test chip for proper operation.
1592 *
1593 * Input:
1594 * ha = adapter block pointer.
1595 *
1596 * Returns:
1597 * 0 = success.
1598 */
1599static int
1600qla1280_chip_diag(struct scsi_qla_host *ha)
1601{
1602 uint16_t mb[MAILBOX_REGISTER_COUNT];
1603 struct device_reg __iomem *reg = ha->iobase;
1604 int status = 0;
1605 int cnt;
1606 uint16_t data;
1607 dprintk(3, "qla1280_chip_diag: testing device at 0x%p \n", &reg->id_l);
1608
1609 dprintk(1, "scsi(%ld): Verifying chip\n", ha->host_no);
1610
1611 /* Soft reset chip and wait for it to finish. */
1612 WRT_REG_WORD(&reg->ictrl, ISP_RESET);
1613
1614 /*
1615 * We can't do a traditional PCI write flush here by reading
1616 * back the register. The card will not respond once the reset
1617 * is in action and we end up with a machine check exception
1618 * instead. Nothing to do but wait and hope for the best.
1619 * A portable pci_write_flush(pdev) call would be very useful here.
1620 */
1621 udelay(20);
1622 data = qla1280_debounce_register(&reg->ictrl);
1623 /*
1624 * Yet another QLogic gem ;-(
1625 */
1626 for (cnt = 1000000; cnt && data & ISP_RESET; cnt--) {
1627 udelay(5);
1628 data = RD_REG_WORD(&reg->ictrl);
1629 }
1630
1631 if (!cnt)
1632 goto fail;
1633
1634 /* Reset register cleared by chip reset. */
1635 dprintk(3, "qla1280_chip_diag: reset register cleared by chip reset\n");
1636
1637 WRT_REG_WORD(&reg->cfg_1, 0);
1638
1639 /* Reset RISC and disable BIOS which
1640 allows RISC to execute out of RAM. */
1641 WRT_REG_WORD(&reg->host_cmd, HC_RESET_RISC |
1642 HC_RELEASE_RISC | HC_DISABLE_BIOS);
1643
1644 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
1645 data = qla1280_debounce_register(&reg->mailbox0);
1646
1647 /*
1648 * I *LOVE* this code!
1649 */
1650 for (cnt = 1000000; cnt && data == MBS_BUSY; cnt--) {
1651 udelay(5);
1652 data = RD_REG_WORD(&reg->mailbox0);
1653 }
1654
1655 if (!cnt)
1656 goto fail;
1657
1658 /* Check product ID of chip */
1659 dprintk(3, "qla1280_chip_diag: Checking product ID of chip\n");
1660
1661 if (RD_REG_WORD(&reg->mailbox1) != PROD_ID_1 ||
1662 (RD_REG_WORD(&reg->mailbox2) != PROD_ID_2 &&
1663 RD_REG_WORD(&reg->mailbox2) != PROD_ID_2a) ||
1664 RD_REG_WORD(&reg->mailbox3) != PROD_ID_3 ||
1665 RD_REG_WORD(&reg->mailbox4) != PROD_ID_4) {
1666 printk(KERN_INFO "qla1280: Wrong product ID = "
1667 "0x%x,0x%x,0x%x,0x%x\n",
1668 RD_REG_WORD(&reg->mailbox1),
1669 RD_REG_WORD(&reg->mailbox2),
1670 RD_REG_WORD(&reg->mailbox3),
1671 RD_REG_WORD(&reg->mailbox4));
1672 goto fail;
1673 }
1674
1675 /*
1676 * Enable ints early!!!
1677 */
1678 qla1280_enable_intrs(ha);
1679
1680 dprintk(1, "qla1280_chip_diag: Checking mailboxes of chip\n");
1681 /* Wrap Incoming Mailboxes Test. */
1682 mb[0] = MBC_MAILBOX_REGISTER_TEST;
1683 mb[1] = 0xAAAA;
1684 mb[2] = 0x5555;
1685 mb[3] = 0xAA55;
1686 mb[4] = 0x55AA;
1687 mb[5] = 0xA5A5;
1688 mb[6] = 0x5A5A;
1689 mb[7] = 0x2525;
1690
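	/*
	 * The register test command echoes mb[1]..mb[7] back through the
	 * incoming mailboxes; the compare below verifies the mailbox data
	 * path in both directions before any real commands are issued.
	 */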
1691 status = qla1280_mailbox_command(ha, 0xff, mb);
1692 if (status)
1693 goto fail;
1694
1695 if (mb[1] != 0xAAAA || mb[2] != 0x5555 || mb[3] != 0xAA55 ||
1696 mb[4] != 0x55AA || mb[5] != 0xA5A5 || mb[6] != 0x5A5A ||
1697 mb[7] != 0x2525) {
1698 printk(KERN_INFO "qla1280: Failed mbox check\n");
1699 goto fail;
1700 }
1701
1702 dprintk(3, "qla1280_chip_diag: exiting normally\n");
1703 return 0;
1704 fail:
1705 dprintk(2, "qla1280_chip_diag: **** FAILED ****\n");
1706 return status;
1707}
1708
1709static int
1710qla1280_load_firmware_pio(struct scsi_qla_host *ha)
1711{
1712 /* enter with host_lock acquired */
1713
1714 const struct firmware *fw;
1715 const __le16 *fw_data;
1716 uint16_t risc_address, risc_code_size;
1717 uint16_t mb[MAILBOX_REGISTER_COUNT], i;
1718 int err = 0;
1719
1720 fw = qla1280_request_firmware(ha);
1721 if (IS_ERR(fw))
1722 return PTR_ERR(fw);
1723
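	/*
	 * Firmware image layout, as consumed here: bytes 0-2 carry the
	 * version (saved as fwver1..3 when the image is first requested),
	 * the little-endian word at byte offset 4 is the RISC load/start
	 * address, and the code itself begins at byte offset 6.  PIO
	 * loading pushes it one word at a time via MBC_WRITE_RAM_WORD.
	 */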
1724 fw_data = (const __le16 *)&fw->data[0];
1725 ha->fwstart = __le16_to_cpu(fw_data[2]);
1726
1727 /* Load RISC code. */
1728 risc_address = ha->fwstart;
1729 fw_data = (const __le16 *)&fw->data[6];
1730 risc_code_size = (fw->size - 6) / 2;
1731
1732 for (i = 0; i < risc_code_size; i++) {
1733 mb[0] = MBC_WRITE_RAM_WORD;
1734 mb[1] = risc_address + i;
1735 mb[2] = __le16_to_cpu(fw_data[i]);
1736
1737 err = qla1280_mailbox_command(ha, BIT_0 | BIT_1 | BIT_2, mb);
1738 if (err) {
1739 printk(KERN_ERR "scsi(%li): Failed to load firmware\n",
1740 ha->host_no);
1741 break;
1742 }
1743 }
1744
1745 return err;
1746}
1747
1748#define DUMP_IT_BACK 0 /* for debug of RISC loading */
1749static int
1750qla1280_load_firmware_dma(struct scsi_qla_host *ha)
1751{
1752 /* enter with host_lock acquired */
1753 const struct firmware *fw;
1754 const __le16 *fw_data;
1755 uint16_t risc_address, risc_code_size;
1756 uint16_t mb[MAILBOX_REGISTER_COUNT], cnt;
1757 int err = 0, num, i;
1758#if DUMP_IT_BACK
1759 uint8_t *sp, *tbuf;
1760 dma_addr_t p_tbuf;
1761
1762 tbuf = pci_alloc_consistent(ha->pdev, 8000, &p_tbuf);
1763 if (!tbuf)
1764 return -ENOMEM;
1765#endif
1766
1767 fw = qla1280_request_firmware(ha);
1768 if (IS_ERR(fw))
1769 return PTR_ERR(fw);
1770
1771 fw_data = (const __le16 *)&fw->data[0];
1772 ha->fwstart = __le16_to_cpu(fw_data[2]);
1773
1774 /* Load RISC code. */
1775 risc_address = ha->fwstart;
1776 fw_data = (const __le16 *)&fw->data[6];
1777 risc_code_size = (fw->size - 6) / 2;
1778
1779 dprintk(1, "%s: DMA RISC code (%i) words\n",
1780 __func__, risc_code_size);
1781
1782 num = 0;
1783 while (risc_code_size > 0) {
1784 int warn __attribute__((unused)) = 0;
1785
1786 cnt = 2000 >> 1;
1787
1788 if (cnt > risc_code_size)
1789 cnt = risc_code_size;
1790
1791 dprintk(2, "qla1280_setup_chip: loading risc @ =(0x%p),"
1792 "%d,%d(0x%x)\n",
1793 fw_data, cnt, num, risc_address);
1794 for(i = 0; i < cnt; i++)
1795 ((__le16 *)ha->request_ring)[i] = fw_data[i];
1796
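		/*
		 * Each chunk (at most 1000 words) is staged in the request
		 * ring buffer and DMA'd to the RISC with MBC_LOAD_RAM:
		 * mb[1] = RISC address, mb[4] = word count, and the 64-bit
		 * bus address is split across mb[3]/mb[2] (low dword) and
		 * mb[7]/mb[6] (high dword), low half-word first.
		 */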
1797 mb[0] = MBC_LOAD_RAM;
1798 mb[1] = risc_address;
1799 mb[4] = cnt;
1800 mb[3] = ha->request_dma & 0xffff;
1801 mb[2] = (ha->request_dma >> 16) & 0xffff;
1802 mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff;
1803 mb[6] = pci_dma_hi32(ha->request_dma) >> 16;
1804 dprintk(2, "%s: op=%d 0x%p = 0x%4x,0x%4x,0x%4x,0x%4x\n",
1805 __func__, mb[0],
1806 (void *)(long)ha->request_dma,
1807 mb[6], mb[7], mb[2], mb[3]);
1808 err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
1809 BIT_1 | BIT_0, mb);
1810 if (err) {
1811 printk(KERN_ERR "scsi(%li): Failed to load partial "
1812			       "segment of f/w\n", ha->host_no);
1813 goto out;
1814 }
1815
1816#if DUMP_IT_BACK
1817 mb[0] = MBC_DUMP_RAM;
1818 mb[1] = risc_address;
1819 mb[4] = cnt;
1820 mb[3] = p_tbuf & 0xffff;
1821 mb[2] = (p_tbuf >> 16) & 0xffff;
1822 mb[7] = pci_dma_hi32(p_tbuf) & 0xffff;
1823 mb[6] = pci_dma_hi32(p_tbuf) >> 16;
1824
1825 err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
1826 BIT_1 | BIT_0, mb);
1827 if (err) {
1828 printk(KERN_ERR
1829 "Failed to dump partial segment of f/w\n");
1830 goto out;
1831 }
1832 sp = (uint8_t *)ha->request_ring;
1833 for (i = 0; i < (cnt << 1); i++) {
1834 if (tbuf[i] != sp[i] && warn++ < 10) {
1835 printk(KERN_ERR "%s: FW compare error @ "
1836 "byte(0x%x) loop#=%x\n",
1837 __func__, i, num);
1838 printk(KERN_ERR "%s: FWbyte=%x "
1839 "FWfromChip=%x\n",
1840 __func__, sp[i], tbuf[i]);
1841 /*break; */
1842 }
1843 }
1844#endif
1845 risc_address += cnt;
1846 risc_code_size = risc_code_size - cnt;
1847 fw_data = fw_data + cnt;
1848 num++;
1849 }
1850
1851 out:
1852#if DUMP_IT_BACK
1853 pci_free_consistent(ha->pdev, 8000, tbuf, p_tbuf);
1854#endif
1855 return err;
1856}
1857
1858static int
1859qla1280_start_firmware(struct scsi_qla_host *ha)
1860{
1861 uint16_t mb[MAILBOX_REGISTER_COUNT];
1862 int err;
1863
1864 dprintk(1, "%s: Verifying checksum of loaded RISC code.\n",
1865 __func__);
1866
1867 /* Verify checksum of loaded RISC code. */
1868 mb[0] = MBC_VERIFY_CHECKSUM;
1869 /* mb[1] = ql12_risc_code_addr01; */
1870 mb[1] = ha->fwstart;
1871 err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
1872 if (err) {
1873 printk(KERN_ERR "scsi(%li): RISC checksum failed.\n", ha->host_no);
1874 return err;
1875 }
1876
1877 /* Start firmware execution. */
1878 dprintk(1, "%s: start firmware running.\n", __func__);
1879 mb[0] = MBC_EXECUTE_FIRMWARE;
1880 mb[1] = ha->fwstart;
1881 err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
1882 if (err) {
1883 printk(KERN_ERR "scsi(%li): Failed to start firmware\n",
1884 ha->host_no);
1885 }
1886
1887 return err;
1888}
1889
1890static int
1891qla1280_load_firmware(struct scsi_qla_host *ha)
1892{
1893 /* enter with host_lock taken */
1894 int err;
1895
1896 err = qla1280_chip_diag(ha);
1897 if (err)
1898 goto out;
1899 if (IS_ISP1040(ha))
1900 err = qla1280_load_firmware_pio(ha);
1901 else
1902 err = qla1280_load_firmware_dma(ha);
1903 if (err)
1904 goto out;
1905 err = qla1280_start_firmware(ha);
1906 out:
1907 return err;
1908}
1909
1910/*
1911 * Initialize rings
1912 *
1913 * Input:
1914 * ha = adapter block pointer.
1915 * ha->request_ring = request ring virtual address
1916 * ha->response_ring = response ring virtual address
1917 * ha->request_dma = request ring physical address
1918 * ha->response_dma = response ring physical address
1919 *
1920 * Returns:
1921 * 0 = success.
1922 */
1923static int
1924qla1280_init_rings(struct scsi_qla_host *ha)
1925{
1926 uint16_t mb[MAILBOX_REGISTER_COUNT];
1927 int status = 0;
1928
1929 ENTER("qla1280_init_rings");
1930
1931 /* Clear outstanding commands array. */
1932 memset(ha->outstanding_cmds, 0,
1933 sizeof(struct srb *) * MAX_OUTSTANDING_COMMANDS);
1934
1935 /* Initialize request queue. */
1936 ha->request_ring_ptr = ha->request_ring;
1937 ha->req_ring_index = 0;
1938 ha->req_q_cnt = REQUEST_ENTRY_CNT;
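	/*
	 * The _A64 variants of the queue-init commands take the full 64-bit
	 * ring base address, split across mb[3]/mb[2] (low dword) and
	 * mb[7]/mb[6] (high dword); mb[4] (request) and mb[5] (response)
	 * seed the initial ring index at zero.
	 */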
1939 /* mb[0] = MBC_INIT_REQUEST_QUEUE; */
1940 mb[0] = MBC_INIT_REQUEST_QUEUE_A64;
1941 mb[1] = REQUEST_ENTRY_CNT;
1942 mb[3] = ha->request_dma & 0xffff;
1943 mb[2] = (ha->request_dma >> 16) & 0xffff;
1944 mb[4] = 0;
1945 mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff;
1946 mb[6] = pci_dma_hi32(ha->request_dma) >> 16;
1947 if (!(status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_4 |
1948 BIT_3 | BIT_2 | BIT_1 | BIT_0,
1949 &mb[0]))) {
1950 /* Initialize response queue. */
1951 ha->response_ring_ptr = ha->response_ring;
1952 ha->rsp_ring_index = 0;
1953 /* mb[0] = MBC_INIT_RESPONSE_QUEUE; */
1954 mb[0] = MBC_INIT_RESPONSE_QUEUE_A64;
1955 mb[1] = RESPONSE_ENTRY_CNT;
1956 mb[3] = ha->response_dma & 0xffff;
1957 mb[2] = (ha->response_dma >> 16) & 0xffff;
1958 mb[5] = 0;
1959 mb[7] = pci_dma_hi32(ha->response_dma) & 0xffff;
1960 mb[6] = pci_dma_hi32(ha->response_dma) >> 16;
1961 status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_5 |
1962 BIT_3 | BIT_2 | BIT_1 | BIT_0,
1963 &mb[0]);
1964 }
1965
1966 if (status)
1967 dprintk(2, "qla1280_init_rings: **** FAILED ****\n");
1968
1969 LEAVE("qla1280_init_rings");
1970 return status;
1971}
1972
1973static void
1974qla1280_print_settings(struct nvram *nv)
1975{
1976 dprintk(1, "qla1280 : initiator scsi id bus[0]=%d\n",
1977 nv->bus[0].config_1.initiator_id);
1978 dprintk(1, "qla1280 : initiator scsi id bus[1]=%d\n",
1979 nv->bus[1].config_1.initiator_id);
1980
1981 dprintk(1, "qla1280 : bus reset delay[0]=%d\n",
1982 nv->bus[0].bus_reset_delay);
1983 dprintk(1, "qla1280 : bus reset delay[1]=%d\n",
1984 nv->bus[1].bus_reset_delay);
1985
1986 dprintk(1, "qla1280 : retry count[0]=%d\n", nv->bus[0].retry_count);
1987 dprintk(1, "qla1280 : retry delay[0]=%d\n", nv->bus[0].retry_delay);
1988 dprintk(1, "qla1280 : retry count[1]=%d\n", nv->bus[1].retry_count);
1989 dprintk(1, "qla1280 : retry delay[1]=%d\n", nv->bus[1].retry_delay);
1990
1991 dprintk(1, "qla1280 : async data setup time[0]=%d\n",
1992 nv->bus[0].config_2.async_data_setup_time);
1993 dprintk(1, "qla1280 : async data setup time[1]=%d\n",
1994 nv->bus[1].config_2.async_data_setup_time);
1995
1996 dprintk(1, "qla1280 : req/ack active negation[0]=%d\n",
1997 nv->bus[0].config_2.req_ack_active_negation);
1998 dprintk(1, "qla1280 : req/ack active negation[1]=%d\n",
1999 nv->bus[1].config_2.req_ack_active_negation);
2000
2001 dprintk(1, "qla1280 : data line active negation[0]=%d\n",
2002 nv->bus[0].config_2.data_line_active_negation);
2003 dprintk(1, "qla1280 : data line active negation[1]=%d\n",
2004 nv->bus[1].config_2.data_line_active_negation);
2005
2006 dprintk(1, "qla1280 : disable loading risc code=%d\n",
2007 nv->cntr_flags_1.disable_loading_risc_code);
2008
2009 dprintk(1, "qla1280 : enable 64bit addressing=%d\n",
2010 nv->cntr_flags_1.enable_64bit_addressing);
2011
2012 dprintk(1, "qla1280 : selection timeout limit[0]=%d\n",
2013 nv->bus[0].selection_timeout);
2014 dprintk(1, "qla1280 : selection timeout limit[1]=%d\n",
2015 nv->bus[1].selection_timeout);
2016
2017 dprintk(1, "qla1280 : max queue depth[0]=%d\n",
2018 nv->bus[0].max_queue_depth);
2019 dprintk(1, "qla1280 : max queue depth[1]=%d\n",
2020 nv->bus[1].max_queue_depth);
2021}
2022
2023static void
2024qla1280_set_target_defaults(struct scsi_qla_host *ha, int bus, int target)
2025{
2026 struct nvram *nv = &ha->nvram;
2027
2028 nv->bus[bus].target[target].parameter.renegotiate_on_error = 1;
2029 nv->bus[bus].target[target].parameter.auto_request_sense = 1;
2030 nv->bus[bus].target[target].parameter.tag_queuing = 1;
2031 nv->bus[bus].target[target].parameter.enable_sync = 1;
2032#if 1 /* Some SCSI Processors do not seem to like this */
2033 nv->bus[bus].target[target].parameter.enable_wide = 1;
2034#endif
2035 nv->bus[bus].target[target].execution_throttle =
2036 nv->bus[bus].max_queue_depth - 1;
2037 nv->bus[bus].target[target].parameter.parity_checking = 1;
2038 nv->bus[bus].target[target].parameter.disconnect_allowed = 1;
2039
2040 if (IS_ISP1x160(ha)) {
2041 nv->bus[bus].target[target].flags.flags1x160.device_enable = 1;
2042 nv->bus[bus].target[target].flags.flags1x160.sync_offset = 0x0e;
2043 nv->bus[bus].target[target].sync_period = 9;
2044 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 1;
2045 nv->bus[bus].target[target].ppr_1x160.flags.ppr_options = 2;
2046 nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width = 1;
2047 } else {
2048 nv->bus[bus].target[target].flags.flags1x80.device_enable = 1;
2049 nv->bus[bus].target[target].flags.flags1x80.sync_offset = 12;
2050 nv->bus[bus].target[target].sync_period = 10;
2051 }
2052}
2053
2054static void
2055qla1280_set_defaults(struct scsi_qla_host *ha)
2056{
2057 struct nvram *nv = &ha->nvram;
2058 int bus, target;
2059
2060 dprintk(1, "Using defaults for NVRAM: \n");
2061 memset(nv, 0, sizeof(struct nvram));
2062
2063 /* nv->cntr_flags_1.disable_loading_risc_code = 1; */
2064 nv->firmware_feature.f.enable_fast_posting = 1;
2065 nv->firmware_feature.f.disable_synchronous_backoff = 1;
2066 nv->termination.scsi_bus_0_control = 3;
2067 nv->termination.scsi_bus_1_control = 3;
2068 nv->termination.auto_term_support = 1;
2069
2070 /*
2071 * Set default FIFO magic - What appropriate values would be here
2072 * is unknown. This is what I have found testing with 12160s.
2073 *
2074 * Now, I would love the magic decoder ring for this one, the
2075 * header file provided by QLogic seems to be bogus or incomplete
2076 * at best.
2077 */
2078 nv->isp_config.burst_enable = 1;
2079 if (IS_ISP1040(ha))
2080 nv->isp_config.fifo_threshold |= 3;
2081 else
2082 nv->isp_config.fifo_threshold |= 4;
2083
2084 if (IS_ISP1x160(ha))
2085 nv->isp_parameter = 0x01; /* fast memory enable */
2086
2087 for (bus = 0; bus < MAX_BUSES; bus++) {
2088 nv->bus[bus].config_1.initiator_id = 7;
2089 nv->bus[bus].config_2.req_ack_active_negation = 1;
2090 nv->bus[bus].config_2.data_line_active_negation = 1;
2091 nv->bus[bus].selection_timeout = 250;
2092 nv->bus[bus].max_queue_depth = 32;
2093
2094 if (IS_ISP1040(ha)) {
2095 nv->bus[bus].bus_reset_delay = 3;
2096 nv->bus[bus].config_2.async_data_setup_time = 6;
2097 nv->bus[bus].retry_delay = 1;
2098 } else {
2099 nv->bus[bus].bus_reset_delay = 5;
2100 nv->bus[bus].config_2.async_data_setup_time = 8;
2101 }
2102
2103 for (target = 0; target < MAX_TARGETS; target++)
2104 qla1280_set_target_defaults(ha, bus, target);
2105 }
2106}
2107
2108static int
2109qla1280_config_target(struct scsi_qla_host *ha, int bus, int target)
2110{
2111 struct nvram *nv = &ha->nvram;
2112 uint16_t mb[MAILBOX_REGISTER_COUNT];
2113 int status, lun;
2114 uint16_t flag;
2115
2116 /* Set Target Parameters. */
2117 mb[0] = MBC_SET_TARGET_PARAMETERS;
2118 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
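	/*
	 * mb[1] encodes the addressed device: target ID in the high byte,
	 * with BIT_7 set there to select the second SCSI bus; for the
	 * per-LUN queue settings below, the LUN is OR'ed into the low byte.
	 */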
2119
2120 /*
2121 * Do not enable sync and ppr for the initial INQUIRY run. We
2122 * enable this later if we determine the target actually
2123 * supports it.
2124 */
2125 mb[2] = (TP_RENEGOTIATE | TP_AUTO_REQUEST_SENSE | TP_TAGGED_QUEUE
2126 | TP_WIDE | TP_PARITY | TP_DISCONNECT);
2127
2128 if (IS_ISP1x160(ha))
2129 mb[3] = nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8;
2130 else
2131 mb[3] = nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8;
2132 mb[3] |= nv->bus[bus].target[target].sync_period;
2133 status = qla1280_mailbox_command(ha, 0x0f, mb);
2134
2135 /* Save Tag queuing enable flag. */
2136 flag = (BIT_0 << target);
2137 if (nv->bus[bus].target[target].parameter.tag_queuing)
2138 ha->bus_settings[bus].qtag_enables |= flag;
2139
2140 /* Save Device enable flag. */
2141 if (IS_ISP1x160(ha)) {
2142 if (nv->bus[bus].target[target].flags.flags1x160.device_enable)
2143 ha->bus_settings[bus].device_enables |= flag;
2144 ha->bus_settings[bus].lun_disables |= 0;
2145 } else {
2146 if (nv->bus[bus].target[target].flags.flags1x80.device_enable)
2147 ha->bus_settings[bus].device_enables |= flag;
2148 /* Save LUN disable flag. */
2149 if (nv->bus[bus].target[target].flags.flags1x80.lun_disable)
2150 ha->bus_settings[bus].lun_disables |= flag;
2151 }
2152
2153 /* Set Device Queue Parameters. */
2154 for (lun = 0; lun < MAX_LUNS; lun++) {
2155 mb[0] = MBC_SET_DEVICE_QUEUE;
2156 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
2157 mb[1] |= lun;
2158 mb[2] = nv->bus[bus].max_queue_depth;
2159 mb[3] = nv->bus[bus].target[target].execution_throttle;
2160 status |= qla1280_mailbox_command(ha, 0x0f, mb);
2161 }
2162
2163 return status;
2164}
2165
2166static int
2167qla1280_config_bus(struct scsi_qla_host *ha, int bus)
2168{
2169 struct nvram *nv = &ha->nvram;
2170 uint16_t mb[MAILBOX_REGISTER_COUNT];
2171 int target, status;
2172
2173 /* SCSI Reset Disable. */
2174 ha->bus_settings[bus].disable_scsi_reset =
2175 nv->bus[bus].config_1.scsi_reset_disable;
2176
2177 /* Initiator ID. */
2178 ha->bus_settings[bus].id = nv->bus[bus].config_1.initiator_id;
2179 mb[0] = MBC_SET_INITIATOR_ID;
2180 mb[1] = bus ? ha->bus_settings[bus].id | BIT_7 :
2181 ha->bus_settings[bus].id;
2182 status = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
2183
2184 /* Reset Delay. */
2185 ha->bus_settings[bus].bus_reset_delay =
2186 nv->bus[bus].bus_reset_delay;
2187
2188 /* Command queue depth per device. */
2189 ha->bus_settings[bus].hiwat = nv->bus[bus].max_queue_depth - 1;
2190
2191 /* Set target parameters. */
2192 for (target = 0; target < MAX_TARGETS; target++)
2193 status |= qla1280_config_target(ha, bus, target);
2194
2195 return status;
2196}
2197
2198static int
2199qla1280_nvram_config(struct scsi_qla_host *ha)
2200{
2201 struct device_reg __iomem *reg = ha->iobase;
2202 struct nvram *nv = &ha->nvram;
2203 int bus, target, status = 0;
2204 uint16_t mb[MAILBOX_REGISTER_COUNT];
2205
2206 ENTER("qla1280_nvram_config");
2207
2208 if (ha->nvram_valid) {
2209 /* Always force AUTO sense for LINUX SCSI */
2210 for (bus = 0; bus < MAX_BUSES; bus++)
2211 for (target = 0; target < MAX_TARGETS; target++) {
2212 nv->bus[bus].target[target].parameter.
2213 auto_request_sense = 1;
2214 }
2215 } else {
2216 qla1280_set_defaults(ha);
2217 }
2218
2219 qla1280_print_settings(nv);
2220
2221 /* Disable RISC load of firmware. */
2222 ha->flags.disable_risc_code_load =
2223 nv->cntr_flags_1.disable_loading_risc_code;
2224
2225 if (IS_ISP1040(ha)) {
2226 uint16_t hwrev, cfg1, cdma_conf, ddma_conf;
2227
2228		hwrev = RD_REG_WORD(&reg->cfg_0) & ISP_CFG0_HWMSK;
2229
2230		cfg1 = RD_REG_WORD(&reg->cfg_1) & ~(BIT_4 | BIT_5 | BIT_6);
2231		cdma_conf = RD_REG_WORD(&reg->cdma_cfg);
2232		ddma_conf = RD_REG_WORD(&reg->ddma_cfg);
2233
2234 /* Busted fifo, says mjacob. */
2235 if (hwrev != ISP_CFG0_1040A)
2236 cfg1 |= nv->isp_config.fifo_threshold << 4;
2237
2238 cfg1 |= nv->isp_config.burst_enable << 2;
2239		WRT_REG_WORD(&reg->cfg_1, cfg1);
2240
2241		WRT_REG_WORD(&reg->cdma_cfg, cdma_conf | CDMA_CONF_BENAB);
2242		WRT_REG_WORD(&reg->ddma_cfg, ddma_conf | DDMA_CONF_BENAB);
2243 } else {
2244 uint16_t cfg1, term;
2245
2246 /* Set ISP hardware DMA burst */
2247 cfg1 = nv->isp_config.fifo_threshold << 4;
2248 cfg1 |= nv->isp_config.burst_enable << 2;
2249 /* Enable DMA arbitration on dual channel controllers */
2250 if (ha->ports > 1)
2251 cfg1 |= BIT_13;
2252		WRT_REG_WORD(&reg->cfg_1, cfg1);
2253
2254 /* Set SCSI termination. */
2255		WRT_REG_WORD(&reg->gpio_enable,
2256 BIT_7 | BIT_3 | BIT_2 | BIT_1 | BIT_0);
2257 term = nv->termination.scsi_bus_1_control;
2258 term |= nv->termination.scsi_bus_0_control << 2;
2259 term |= nv->termination.auto_term_support << 7;
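		/*
		 * Termination word written out through the GPIO data
		 * register: bus 1 control in bits 1:0, bus 0 control in
		 * bits 3:2, auto-termination support in bit 7.
		 */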
2260		RD_REG_WORD(&reg->id_l);	/* Flush PCI write */
2261		WRT_REG_WORD(&reg->gpio_data, term);
2262	}
2263	RD_REG_WORD(&reg->id_l);	/* Flush PCI write */
2264
2265 /* ISP parameter word. */
2266 mb[0] = MBC_SET_SYSTEM_PARAMETER;
2267 mb[1] = nv->isp_parameter;
2268 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
2269
2270 if (IS_ISP1x40(ha)) {
2271 /* clock rate - for qla1240 and older, only */
2272 mb[0] = MBC_SET_CLOCK_RATE;
2273 mb[1] = 40;
2274 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2275 }
2276
2277 /* Firmware feature word. */
2278 mb[0] = MBC_SET_FIRMWARE_FEATURES;
2279 mb[1] = nv->firmware_feature.f.enable_fast_posting;
2280 mb[1] |= nv->firmware_feature.f.report_lvd_bus_transition << 1;
2281 mb[1] |= nv->firmware_feature.f.disable_synchronous_backoff << 5;
2282#if defined(CONFIG_IA64_GENERIC) || defined (CONFIG_IA64_SGI_SN2)
2283 if (ia64_platform_is("sn2")) {
2284 printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA "
2285 "workaround\n", ha->host_no);
2286 mb[1] |= nv->firmware_feature.f.unused_9 << 9; /* XXX */
2287 }
2288#endif
2289 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2290
2291 /* Retry count and delay. */
2292 mb[0] = MBC_SET_RETRY_COUNT;
2293 mb[1] = nv->bus[0].retry_count;
2294 mb[2] = nv->bus[0].retry_delay;
2295 mb[6] = nv->bus[1].retry_count;
2296 mb[7] = nv->bus[1].retry_delay;
2297 status |= qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_2 |
2298 BIT_1 | BIT_0, &mb[0]);
2299
2300 /* ASYNC data setup time. */
2301 mb[0] = MBC_SET_ASYNC_DATA_SETUP;
2302 mb[1] = nv->bus[0].config_2.async_data_setup_time;
2303 mb[2] = nv->bus[1].config_2.async_data_setup_time;
2304 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
2305
2306 /* Active negation states. */
2307 mb[0] = MBC_SET_ACTIVE_NEGATION;
2308 mb[1] = 0;
2309 if (nv->bus[0].config_2.req_ack_active_negation)
2310 mb[1] |= BIT_5;
2311 if (nv->bus[0].config_2.data_line_active_negation)
2312 mb[1] |= BIT_4;
2313 mb[2] = 0;
2314 if (nv->bus[1].config_2.req_ack_active_negation)
2315 mb[2] |= BIT_5;
2316 if (nv->bus[1].config_2.data_line_active_negation)
2317 mb[2] |= BIT_4;
2318 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
2319
2320 mb[0] = MBC_SET_DATA_OVERRUN_RECOVERY;
2321 mb[1] = 2; /* Reset SCSI bus and return all outstanding IO */
2322 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2323
2324 /* thingy */
2325 mb[0] = MBC_SET_PCI_CONTROL;
2326 mb[1] = BIT_1; /* Data DMA Channel Burst Enable */
2327 mb[2] = BIT_1; /* Command DMA Channel Burst Enable */
2328 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
2329
2330 mb[0] = MBC_SET_TAG_AGE_LIMIT;
2331 mb[1] = 8;
2332 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2333
2334 /* Selection timeout. */
2335 mb[0] = MBC_SET_SELECTION_TIMEOUT;
2336 mb[1] = nv->bus[0].selection_timeout;
2337 mb[2] = nv->bus[1].selection_timeout;
2338 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
2339
2340 for (bus = 0; bus < ha->ports; bus++)
2341 status |= qla1280_config_bus(ha, bus);
2342
2343 if (status)
2344 dprintk(2, "qla1280_nvram_config: **** FAILED ****\n");
2345
2346 LEAVE("qla1280_nvram_config");
2347 return status;
2348}
2349
2350/*
2351 * Get NVRAM data word
2352 * Calculates word position in NVRAM and calls request routine to
2353 * get the word from NVRAM.
2354 *
2355 * Input:
2356 * ha = adapter block pointer.
2357 * address = NVRAM word address.
2358 *
2359 * Returns:
2360 * data word.
2361 */
2362static uint16_t
2363qla1280_get_nvram_word(struct scsi_qla_host *ha, uint32_t address)
2364{
2365 uint32_t nv_cmd;
2366 uint16_t data;
2367
2368 nv_cmd = address << 16;
2369 nv_cmd |= NV_READ_OP;
2370
2371 data = le16_to_cpu(qla1280_nvram_request(ha, nv_cmd));
2372
2373 dprintk(8, "qla1280_get_nvram_word: exiting normally NVRAM data = "
2374 "0x%x", data);
2375
2376 return data;
2377}
2378
2379/*
2380 * NVRAM request
2381 * Sends read command to NVRAM and gets data from NVRAM.
2382 *
2383 * Input:
2384 * ha = adapter block pointer.
2385 * nv_cmd = Bit 26 = start bit
2386 * Bit 25, 24 = opcode
2387 * Bit 23-16 = address
2388 * Bit 15-0 = write data
2389 *
2390 * Returns:
2391 * data word.
2392 */
2393static uint16_t
2394qla1280_nvram_request(struct scsi_qla_host *ha, uint32_t nv_cmd)
2395{
2396 struct device_reg __iomem *reg = ha->iobase;
2397 int cnt;
2398 uint16_t data = 0;
2399 uint16_t reg_data;
2400
2401 /* Send command to NVRAM. */
2402
2403 nv_cmd <<= 5;
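	/*
	 * The 27-bit command (start bit, opcode, address, data) is shifted
	 * up so the start bit sits at bit 31; the loop below then clocks
	 * out the top 11 bits (start + opcode + address) MSB first.
	 */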
2404 for (cnt = 0; cnt < 11; cnt++) {
2405 if (nv_cmd & BIT_31)
2406 qla1280_nv_write(ha, NV_DATA_OUT);
2407 else
2408 qla1280_nv_write(ha, 0);
2409 nv_cmd <<= 1;
2410 }
2411
2412 /* Read data from NVRAM. */
2413
2414 for (cnt = 0; cnt < 16; cnt++) {
2415		WRT_REG_WORD(&reg->nvram, (NV_SELECT | NV_CLOCK));
2416		RD_REG_WORD(&reg->id_l);	/* Flush PCI write */
2417		NVRAM_DELAY();
2418		data <<= 1;
2419		reg_data = RD_REG_WORD(&reg->nvram);
2420		if (reg_data & NV_DATA_IN)
2421			data |= BIT_0;
2422		WRT_REG_WORD(&reg->nvram, NV_SELECT);
2423		RD_REG_WORD(&reg->id_l);	/* Flush PCI write */
2424 NVRAM_DELAY();
2425 }
2426
2427 /* Deselect chip. */
2428
2429	WRT_REG_WORD(&reg->nvram, NV_DESELECT);
2430	RD_REG_WORD(&reg->id_l);	/* Flush PCI write */
2431 NVRAM_DELAY();
2432
2433 return data;
2434}
2435
2436static void
2437qla1280_nv_write(struct scsi_qla_host *ha, uint16_t data)
2438{
2439 struct device_reg __iomem *reg = ha->iobase;
2440
2441	WRT_REG_WORD(&reg->nvram, data | NV_SELECT);
2442	RD_REG_WORD(&reg->id_l);	/* Flush PCI write */
2443	NVRAM_DELAY();
2444	WRT_REG_WORD(&reg->nvram, data | NV_SELECT | NV_CLOCK);
2445	RD_REG_WORD(&reg->id_l);	/* Flush PCI write */
2446	NVRAM_DELAY();
2447	WRT_REG_WORD(&reg->nvram, data | NV_SELECT);
2448	RD_REG_WORD(&reg->id_l);	/* Flush PCI write */
2449 NVRAM_DELAY();
2450}
2451
2452/*
2453 * Mailbox Command
2454 *	Issues a mailbox command and waits for completion.
2455 *
2456 * Input:
2457 * ha = adapter block pointer.
2458 * mr = mailbox registers to load.
2459 * mb = data pointer for mailbox registers.
2460 *
2461 * Output:
2462 * mb[MAILBOX_REGISTER_COUNT] = returned mailbox data.
2463 *
2464 * Returns:
2465 * 0 = success
2466 */
2467static int
2468qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
2469{
2470 struct device_reg __iomem *reg = ha->iobase;
2471 int status = 0;
2472 int cnt;
2473 uint16_t *optr, *iptr;
2474 uint16_t __iomem *mptr;
2475 uint16_t data;
2476 DECLARE_COMPLETION_ONSTACK(wait);
2477 struct timer_list timer;
2478
2479 ENTER("qla1280_mailbox_command");
2480
2481 if (ha->mailbox_wait) {
2482 printk(KERN_ERR "Warning mailbox wait already in use!\n");
2483 }
2484 ha->mailbox_wait = &wait;
2485
2486 /*
2487 * We really should start out by verifying that the mailbox is
2488	 * available before we start sending the command data.
2489 */
2490 /* Load mailbox registers. */
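	/*
	 * 'mr' is a bitmask of outgoing registers to load: BIT_n set means
	 * mb[n] is written to mailbox register n.  E.g. a caller passing
	 * BIT_1 | BIT_0 only loads mb[0] and mb[1].
	 */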
2491	mptr = (uint16_t __iomem *) &reg->mailbox0;
2492 iptr = mb;
2493 for (cnt = 0; cnt < MAILBOX_REGISTER_COUNT; cnt++) {
2494 if (mr & BIT_0) {
2495 WRT_REG_WORD(mptr, (*iptr));
2496 }
2497
2498 mr >>= 1;
2499 mptr++;
2500 iptr++;
2501 }
2502
2503 /* Issue set host interrupt command. */
2504
2505 /* set up a timer just in case we're really jammed */
2506 init_timer(&timer);
2507 timer.expires = jiffies + 20*HZ;
2508 timer.data = (unsigned long)ha;
2509 timer.function = qla1280_mailbox_timeout;
2510 add_timer(&timer);
2511
2512 spin_unlock_irq(ha->host->host_lock);
2513	WRT_REG_WORD(&reg->host_cmd, HC_SET_HOST_INT);
2514	data = qla1280_debounce_register(&reg->istatus);
2515
2516 wait_for_completion(&wait);
2517 del_timer_sync(&timer);
2518
2519 spin_lock_irq(ha->host->host_lock);
2520
2521 ha->mailbox_wait = NULL;
2522
2523 /* Check for mailbox command timeout. */
2524 if (ha->mailbox_out[0] != MBS_CMD_CMP) {
2525 printk(KERN_WARNING "qla1280_mailbox_command: Command failed, "
2526 "mailbox0 = 0x%04x, mailbox_out0 = 0x%04x, istatus = "
2527 "0x%04x\n",
2528		       mb[0], ha->mailbox_out[0], RD_REG_WORD(&reg->istatus));
2529		printk(KERN_WARNING "m0 %04x, m1 %04x, m2 %04x, m3 %04x\n",
2530		       RD_REG_WORD(&reg->mailbox0), RD_REG_WORD(&reg->mailbox1),
2531		       RD_REG_WORD(&reg->mailbox2), RD_REG_WORD(&reg->mailbox3));
2532		printk(KERN_WARNING "m4 %04x, m5 %04x, m6 %04x, m7 %04x\n",
2533		       RD_REG_WORD(&reg->mailbox4), RD_REG_WORD(&reg->mailbox5),
2534		       RD_REG_WORD(&reg->mailbox6), RD_REG_WORD(&reg->mailbox7));
2535 status = 1;
2536 }
2537
2538 /* Load return mailbox registers. */
2539 optr = mb;
2540 iptr = (uint16_t *) &ha->mailbox_out[0];
2541 mr = MAILBOX_REGISTER_COUNT;
2542 memcpy(optr, iptr, MAILBOX_REGISTER_COUNT * sizeof(uint16_t));
2543
2544 if (ha->flags.reset_marker)
2545 qla1280_rst_aen(ha);
2546
2547 if (status)
2548 dprintk(2, "qla1280_mailbox_command: **** FAILED, mailbox0 = "
2549 "0x%x ****\n", mb[0]);
2550
2551 LEAVE("qla1280_mailbox_command");
2552 return status;
2553}
2554
2555/*
2556 * qla1280_poll
2557 * Polls ISP for interrupts.
2558 *
2559 * Input:
2560 * ha = adapter block pointer.
2561 */
2562static void
2563qla1280_poll(struct scsi_qla_host *ha)
2564{
2565 struct device_reg __iomem *reg = ha->iobase;
2566 uint16_t data;
2567 LIST_HEAD(done_q);
2568
2569 /* ENTER("qla1280_poll"); */
2570
2571 /* Check for pending interrupts. */
2572	data = RD_REG_WORD(&reg->istatus);
2573 if (data & RISC_INT)
2574 qla1280_isr(ha, &done_q);
2575
2576 if (!ha->mailbox_wait) {
2577 if (ha->flags.reset_marker)
2578 qla1280_rst_aen(ha);
2579 }
2580
2581 if (!list_empty(&done_q))
2582 qla1280_done(ha);
2583
2584 /* LEAVE("qla1280_poll"); */
2585}
2586
2587/*
2588 * qla1280_bus_reset
2589 * Issue SCSI bus reset.
2590 *
2591 * Input:
2592 * ha = adapter block pointer.
2593 * bus = SCSI bus number.
2594 *
2595 * Returns:
2596 * 0 = success
2597 */
2598static int
2599qla1280_bus_reset(struct scsi_qla_host *ha, int bus)
2600{
2601 uint16_t mb[MAILBOX_REGISTER_COUNT];
2602 uint16_t reset_delay;
2603 int status;
2604
2605 dprintk(3, "qla1280_bus_reset: entered\n");
2606
2607 if (qla1280_verbose)
2608 printk(KERN_INFO "scsi(%li:%i): Resetting SCSI BUS\n",
2609 ha->host_no, bus);
2610
2611 reset_delay = ha->bus_settings[bus].bus_reset_delay;
2612 mb[0] = MBC_BUS_RESET;
2613 mb[1] = reset_delay;
2614 mb[2] = (uint16_t) bus;
2615 status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
2616
2617 if (status) {
2618 if (ha->bus_settings[bus].failed_reset_count > 2)
2619 ha->bus_settings[bus].scsi_bus_dead = 1;
2620 ha->bus_settings[bus].failed_reset_count++;
2621 } else {
2622 spin_unlock_irq(ha->host->host_lock);
2623 ssleep(reset_delay);
2624 spin_lock_irq(ha->host->host_lock);
2625
2626 ha->bus_settings[bus].scsi_bus_dead = 0;
2627 ha->bus_settings[bus].failed_reset_count = 0;
2628 ha->bus_settings[bus].reset_marker = 0;
2629 /* Issue marker command. */
2630 qla1280_marker(ha, bus, 0, 0, MK_SYNC_ALL);
2631 }
2632
2633 /*
2634 * We should probably call qla1280_set_target_parameters()
2635 * here as well for all devices on the bus.
2636 */
2637
2638 if (status)
2639 dprintk(2, "qla1280_bus_reset: **** FAILED ****\n");
2640 else
2641 dprintk(3, "qla1280_bus_reset: exiting normally\n");
2642
2643 return status;
2644}
2645
2646/*
2647 * qla1280_device_reset
2648 * Issue bus device reset message to the target.
2649 *
2650 * Input:
2651 * ha = adapter block pointer.
2652 * bus = SCSI BUS number.
2653 * target = SCSI ID.
2654 *
2655 * Returns:
2656 * 0 = success
2657 */
2658static int
2659qla1280_device_reset(struct scsi_qla_host *ha, int bus, int target)
2660{
2661 uint16_t mb[MAILBOX_REGISTER_COUNT];
2662 int status;
2663
2664 ENTER("qla1280_device_reset");
2665
2666 mb[0] = MBC_ABORT_TARGET;
2667 mb[1] = (bus ? (target | BIT_7) : target) << 8;
2668 mb[2] = 1;
2669 status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
2670
2671 /* Issue marker command. */
2672 qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
2673
2674 if (status)
2675 dprintk(2, "qla1280_device_reset: **** FAILED ****\n");
2676
2677 LEAVE("qla1280_device_reset");
2678 return status;
2679}
2680
2681/*
2682 * qla1280_abort_command
2683 * Abort command aborts a specified IOCB.
2684 *
2685 * Input:
2686 * ha = adapter block pointer.
2687 * sp = SB structure pointer.
2688 *
2689 * Returns:
2690 * 0 = success
2691 */
2692static int
2693qla1280_abort_command(struct scsi_qla_host *ha, struct srb * sp, int handle)
2694{
2695 uint16_t mb[MAILBOX_REGISTER_COUNT];
2696 unsigned int bus, target, lun;
2697 int status;
2698
2699 ENTER("qla1280_abort_command");
2700
2701 bus = SCSI_BUS_32(sp->cmd);
2702 target = SCSI_TCN_32(sp->cmd);
2703 lun = SCSI_LUN_32(sp->cmd);
2704
2705 sp->flags |= SRB_ABORT_PENDING;
2706
2707 mb[0] = MBC_ABORT_COMMAND;
2708 mb[1] = (bus ? target | BIT_7 : target) << 8 | lun;
2709 mb[2] = handle >> 16;
2710 mb[3] = handle & 0xffff;
2711 status = qla1280_mailbox_command(ha, 0x0f, &mb[0]);
2712
2713 if (status) {
2714 dprintk(2, "qla1280_abort_command: **** FAILED ****\n");
2715 sp->flags &= ~SRB_ABORT_PENDING;
2716 }
2717
2718
2719 LEAVE("qla1280_abort_command");
2720 return status;
2721}
2722
2723/*
2724 * qla1280_reset_adapter
2725 * Reset adapter.
2726 *
2727 * Input:
2728 * ha = adapter block pointer.
2729 */
2730static void
2731qla1280_reset_adapter(struct scsi_qla_host *ha)
2732{
2733 struct device_reg __iomem *reg = ha->iobase;
2734
2735 ENTER("qla1280_reset_adapter");
2736
2737 /* Disable ISP chip */
2738 ha->flags.online = 0;
2739	WRT_REG_WORD(&reg->ictrl, ISP_RESET);
2740	WRT_REG_WORD(&reg->host_cmd,
2741		     HC_RESET_RISC | HC_RELEASE_RISC | HC_DISABLE_BIOS);
2742	RD_REG_WORD(&reg->id_l);	/* Flush PCI write */
2743
2744 LEAVE("qla1280_reset_adapter");
2745}
2746
2747/*
2748 * Issue marker command.
2749 * Function issues marker IOCB.
2750 *
2751 * Input:
2752 * ha = adapter block pointer.
2753 * bus = SCSI BUS number
2754 * id = SCSI ID
2755 * lun = SCSI LUN
2756 * type = marker modifier
2757 */
2758static void
2759qla1280_marker(struct scsi_qla_host *ha, int bus, int id, int lun, u8 type)
2760{
2761 struct mrk_entry *pkt;
2762
2763 ENTER("qla1280_marker");
2764
2765 /* Get request packet. */
2766 if ((pkt = (struct mrk_entry *) qla1280_req_pkt(ha))) {
2767 pkt->entry_type = MARKER_TYPE;
2768 pkt->lun = (uint8_t) lun;
2769 pkt->target = (uint8_t) (bus ? (id | BIT_7) : id);
2770 pkt->modifier = type;
2771 pkt->entry_status = 0;
2772
2773 /* Issue command to ISP */
2774 qla1280_isp_cmd(ha);
2775 }
2776
2777 LEAVE("qla1280_marker");
2778}
2779
2780
2781/*
2782 * qla1280_64bit_start_scsi
2783 *	The start-SCSI routine is responsible for building request packets
2784 *	on the request ring and modifying the ISP input pointer.
2785 *
2786 * Input:
2787 * ha = adapter block pointer.
2788 * sp = SB structure pointer.
2789 *
2790 * Returns:
2791 * 0 = success, was able to issue command.
2792 */
2793#ifdef QLA_64BIT_PTR
2794static int
2795qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
2796{
2797 struct device_reg __iomem *reg = ha->iobase;
2798 struct scsi_cmnd *cmd = sp->cmd;
2799 cmd_a64_entry_t *pkt;
2800 __le32 *dword_ptr;
2801 dma_addr_t dma_handle;
2802 int status = 0;
2803 int cnt;
2804 int req_cnt;
2805 int seg_cnt;
2806 u8 dir;
2807
2808 ENTER("qla1280_64bit_start_scsi:");
2809
2810 /* Calculate number of entries and segments required. */
2811 req_cnt = 1;
2812 seg_cnt = scsi_dma_map(cmd);
2813 if (seg_cnt > 0) {
2814 if (seg_cnt > 2) {
2815 req_cnt += (seg_cnt - 2) / 5;
2816 if ((seg_cnt - 2) % 5)
2817 req_cnt++;
2818 }
2819 } else if (seg_cnt < 0) {
2820 status = 1;
2821 goto out;
2822 }
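	/*
	 * The command IOCB carries up to 2 data segment descriptors and
	 * each continuation IOCB carries up to 5 more, hence the math
	 * above; e.g. seg_cnt = 12 gives req_cnt = 1 + (12 - 2) / 5 = 3.
	 */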
2823
2824 if ((req_cnt + 2) >= ha->req_q_cnt) {
2825 /* Calculate number of free request entries. */
2826		cnt = RD_REG_WORD(&reg->mailbox4);
2827 if (ha->req_ring_index < cnt)
2828 ha->req_q_cnt = cnt - ha->req_ring_index;
2829 else
2830 ha->req_q_cnt =
2831 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
2832 }
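	/*
	 * Reading mailbox4 reports how far the RISC has consumed the
	 * request ring, so the free-entry count is the distance from our
	 * "in" index to that point, modulo REQUEST_ENTRY_CNT.
	 */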
2833
2834 dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n",
2835 ha->req_q_cnt, seg_cnt);
2836
2837 /* If room for request in request ring. */
2838 if ((req_cnt + 2) >= ha->req_q_cnt) {
2839 status = SCSI_MLQUEUE_HOST_BUSY;
2840 dprintk(2, "qla1280_start_scsi: in-ptr=0x%x req_q_cnt="
2841			"0x%x req_cnt=0x%x\n", ha->req_ring_index, ha->req_q_cnt,
2842 req_cnt);
2843 goto out;
2844 }
2845
2846 /* Check for room in outstanding command list. */
2847 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS &&
2848 ha->outstanding_cmds[cnt] != NULL; cnt++);
2849
2850 if (cnt >= MAX_OUTSTANDING_COMMANDS) {
2851 status = SCSI_MLQUEUE_HOST_BUSY;
2852 dprintk(2, "qla1280_start_scsi: NO ROOM IN "
2853 "OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt);
2854 goto out;
2855 }
2856
2857 ha->outstanding_cmds[cnt] = sp;
2858 ha->req_q_cnt -= req_cnt;
2859 CMD_HANDLE(sp->cmd) = (unsigned char *)(unsigned long)(cnt + 1);
2860
2861	dprintk(2, "start: cmd=%p sp=%p CDB=%x, handle %lx\n", cmd, sp,
2862 cmd->cmnd[0], (long)CMD_HANDLE(sp->cmd));
2863 dprintk(2, " bus %i, target %i, lun %i\n",
2864 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
2865 qla1280_dump_buffer(2, cmd->cmnd, MAX_COMMAND_SIZE);
2866
2867 /*
2868 * Build command packet.
2869 */
2870 pkt = (cmd_a64_entry_t *) ha->request_ring_ptr;
2871
2872 pkt->entry_type = COMMAND_A64_TYPE;
2873 pkt->entry_count = (uint8_t) req_cnt;
2874 pkt->sys_define = (uint8_t) ha->req_ring_index;
2875 pkt->entry_status = 0;
2876 pkt->handle = cpu_to_le32(cnt);
2877
2878 /* Zero out remaining portion of packet. */
2879 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
2880
2881 /* Set ISP command timeout. */
2882 pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
2883
2884 /* Set device target ID and LUN */
2885 pkt->lun = SCSI_LUN_32(cmd);
2886 pkt->target = SCSI_BUS_32(cmd) ?
2887 (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd);
2888
2889 /* Enable simple tag queuing if device supports it. */
2890 if (cmd->device->simple_tags)
2891 pkt->control_flags |= cpu_to_le16(BIT_3);
2892
2893 /* Load SCSI command packet. */
2894 pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
2895 memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
2896 /* dprintk(1, "Build packet for command[0]=0x%x\n",pkt->scsi_cdb[0]); */
2897
2898 /* Set transfer direction. */
2899 dir = qla1280_data_direction(cmd);
2900 pkt->control_flags |= cpu_to_le16(dir);
2901
2902 /* Set total data segment count. */
2903 pkt->dseg_count = cpu_to_le16(seg_cnt);
2904
2905 /*
2906 * Load data segments.
2907 */
2908 if (seg_cnt) { /* If data transfer. */
2909 struct scatterlist *sg, *s;
2910 int remseg = seg_cnt;
2911
2912 sg = scsi_sglist(cmd);
2913
2914 /* Setup packet address segment pointer. */
2915 dword_ptr = (u32 *)&pkt->dseg_0_address;
2916
2917 /* Load command entry data segments. */
2918 for_each_sg(sg, s, seg_cnt, cnt) {
2919 if (cnt == 2)
2920 break;
2921
2922 dma_handle = sg_dma_address(s);
2923#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
2924 if (ha->flags.use_pci_vchannel)
2925 sn_pci_set_vchan(ha->pdev,
2926 (unsigned long *)&dma_handle,
2927 SCSI_BUS_32(cmd));
2928#endif
2929 *dword_ptr++ =
2930 cpu_to_le32(pci_dma_lo32(dma_handle));
2931 *dword_ptr++ =
2932 cpu_to_le32(pci_dma_hi32(dma_handle));
2933 *dword_ptr++ = cpu_to_le32(sg_dma_len(s));
2934 dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n",
2935 cpu_to_le32(pci_dma_hi32(dma_handle)),
2936 cpu_to_le32(pci_dma_lo32(dma_handle)),
2937				cpu_to_le32(sg_dma_len(s)));
2938 remseg--;
2939 }
2940 dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather "
2941 "command packet data - b %i, t %i, l %i \n",
2942 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd),
2943 SCSI_LUN_32(cmd));
2944 qla1280_dump_buffer(5, (char *)pkt,
2945 REQUEST_ENTRY_SIZE);
2946
2947 /*
2948 * Build continuation packets.
2949 */
2950 dprintk(3, "S/G Building Continuation...seg_cnt=0x%x "
2951 "remains\n", seg_cnt);
2952
2953 while (remseg > 0) {
2954 /* Update sg start */
2955 sg = s;
2956 /* Adjust ring index. */
2957 ha->req_ring_index++;
2958 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
2959 ha->req_ring_index = 0;
2960 ha->request_ring_ptr =
2961 ha->request_ring;
2962 } else
2963 ha->request_ring_ptr++;
2964
2965 pkt = (cmd_a64_entry_t *)ha->request_ring_ptr;
2966
2967 /* Zero out packet. */
2968 memset(pkt, 0, REQUEST_ENTRY_SIZE);
2969
2970 /* Load packet defaults. */
2971 ((struct cont_a64_entry *) pkt)->entry_type =
2972 CONTINUE_A64_TYPE;
2973 ((struct cont_a64_entry *) pkt)->entry_count = 1;
2974 ((struct cont_a64_entry *) pkt)->sys_define =
2975 (uint8_t)ha->req_ring_index;
2976 /* Setup packet address segment pointer. */
2977 dword_ptr =
2978 (u32 *)&((struct cont_a64_entry *) pkt)->dseg_0_address;
2979
2980 /* Load continuation entry data segments. */
2981 for_each_sg(sg, s, remseg, cnt) {
2982 if (cnt == 5)
2983 break;
2984 dma_handle = sg_dma_address(s);
2985#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
2986 if (ha->flags.use_pci_vchannel)
2987 sn_pci_set_vchan(ha->pdev,
2988 (unsigned long *)&dma_handle,
2989 SCSI_BUS_32(cmd));
2990#endif
2991 *dword_ptr++ =
2992 cpu_to_le32(pci_dma_lo32(dma_handle));
2993 *dword_ptr++ =
2994 cpu_to_le32(pci_dma_hi32(dma_handle));
2995 *dword_ptr++ =
2996 cpu_to_le32(sg_dma_len(s));
2997 dprintk(3, "S/G Segment Cont. phys_addr=%x %x, len=0x%x\n",
2998 cpu_to_le32(pci_dma_hi32(dma_handle)),
2999 cpu_to_le32(pci_dma_lo32(dma_handle)),
3000 cpu_to_le32(sg_dma_len(s)));
3001 }
3002 remseg -= cnt;
3003 dprintk(5, "qla1280_64bit_start_scsi: "
3004 "continuation packet data - b %i, t "
3005 "%i, l %i \n", SCSI_BUS_32(cmd),
3006 SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
3007 qla1280_dump_buffer(5, (char *)pkt,
3008 REQUEST_ENTRY_SIZE);
3009 }
3010 } else { /* No data transfer */
3011 dprintk(5, "qla1280_64bit_start_scsi: No data, command "
3012 "packet data - b %i, t %i, l %i \n",
3013 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
3014 qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE);
3015 }
3016 /* Adjust ring index. */
3017 ha->req_ring_index++;
3018 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3019 ha->req_ring_index = 0;
3020 ha->request_ring_ptr = ha->request_ring;
3021 } else
3022 ha->request_ring_ptr++;
3023
3024 /* Set chip new ring index. */
3025 dprintk(2,
3026 "qla1280_64bit_start_scsi: Wakeup RISC for pending command\n");
3027 sp->flags |= SRB_SENT;
3028 ha->actthreads++;
3029	WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
3030 /* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */
3031 mmiowb();
3032
3033 out:
3034 if (status)
3035 dprintk(2, "qla1280_64bit_start_scsi: **** FAILED ****\n");
3036 else
3037 dprintk(3, "qla1280_64bit_start_scsi: exiting normally\n");
3038
3039 return status;
3040}
3041#else /* !QLA_64BIT_PTR */
3042
3043/*
3044 * qla1280_32bit_start_scsi
3045 *	The start-SCSI routine is responsible for building request packets
3046 *	on the request ring and modifying the ISP input pointer.
3047 *
3048 * The Qlogic firmware interface allows every queue slot to have a SCSI
3049 * command and up to 4 scatter/gather (SG) entries. If we need more
3050 * than 4 SG entries, then continuation entries are used that can
3051 * hold another 7 entries each. The start routine determines whether there
3052 * are enough empty slots and then builds the combination of requests to
3053 * fulfill the OS request.
3054 *
3055 * Input:
3056 * ha = adapter block pointer.
3057 * sp = SCSI Request Block structure pointer.
3058 *
3059 * Returns:
3060 * 0 = success, was able to issue command.
3061 */
3062static int
3063qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
3064{
3065 struct device_reg __iomem *reg = ha->iobase;
3066 struct scsi_cmnd *cmd = sp->cmd;
3067 struct cmd_entry *pkt;
3068 __le32 *dword_ptr;
3069 int status = 0;
3070 int cnt;
3071 int req_cnt;
3072 int seg_cnt;
3073 u8 dir;
3074
3075 ENTER("qla1280_32bit_start_scsi");
3076
3077 dprintk(1, "32bit_start: cmd=%p sp=%p CDB=%x\n", cmd, sp,
3078 cmd->cmnd[0]);
3079
3080 /* Calculate number of entries and segments required. */
3081 req_cnt = 1;
3082 seg_cnt = scsi_dma_map(cmd);
3083 if (seg_cnt) {
3084 /*
3085 * if greater than four sg entries then we need to allocate
3086 * continuation entries
3087 */
3088 if (seg_cnt > 4) {
3089 req_cnt += (seg_cnt - 4) / 7;
3090 if ((seg_cnt - 4) % 7)
3091 req_cnt++;
3092 }
3093 dprintk(3, "S/G Transfer cmd=%p seg_cnt=0x%x, req_cnt=%x\n",
3094 cmd, seg_cnt, req_cnt);
3095 } else if (seg_cnt < 0) {
3096 status = 1;
3097 goto out;
3098 }
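	/*
	 * Here the command IOCB holds up to 4 data segment descriptors and
	 * each continuation IOCB holds 7 more; e.g. seg_cnt = 20 gives
	 * req_cnt = 1 + (20 - 4) / 7 (+1 for the remainder) = 4.
	 */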
3099
3100 if ((req_cnt + 2) >= ha->req_q_cnt) {
3101 /* Calculate number of free request entries. */
3102		cnt = RD_REG_WORD(&reg->mailbox4);
3103 if (ha->req_ring_index < cnt)
3104 ha->req_q_cnt = cnt - ha->req_ring_index;
3105 else
3106 ha->req_q_cnt =
3107 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
3108 }
3109
3110 dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n",
3111 ha->req_q_cnt, seg_cnt);
3112 /* If room for request in request ring. */
3113 if ((req_cnt + 2) >= ha->req_q_cnt) {
3114 status = SCSI_MLQUEUE_HOST_BUSY;
3115 dprintk(2, "qla1280_32bit_start_scsi: in-ptr=0x%x, "
3116 "req_q_cnt=0x%x, req_cnt=0x%x", ha->req_ring_index,
3117 ha->req_q_cnt, req_cnt);
3118 goto out;
3119 }
3120
3121 /* Check for empty slot in outstanding command list. */
3122 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS &&
3123 (ha->outstanding_cmds[cnt] != 0); cnt++) ;
3124
3125 if (cnt >= MAX_OUTSTANDING_COMMANDS) {
3126 status = SCSI_MLQUEUE_HOST_BUSY;
3127 dprintk(2, "qla1280_32bit_start_scsi: NO ROOM IN OUTSTANDING "
3128 "ARRAY, req_q_cnt=0x%x\n", ha->req_q_cnt);
3129 goto out;
3130 }
3131
3132 CMD_HANDLE(sp->cmd) = (unsigned char *) (unsigned long)(cnt + 1);
3133 ha->outstanding_cmds[cnt] = sp;
3134 ha->req_q_cnt -= req_cnt;
3135
3136 /*
3137 * Build command packet.
3138 */
3139 pkt = (struct cmd_entry *) ha->request_ring_ptr;
3140
3141 pkt->entry_type = COMMAND_TYPE;
3142 pkt->entry_count = (uint8_t) req_cnt;
3143 pkt->sys_define = (uint8_t) ha->req_ring_index;
3144 pkt->entry_status = 0;
3145 pkt->handle = cpu_to_le32(cnt);
3146
3147 /* Zero out remaining portion of packet. */
3148 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
3149
3150 /* Set ISP command timeout. */
3151 pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
3152
3153 /* Set device target ID and LUN */
3154 pkt->lun = SCSI_LUN_32(cmd);
3155 pkt->target = SCSI_BUS_32(cmd) ?
3156 (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd);
3157
3158 /* Enable simple tag queuing if device supports it. */
3159 if (cmd->device->simple_tags)
3160 pkt->control_flags |= cpu_to_le16(BIT_3);
3161
3162 /* Load SCSI command packet. */
3163 pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
3164 memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
3165
3166 /*dprintk(1, "Build packet for command[0]=0x%x\n",pkt->scsi_cdb[0]); */
3167 /* Set transfer direction. */
3168 dir = qla1280_data_direction(cmd);
3169 pkt->control_flags |= cpu_to_le16(dir);
3170
3171 /* Set total data segment count. */
3172 pkt->dseg_count = cpu_to_le16(seg_cnt);
3173
3174 /*
3175 * Load data segments.
3176 */
3177 if (seg_cnt) {
3178 struct scatterlist *sg, *s;
3179 int remseg = seg_cnt;
3180
3181 sg = scsi_sglist(cmd);
3182
3183 /* Setup packet address segment pointer. */
3184 dword_ptr = &pkt->dseg_0_address;
3185
3186 dprintk(3, "Building S/G data segments..\n");
3187 qla1280_dump_buffer(1, (char *)sg, 4 * 16);
3188
3189 /* Load command entry data segments. */
3190 for_each_sg(sg, s, seg_cnt, cnt) {
3191 if (cnt == 4)
3192 break;
3193 *dword_ptr++ =
3194 cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
3195 *dword_ptr++ = cpu_to_le32(sg_dma_len(s));
3196 dprintk(3, "S/G Segment phys_addr=0x%lx, len=0x%x\n",
3197 (pci_dma_lo32(sg_dma_address(s))),
3198 (sg_dma_len(s)));
3199 remseg--;
3200 }
3201 /*
3202 * Build continuation packets.
3203 */
3204 dprintk(3, "S/G Building Continuation"
3205 "...seg_cnt=0x%x remains\n", seg_cnt);
3206 while (remseg > 0) {
3207 /* Continue from end point */
3208 sg = s;
3209 /* Adjust ring index. */
3210 ha->req_ring_index++;
3211 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3212 ha->req_ring_index = 0;
3213 ha->request_ring_ptr =
3214 ha->request_ring;
3215 } else
3216 ha->request_ring_ptr++;
3217
3218 pkt = (struct cmd_entry *)ha->request_ring_ptr;
3219
3220 /* Zero out packet. */
3221 memset(pkt, 0, REQUEST_ENTRY_SIZE);
3222
3223 /* Load packet defaults. */
3224 ((struct cont_entry *) pkt)->
3225 entry_type = CONTINUE_TYPE;
3226 ((struct cont_entry *) pkt)->entry_count = 1;
3227
3228 ((struct cont_entry *) pkt)->sys_define =
3229 (uint8_t) ha->req_ring_index;
3230
3231 /* Setup packet address segment pointer. */
3232 dword_ptr =
3233 &((struct cont_entry *) pkt)->dseg_0_address;
3234
3235 /* Load continuation entry data segments. */
3236 for_each_sg(sg, s, remseg, cnt) {
3237 if (cnt == 7)
3238 break;
3239 *dword_ptr++ =
3240 cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
3241 *dword_ptr++ =
3242 cpu_to_le32(sg_dma_len(s));
3243 dprintk(1,
3244 "S/G Segment Cont. phys_addr=0x%x, "
3245 "len=0x%x\n",
3246 cpu_to_le32(pci_dma_lo32(sg_dma_address(s))),
3247 cpu_to_le32(sg_dma_len(s)));
3248 }
3249 remseg -= cnt;
3250 dprintk(5, "qla1280_32bit_start_scsi: "
3251 "continuation packet data - "
3252 "scsi(%i:%i:%i)\n", SCSI_BUS_32(cmd),
3253 SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
3254 qla1280_dump_buffer(5, (char *)pkt,
3255 REQUEST_ENTRY_SIZE);
3256 }
3257 } else { /* No data transfer at all */
3258 dprintk(5, "qla1280_32bit_start_scsi: No data, command "
3259 "packet data - \n");
3260 qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE);
3261 }
3262 dprintk(5, "qla1280_32bit_start_scsi: First IOCB block:\n");
3263 qla1280_dump_buffer(5, (char *)ha->request_ring_ptr,
3264 REQUEST_ENTRY_SIZE);
3265
3266 /* Adjust ring index. */
3267 ha->req_ring_index++;
3268 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3269 ha->req_ring_index = 0;
3270 ha->request_ring_ptr = ha->request_ring;
3271 } else
3272 ha->request_ring_ptr++;
3273
3274 /* Set chip new ring index. */
3275 dprintk(2, "qla1280_32bit_start_scsi: Wakeup RISC "
3276 "for pending command\n");
3277 sp->flags |= SRB_SENT;
3278 ha->actthreads++;
3279	WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
3280 /* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */
3281 mmiowb();
3282
3283out:
3284 if (status)
3285 dprintk(2, "qla1280_32bit_start_scsi: **** FAILED ****\n");
3286
3287 LEAVE("qla1280_32bit_start_scsi");
3288
3289 return status;
3290}
3291#endif
3292
3293/*
3294 * qla1280_req_pkt
3295 * Function is responsible for locking ring and
3296 * getting a zeroed out request packet.
3297 *
3298 * Input:
3299 * ha = adapter block pointer.
3300 *
3301 * Returns:
3302 * 0 = failed to get slot.
3303 */
3304static request_t *
3305qla1280_req_pkt(struct scsi_qla_host *ha)
3306{
3307 struct device_reg __iomem *reg = ha->iobase;
3308 request_t *pkt = NULL;
3309 int cnt;
3310 uint32_t timer;
3311
3312 ENTER("qla1280_req_pkt");
3313
3314 /*
3315 * This can be called from interrupt context, damn it!!!
3316 */
3317 /* Wait for 30 seconds for slot. */
3318 for (timer = 15000000; timer; timer--) {
3319 if (ha->req_q_cnt > 0) {
3320 /* Calculate number of free request entries. */
3321			cnt = RD_REG_WORD(&reg->mailbox4);
3322 if (ha->req_ring_index < cnt)
3323 ha->req_q_cnt = cnt - ha->req_ring_index;
3324 else
3325 ha->req_q_cnt =
3326 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
3327 }
3328
3329 /* Found empty request ring slot? */
3330 if (ha->req_q_cnt > 0) {
3331 ha->req_q_cnt--;
3332 pkt = ha->request_ring_ptr;
3333
3334 /* Zero out packet. */
3335 memset(pkt, 0, REQUEST_ENTRY_SIZE);
3336
3337 /*
3338 * How can this be right when we have a ring
3339 * size of 512???
3340 */
3341 /* Set system defined field. */
3342 pkt->sys_define = (uint8_t) ha->req_ring_index;
3343
3344 /* Set entry count. */
3345 pkt->entry_count = 1;
3346
3347 break;
3348 }
3349
3350 udelay(2); /* 10 */
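		/*
		 * 15,000,000 passes at 2 microseconds each gives the
		 * roughly 30 second budget noted above, plus whatever time
		 * the interrupt poll below consumes.
		 */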
3351
3352 /* Check for pending interrupts. */
3353 qla1280_poll(ha);
3354 }
3355
3356 if (!pkt)
3357 dprintk(2, "qla1280_req_pkt: **** FAILED ****\n");
3358 else
3359 dprintk(3, "qla1280_req_pkt: exiting normally\n");
3360
3361 return pkt;
3362}
3363
3364/*
3365 * qla1280_isp_cmd
3366 * Function is responsible for modifying ISP input pointer.
3367 * Releases ring lock.
3368 *
3369 * Input:
3370 * ha = adapter block pointer.
3371 */
3372static void
3373qla1280_isp_cmd(struct scsi_qla_host *ha)
3374{
3375 struct device_reg __iomem *reg = ha->iobase;
3376
3377 ENTER("qla1280_isp_cmd");
3378
3379 dprintk(5, "qla1280_isp_cmd: IOCB data:\n");
3380 qla1280_dump_buffer(5, (char *)ha->request_ring_ptr,
3381 REQUEST_ENTRY_SIZE);
3382
3383 /* Adjust ring index. */
3384 ha->req_ring_index++;
3385 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3386 ha->req_ring_index = 0;
3387 ha->request_ring_ptr = ha->request_ring;
3388 } else
3389 ha->request_ring_ptr++;
3390
3391 /*
3392 * Update request index to mailbox4 (Request Queue In).
3393 * The mmiowb() ensures that this write is ordered with writes by other
3394 * CPUs. Without the mmiowb(), it is possible for the following:
3395 * CPUA posts write of index 5 to mailbox4
3396 * CPUA releases host lock
3397 * CPUB acquires host lock
3398 * CPUB posts write of index 6 to mailbox4
3399 * On PCI bus, order reverses and write of 6 posts, then index 5,
3400 * causing chip to issue full queue of stale commands
3401 * The mmiowb() prevents future writes from crossing the barrier.
3402 * See Documentation/DocBook/deviceiobook.tmpl for more information.
3403 */
3404	WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
3405 mmiowb();
3406
3407 LEAVE("qla1280_isp_cmd");
3408}
3409
3410/****************************************************************************/
3411/* Interrupt Service Routine. */
3412/****************************************************************************/
3413
3414/****************************************************************************
3415 * qla1280_isr
3416 * Calls I/O done on command completion.
3417 *
3418 * Input:
3419 * ha = adapter block pointer.
3420 * done_q = done queue.
3421 ****************************************************************************/
3422static void
3423qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q)
3424{
3425 struct device_reg __iomem *reg = ha->iobase;
3426 struct response *pkt;
3427 struct srb *sp = NULL;
3428 uint16_t mailbox[MAILBOX_REGISTER_COUNT];
3429 uint16_t *wptr;
3430 uint32_t index;
3431 u16 istatus;
3432
3433 ENTER("qla1280_isr");
3434
3435	istatus = RD_REG_WORD(&reg->istatus);
3436 if (!(istatus & (RISC_INT | PCI_INT)))
3437 return;
3438
3439 /* Save mailbox register 5 */
3440	mailbox[5] = RD_REG_WORD(&reg->mailbox5);
3441
3442 /* Check for mailbox interrupt. */
3443
3444	mailbox[0] = RD_REG_WORD_dmasync(&reg->semaphore);
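	/*
	 * BIT_0 of the semaphore register is set by the RISC when it has
	 * posted mailbox data (a command completion or an asynchronous
	 * event); otherwise only the response queue index has moved.
	 */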
3445
3446 if (mailbox[0] & BIT_0) {
3447 /* Get mailbox data. */
3448 /* dprintk(1, "qla1280_isr: In Get mailbox data \n"); */
3449
3450 wptr = &mailbox[0];
3451		*wptr++ = RD_REG_WORD(&reg->mailbox0);
3452		*wptr++ = RD_REG_WORD(&reg->mailbox1);
3453		*wptr = RD_REG_WORD(&reg->mailbox2);
3454		if (mailbox[0] != MBA_SCSI_COMPLETION) {
3455			wptr++;
3456			*wptr++ = RD_REG_WORD(&reg->mailbox3);
3457			*wptr++ = RD_REG_WORD(&reg->mailbox4);
3458			wptr++;
3459			*wptr++ = RD_REG_WORD(&reg->mailbox6);
3460			*wptr = RD_REG_WORD(&reg->mailbox7);
3461 }
3462
3463 /* Release mailbox registers. */
3464
3465		WRT_REG_WORD(&reg->semaphore, 0);
3466		WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
3467
3468 dprintk(5, "qla1280_isr: mailbox interrupt mailbox[0] = 0x%x",
3469 mailbox[0]);
3470
3471 /* Handle asynchronous event */
3472 switch (mailbox[0]) {
3473 case MBA_SCSI_COMPLETION: /* Response completion */
3474 dprintk(5, "qla1280_isr: mailbox SCSI response "
3475 "completion\n");
3476
3477 if (ha->flags.online) {
3478 /* Get outstanding command index. */
3479 index = mailbox[2] << 16 | mailbox[1];
3480
3481 /* Validate handle. */
3482 if (index < MAX_OUTSTANDING_COMMANDS)
3483 sp = ha->outstanding_cmds[index];
3484 else
3485 sp = NULL;
3486
3487 if (sp) {
3488 /* Free outstanding command slot. */
3489 ha->outstanding_cmds[index] = NULL;
3490
3491 /* Save ISP completion status */
3492 CMD_RESULT(sp->cmd) = 0;
3493 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3494
3495 /* Place block on done queue */
3496 list_add_tail(&sp->list, done_q);
3497 } else {
3498 /*
3499 * If we get here we have a real problem!
3500 */
3501 printk(KERN_WARNING
3502 "qla1280: ISP invalid handle\n");
3503 }
3504 }
3505 break;
3506
3507 case MBA_BUS_RESET: /* SCSI Bus Reset */
3508 ha->flags.reset_marker = 1;
3509 index = mailbox[6] & BIT_0;
3510 ha->bus_settings[index].reset_marker = 1;
3511
3512 printk(KERN_DEBUG "qla1280_isr(): index %i "
3513 "asynchronous BUS_RESET\n", index);
3514 break;
3515
3516 case MBA_SYSTEM_ERR: /* System Error */
3517 printk(KERN_WARNING
3518 "qla1280: ISP System Error - mbx1=%xh, mbx2="
3519 "%xh, mbx3=%xh\n", mailbox[1], mailbox[2],
3520 mailbox[3]);
3521 break;
3522
3523 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
3524 printk(KERN_WARNING
3525 "qla1280: ISP Request Transfer Error\n");
3526 break;
3527
3528 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
3529 printk(KERN_WARNING
3530 "qla1280: ISP Response Transfer Error\n");
3531 break;
3532
3533 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
3534 dprintk(2, "qla1280_isr: asynchronous WAKEUP_THRES\n");
3535 break;
3536
3537 case MBA_TIMEOUT_RESET: /* Execution Timeout Reset */
3538 dprintk(2,
3539 "qla1280_isr: asynchronous TIMEOUT_RESET\n");
3540 break;
3541
3542 case MBA_DEVICE_RESET: /* Bus Device Reset */
3543 printk(KERN_INFO "qla1280_isr(): asynchronous "
3544 "BUS_DEVICE_RESET\n");
3545
3546 ha->flags.reset_marker = 1;
3547 index = mailbox[6] & BIT_0;
3548 ha->bus_settings[index].reset_marker = 1;
3549 break;
3550
3551 case MBA_BUS_MODE_CHANGE:
3552 dprintk(2,
3553 "qla1280_isr: asynchronous BUS_MODE_CHANGE\n");
3554 break;
3555
3556 default:
3557 /* dprintk(1, "qla1280_isr: default case of switch MB \n"); */
3558 if (mailbox[0] < MBA_ASYNC_EVENT) {
3559 wptr = &mailbox[0];
3560 memcpy((uint16_t *) ha->mailbox_out, wptr,
3561 MAILBOX_REGISTER_COUNT *
3562 sizeof(uint16_t));
3563
3564 if(ha->mailbox_wait != NULL)
3565 complete(ha->mailbox_wait);
3566 }
3567 break;
3568 }
3569 } else {
3570		WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
3571 }
3572
3573 /*
3574 * We will receive interrupts during mailbox testing prior to
3575 * the card being marked online, hence the double check.
3576 */
3577 if (!(ha->flags.online && !ha->mailbox_wait)) {
3578 dprintk(2, "qla1280_isr: Response pointer Error\n");
3579 goto out;
3580 }
3581
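	/*
	 * Sanity check the response ring index reported in mailbox[5]; an
	 * out-of-range value would let the loop below walk past the end of
	 * the response ring.
	 */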
3582 if (mailbox[5] >= RESPONSE_ENTRY_CNT)
3583 goto out;
3584
3585 while (ha->rsp_ring_index != mailbox[5]) {
3586 pkt = ha->response_ring_ptr;
3587
3588 dprintk(5, "qla1280_isr: ha->rsp_ring_index = 0x%x, mailbox[5]"
3589 " = 0x%x\n", ha->rsp_ring_index, mailbox[5]);
3590 dprintk(5,"qla1280_isr: response packet data\n");
3591 qla1280_dump_buffer(5, (char *)pkt, RESPONSE_ENTRY_SIZE);
3592
3593 if (pkt->entry_type == STATUS_TYPE) {
3594 if ((le16_to_cpu(pkt->scsi_status) & 0xff)
3595 || pkt->comp_status || pkt->entry_status) {
3596 dprintk(2, "qla1280_isr: ha->rsp_ring_index = "
3597 "0x%x mailbox[5] = 0x%x, comp_status "
3598 "= 0x%x, scsi_status = 0x%x\n",
3599 ha->rsp_ring_index, mailbox[5],
3600 le16_to_cpu(pkt->comp_status),
3601 le16_to_cpu(pkt->scsi_status));
3602 }
3603 } else {
3604 dprintk(2, "qla1280_isr: ha->rsp_ring_index = "
3605 "0x%x, mailbox[5] = 0x%x\n",
3606 ha->rsp_ring_index, mailbox[5]);
3607 dprintk(2, "qla1280_isr: response packet data\n");
3608 qla1280_dump_buffer(2, (char *)pkt,
3609 RESPONSE_ENTRY_SIZE);
3610 }
3611
3612 if (pkt->entry_type == STATUS_TYPE || pkt->entry_status) {
3613 dprintk(2, "status: Cmd %p, handle %i\n",
3614 ha->outstanding_cmds[pkt->handle]->cmd,
3615 pkt->handle);
3616 if (pkt->entry_type == STATUS_TYPE)
3617 qla1280_status_entry(ha, pkt, done_q);
3618 else
3619 qla1280_error_entry(ha, pkt, done_q);
3620 /* Adjust ring index. */
3621 ha->rsp_ring_index++;
3622 if (ha->rsp_ring_index == RESPONSE_ENTRY_CNT) {
3623 ha->rsp_ring_index = 0;
3624 ha->response_ring_ptr = ha->response_ring;
3625 } else
3626 ha->response_ring_ptr++;
3627			WRT_REG_WORD(&reg->mailbox5, ha->rsp_ring_index);
3628 }
3629 }
3630
3631 out:
3632 LEAVE("qla1280_isr");
3633}
3634
3635/*
3636 * qla1280_rst_aen
3637 * Processes asynchronous reset.
3638 *
3639 * Input:
3640 * ha = adapter block pointer.
3641 */
3642static void
3643qla1280_rst_aen(struct scsi_qla_host *ha)
3644{
3645 uint8_t bus;
3646
3647 ENTER("qla1280_rst_aen");
3648
3649 if (ha->flags.online && !ha->flags.reset_active &&
3650 !ha->flags.abort_isp_active) {
3651 ha->flags.reset_active = 1;
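		/*
		 * Keep looping until no bus has a pending reset marker; a new
		 * reset AEN can set reset_marker again while the SYNC-ALL
		 * markers are being issued, which restarts the scan.
		 */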
3652 while (ha->flags.reset_marker) {
3653 /* Issue marker command. */
3654 ha->flags.reset_marker = 0;
3655 for (bus = 0; bus < ha->ports &&
3656 !ha->flags.reset_marker; bus++) {
3657 if (ha->bus_settings[bus].reset_marker) {
3658 ha->bus_settings[bus].reset_marker = 0;
3659 qla1280_marker(ha, bus, 0, 0,
3660 MK_SYNC_ALL);
3661 }
3662 }
3663 }
3664 }
3665
3666 LEAVE("qla1280_rst_aen");
3667}
3668
3669
3670/*
3671 * qla1280_status_entry
3672 * Processes received ISP status entry.
3673 *
3674 * Input:
3675 * ha = adapter block pointer.
3676 * pkt = entry pointer.
3677 * done_q = done queue.
3678 */
3679static void
3680qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
3681 struct list_head *done_q)
3682{
3683 unsigned int bus, target, lun;
3684 int sense_sz;
3685 struct srb *sp;
3686 struct scsi_cmnd *cmd;
3687 uint32_t handle = le32_to_cpu(pkt->handle);
3688 uint16_t scsi_status = le16_to_cpu(pkt->scsi_status);
3689 uint16_t comp_status = le16_to_cpu(pkt->comp_status);
3690
3691 ENTER("qla1280_status_entry");
3692
3693 /* Validate handle. */
3694 if (handle < MAX_OUTSTANDING_COMMANDS)
3695 sp = ha->outstanding_cmds[handle];
3696 else
3697 sp = NULL;
3698
3699 if (!sp) {
3700 printk(KERN_WARNING "qla1280: Status Entry invalid handle\n");
3701 goto out;
3702 }
3703
3704 /* Free outstanding command slot. */
3705 ha->outstanding_cmds[handle] = NULL;
3706
3707 cmd = sp->cmd;
3708
3709 /* Generate LU queue on cntrl, target, LUN */
3710 bus = SCSI_BUS_32(cmd);
3711 target = SCSI_TCN_32(cmd);
3712 lun = SCSI_LUN_32(cmd);
3713
3714 if (comp_status || scsi_status) {
3715 dprintk(3, "scsi: comp_status = 0x%x, scsi_status = "
3716 "0x%x, handle = 0x%x\n", comp_status,
3717 scsi_status, handle);
3718 }
3719
3720 /* Target busy or queue full */
3721 if ((scsi_status & 0xFF) == SAM_STAT_TASK_SET_FULL ||
3722 (scsi_status & 0xFF) == SAM_STAT_BUSY) {
3723 CMD_RESULT(cmd) = scsi_status & 0xff;
3724 } else {
3725
3726 /* Save ISP completion status */
3727 CMD_RESULT(cmd) = qla1280_return_status(pkt, cmd);
3728
3729 if (scsi_status & SAM_STAT_CHECK_CONDITION) {
3730 if (comp_status != CS_ARS_FAILED) {
3731 uint16_t req_sense_length =
3732 le16_to_cpu(pkt->req_sense_length);
3733 if (req_sense_length < CMD_SNSLEN(cmd))
3734 sense_sz = req_sense_length;
3735 else
3736 /*
3737 * scsi_cmnd->sense_buffer is
3738 * 64 bytes, why only copy 63?
3739 * This looks wrong! /Jes
3740 */
3741 sense_sz = CMD_SNSLEN(cmd) - 1;
3742
3743 memcpy(cmd->sense_buffer,
3744 &pkt->req_sense_data, sense_sz);
3745 } else
3746 sense_sz = 0;
3747 memset(cmd->sense_buffer + sense_sz, 0,
3748 SCSI_SENSE_BUFFERSIZE - sense_sz);
3749
3750 dprintk(2, "qla1280_status_entry: Check "
3751 "condition Sense data, b %i, t %i, "
3752 "l %i\n", bus, target, lun);
3753 if (sense_sz)
3754 qla1280_dump_buffer(2,
3755 (char *)cmd->sense_buffer,
3756 sense_sz);
3757 }
3758 }
3759
3760 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3761
3762 /* Place command on done queue. */
3763 list_add_tail(&sp->list, done_q);
3764 out:
3765 LEAVE("qla1280_status_entry");
3766}
3767
3768/*
3769 * qla1280_error_entry
3770 * Processes error entry.
3771 *
3772 * Input:
3773 * ha = adapter block pointer.
3774 * pkt = entry pointer.
3775 * done_q = done queue.
3776 */
3777static void
3778qla1280_error_entry(struct scsi_qla_host *ha, struct response *pkt,
3779 struct list_head *done_q)
3780{
3781 struct srb *sp;
3782 uint32_t handle = le32_to_cpu(pkt->handle);
3783
3784 ENTER("qla1280_error_entry");
3785
3786 if (pkt->entry_status & BIT_3)
3787 dprintk(2, "qla1280_error_entry: BAD PAYLOAD flag error\n");
3788 else if (pkt->entry_status & BIT_2)
3789 dprintk(2, "qla1280_error_entry: BAD HEADER flag error\n");
3790 else if (pkt->entry_status & BIT_1)
3791 dprintk(2, "qla1280_error_entry: FULL flag error\n");
3792 else
3793 dprintk(2, "qla1280_error_entry: UNKNOWN flag error\n");
3794
3795 /* Validate handle. */
3796 if (handle < MAX_OUTSTANDING_COMMANDS)
3797 sp = ha->outstanding_cmds[handle];
3798 else
3799 sp = NULL;
3800
3801 if (sp) {
3802 /* Free outstanding command slot. */
3803 ha->outstanding_cmds[handle] = NULL;
3804
3805 /* Bad payload or header */
3806		if (pkt->entry_status & (BIT_3 | BIT_2)) {
3807 /* Bad payload or header, set error status. */
3808 /* CMD_RESULT(sp->cmd) = CS_BAD_PAYLOAD; */
3809 CMD_RESULT(sp->cmd) = DID_ERROR << 16;
3810 } else if (pkt->entry_status & BIT_1) { /* FULL flag */
3811 CMD_RESULT(sp->cmd) = DID_BUS_BUSY << 16;
3812 } else {
3813 /* Set error status. */
3814 CMD_RESULT(sp->cmd) = DID_ERROR << 16;
3815 }
3816
3817 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3818
3819 /* Place command on done queue. */
3820 list_add_tail(&sp->list, done_q);
3821 }
3822#ifdef QLA_64BIT_PTR
3823 else if (pkt->entry_type == COMMAND_A64_TYPE) {
3824		printk(KERN_WARNING "qla1280: Error Entry invalid handle\n");
3825 }
3826#endif
3827
3828 LEAVE("qla1280_error_entry");
3829}
3830
3831/*
3832 * qla1280_abort_isp
3833 * Resets ISP and aborts all outstanding commands.
3834 *
3835 * Input:
3836 * ha = adapter block pointer.
3837 *
3838 * Returns:
3839 * 0 = success
3840 */
3841static int
3842qla1280_abort_isp(struct scsi_qla_host *ha)
3843{
3844 struct device_reg __iomem *reg = ha->iobase;
3845 struct srb *sp;
3846 int status = 0;
3847 int cnt;
3848 int bus;
3849
3850 ENTER("qla1280_abort_isp");
3851
3852 if (ha->flags.abort_isp_active || !ha->flags.online)
3853 goto out;
3854
3855 ha->flags.abort_isp_active = 1;
3856
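	/*
	 * Recovery sequence: quiesce the RISC, complete every outstanding
	 * command with DID_RESET, reload the firmware, reapply the NVRAM
	 * configuration, reinitialize the rings and reset each SCSI bus.
	 */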
3857 /* Disable ISP interrupts. */
3858 qla1280_disable_intrs(ha);
3859	WRT_REG_WORD(&reg->host_cmd, HC_PAUSE_RISC);
3860	RD_REG_WORD(&reg->id_l);
3861
3862 printk(KERN_INFO "scsi(%li): dequeuing outstanding commands\n",
3863 ha->host_no);
3864 /* Dequeue all commands in outstanding command list. */
3865 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
3866 struct scsi_cmnd *cmd;
3867 sp = ha->outstanding_cmds[cnt];
3868 if (sp) {
3869 cmd = sp->cmd;
3870 CMD_RESULT(cmd) = DID_RESET << 16;
3871 CMD_HANDLE(cmd) = COMPLETED_HANDLE;
3872 ha->outstanding_cmds[cnt] = NULL;
3873 list_add_tail(&sp->list, &ha->done_q);
3874 }
3875 }
3876
3877 qla1280_done(ha);
3878
3879 status = qla1280_load_firmware(ha);
3880 if (status)
3881 goto out;
3882
3883 /* Setup adapter based on NVRAM parameters. */
3884 qla1280_nvram_config (ha);
3885
3886 status = qla1280_init_rings(ha);
3887 if (status)
3888 goto out;
3889
3890 /* Issue SCSI reset. */
3891 for (bus = 0; bus < ha->ports; bus++)
3892 qla1280_bus_reset(ha, bus);
3893
3894 ha->flags.abort_isp_active = 0;
3895 out:
3896 if (status) {
3897 printk(KERN_WARNING
3898		       "qla1280: ISP error recovery failed, board disabled\n");
3899 qla1280_reset_adapter(ha);
3900 dprintk(2, "qla1280_abort_isp: **** FAILED ****\n");
3901 }
3902
3903 LEAVE("qla1280_abort_isp");
3904 return status;
3905}
3906
3907
3908/*
3909 * qla1280_debounce_register
3910 * Debounce register.
3911 *
3912 * Input:
3913 * port = register address.
3914 *
3915 * Returns:
3916 * register value.
3917 */
3918static u16
3919qla1280_debounce_register(volatile u16 __iomem * addr)
3920{
3921 volatile u16 ret;
3922 volatile u16 ret2;
3923
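	/*
	 * The ISP can update the register while it is being read, so only
	 * trust the value once two consecutive reads return the same word.
	 */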
3924 ret = RD_REG_WORD(addr);
3925 ret2 = RD_REG_WORD(addr);
3926
3927 if (ret == ret2)
3928 return ret;
3929
3930 do {
3931 cpu_relax();
3932 ret = RD_REG_WORD(addr);
3933 ret2 = RD_REG_WORD(addr);
3934 } while (ret != ret2);
3935
3936 return ret;
3937}
3938
3939
3940/************************************************************************
3941 * qla1280_check_for_dead_scsi_bus *
3942 * *
3943 * This routine checks for a dead SCSI bus *
3944 ************************************************************************/
3945#define SET_SXP_BANK 0x0100
3946#define SCSI_PHASE_INVALID 0x87FF
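/*
 * The check below pauses the RISC, banks the configuration register over to
 * the SXP registers so the SCSI control pins can be sampled, and then
 * restores the original bank and releases the RISC. A reading of
 * SCSI_PHASE_INVALID is taken to mean the bus is dead.
 */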
3947static int
3948qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *ha, unsigned int bus)
3949{
3950 uint16_t config_reg, scsi_control;
3951 struct device_reg __iomem *reg = ha->iobase;
3952
3953 if (ha->bus_settings[bus].scsi_bus_dead) {
3954		WRT_REG_WORD(&reg->host_cmd, HC_PAUSE_RISC);
3955		config_reg = RD_REG_WORD(&reg->cfg_1);
3956		WRT_REG_WORD(&reg->cfg_1, SET_SXP_BANK);
3957		scsi_control = RD_REG_WORD(&reg->scsiControlPins);
3958		WRT_REG_WORD(&reg->cfg_1, config_reg);
3959		WRT_REG_WORD(&reg->host_cmd, HC_RELEASE_RISC);
3960
3961 if (scsi_control == SCSI_PHASE_INVALID) {
3962 ha->bus_settings[bus].scsi_bus_dead = 1;
3963 return 1; /* bus is dead */
3964 } else {
3965 ha->bus_settings[bus].scsi_bus_dead = 0;
3966 ha->bus_settings[bus].failed_reset_count = 0;
3967 }
3968 }
3969 return 0; /* bus is not dead */
3970}
3971
3972static void
3973qla1280_get_target_parameters(struct scsi_qla_host *ha,
3974 struct scsi_device *device)
3975{
3976 uint16_t mb[MAILBOX_REGISTER_COUNT];
3977 int bus, target, lun;
3978
3979 bus = device->channel;
3980 target = device->id;
3981 lun = device->lun;
3982
3983
3984 mb[0] = MBC_GET_TARGET_PARAMETERS;
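	/*
	 * MBC_GET_TARGET_PARAMETERS expects the target id in the high byte
	 * of mailbox 1; BIT_7 appears to select the second SCSI bus on
	 * dual-bus boards.
	 */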
3985 mb[1] = (uint16_t) (bus ? target | BIT_7 : target);
3986 mb[1] <<= 8;
3987 qla1280_mailbox_command(ha, BIT_6 | BIT_3 | BIT_2 | BIT_1 | BIT_0,
3988 &mb[0]);
3989
3990 printk(KERN_INFO "scsi(%li:%d:%d:%d):", ha->host_no, bus, target, lun);
3991
3992 if (mb[3] != 0) {
3993 printk(" Sync: period %d, offset %d",
3994 (mb[3] & 0xff), (mb[3] >> 8));
3995 if (mb[2] & BIT_13)
3996 printk(", Wide");
3997 if ((mb[2] & BIT_5) && ((mb[6] >> 8) & 0xff) >= 2)
3998 printk(", DT");
3999 } else
4000 printk(" Async");
4001
4002 if (device->simple_tags)
4003 printk(", Tagged queuing: depth %d", device->queue_depth);
4004 printk("\n");
4005}
4006
4007
4008#if DEBUG_QLA1280
4009static void
4010__qla1280_dump_buffer(char *b, int size)
4011{
4012 int cnt;
4013 u8 c;
4014
4015 printk(KERN_DEBUG " 0 1 2 3 4 5 6 7 8 9 Ah "
4016 "Bh Ch Dh Eh Fh\n");
4017 printk(KERN_DEBUG "---------------------------------------------"
4018 "------------------\n");
4019
4020 for (cnt = 0; cnt < size;) {
4021 c = *b++;
4022
4023 printk("0x%02x", c);
4024 cnt++;
4025 if (!(cnt % 16))
4026 printk("\n");
4027 else
4028 printk(" ");
4029 }
4030 if (cnt % 16)
4031 printk("\n");
4032}
4033
4034/**************************************************************************
4035 * ql1280_print_scsi_cmd
4036 *
4037 **************************************************************************/
4038static void
4039__qla1280_print_scsi_cmd(struct scsi_cmnd *cmd)
4040{
4041 struct scsi_qla_host *ha;
4042 struct Scsi_Host *host = CMD_HOST(cmd);
4043 struct srb *sp;
4044 /* struct scatterlist *sg; */
4045
4046 int i;
4047 ha = (struct scsi_qla_host *)host->hostdata;
4048
4049 sp = (struct srb *)CMD_SP(cmd);
4050 printk("SCSI Command @= 0x%p, Handle=0x%p\n", cmd, CMD_HANDLE(cmd));
4051 printk(" chan=%d, target = 0x%02x, lun = 0x%02x, cmd_len = 0x%02x\n",
4052 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd),
4053 CMD_CDBLEN(cmd));
4054 printk(" CDB = ");
4055 for (i = 0; i < cmd->cmd_len; i++) {
4056 printk("0x%02x ", cmd->cmnd[i]);
4057 }
4058 printk(" seg_cnt =%d\n", scsi_sg_count(cmd));
4059 printk(" request buffer=0x%p, request buffer len=0x%x\n",
4060 scsi_sglist(cmd), scsi_bufflen(cmd));
4061 /* if (cmd->use_sg)
4062 {
4063 sg = (struct scatterlist *) cmd->request_buffer;
4064 printk(" SG buffer: \n");
4065 qla1280_dump_buffer(1, (char *)sg, (cmd->use_sg*sizeof(struct scatterlist)));
4066 } */
4067 printk(" tag=%d, transfersize=0x%x \n",
4068 cmd->tag, cmd->transfersize);
4069 printk(" SP=0x%p\n", CMD_SP(cmd));
4070 printk(" underflow size = 0x%x, direction=0x%x\n",
4071 cmd->underflow, cmd->sc_data_direction);
4072}
4073
4074/**************************************************************************
4075 * ql1280_dump_device
4076 *
4077 **************************************************************************/
4078static void
4079ql1280_dump_device(struct scsi_qla_host *ha)
4080{
4081
4082 struct scsi_cmnd *cp;
4083 struct srb *sp;
4084 int i;
4085
4086 printk(KERN_DEBUG "Outstanding Commands on controller:\n");
4087
4088 for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
4089 if ((sp = ha->outstanding_cmds[i]) == NULL)
4090 continue;
4091 if ((cp = sp->cmd) == NULL)
4092 continue;
4093 qla1280_print_scsi_cmd(1, cp);
4094 }
4095}
4096#endif
4097
4098
4099enum tokens {
4100 TOKEN_NVRAM,
4101 TOKEN_SYNC,
4102 TOKEN_WIDE,
4103 TOKEN_PPR,
4104 TOKEN_VERBOSE,
4105 TOKEN_DEBUG,
4106};
4107
4108struct setup_tokens {
4109 char *token;
4110 int val;
4111};
4112
4113static struct setup_tokens setup_token[] __initdata =
4114{
4115 { "nvram", TOKEN_NVRAM },
4116 { "sync", TOKEN_SYNC },
4117 { "wide", TOKEN_WIDE },
4118 { "ppr", TOKEN_PPR },
4119 { "verbose", TOKEN_VERBOSE },
4120 { "debug", TOKEN_DEBUG },
4121};
4122
4123
4124/**************************************************************************
4125 * qla1280_setup
4126 *
4127 * Handle boot parameters. This really needs to be changed so one
4128 * can specify per adapter parameters.
4129 **************************************************************************/
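/*
 * Options are ';'-separated "token:value" pairs, where the value may be
 * "yes", "no" or a number. A purely illustrative command line (the values
 * below are examples, not recommendations) would be:
 *
 *	qla1280=verbose:1;nvram:no;sync:0x0f
 */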
4130static int __init
4131qla1280_setup(char *s)
4132{
4133 char *cp, *ptr;
4134 unsigned long val;
4135 int toke;
4136
4137 cp = s;
4138
4139 while (cp && (ptr = strchr(cp, ':'))) {
4140 ptr++;
4141		if (!strncmp(ptr, "yes", 3)) {
4142			val = 0x10000;
4143			ptr += 3;
4144		} else if (!strncmp(ptr, "no", 2)) {
4145 val = 0;
4146 ptr += 2;
4147 } else
4148 val = simple_strtoul(ptr, &ptr, 0);
4149
4150 switch ((toke = qla1280_get_token(cp))) {
4151 case TOKEN_NVRAM:
4152 if (!val)
4153 driver_setup.no_nvram = 1;
4154 break;
4155 case TOKEN_SYNC:
4156 if (!val)
4157 driver_setup.no_sync = 1;
4158 else if (val != 0x10000)
4159 driver_setup.sync_mask = val;
4160 break;
4161 case TOKEN_WIDE:
4162 if (!val)
4163 driver_setup.no_wide = 1;
4164 else if (val != 0x10000)
4165 driver_setup.wide_mask = val;
4166 break;
4167 case TOKEN_PPR:
4168 if (!val)
4169 driver_setup.no_ppr = 1;
4170 else if (val != 0x10000)
4171 driver_setup.ppr_mask = val;
4172 break;
4173 case TOKEN_VERBOSE:
4174 qla1280_verbose = val;
4175 break;
4176 default:
4177 printk(KERN_INFO "qla1280: unknown boot option %s\n",
4178 cp);
4179 }
4180
4181 cp = strchr(ptr, ';');
4182 if (cp)
4183 cp++;
4184 else {
4185 break;
4186 }
4187 }
4188 return 1;
4189}
4190
4191
4192static int __init
4193qla1280_get_token(char *str)
4194{
4195 char *sep;
4196 long ret = -1;
4197 int i;
4198
4199 sep = strchr(str, ':');
4200
4201 if (sep) {
4202 for (i = 0; i < ARRAY_SIZE(setup_token); i++) {
4203 if (!strncmp(setup_token[i].token, str, (sep - str))) {
4204 ret = setup_token[i].val;
4205 break;
4206 }
4207 }
4208 }
4209
4210 return ret;
4211}
4212
4213
4214static struct scsi_host_template qla1280_driver_template = {
4215 .module = THIS_MODULE,
4216 .proc_name = "qla1280",
4217 .name = "Qlogic ISP 1280/12160",
4218 .info = qla1280_info,
4219 .slave_configure = qla1280_slave_configure,
4220 .queuecommand = qla1280_queuecommand,
4221 .eh_abort_handler = qla1280_eh_abort,
4222 .eh_device_reset_handler= qla1280_eh_device_reset,
4223 .eh_bus_reset_handler = qla1280_eh_bus_reset,
4224 .eh_host_reset_handler = qla1280_eh_adapter_reset,
4225 .bios_param = qla1280_biosparam,
4226 .can_queue = 0xfffff,
4227 .this_id = -1,
4228 .sg_tablesize = SG_ALL,
4229 .cmd_per_lun = 1,
4230 .use_clustering = ENABLE_CLUSTERING,
4231};
4232
4233
4234static int __devinit
4235qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4236{
4237 int devnum = id->driver_data;
4238 struct qla_boards *bdp = &ql1280_board_tbl[devnum];
4239 struct Scsi_Host *host;
4240 struct scsi_qla_host *ha;
4241 int error = -ENODEV;
4242
4243 /* Bypass all AMI SUBSYS VENDOR IDs */
4244 if (pdev->subsystem_vendor == PCI_VENDOR_ID_AMI) {
4245 printk(KERN_INFO
4246 "qla1280: Skipping AMI SubSys Vendor ID Chip\n");
4247 goto error;
4248 }
4249
4250 printk(KERN_INFO "qla1280: %s found on PCI bus %i, dev %i\n",
4251 bdp->name, pdev->bus->number, PCI_SLOT(pdev->devfn));
4252
4253 if (pci_enable_device(pdev)) {
4254 printk(KERN_WARNING
4255		       "qla1280: Failed to enable PCI device, aborting.\n");
4256 goto error;
4257 }
4258
4259 pci_set_master(pdev);
4260
4261 error = -ENOMEM;
4262 host = scsi_host_alloc(&qla1280_driver_template, sizeof(*ha));
4263 if (!host) {
4264 printk(KERN_WARNING
4265 "qla1280: Failed to register host, aborting.\n");
4266 goto error_disable_device;
4267 }
4268
4269 ha = (struct scsi_qla_host *)host->hostdata;
4270 memset(ha, 0, sizeof(struct scsi_qla_host));
4271
4272 ha->pdev = pdev;
4273 ha->devnum = devnum; /* specifies microcode load address */
4274
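	/*
	 * Prefer 64-bit DMA addressing when built with QLA_64BIT_PTR and
	 * fall back to a 32-bit mask if the platform cannot provide it.
	 */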
4275#ifdef QLA_64BIT_PTR
4276 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
4277 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32))) {
4278 printk(KERN_WARNING "scsi(%li): Unable to set a "
4279 "suitable DMA mask - aborting\n", ha->host_no);
4280 error = -ENODEV;
4281 goto error_put_host;
4282 }
4283 } else
4284 dprintk(2, "scsi(%li): 64 Bit PCI Addressing Enabled\n",
4285 ha->host_no);
4286#else
4287 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32))) {
4288 printk(KERN_WARNING "scsi(%li): Unable to set a "
4289 "suitable DMA mask - aborting\n", ha->host_no);
4290 error = -ENODEV;
4291 goto error_put_host;
4292 }
4293#endif
4294
4295 ha->request_ring = pci_alloc_consistent(ha->pdev,
4296 ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
4297 &ha->request_dma);
4298 if (!ha->request_ring) {
4299 printk(KERN_INFO "qla1280: Failed to get request memory\n");
4300 goto error_put_host;
4301 }
4302
4303 ha->response_ring = pci_alloc_consistent(ha->pdev,
4304 ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
4305 &ha->response_dma);
4306 if (!ha->response_ring) {
4307 printk(KERN_INFO "qla1280: Failed to get response memory\n");
4308 goto error_free_request_ring;
4309 }
4310
4311 ha->ports = bdp->numPorts;
4312
4313 ha->host = host;
4314 ha->host_no = host->host_no;
4315
4316 host->irq = pdev->irq;
4317 host->max_channel = bdp->numPorts - 1;
4318 host->max_lun = MAX_LUNS - 1;
4319 host->max_id = MAX_TARGETS;
4320 host->max_sectors = 1024;
4321 host->unique_id = host->host_no;
4322
4323 error = -ENODEV;
4324
4325#if MEMORY_MAPPED_IO
4326 ha->mmpbase = pci_ioremap_bar(ha->pdev, 1);
4327 if (!ha->mmpbase) {
4328 printk(KERN_INFO "qla1280: Unable to map I/O memory\n");
4329 goto error_free_response_ring;
4330 }
4331
4332 host->base = (unsigned long)ha->mmpbase;
4333 ha->iobase = (struct device_reg __iomem *)ha->mmpbase;
4334#else
4335 host->io_port = pci_resource_start(ha->pdev, 0);
4336 if (!request_region(host->io_port, 0xff, "qla1280")) {
4337 printk(KERN_INFO "qla1280: Failed to reserve i/o region "
4338 "0x%04lx-0x%04lx - already in use\n",
4339 host->io_port, host->io_port + 0xff);
4340 goto error_free_response_ring;
4341 }
4342
4343 ha->iobase = (struct device_reg *)host->io_port;
4344#endif
4345
4346 INIT_LIST_HEAD(&ha->done_q);
4347
4348 /* Disable ISP interrupts. */
4349 qla1280_disable_intrs(ha);
4350
4351 if (request_irq(pdev->irq, qla1280_intr_handler, IRQF_SHARED,
4352 "qla1280", ha)) {
4353		printk(KERN_WARNING "qla1280: Failed to reserve interrupt %d, "
4354		       "already in use\n", pdev->irq);
4355 goto error_release_region;
4356 }
4357
4358	/* load the F/W, read parameters, and init the H/W */
4359 if (qla1280_initialize_adapter(ha)) {
4360 printk(KERN_INFO "qla1x160: Failed to initialize adapter\n");
4361 goto error_free_irq;
4362 }
4363
4364 /* set our host ID (need to do something about our two IDs) */
4365 host->this_id = ha->bus_settings[0].id;
4366
4367 pci_set_drvdata(pdev, host);
4368
4369 error = scsi_add_host(host, &pdev->dev);
4370 if (error)
4371 goto error_disable_adapter;
4372 scsi_scan_host(host);
4373
4374 return 0;
4375
4376 error_disable_adapter:
4377 qla1280_disable_intrs(ha);
4378 error_free_irq:
4379 free_irq(pdev->irq, ha);
4380 error_release_region:
4381#if MEMORY_MAPPED_IO
4382 iounmap(ha->mmpbase);
4383#else
4384 release_region(host->io_port, 0xff);
4385#endif
4386 error_free_response_ring:
4387 pci_free_consistent(ha->pdev,
4388 ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
4389 ha->response_ring, ha->response_dma);
4390 error_free_request_ring:
4391 pci_free_consistent(ha->pdev,
4392 ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
4393 ha->request_ring, ha->request_dma);
4394 error_put_host:
4395 scsi_host_put(host);
4396 error_disable_device:
4397 pci_disable_device(pdev);
4398 error:
4399 return error;
4400}
4401
4402
4403static void __devexit
4404qla1280_remove_one(struct pci_dev *pdev)
4405{
4406 struct Scsi_Host *host = pci_get_drvdata(pdev);
4407 struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
4408
4409 scsi_remove_host(host);
4410
4411 qla1280_disable_intrs(ha);
4412
4413 free_irq(pdev->irq, ha);
4414
4415#if MEMORY_MAPPED_IO
4416 iounmap(ha->mmpbase);
4417#else
4418 release_region(host->io_port, 0xff);
4419#endif
4420
4421 pci_free_consistent(ha->pdev,
4422 ((REQUEST_ENTRY_CNT + 1) * (sizeof(request_t))),
4423 ha->request_ring, ha->request_dma);
4424 pci_free_consistent(ha->pdev,
4425 ((RESPONSE_ENTRY_CNT + 1) * (sizeof(struct response))),
4426 ha->response_ring, ha->response_dma);
4427
4428 pci_disable_device(pdev);
4429
4430 scsi_host_put(host);
4431}
4432
4433static struct pci_driver qla1280_pci_driver = {
4434 .name = "qla1280",
4435 .id_table = qla1280_pci_tbl,
4436 .probe = qla1280_probe_one,
4437 .remove = __devexit_p(qla1280_remove_one),
4438};
4439
4440static int __init
4441qla1280_init(void)
4442{
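	/*
	 * struct srb is overlaid on the scsi_pointer embedded in each
	 * scsi_cmnd (see CMD_SP), so it must never outgrow that structure.
	 */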
4443 if (sizeof(struct srb) > sizeof(struct scsi_pointer)) {
4444 printk(KERN_WARNING
4445 "qla1280: struct srb too big, aborting\n");
4446 return -EINVAL;
4447 }
4448
4449#ifdef MODULE
4450 /*
4451 * If we are called as a module, the qla1280 pointer may not be null
4452 * and it would point to our bootup string, just like on the lilo
4453 * command line. IF not NULL, then process this config string with
4454 * qla1280_setup
4455 *
4456 * Boot time Options
4457 * To add options at boot time add a line to your lilo.conf file like:
4458 * append="qla1280=verbose,max_tags:{{255,255,255,255},{255,255,255,255}}"
4459 * which will result in the first four devices on the first two
4460	 * controllers being set to a tagged queue depth of 255.
4461 */
4462 if (qla1280)
4463 qla1280_setup(qla1280);
4464#endif
4465
4466 return pci_register_driver(&qla1280_pci_driver);
4467}
4468
4469static void __exit
4470qla1280_exit(void)
4471{
4472 int i;
4473
4474 pci_unregister_driver(&qla1280_pci_driver);
4475 /* release any allocated firmware images */
4476 for (i = 0; i < QL_NUM_FW_IMAGES; i++) {
4477 if (qla1280_fw_tbl[i].fw) {
4478 release_firmware(qla1280_fw_tbl[i].fw);
4479 qla1280_fw_tbl[i].fw = NULL;
4480 }
4481 }
4482}
4483
4484module_init(qla1280_init);
4485module_exit(qla1280_exit);
4486
4487
4488MODULE_AUTHOR("Qlogic & Jes Sorensen");
4489MODULE_DESCRIPTION("Qlogic ISP SCSI (qla1x80/qla1x160) driver");
4490MODULE_LICENSE("GPL");
4491MODULE_FIRMWARE("qlogic/1040.bin");
4492MODULE_FIRMWARE("qlogic/1280.bin");
4493MODULE_FIRMWARE("qlogic/12160.bin");
4494MODULE_VERSION(QLA1280_VERSION);
4495
4496/*
4497 * Overrides for Emacs so that we almost follow Linus's tabbing style.
4498 * Emacs will notice this stuff at the end of the file and automatically
4499 * adjust the settings for this buffer only. This must remain at the end
4500 * of the file.
4501 * ---------------------------------------------------------------------------
4502 * Local variables:
4503 * c-basic-offset: 8
4504 * tab-width: 8
4505 * End:
4506 */
1/******************************************************************************
2* QLOGIC LINUX SOFTWARE
3*
4* QLogic QLA1280 (Ultra2) and QLA12160 (Ultra3) SCSI driver
5* Copyright (C) 2000 Qlogic Corporation (www.qlogic.com)
6* Copyright (C) 2001-2004 Jes Sorensen, Wild Open Source Inc.
7* Copyright (C) 2003-2004 Christoph Hellwig
8*
9* This program is free software; you can redistribute it and/or modify it
10* under the terms of the GNU General Public License as published by the
11* Free Software Foundation; either version 2, or (at your option) any
12* later version.
13*
14* This program is distributed in the hope that it will be useful, but
15* WITHOUT ANY WARRANTY; without even the implied warranty of
16* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17* General Public License for more details.
18*
19******************************************************************************/
20#define QLA1280_VERSION "3.27.1"
21/*****************************************************************************
22 Revision History:
23 Rev 3.27.1, February 8, 2010, Michael Reed
24 - Retain firmware image for error recovery.
25 Rev 3.27, February 10, 2009, Michael Reed
26 - General code cleanup.
27 - Improve error recovery.
28 Rev 3.26, January 16, 2006 Jes Sorensen
29 - Ditch all < 2.6 support
30 Rev 3.25.1, February 10, 2005 Christoph Hellwig
31 - use pci_map_single to map non-S/G requests
32 - remove qla1280_proc_info
33 Rev 3.25, September 28, 2004, Christoph Hellwig
34 - add support for ISP1020/1040
35 - don't include "scsi.h" anymore for 2.6.x
36 Rev 3.24.4 June 7, 2004 Christoph Hellwig
37 - restructure firmware loading, cleanup initialization code
38 - prepare support for ISP1020/1040 chips
39 Rev 3.24.3 January 19, 2004, Jes Sorensen
40 - Handle PCI DMA mask settings correctly
41 - Correct order of error handling in probe_one, free_irq should not
42 be called if request_irq failed
43 Rev 3.24.2 January 19, 2004, James Bottomley & Andrew Vasquez
44 - Big endian fixes (James)
45 - Remove bogus IOCB content on zero data transfer commands (Andrew)
46 Rev 3.24.1 January 5, 2004, Jes Sorensen
47 - Initialize completion queue to avoid OOPS on probe
48 - Handle interrupts during mailbox testing
49 Rev 3.24 November 17, 2003, Christoph Hellwig
50 - use struct list_head for completion queue
51 - avoid old Scsi_FOO typedefs
52 - cleanup 2.4 compat glue a bit
53 - use <scsi/scsi_*.h> headers on 2.6 instead of "scsi.h"
54 - make initialization for memory mapped vs port I/O more similar
55 - remove broken pci config space manipulation
56 - kill more cruft
57 - this is an almost perfect 2.6 scsi driver now! ;)
58 Rev 3.23.39 December 17, 2003, Jes Sorensen
59 - Delete completion queue from srb if mailbox command failed to
60 to avoid qla1280_done completeting qla1280_error_action's
61 obsolete context
62 - Reduce arguments for qla1280_done
63 Rev 3.23.38 October 18, 2003, Christoph Hellwig
64 - Convert to new-style hotplugable driver for 2.6
65 - Fix missing scsi_unregister/scsi_host_put on HBA removal
66 - Kill some more cruft
67 Rev 3.23.37 October 1, 2003, Jes Sorensen
68 - Make MMIO depend on CONFIG_X86_VISWS instead of yet another
69 random CONFIG option
70 - Clean up locking in probe path
71 Rev 3.23.36 October 1, 2003, Christoph Hellwig
72 - queuecommand only ever receives new commands - clear flags
73 - Reintegrate lost fixes from Linux 2.5
74 Rev 3.23.35 August 14, 2003, Jes Sorensen
75 - Build against 2.6
76 Rev 3.23.34 July 23, 2003, Jes Sorensen
77 - Remove pointless TRUE/FALSE macros
78 - Clean up vchan handling
79 Rev 3.23.33 July 3, 2003, Jes Sorensen
80 - Don't define register access macros before define determining MMIO.
81 This just happened to work out on ia64 but not elsewhere.
82 - Don't try and read from the card while it is in reset as
83 it won't respond and causes an MCA
84 Rev 3.23.32 June 23, 2003, Jes Sorensen
85 - Basic support for boot time arguments
86 Rev 3.23.31 June 8, 2003, Jes Sorensen
87 - Reduce boot time messages
88 Rev 3.23.30 June 6, 2003, Jes Sorensen
89 - Do not enable sync/wide/ppr before it has been determined
90 that the target device actually supports it
91 - Enable DMA arbitration for multi channel controllers
92 Rev 3.23.29 June 3, 2003, Jes Sorensen
93 - Port to 2.5.69
94 Rev 3.23.28 June 3, 2003, Jes Sorensen
95 - Eliminate duplicate marker commands on bus resets
96 - Handle outstanding commands appropriately on bus/device resets
97 Rev 3.23.27 May 28, 2003, Jes Sorensen
98 - Remove bogus input queue code, let the Linux SCSI layer do the work
99 - Clean up NVRAM handling, only read it once from the card
100 - Add a number of missing default nvram parameters
101 Rev 3.23.26 Beta May 28, 2003, Jes Sorensen
102 - Use completion queue for mailbox commands instead of busy wait
103 Rev 3.23.25 Beta May 27, 2003, James Bottomley
104 - Migrate to use new error handling code
105 Rev 3.23.24 Beta May 21, 2003, James Bottomley
106 - Big endian support
107 - Cleanup data direction code
108 Rev 3.23.23 Beta May 12, 2003, Jes Sorensen
109 - Switch to using MMIO instead of PIO
110 Rev 3.23.22 Beta April 15, 2003, Jes Sorensen
111 - Fix PCI parity problem with 12160 during reset.
112 Rev 3.23.21 Beta April 14, 2003, Jes Sorensen
113 - Use pci_map_page()/pci_unmap_page() instead of map_single version.
114 Rev 3.23.20 Beta April 9, 2003, Jes Sorensen
115 - Remove < 2.4.x support
116 - Introduce HOST_LOCK to make the spin lock changes portable.
117 - Remove a bunch of idiotic and unnecessary typedef's
118 - Kill all leftovers of target-mode support which never worked anyway
119 Rev 3.23.19 Beta April 11, 2002, Linus Torvalds
120 - Do qla1280_pci_config() before calling request_irq() and
121 request_region()
122 - Use pci_dma_hi32() to handle upper word of DMA addresses instead
123 of large shifts
124 - Hand correct arguments to free_irq() in case of failure
125 Rev 3.23.18 Beta April 11, 2002, Jes Sorensen
126 - Run source through Lindent and clean up the output
127 Rev 3.23.17 Beta April 11, 2002, Jes Sorensen
128 - Update SCSI firmware to qla1280 v8.15.00 and qla12160 v10.04.32
129 Rev 3.23.16 Beta March 19, 2002, Jes Sorensen
130 - Rely on mailbox commands generating interrupts - do not
131 run qla1280_isr() from ql1280_mailbox_command()
132 - Remove device_reg_t
133 - Integrate ql12160_set_target_parameters() with 1280 version
134 - Make qla1280_setup() non static
135 - Do not call qla1280_check_for_dead_scsi_bus() on every I/O request
136 sent to the card - this command pauses the firmware!!!
137 Rev 3.23.15 Beta March 19, 2002, Jes Sorensen
138 - Clean up qla1280.h - remove obsolete QL_DEBUG_LEVEL_x definitions
139 - Remove a pile of pointless and confusing (srb_t **) and
140 (scsi_lu_t *) typecasts
141 - Explicit mark that we do not use the new error handling (for now)
142 - Remove scsi_qla_host_t and use 'struct' instead
143 - Remove in_abort, watchdog_enabled, dpc, dpc_sched, bios_enabled,
144 pci_64bit_slot flags which weren't used for anything anyway
145 - Grab host->host_lock while calling qla1280_isr() from abort()
146 - Use spin_lock()/spin_unlock() in qla1280_intr_handler() - we
147 do not need to save/restore flags in the interrupt handler
148 - Enable interrupts early (before any mailbox access) in preparation
149 for cleaning up the mailbox handling
150 Rev 3.23.14 Beta March 14, 2002, Jes Sorensen
151 - Further cleanups. Remove all trace of QL_DEBUG_LEVEL_x and replace
152 it with proper use of dprintk().
153 - Make qla1280_print_scsi_cmd() and qla1280_dump_buffer() both take
154 a debug level argument to determine if data is to be printed
155 - Add KERN_* info to printk()
156 Rev 3.23.13 Beta March 14, 2002, Jes Sorensen
157 - Significant cosmetic cleanups
158 - Change debug code to use dprintk() and remove #if mess
159 Rev 3.23.12 Beta March 13, 2002, Jes Sorensen
160 - More cosmetic cleanups, fix places treating return as function
161 - use cpu_relax() in qla1280_debounce_register()
162 Rev 3.23.11 Beta March 13, 2002, Jes Sorensen
163 - Make it compile under 2.5.5
164 Rev 3.23.10 Beta October 1, 2001, Jes Sorensen
165 - Do no typecast short * to long * in QL1280BoardTbl, this
166 broke miserably on big endian boxes
167 Rev 3.23.9 Beta September 30, 2001, Jes Sorensen
168 - Remove pre 2.2 hack for checking for reentrance in interrupt handler
169 - Make data types used to receive from SCSI_{BUS,TCN,LUN}_32
170 unsigned int to match the types from struct scsi_cmnd
171 Rev 3.23.8 Beta September 29, 2001, Jes Sorensen
172 - Remove bogus timer_t typedef from qla1280.h
173 - Remove obsolete pre 2.2 PCI setup code, use proper #define's
174 for PCI_ values, call pci_set_master()
175 - Fix memleak of qla1280_buffer on module unload
176 - Only compile module parsing code #ifdef MODULE - should be
177 changed to use individual MODULE_PARM's later
178 - Remove dummy_buffer that was never modified nor printed
179 - ENTER()/LEAVE() are noops unless QL_DEBUG_LEVEL_3, hence remove
180 #ifdef QL_DEBUG_LEVEL_3/#endif around ENTER()/LEAVE() calls
181 - Remove \r from print statements, this is Linux, not DOS
182 - Remove obsolete QLA1280_{SCSILU,INTR,RING}_{LOCK,UNLOCK}
183 dummy macros
184 - Remove C++ compile hack in header file as Linux driver are not
185 supposed to be compiled as C++
186 - Kill MS_64BITS macro as it makes the code more readable
187 - Remove unnecessary flags.in_interrupts bit
188 Rev 3.23.7 Beta August 20, 2001, Jes Sorensen
189 - Dont' check for set flags on q->q_flag one by one in qla1280_next()
190 - Check whether the interrupt was generated by the QLA1280 before
191 doing any processing
192 - qla1280_status_entry(): Only zero out part of sense_buffer that
193 is not being copied into
194 - Remove more superflouous typecasts
195 - qla1280_32bit_start_scsi() replace home-brew memcpy() with memcpy()
196 Rev 3.23.6 Beta August 20, 2001, Tony Luck, Intel
197 - Don't walk the entire list in qla1280_putq_t() just to directly
198 grab the pointer to the last element afterwards
199 Rev 3.23.5 Beta August 9, 2001, Jes Sorensen
200 - Don't use IRQF_DISABLED, it's use is deprecated for this kinda driver
201 Rev 3.23.4 Beta August 8, 2001, Jes Sorensen
202 - Set dev->max_sectors to 1024
203 Rev 3.23.3 Beta August 6, 2001, Jes Sorensen
204 - Provide compat macros for pci_enable_device(), pci_find_subsys()
205 and scsi_set_pci_device()
206 - Call scsi_set_pci_device() for all devices
207 - Reduce size of kernel version dependent device probe code
208 - Move duplicate probe/init code to separate function
209 - Handle error if qla1280_mem_alloc() fails
210 - Kill OFFSET() macro and use Linux's PCI definitions instead
211 - Kill private structure defining PCI config space (struct config_reg)
212 - Only allocate I/O port region if not in MMIO mode
213 - Remove duplicate (unused) sanity check of sife of srb_t
214 Rev 3.23.2 Beta August 6, 2001, Jes Sorensen
215 - Change home-brew memset() implementations to use memset()
216 - Remove all references to COMTRACE() - accessing a PC's COM2 serial
217 port directly is not legal under Linux.
218 Rev 3.23.1 Beta April 24, 2001, Jes Sorensen
219 - Remove pre 2.2 kernel support
220 - clean up 64 bit DMA setting to use 2.4 API (provide backwards compat)
221 - Fix MMIO access to use readl/writel instead of directly
222 dereferencing pointers
223 - Nuke MSDOS debugging code
224 - Change true/false data types to int from uint8_t
225 - Use int for counters instead of uint8_t etc.
226 - Clean up size & byte order conversion macro usage
227 Rev 3.23 Beta January 11, 2001 BN Qlogic
228 - Added check of device_id when handling non
229 QLA12160s during detect().
230 Rev 3.22 Beta January 5, 2001 BN Qlogic
231 - Changed queue_task() to schedule_task()
232 for kernels 2.4.0 and higher.
233 Note: 2.4.0-testxx kernels released prior to
234 the actual 2.4.0 kernel release on January 2001
235 will get compile/link errors with schedule_task().
236 Please update your kernel to released 2.4.0 level,
237 or comment lines in this file flagged with 3.22
238 to resolve compile/link error of schedule_task().
239 - Added -DCONFIG_SMP in addition to -D__SMP__
240 in Makefile for 2.4.0 builds of driver as module.
241 Rev 3.21 Beta January 4, 2001 BN Qlogic
242 - Changed criteria of 64/32 Bit mode of HBA
243 operation according to BITS_PER_LONG rather
244 than HBA's NVRAM setting of >4Gig memory bit;
245 so that the HBA auto-configures without the need
246 to setup each system individually.
247 Rev 3.20 Beta December 5, 2000 BN Qlogic
248 - Added priority handling to IA-64 onboard SCSI
249 ISP12160 chip for kernels greater than 2.3.18.
250 - Added irqrestore for qla1280_intr_handler.
251 - Enabled /proc/scsi/qla1280 interface.
252 - Clear /proc/scsi/qla1280 counters in detect().
253 Rev 3.19 Beta October 13, 2000 BN Qlogic
254 - Declare driver_template for new kernel
255 (2.4.0 and greater) scsi initialization scheme.
256 - Update /proc/scsi entry for 2.3.18 kernels and
257 above as qla1280
258 Rev 3.18 Beta October 10, 2000 BN Qlogic
259 - Changed scan order of adapters to map
260 the QLA12160 followed by the QLA1280.
261 Rev 3.17 Beta September 18, 2000 BN Qlogic
262 - Removed warnings for 32 bit 2.4.x compiles
263 - Corrected declared size for request and response
264 DMA addresses that are kept in each ha
265 Rev. 3.16 Beta August 25, 2000 BN Qlogic
266 - Corrected 64 bit addressing issue on IA-64
267 where the upper 32 bits were not properly
268 passed to the RISC engine.
269 Rev. 3.15 Beta August 22, 2000 BN Qlogic
270 - Modified qla1280_setup_chip to properly load
271 ISP firmware for greater that 4 Gig memory on IA-64
272 Rev. 3.14 Beta August 16, 2000 BN Qlogic
273 - Added setting of dma_mask to full 64 bit
274 if flags.enable_64bit_addressing is set in NVRAM
275 Rev. 3.13 Beta August 16, 2000 BN Qlogic
276 - Use new PCI DMA mapping APIs for 2.4.x kernel
277 Rev. 3.12 July 18, 2000 Redhat & BN Qlogic
278 - Added check of pci_enable_device to detect() for 2.3.x
279 - Use pci_resource_start() instead of
280 pdev->resource[0].start in detect() for 2.3.x
281 - Updated driver version
282 Rev. 3.11 July 14, 2000 BN Qlogic
283 - Updated SCSI Firmware to following versions:
284 qla1x80: 8.13.08
285 qla1x160: 10.04.08
286 - Updated driver version to 3.11
287 Rev. 3.10 June 23, 2000 BN Qlogic
288 - Added filtering of AMI SubSys Vendor ID devices
289 Rev. 3.9
290 - DEBUG_QLA1280 undefined and new version BN Qlogic
291 Rev. 3.08b May 9, 2000 MD Dell
292 - Added logic to check against AMI subsystem vendor ID
293 Rev. 3.08 May 4, 2000 DG Qlogic
294 - Added logic to check for PCI subsystem ID.
295 Rev. 3.07 Apr 24, 2000 DG & BN Qlogic
296 - Updated SCSI Firmware to following versions:
297 qla12160: 10.01.19
298 qla1280: 8.09.00
299 Rev. 3.06 Apr 12, 2000 DG & BN Qlogic
300 - Internal revision; not released
301 Rev. 3.05 Mar 28, 2000 DG & BN Qlogic
302 - Edit correction for virt_to_bus and PROC.
303 Rev. 3.04 Mar 28, 2000 DG & BN Qlogic
304 - Merge changes from ia64 port.
305 Rev. 3.03 Mar 28, 2000 BN Qlogic
306 - Increase version to reflect new code drop with compile fix
307 of issue with inclusion of linux/spinlock for 2.3 kernels
308 Rev. 3.02 Mar 15, 2000 BN Qlogic
309 - Merge qla1280_proc_info from 2.10 code base
310 Rev. 3.01 Feb 10, 2000 BN Qlogic
311 - Corrected code to compile on a 2.2.x kernel.
312 Rev. 3.00 Jan 17, 2000 DG Qlogic
313 - Added 64-bit support.
314 Rev. 2.07 Nov 9, 1999 DG Qlogic
315 - Added new routine to set target parameters for ISP12160.
316 Rev. 2.06 Sept 10, 1999 DG Qlogic
317 - Added support for ISP12160 Ultra 3 chip.
318 Rev. 2.03 August 3, 1999 Fred Lewis, Intel DuPont
319 - Modified code to remove errors generated when compiling with
320 Cygnus IA64 Compiler.
321 - Changed conversion of pointers to unsigned longs instead of integers.
322 - Changed type of I/O port variables from uint32_t to unsigned long.
323 - Modified OFFSET macro to work with 64-bit as well as 32-bit.
324 - Changed sprintf and printk format specifiers for pointers to %p.
325 - Changed some int to long type casts where needed in sprintf & printk.
326 - Added l modifiers to sprintf and printk format specifiers for longs.
327 - Removed unused local variables.
328 Rev. 1.20 June 8, 1999 DG, Qlogic
329 Changes to support RedHat release 6.0 (kernel 2.2.5).
330 - Added SCSI exclusive access lock (io_request_lock) when accessing
331 the adapter.
332 - Added changes for the new LINUX interface template. Some new error
333 handling routines have been added to the template, but for now we
334 will use the old ones.
335 - Initial Beta Release.
336*****************************************************************************/
337
338
339#include <linux/module.h>
340
341#include <linux/types.h>
342#include <linux/string.h>
343#include <linux/errno.h>
344#include <linux/kernel.h>
345#include <linux/ioport.h>
346#include <linux/delay.h>
347#include <linux/timer.h>
348#include <linux/pci.h>
349#include <linux/proc_fs.h>
350#include <linux/stat.h>
351#include <linux/pci_ids.h>
352#include <linux/interrupt.h>
353#include <linux/init.h>
354#include <linux/dma-mapping.h>
355#include <linux/firmware.h>
356
357#include <asm/io.h>
358#include <asm/irq.h>
359#include <asm/byteorder.h>
360#include <asm/processor.h>
361#include <asm/types.h>
362
363#include <scsi/scsi.h>
364#include <scsi/scsi_cmnd.h>
365#include <scsi/scsi_device.h>
366#include <scsi/scsi_host.h>
367#include <scsi/scsi_tcq.h>
368
369#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
370#include <asm/sn/io.h>
371#endif
372
373
374/*
375 * Compile time Options:
376 * 0 - Disable and 1 - Enable
377 */
378#define DEBUG_QLA1280_INTR 0
379#define DEBUG_PRINT_NVRAM 0
380#define DEBUG_QLA1280 0
381
382#define MEMORY_MAPPED_IO 1
383
384#include "qla1280.h"
385
386#ifndef BITS_PER_LONG
387#error "BITS_PER_LONG not defined!"
388#endif
389#if (BITS_PER_LONG == 64) || defined CONFIG_HIGHMEM
390#define QLA_64BIT_PTR 1
391#endif
392
393#ifdef QLA_64BIT_PTR
394#define pci_dma_hi32(a) ((a >> 16) >> 16)
395#else
396#define pci_dma_hi32(a) 0
397#endif
398#define pci_dma_lo32(a) (a & 0xffffffff)
399
400#define NVRAM_DELAY() udelay(500) /* 2 microseconds */
401
402#if defined(__ia64__) && !defined(ia64_platform_is)
403#define ia64_platform_is(foo) (!strcmp(x, platform_name))
404#endif
405
406
407#define IS_ISP1040(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020)
408#define IS_ISP1x40(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020 || \
409 ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1240)
410#define IS_ISP1x160(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP10160 || \
411 ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP12160)
412
413
414static int qla1280_probe_one(struct pci_dev *, const struct pci_device_id *);
415static void qla1280_remove_one(struct pci_dev *);
416
417/*
418 * QLogic Driver Support Function Prototypes.
419 */
420static void qla1280_done(struct scsi_qla_host *);
421static int qla1280_get_token(char *);
422static int qla1280_setup(char *s) __init;
423
424/*
425 * QLogic ISP1280 Hardware Support Function Prototypes.
426 */
427static int qla1280_load_firmware(struct scsi_qla_host *);
428static int qla1280_init_rings(struct scsi_qla_host *);
429static int qla1280_nvram_config(struct scsi_qla_host *);
430static int qla1280_mailbox_command(struct scsi_qla_host *,
431 uint8_t, uint16_t *);
432static int qla1280_bus_reset(struct scsi_qla_host *, int);
433static int qla1280_device_reset(struct scsi_qla_host *, int, int);
434static int qla1280_abort_command(struct scsi_qla_host *, struct srb *, int);
435static int qla1280_abort_isp(struct scsi_qla_host *);
436#ifdef QLA_64BIT_PTR
437static int qla1280_64bit_start_scsi(struct scsi_qla_host *, struct srb *);
438#else
439static int qla1280_32bit_start_scsi(struct scsi_qla_host *, struct srb *);
440#endif
441static void qla1280_nv_write(struct scsi_qla_host *, uint16_t);
442static void qla1280_poll(struct scsi_qla_host *);
443static void qla1280_reset_adapter(struct scsi_qla_host *);
444static void qla1280_marker(struct scsi_qla_host *, int, int, int, u8);
445static void qla1280_isp_cmd(struct scsi_qla_host *);
446static void qla1280_isr(struct scsi_qla_host *, struct list_head *);
447static void qla1280_rst_aen(struct scsi_qla_host *);
448static void qla1280_status_entry(struct scsi_qla_host *, struct response *,
449 struct list_head *);
450static void qla1280_error_entry(struct scsi_qla_host *, struct response *,
451 struct list_head *);
452static uint16_t qla1280_get_nvram_word(struct scsi_qla_host *, uint32_t);
453static uint16_t qla1280_nvram_request(struct scsi_qla_host *, uint32_t);
454static uint16_t qla1280_debounce_register(volatile uint16_t __iomem *);
455static request_t *qla1280_req_pkt(struct scsi_qla_host *);
456static int qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *,
457 unsigned int);
458static void qla1280_get_target_parameters(struct scsi_qla_host *,
459 struct scsi_device *);
460static int qla1280_set_target_parameters(struct scsi_qla_host *, int, int);
461
462
463static struct qla_driver_setup driver_setup;
464
465/*
466 * convert scsi data direction to request_t control flags
467 */
468static inline uint16_t
469qla1280_data_direction(struct scsi_cmnd *cmnd)
470{
471 switch(cmnd->sc_data_direction) {
472 case DMA_FROM_DEVICE:
473 return BIT_5;
474 case DMA_TO_DEVICE:
475 return BIT_6;
476 case DMA_BIDIRECTIONAL:
477 return BIT_5 | BIT_6;
478 /*
479 * We could BUG() on default here if one of the four cases aren't
480 * met, but then again if we receive something like that from the
481 * SCSI layer we have more serious problems. This shuts up GCC.
482 */
483 case DMA_NONE:
484 default:
485 return 0;
486 }
487}
488
489#if DEBUG_QLA1280
490static void __qla1280_print_scsi_cmd(struct scsi_cmnd * cmd);
491static void __qla1280_dump_buffer(char *, int);
492#endif
493
494
495/*
496 * insmod needs to find the variable and make it point to something
497 */
498#ifdef MODULE
499static char *qla1280;
500
501/* insmod qla1280 options=verbose" */
502module_param(qla1280, charp, 0);
503#else
504__setup("qla1280=", qla1280_setup);
505#endif
506
507
508/*
509 * We use the scsi_pointer structure that's included with each scsi_command
510 * to overlay our struct srb over it. qla1280_init() checks that a srb is not
511 * bigger than a scsi_pointer.
512 */
513
514#define CMD_SP(Cmnd) &Cmnd->SCp
515#define CMD_CDBLEN(Cmnd) Cmnd->cmd_len
516#define CMD_CDBP(Cmnd) Cmnd->cmnd
517#define CMD_SNSP(Cmnd) Cmnd->sense_buffer
518#define CMD_SNSLEN(Cmnd) SCSI_SENSE_BUFFERSIZE
519#define CMD_RESULT(Cmnd) Cmnd->result
520#define CMD_HANDLE(Cmnd) Cmnd->host_scribble
521#define CMD_REQUEST(Cmnd) Cmnd->request->cmd
522
523#define CMD_HOST(Cmnd) Cmnd->device->host
524#define SCSI_BUS_32(Cmnd) Cmnd->device->channel
525#define SCSI_TCN_32(Cmnd) Cmnd->device->id
526#define SCSI_LUN_32(Cmnd) Cmnd->device->lun
527
528
529/*****************************************/
530/* ISP Boards supported by this driver */
531/*****************************************/
532
533struct qla_boards {
534 char *name; /* Board ID String */
535 int numPorts; /* Number of SCSI ports */
536 int fw_index; /* index into qla1280_fw_tbl for firmware */
537};
538
539/* NOTE: the last argument in each entry is used to index ql1280_board_tbl */
540static struct pci_device_id qla1280_pci_tbl[] = {
541 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP12160,
542 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
543 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1020,
544 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
545 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1080,
546 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
547 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1240,
548 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3},
549 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1280,
550 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
551 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP10160,
552 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
553 {0,}
554};
555MODULE_DEVICE_TABLE(pci, qla1280_pci_tbl);
556
557DEFINE_MUTEX(qla1280_firmware_mutex);
558
559struct qla_fw {
560 char *fwname;
561 const struct firmware *fw;
562};
563
564#define QL_NUM_FW_IMAGES 3
565
566struct qla_fw qla1280_fw_tbl[QL_NUM_FW_IMAGES] = {
567 {"qlogic/1040.bin", NULL}, /* image 0 */
568 {"qlogic/1280.bin", NULL}, /* image 1 */
569 {"qlogic/12160.bin", NULL}, /* image 2 */
570};
571
572/* NOTE: Order of boards in this table must match order in qla1280_pci_tbl */
573static struct qla_boards ql1280_board_tbl[] = {
574 {.name = "QLA12160", .numPorts = 2, .fw_index = 2},
575 {.name = "QLA1040" , .numPorts = 1, .fw_index = 0},
576 {.name = "QLA1080" , .numPorts = 1, .fw_index = 1},
577 {.name = "QLA1240" , .numPorts = 2, .fw_index = 1},
578 {.name = "QLA1280" , .numPorts = 2, .fw_index = 1},
579 {.name = "QLA10160", .numPorts = 1, .fw_index = 2},
580 {.name = " ", .numPorts = 0, .fw_index = -1},
581};
582
583static int qla1280_verbose = 1;
584
585#if DEBUG_QLA1280
586static int ql_debug_level = 1;
587#define dprintk(level, format, a...) \
588 do { if (ql_debug_level >= level) printk(KERN_ERR format, ##a); } while(0)
589#define qla1280_dump_buffer(level, buf, size) \
590 if (ql_debug_level >= level) __qla1280_dump_buffer(buf, size)
591#define qla1280_print_scsi_cmd(level, cmd) \
592 if (ql_debug_level >= level) __qla1280_print_scsi_cmd(cmd)
593#else
594#define ql_debug_level 0
595#define dprintk(level, format, a...) do{}while(0)
596#define qla1280_dump_buffer(a, b, c) do{}while(0)
597#define qla1280_print_scsi_cmd(a, b) do{}while(0)
598#endif
599
600#define ENTER(x) dprintk(3, "qla1280 : Entering %s()\n", x);
601#define LEAVE(x) dprintk(3, "qla1280 : Leaving %s()\n", x);
602#define ENTER_INTR(x) dprintk(4, "qla1280 : Entering %s()\n", x);
603#define LEAVE_INTR(x) dprintk(4, "qla1280 : Leaving %s()\n", x);
604
605
606static int qla1280_read_nvram(struct scsi_qla_host *ha)
607{
608 uint16_t *wptr;
609 uint8_t chksum;
610 int cnt, i;
611 struct nvram *nv;
612
613 ENTER("qla1280_read_nvram");
614
615 if (driver_setup.no_nvram)
616 return 1;
617
618 printk(KERN_INFO "scsi(%ld): Reading NVRAM\n", ha->host_no);
619
620 wptr = (uint16_t *)&ha->nvram;
621 nv = &ha->nvram;
622 chksum = 0;
623 for (cnt = 0; cnt < 3; cnt++) {
624 *wptr = qla1280_get_nvram_word(ha, cnt);
625 chksum += *wptr & 0xff;
626 chksum += (*wptr >> 8) & 0xff;
627 wptr++;
628 }
629
630 if (nv->id0 != 'I' || nv->id1 != 'S' ||
631 nv->id2 != 'P' || nv->id3 != ' ' || nv->version < 1) {
632 dprintk(2, "Invalid nvram ID or version!\n");
633 chksum = 1;
634 } else {
635 for (; cnt < sizeof(struct nvram); cnt++) {
636 *wptr = qla1280_get_nvram_word(ha, cnt);
637 chksum += *wptr & 0xff;
638 chksum += (*wptr >> 8) & 0xff;
639 wptr++;
640 }
641 }
642
643 dprintk(3, "qla1280_read_nvram: NVRAM Magic ID= %c %c %c %02x"
644 " version %i\n", nv->id0, nv->id1, nv->id2, nv->id3,
645 nv->version);
646
647
648 if (chksum) {
649 if (!driver_setup.no_nvram)
650 printk(KERN_WARNING "scsi(%ld): Unable to identify or "
651 "validate NVRAM checksum, using default "
652 "settings\n", ha->host_no);
653 ha->nvram_valid = 0;
654 } else
655 ha->nvram_valid = 1;
656
657 /* The firmware interface is, um, interesting, in that the
658 * actual firmware image on the chip is little endian, thus,
659 * the process of taking that image to the CPU would end up
660 * little endian. However, the firmware interface requires it
661 * to be read a word (two bytes) at a time.
662 *
663 * The net result of this would be that the word (and
664 * doubleword) quantites in the firmware would be correct, but
665 * the bytes would be pairwise reversed. Since most of the
666 * firmware quantites are, in fact, bytes, we do an extra
667 * le16_to_cpu() in the firmware read routine.
668 *
669 * The upshot of all this is that the bytes in the firmware
670 * are in the correct places, but the 16 and 32 bit quantites
671 * are still in little endian format. We fix that up below by
672 * doing extra reverses on them */
673 nv->isp_parameter = cpu_to_le16(nv->isp_parameter);
674 nv->firmware_feature.w = cpu_to_le16(nv->firmware_feature.w);
675 for(i = 0; i < MAX_BUSES; i++) {
676 nv->bus[i].selection_timeout = cpu_to_le16(nv->bus[i].selection_timeout);
677 nv->bus[i].max_queue_depth = cpu_to_le16(nv->bus[i].max_queue_depth);
678 }
679 dprintk(1, "qla1280_read_nvram: Completed Reading NVRAM\n");
680 LEAVE("qla1280_read_nvram");
681
682 return chksum;
683}
684
685/**************************************************************************
686 * qla1280_info
687 * Return a string describing the driver.
688 **************************************************************************/
689static const char *
690qla1280_info(struct Scsi_Host *host)
691{
692 static char qla1280_scsi_name_buffer[125];
693 char *bp;
694 struct scsi_qla_host *ha;
695 struct qla_boards *bdp;
696
697 bp = &qla1280_scsi_name_buffer[0];
698 ha = (struct scsi_qla_host *)host->hostdata;
699 bdp = &ql1280_board_tbl[ha->devnum];
700 memset(bp, 0, sizeof(qla1280_scsi_name_buffer));
701
702 sprintf (bp,
703 "QLogic %s PCI to SCSI Host Adapter\n"
704 " Firmware version: %2d.%02d.%02d, Driver version %s",
705 &bdp->name[0], ha->fwver1, ha->fwver2, ha->fwver3,
706 QLA1280_VERSION);
707 return bp;
708}
709
710/**************************************************************************
711 * qla1280_queuecommand
712 * Queue a command to the controller.
713 *
714 * Note:
715 * The mid-level driver tries to ensures that queuecommand never gets invoked
716 * concurrently with itself or the interrupt handler (although the
717 * interrupt handler may call this routine as part of request-completion
718 * handling). Unfortunely, it sometimes calls the scheduler in interrupt
719 * context which is a big NO! NO!.
720 **************************************************************************/
721static int
722qla1280_queuecommand_lck(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
723{
724 struct Scsi_Host *host = cmd->device->host;
725 struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
726 struct srb *sp = (struct srb *)CMD_SP(cmd);
727 int status;
728
729 cmd->scsi_done = fn;
730 sp->cmd = cmd;
731 sp->flags = 0;
732 sp->wait = NULL;
733 CMD_HANDLE(cmd) = (unsigned char *)NULL;
734
735 qla1280_print_scsi_cmd(5, cmd);
736
737#ifdef QLA_64BIT_PTR
738 /*
739 * Using 64 bit commands if the PCI bridge doesn't support it is a
740 * bit wasteful, however this should really only happen if one's
741 * PCI controller is completely broken, like the BCM1250. For
742 * sane hardware this is not an issue.
743 */
744 status = qla1280_64bit_start_scsi(ha, sp);
745#else
746 status = qla1280_32bit_start_scsi(ha, sp);
747#endif
748 return status;
749}
750
751static DEF_SCSI_QCMD(qla1280_queuecommand)
752
753enum action {
754 ABORT_COMMAND,
755 DEVICE_RESET,
756 BUS_RESET,
757 ADAPTER_RESET,
758};
759
760
761static void qla1280_mailbox_timeout(unsigned long __data)
762{
763 struct scsi_qla_host *ha = (struct scsi_qla_host *)__data;
764 struct device_reg __iomem *reg;
765 reg = ha->iobase;
766
767 ha->mailbox_out[0] = RD_REG_WORD(®->mailbox0);
768 printk(KERN_ERR "scsi(%ld): mailbox timed out, mailbox0 %04x, "
769 "ictrl %04x, istatus %04x\n", ha->host_no, ha->mailbox_out[0],
770 RD_REG_WORD(®->ictrl), RD_REG_WORD(®->istatus));
771 complete(ha->mailbox_wait);
772}
773
774static int
775_qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp,
776 struct completion *wait)
777{
778 int status = FAILED;
779 struct scsi_cmnd *cmd = sp->cmd;
780
781 spin_unlock_irq(ha->host->host_lock);
782 wait_for_completion_timeout(wait, 4*HZ);
783 spin_lock_irq(ha->host->host_lock);
784 sp->wait = NULL;
785 if(CMD_HANDLE(cmd) == COMPLETED_HANDLE) {
786 status = SUCCESS;
787 (*cmd->scsi_done)(cmd);
788 }
789 return status;
790}
791
792static int
793qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp)
794{
795 DECLARE_COMPLETION_ONSTACK(wait);
796
797 sp->wait = &wait;
798 return _qla1280_wait_for_single_command(ha, sp, &wait);
799}
800
801static int
802qla1280_wait_for_pending_commands(struct scsi_qla_host *ha, int bus, int target)
803{
804 int cnt;
805 int status;
806 struct srb *sp;
807 struct scsi_cmnd *cmd;
808
809 status = SUCCESS;
810
811 /*
812 * Wait for all commands with the designated bus/target
813 * to be completed by the firmware
814 */
815 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
816 sp = ha->outstanding_cmds[cnt];
817 if (sp) {
818 cmd = sp->cmd;
819
820 if (bus >= 0 && SCSI_BUS_32(cmd) != bus)
821 continue;
822 if (target >= 0 && SCSI_TCN_32(cmd) != target)
823 continue;
824
825 status = qla1280_wait_for_single_command(ha, sp);
826 if (status == FAILED)
827 break;
828 }
829 }
830 return status;
831}
832
833/**************************************************************************
834 * qla1280_error_action
835 * The function will attempt to perform a specified error action and
836 * wait for the results (or time out).
837 *
838 * Input:
839 *      cmd = Linux SCSI command packet of the command that caused the
840 *            bus reset.
841 * action = error action to take (see action_t)
842 *
843 * Returns:
844 * SUCCESS or FAILED
845 *
846 **************************************************************************/
847static int
848qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
849{
850 struct scsi_qla_host *ha;
851 int bus, target, lun;
852 struct srb *sp;
853 int i, found;
854 int result=FAILED;
855 int wait_for_bus=-1;
856 int wait_for_target = -1;
857 DECLARE_COMPLETION_ONSTACK(wait);
858
859 ENTER("qla1280_error_action");
860
861 ha = (struct scsi_qla_host *)(CMD_HOST(cmd)->hostdata);
862 sp = (struct srb *)CMD_SP(cmd);
863 bus = SCSI_BUS_32(cmd);
864 target = SCSI_TCN_32(cmd);
865 lun = SCSI_LUN_32(cmd);
866
867 dprintk(4, "error_action %i, istatus 0x%04x\n", action,
868 RD_REG_WORD(&ha->iobase->istatus));
869
870 dprintk(4, "host_cmd 0x%04x, ictrl 0x%04x, jiffies %li\n",
871 RD_REG_WORD(&ha->iobase->host_cmd),
872 RD_REG_WORD(&ha->iobase->ictrl), jiffies);
873
874 if (qla1280_verbose)
875 printk(KERN_INFO "scsi(%li): Resetting Cmnd=0x%p, "
876 "Handle=0x%p, action=0x%x\n",
877 ha->host_no, cmd, CMD_HANDLE(cmd), action);
878
879 /*
880 * Check to see if we have the command in the outstanding_cmds[]
881 * array. If not then it must have completed before this error
882 * action was initiated. If the error_action isn't ABORT_COMMAND
883 * then the driver must proceed with the requested action.
884 */
885 found = -1;
886 for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
887 if (sp == ha->outstanding_cmds[i]) {
888 found = i;
889 sp->wait = &wait; /* we'll wait for it to complete */
890 break;
891 }
892 }
893
894 if (found < 0) { /* driver doesn't have command */
895 result = SUCCESS;
896 if (qla1280_verbose) {
897 printk(KERN_INFO
898 "scsi(%ld:%d:%d:%d): specified command has "
899 "already completed.\n", ha->host_no, bus,
900 target, lun);
901 }
902 }
903
904 switch (action) {
905
906 case ABORT_COMMAND:
907 dprintk(1, "qla1280: RISC aborting command\n");
908 /*
909	 * The abort might fail due to a race when the host_lock
910 * is released to issue the abort. As such, we
911 * don't bother to check the return status.
912 */
913 if (found >= 0)
914 qla1280_abort_command(ha, sp, found);
915 break;
916
917 case DEVICE_RESET:
918 if (qla1280_verbose)
919 printk(KERN_INFO
920 "scsi(%ld:%d:%d:%d): Queueing device reset "
921 "command.\n", ha->host_no, bus, target, lun);
922 if (qla1280_device_reset(ha, bus, target) == 0) {
923 /* issued device reset, set wait conditions */
924 wait_for_bus = bus;
925 wait_for_target = target;
926 }
927 break;
928
929 case BUS_RESET:
930 if (qla1280_verbose)
931 printk(KERN_INFO "qla1280(%ld:%d): Issued bus "
932 "reset.\n", ha->host_no, bus);
933 if (qla1280_bus_reset(ha, bus) == 0) {
934 /* issued bus reset, set wait conditions */
935 wait_for_bus = bus;
936 }
937 break;
938
939 case ADAPTER_RESET:
940 default:
941 if (qla1280_verbose) {
942 printk(KERN_INFO
943 "scsi(%ld): Issued ADAPTER RESET\n",
944 ha->host_no);
945 printk(KERN_INFO "scsi(%ld): I/O processing will "
946 "continue automatically\n", ha->host_no);
947 }
948 ha->flags.reset_active = 1;
949
950 if (qla1280_abort_isp(ha) != 0) { /* it's dead */
951 result = FAILED;
952 }
953
954 ha->flags.reset_active = 0;
955 }
956
957 /*
958 * At this point, the host_lock has been released and retaken
959 * by the issuance of the mailbox command.
960 * Wait for the command passed in by the mid-layer if it
961 * was found by the driver. It might have been returned
962 * between eh recovery steps, hence the check of the "found"
963 * variable.
964 */
965
966 if (found >= 0)
967 result = _qla1280_wait_for_single_command(ha, sp, &wait);
968
969 if (action == ABORT_COMMAND && result != SUCCESS) {
970 printk(KERN_WARNING
971 "scsi(%li:%i:%i:%i): "
972 "Unable to abort command!\n",
973 ha->host_no, bus, target, lun);
974 }
975
976 /*
977 * If the command passed in by the mid-layer has been
978 * returned by the board, then wait for any additional
979 * commands which are supposed to complete based upon
980 * the error action.
981 *
982 * All commands are unconditionally returned during a
983	 * call to qla1280_abort_isp() (ADAPTER_RESET). No need
984 * to wait for them.
985 */
986 if (result == SUCCESS && wait_for_bus >= 0) {
987 result = qla1280_wait_for_pending_commands(ha,
988 wait_for_bus, wait_for_target);
989 }
990
991 dprintk(1, "RESET returning %d\n", result);
992
993 LEAVE("qla1280_error_action");
994 return result;
995}
996
997/**************************************************************************
998 * qla1280_abort
999 * Abort the specified SCSI command(s).
1000 **************************************************************************/
1001static int
1002qla1280_eh_abort(struct scsi_cmnd * cmd)
1003{
1004 int rc;
1005
1006 spin_lock_irq(cmd->device->host->host_lock);
1007 rc = qla1280_error_action(cmd, ABORT_COMMAND);
1008 spin_unlock_irq(cmd->device->host->host_lock);
1009
1010 return rc;
1011}
1012
1013/**************************************************************************
1014 * qla1280_device_reset
1015 * Reset the specified SCSI device
1016 **************************************************************************/
1017static int
1018qla1280_eh_device_reset(struct scsi_cmnd *cmd)
1019{
1020 int rc;
1021
1022 spin_lock_irq(cmd->device->host->host_lock);
1023 rc = qla1280_error_action(cmd, DEVICE_RESET);
1024 spin_unlock_irq(cmd->device->host->host_lock);
1025
1026 return rc;
1027}
1028
1029/**************************************************************************
1030 * qla1280_bus_reset
1031 * Reset the specified bus.
1032 **************************************************************************/
1033static int
1034qla1280_eh_bus_reset(struct scsi_cmnd *cmd)
1035{
1036 int rc;
1037
1038 spin_lock_irq(cmd->device->host->host_lock);
1039 rc = qla1280_error_action(cmd, BUS_RESET);
1040 spin_unlock_irq(cmd->device->host->host_lock);
1041
1042 return rc;
1043}
1044
1045/**************************************************************************
1046 * qla1280_adapter_reset
1047 * Reset the specified adapter (both channels)
1048 **************************************************************************/
1049static int
1050qla1280_eh_adapter_reset(struct scsi_cmnd *cmd)
1051{
1052 int rc;
1053
1054 spin_lock_irq(cmd->device->host->host_lock);
1055 rc = qla1280_error_action(cmd, ADAPTER_RESET);
1056 spin_unlock_irq(cmd->device->host->host_lock);
1057
1058 return rc;
1059}
1060
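/*
 * Report a standard translated BIOS geometry: 64 heads/32 sectors
 * (1 MB per logical cylinder), falling back to 255 heads/63 sectors
 * once that would exceed 1024 cylinders.
 */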
1061static int
1062qla1280_biosparam(struct scsi_device *sdev, struct block_device *bdev,
1063 sector_t capacity, int geom[])
1064{
1065 int heads, sectors, cylinders;
1066
1067 heads = 64;
1068 sectors = 32;
1069 cylinders = (unsigned long)capacity / (heads * sectors);
1070 if (cylinders > 1024) {
1071 heads = 255;
1072 sectors = 63;
1073 cylinders = (unsigned long)capacity / (heads * sectors);
1074 /* if (cylinders > 1023)
1075 cylinders = 1023; */
1076 }
1077
1078 geom[0] = heads;
1079 geom[1] = sectors;
1080 geom[2] = cylinders;
1081
1082 return 0;
1083}
1084
1085
1086/* disable risc and host interrupts */
1087static inline void
1088qla1280_disable_intrs(struct scsi_qla_host *ha)
1089{
1090 WRT_REG_WORD(&ha->iobase->ictrl, 0);
1091 RD_REG_WORD(&ha->iobase->ictrl); /* PCI Posted Write flush */
1092}
1093
1094/* enable risc and host interrupts */
1095static inline void
1096qla1280_enable_intrs(struct scsi_qla_host *ha)
1097{
1098 WRT_REG_WORD(&ha->iobase->ictrl, (ISP_EN_INT | ISP_EN_RISC));
1099 RD_REG_WORD(&ha->iobase->ictrl); /* PCI Posted Write flush */
1100}
1101
1102/**************************************************************************
1103 * qla1280_intr_handler
1104 * Handles the H/W interrupt
1105 **************************************************************************/
1106static irqreturn_t
1107qla1280_intr_handler(int irq, void *dev_id)
1108{
1109 struct scsi_qla_host *ha;
1110 struct device_reg __iomem *reg;
1111 u16 data;
1112 int handled = 0;
1113
1114 ENTER_INTR ("qla1280_intr_handler");
1115 ha = (struct scsi_qla_host *)dev_id;
1116
1117 spin_lock(ha->host->host_lock);
1118
1119 ha->isr_count++;
1120 reg = ha->iobase;
1121
1122 qla1280_disable_intrs(ha);
1123
1124	data = qla1280_debounce_register(&reg->istatus);
1125 /* Check for pending interrupts. */
1126 if (data & RISC_INT) {
1127 qla1280_isr(ha, &ha->done_q);
1128 handled = 1;
1129 }
1130 if (!list_empty(&ha->done_q))
1131 qla1280_done(ha);
1132
1133 spin_unlock(ha->host->host_lock);
1134
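	/* Interrupts were masked at the top of the handler; unmask them
	 * now that the RISC status has been consumed and done_q drained. */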
1135 qla1280_enable_intrs(ha);
1136
1137 LEAVE_INTR("qla1280_intr_handler");
1138 return IRQ_RETVAL(handled);
1139}
1140
1141
1142static int
1143qla1280_set_target_parameters(struct scsi_qla_host *ha, int bus, int target)
1144{
1145 uint8_t mr;
1146 uint16_t mb[MAILBOX_REGISTER_COUNT];
1147 struct nvram *nv;
1148 int status, lun;
1149
1150 nv = &ha->nvram;
1151
1152 mr = BIT_3 | BIT_2 | BIT_1 | BIT_0;
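	/* Load mailbox registers 0-3 by default; BIT_6 is OR'ed in below
	 * for 1x160 chips so that mb[6] (PPR options/bus width) is sent
	 * as well. */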
1153
1154 /* Set Target Parameters. */
1155 mb[0] = MBC_SET_TARGET_PARAMETERS;
1156 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
1157 mb[2] = nv->bus[bus].target[target].parameter.renegotiate_on_error << 8;
1158 mb[2] |= nv->bus[bus].target[target].parameter.stop_queue_on_check << 9;
1159 mb[2] |= nv->bus[bus].target[target].parameter.auto_request_sense << 10;
1160 mb[2] |= nv->bus[bus].target[target].parameter.tag_queuing << 11;
1161 mb[2] |= nv->bus[bus].target[target].parameter.enable_sync << 12;
1162 mb[2] |= nv->bus[bus].target[target].parameter.enable_wide << 13;
1163 mb[2] |= nv->bus[bus].target[target].parameter.parity_checking << 14;
1164 mb[2] |= nv->bus[bus].target[target].parameter.disconnect_allowed << 15;
1165
1166 if (IS_ISP1x160(ha)) {
1167 mb[2] |= nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr << 5;
1168 mb[3] = (nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8);
1169 mb[6] = (nv->bus[bus].target[target].ppr_1x160.flags.ppr_options << 8) |
1170 nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width;
1171 mr |= BIT_6;
1172 } else {
1173 mb[3] = (nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8);
1174 }
1175 mb[3] |= nv->bus[bus].target[target].sync_period;
1176
1177 status = qla1280_mailbox_command(ha, mr, mb);
1178
1179 /* Set Device Queue Parameters. */
1180 for (lun = 0; lun < MAX_LUNS; lun++) {
1181 mb[0] = MBC_SET_DEVICE_QUEUE;
1182 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
1183 mb[1] |= lun;
1184 mb[2] = nv->bus[bus].max_queue_depth;
1185 mb[3] = nv->bus[bus].target[target].execution_throttle;
1186 status |= qla1280_mailbox_command(ha, 0x0f, mb);
1187 }
1188
1189 if (status)
1190 printk(KERN_WARNING "scsi(%ld:%i:%i): "
1191 "qla1280_set_target_parameters() failed\n",
1192 ha->host_no, bus, target);
1193 return status;
1194}
1195
1196
1197/**************************************************************************
1198 * qla1280_slave_configure
1199 *
1200 * Description:
1201 *   Determines the queue depth for a given device.  There are two ways
1202 *   a queue depth can be obtained for a tagged queueing device.  One
1203 *   way is a driver-supplied default queue depth; if it is defined,
1204 *   then it is used as the default queue depth.  Otherwise, we use
1205 *   either 4 or 8 as the default queue depth (dependent on the number
1206 *   of hardware SCBs).
1207 **************************************************************************/
1208static int
1209qla1280_slave_configure(struct scsi_device *device)
1210{
1211 struct scsi_qla_host *ha;
1212 int default_depth = 3;
1213 int bus = device->channel;
1214 int target = device->id;
1215 int status = 0;
1216 struct nvram *nv;
1217 unsigned long flags;
1218
1219 ha = (struct scsi_qla_host *)device->host->hostdata;
1220 nv = &ha->nvram;
1221
1222 if (qla1280_check_for_dead_scsi_bus(ha, bus))
1223 return 1;
1224
1225 if (device->tagged_supported &&
1226 (ha->bus_settings[bus].qtag_enables & (BIT_0 << target))) {
1227 scsi_adjust_queue_depth(device, MSG_ORDERED_TAG,
1228 ha->bus_settings[bus].hiwat);
1229 } else {
1230 scsi_adjust_queue_depth(device, 0, default_depth);
1231 }
1232
1233 nv->bus[bus].target[target].parameter.enable_sync = device->sdtr;
1234 nv->bus[bus].target[target].parameter.enable_wide = device->wdtr;
1235 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = device->ppr;
1236
1237 if (driver_setup.no_sync ||
1238 (driver_setup.sync_mask &&
1239 (~driver_setup.sync_mask & (1 << target))))
1240 nv->bus[bus].target[target].parameter.enable_sync = 0;
1241 if (driver_setup.no_wide ||
1242 (driver_setup.wide_mask &&
1243 (~driver_setup.wide_mask & (1 << target))))
1244 nv->bus[bus].target[target].parameter.enable_wide = 0;
1245 if (IS_ISP1x160(ha)) {
1246 if (driver_setup.no_ppr ||
1247 (driver_setup.ppr_mask &&
1248 (~driver_setup.ppr_mask & (1 << target))))
1249 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 0;
1250 }
1251
1252 spin_lock_irqsave(ha->host->host_lock, flags);
1253 if (nv->bus[bus].target[target].parameter.enable_sync)
1254 status = qla1280_set_target_parameters(ha, bus, target);
1255 qla1280_get_target_parameters(ha, device);
1256 spin_unlock_irqrestore(ha->host->host_lock, flags);
1257 return status;
1258}
1259
1260
1261/*
1262 * qla1280_done
1263 * Process completed commands.
1264 *
1265 * Input:
1266 * ha = adapter block pointer.
1267 */
1268static void
1269qla1280_done(struct scsi_qla_host *ha)
1270{
1271 struct srb *sp;
1272 struct list_head *done_q;
1273 int bus, target, lun;
1274 struct scsi_cmnd *cmd;
1275
1276 ENTER("qla1280_done");
1277
1278 done_q = &ha->done_q;
1279
1280 while (!list_empty(done_q)) {
1281 sp = list_entry(done_q->next, struct srb, list);
1282
1283 list_del(&sp->list);
1284
1285 cmd = sp->cmd;
1286 bus = SCSI_BUS_32(cmd);
1287 target = SCSI_TCN_32(cmd);
1288 lun = SCSI_LUN_32(cmd);
1289
1290 switch ((CMD_RESULT(cmd) >> 16)) {
1291 case DID_RESET:
1292 /* Issue marker command. */
1293 if (!ha->flags.abort_isp_active)
1294 qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
1295 break;
1296 case DID_ABORT:
1297 sp->flags &= ~SRB_ABORT_PENDING;
1298 sp->flags |= SRB_ABORTED;
1299 break;
1300 default:
1301 break;
1302 }
1303
1304 /* Release memory used for this I/O */
1305 scsi_dma_unmap(cmd);
1306
1307 /* Call the mid-level driver interrupt handler */
1308 ha->actthreads--;
1309
1310 if (sp->wait == NULL)
1311 (*(cmd)->scsi_done)(cmd);
1312 else
1313 complete(sp->wait);
1314 }
1315 LEAVE("qla1280_done");
1316}
1317
1318/*
1319 * Translates an ISP error to a Linux SCSI error
1320 */
1321static int
1322qla1280_return_status(struct response * sts, struct scsi_cmnd *cp)
1323{
1324 int host_status = DID_ERROR;
1325 uint16_t comp_status = le16_to_cpu(sts->comp_status);
1326 uint16_t state_flags = le16_to_cpu(sts->state_flags);
1327 uint32_t residual_length = le32_to_cpu(sts->residual_length);
1328 uint16_t scsi_status = le16_to_cpu(sts->scsi_status);
1329#if DEBUG_QLA1280_INTR
1330 static char *reason[] = {
1331 "DID_OK",
1332 "DID_NO_CONNECT",
1333 "DID_BUS_BUSY",
1334 "DID_TIME_OUT",
1335 "DID_BAD_TARGET",
1336 "DID_ABORT",
1337 "DID_PARITY",
1338 "DID_ERROR",
1339 "DID_RESET",
1340 "DID_BAD_INTR"
1341 };
1342#endif /* DEBUG_QLA1280_INTR */
1343
1344 ENTER("qla1280_return_status");
1345
1346#if DEBUG_QLA1280_INTR
1347 /*
1348 dprintk(1, "qla1280_return_status: compl status = 0x%04x\n",
1349 comp_status);
1350 */
1351#endif
1352
1353 switch (comp_status) {
1354 case CS_COMPLETE:
1355 host_status = DID_OK;
1356 break;
1357
1358 case CS_INCOMPLETE:
1359 if (!(state_flags & SF_GOT_BUS))
1360 host_status = DID_NO_CONNECT;
1361 else if (!(state_flags & SF_GOT_TARGET))
1362 host_status = DID_BAD_TARGET;
1363 else if (!(state_flags & SF_SENT_CDB))
1364 host_status = DID_ERROR;
1365 else if (!(state_flags & SF_TRANSFERRED_DATA))
1366 host_status = DID_ERROR;
1367 else if (!(state_flags & SF_GOT_STATUS))
1368 host_status = DID_ERROR;
1369 else if (!(state_flags & SF_GOT_SENSE))
1370 host_status = DID_ERROR;
1371 break;
1372
1373 case CS_RESET:
1374 host_status = DID_RESET;
1375 break;
1376
1377 case CS_ABORTED:
1378 host_status = DID_ABORT;
1379 break;
1380
1381 case CS_TIMEOUT:
1382 host_status = DID_TIME_OUT;
1383 break;
1384
1385 case CS_DATA_OVERRUN:
1386 dprintk(2, "Data overrun 0x%x\n", residual_length);
1387 dprintk(2, "qla1280_return_status: response packet data\n");
1388 qla1280_dump_buffer(2, (char *)sts, RESPONSE_ENTRY_SIZE);
1389 host_status = DID_ERROR;
1390 break;
1391
1392 case CS_DATA_UNDERRUN:
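		/* A short transfer is only an error if it cuts into the
		 * command's stated underflow limit; otherwise report the
		 * residual and complete normally. */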
1393 if ((scsi_bufflen(cp) - residual_length) <
1394 cp->underflow) {
1395 printk(KERN_WARNING
1396 "scsi: Underflow detected - retrying "
1397 "command.\n");
1398 host_status = DID_ERROR;
1399 } else {
1400 scsi_set_resid(cp, residual_length);
1401 host_status = DID_OK;
1402 }
1403 break;
1404
1405 default:
1406 host_status = DID_ERROR;
1407 break;
1408 }
1409
1410#if DEBUG_QLA1280_INTR
1411 dprintk(1, "qla1280 ISP status: host status (%s) scsi status %x\n",
1412 reason[host_status], scsi_status);
1413#endif
1414
1415 LEAVE("qla1280_return_status");
1416
1417 return (scsi_status & 0xff) | (host_status << 16);
1418}
1419
1420/****************************************************************************/
1421/* QLogic ISP1280 Hardware Support Functions. */
1422/****************************************************************************/
1423
1424/*
1425 * qla1280_initialize_adapter
1426 * Initialize board.
1427 *
1428 * Input:
1429 * ha = adapter block pointer.
1430 *
1431 * Returns:
1432 * 0 = success
1433 */
1434static int
1435qla1280_initialize_adapter(struct scsi_qla_host *ha)
1436{
1437 struct device_reg __iomem *reg;
1438 int status;
1439 int bus;
1440 unsigned long flags;
1441
1442 ENTER("qla1280_initialize_adapter");
1443
1444 /* Clear adapter flags. */
1445 ha->flags.online = 0;
1446 ha->flags.disable_host_adapter = 0;
1447 ha->flags.reset_active = 0;
1448 ha->flags.abort_isp_active = 0;
1449
1450#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
1451 if (ia64_platform_is("sn2")) {
1452 printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA "
1453 "dual channel lockup workaround\n", ha->host_no);
1454 ha->flags.use_pci_vchannel = 1;
1455 driver_setup.no_nvram = 1;
1456 }
1457#endif
1458
1459 /* TODO: implement support for the 1040 nvram format */
1460 if (IS_ISP1040(ha))
1461 driver_setup.no_nvram = 1;
1462
1463 dprintk(1, "Configure PCI space for adapter...\n");
1464
1465 reg = ha->iobase;
1466
1467	/* Ensure mailbox registers are free. */
1468	WRT_REG_WORD(&reg->semaphore, 0);
1469	WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
1470	WRT_REG_WORD(&reg->host_cmd, HC_CLR_HOST_INT);
1471	RD_REG_WORD(&reg->host_cmd);
1472
1473 if (qla1280_read_nvram(ha)) {
1474 dprintk(2, "qla1280_initialize_adapter: failed to read "
1475 "NVRAM\n");
1476 }
1477
1478 /*
1479 * It's necessary to grab the spin here as qla1280_mailbox_command
1480 * needs to be able to drop the lock unconditionally to wait
1481 * for completion.
1482 */
1483 spin_lock_irqsave(ha->host->host_lock, flags);
1484
1485 status = qla1280_load_firmware(ha);
1486 if (status) {
1487 printk(KERN_ERR "scsi(%li): initialize: pci probe failed!\n",
1488 ha->host_no);
1489 goto out;
1490 }
1491
1492 /* Setup adapter based on NVRAM parameters. */
1493 dprintk(1, "scsi(%ld): Configure NVRAM parameters\n", ha->host_no);
1494 qla1280_nvram_config(ha);
1495
1496 if (ha->flags.disable_host_adapter) {
1497 status = 1;
1498 goto out;
1499 }
1500
1501 status = qla1280_init_rings(ha);
1502 if (status)
1503 goto out;
1504
1505	/* Issue SCSI reset; if we can't reset twice then the bus is dead */
1506 for (bus = 0; bus < ha->ports; bus++) {
1507 if (!ha->bus_settings[bus].disable_scsi_reset &&
1508 qla1280_bus_reset(ha, bus) &&
1509 qla1280_bus_reset(ha, bus))
1510 ha->bus_settings[bus].scsi_bus_dead = 1;
1511 }
1512
1513 ha->flags.online = 1;
1514 out:
1515 spin_unlock_irqrestore(ha->host->host_lock, flags);
1516
1517 if (status)
1518 dprintk(2, "qla1280_initialize_adapter: **** FAILED ****\n");
1519
1520 LEAVE("qla1280_initialize_adapter");
1521 return status;
1522}
1523
1524/*
1525 * qla1280_request_firmware
1526 * Acquire firmware for chip. Retain in memory
1527 * for error recovery.
1528 *
1529 * Input:
1530 * ha = adapter block pointer.
1531 *
1532 * Returns:
1533 * Pointer to firmware image or an error code
1534 * cast to pointer via ERR_PTR().
1535 */
1536static const struct firmware *
1537qla1280_request_firmware(struct scsi_qla_host *ha)
1538{
1539 const struct firmware *fw;
1540 int err;
1541 int index;
1542 char *fwname;
1543
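	/*
	 * request_firmware() may sleep, so drop the host lock while the
	 * image is fetched; the firmware mutex serializes boards sharing
	 * the cached image in qla1280_fw_tbl[].
	 */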
1544 spin_unlock_irq(ha->host->host_lock);
1545 mutex_lock(&qla1280_firmware_mutex);
1546
1547 index = ql1280_board_tbl[ha->devnum].fw_index;
1548 fw = qla1280_fw_tbl[index].fw;
1549 if (fw)
1550 goto out;
1551
1552 fwname = qla1280_fw_tbl[index].fwname;
1553 err = request_firmware(&fw, fwname, &ha->pdev->dev);
1554
1555 if (err) {
1556 printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
1557 fwname, err);
1558 fw = ERR_PTR(err);
1559 goto unlock;
1560 }
1561 if ((fw->size % 2) || (fw->size < 6)) {
1562 printk(KERN_ERR "Invalid firmware length %zu in image \"%s\"\n",
1563 fw->size, fwname);
1564 release_firmware(fw);
1565 fw = ERR_PTR(-EINVAL);
1566 goto unlock;
1567 }
1568
1569 qla1280_fw_tbl[index].fw = fw;
1570
1571 out:
1572 ha->fwver1 = fw->data[0];
1573 ha->fwver2 = fw->data[1];
1574 ha->fwver3 = fw->data[2];
1575 unlock:
1576 mutex_unlock(&qla1280_firmware_mutex);
1577 spin_lock_irq(ha->host->host_lock);
1578 return fw;
1579}
1580
1581/*
1582 * Chip diagnostics
1583 * Test chip for proper operation.
1584 *
1585 * Input:
1586 * ha = adapter block pointer.
1587 *
1588 * Returns:
1589 * 0 = success.
1590 */
1591static int
1592qla1280_chip_diag(struct scsi_qla_host *ha)
1593{
1594 uint16_t mb[MAILBOX_REGISTER_COUNT];
1595 struct device_reg __iomem *reg = ha->iobase;
1596 int status = 0;
1597 int cnt;
1598 uint16_t data;
1599	dprintk(3, "qla1280_chip_diag: testing device at 0x%p \n", &reg->id_l);
1600
1601 dprintk(1, "scsi(%ld): Verifying chip\n", ha->host_no);
1602
1603 /* Soft reset chip and wait for it to finish. */
1604	WRT_REG_WORD(&reg->ictrl, ISP_RESET);
1605
1606 /*
1607 * We can't do a traditional PCI write flush here by reading
1608 * back the register. The card will not respond once the reset
1609 * is in action and we end up with a machine check exception
1610 * instead. Nothing to do but wait and hope for the best.
1611 * A portable pci_write_flush(pdev) call would be very useful here.
1612 */
1613 udelay(20);
1614	data = qla1280_debounce_register(&reg->ictrl);
1615 /*
1616 * Yet another QLogic gem ;-(
1617 */
1618 for (cnt = 1000000; cnt && data & ISP_RESET; cnt--) {
1619 udelay(5);
1620		data = RD_REG_WORD(&reg->ictrl);
1621 }
1622
1623 if (!cnt)
1624 goto fail;
1625
1626 /* Reset register cleared by chip reset. */
1627 dprintk(3, "qla1280_chip_diag: reset register cleared by chip reset\n");
1628
1629	WRT_REG_WORD(&reg->cfg_1, 0);
1630
1631 /* Reset RISC and disable BIOS which
1632 allows RISC to execute out of RAM. */
1633	WRT_REG_WORD(&reg->host_cmd, HC_RESET_RISC |
1634 HC_RELEASE_RISC | HC_DISABLE_BIOS);
1635
1636	RD_REG_WORD(&reg->id_l);	/* Flush PCI write */
1637	data = qla1280_debounce_register(&reg->mailbox0);
1638
1639 /*
1640 * I *LOVE* this code!
1641 */
1642 for (cnt = 1000000; cnt && data == MBS_BUSY; cnt--) {
1643 udelay(5);
1644		data = RD_REG_WORD(&reg->mailbox0);
1645 }
1646
1647 if (!cnt)
1648 goto fail;
1649
1650 /* Check product ID of chip */
1651 dprintk(3, "qla1280_chip_diag: Checking product ID of chip\n");
1652
1653	if (RD_REG_WORD(&reg->mailbox1) != PROD_ID_1 ||
1654	    (RD_REG_WORD(&reg->mailbox2) != PROD_ID_2 &&
1655	     RD_REG_WORD(&reg->mailbox2) != PROD_ID_2a) ||
1656	    RD_REG_WORD(&reg->mailbox3) != PROD_ID_3 ||
1657	    RD_REG_WORD(&reg->mailbox4) != PROD_ID_4) {
1658		printk(KERN_INFO "qla1280: Wrong product ID = "
1659		       "0x%x,0x%x,0x%x,0x%x\n",
1660		       RD_REG_WORD(&reg->mailbox1),
1661		       RD_REG_WORD(&reg->mailbox2),
1662		       RD_REG_WORD(&reg->mailbox3),
1663		       RD_REG_WORD(&reg->mailbox4));
1664 goto fail;
1665 }
1666
1667 /*
1668 * Enable ints early!!!
1669 */
1670 qla1280_enable_intrs(ha);
1671
1672 dprintk(1, "qla1280_chip_diag: Checking mailboxes of chip\n");
1673 /* Wrap Incoming Mailboxes Test. */
1674 mb[0] = MBC_MAILBOX_REGISTER_TEST;
1675 mb[1] = 0xAAAA;
1676 mb[2] = 0x5555;
1677 mb[3] = 0xAA55;
1678 mb[4] = 0x55AA;
1679 mb[5] = 0xA5A5;
1680 mb[6] = 0x5A5A;
1681 mb[7] = 0x2525;
1682
1683 status = qla1280_mailbox_command(ha, 0xff, mb);
1684 if (status)
1685 goto fail;
1686
1687 if (mb[1] != 0xAAAA || mb[2] != 0x5555 || mb[3] != 0xAA55 ||
1688 mb[4] != 0x55AA || mb[5] != 0xA5A5 || mb[6] != 0x5A5A ||
1689 mb[7] != 0x2525) {
1690 printk(KERN_INFO "qla1280: Failed mbox check\n");
1691 goto fail;
1692 }
1693
1694 dprintk(3, "qla1280_chip_diag: exiting normally\n");
1695 return 0;
1696 fail:
1697 dprintk(2, "qla1280_chip_diag: **** FAILED ****\n");
1698 return status;
1699}
1700
1701static int
1702qla1280_load_firmware_pio(struct scsi_qla_host *ha)
1703{
1704 /* enter with host_lock acquired */
1705
1706 const struct firmware *fw;
1707 const __le16 *fw_data;
1708 uint16_t risc_address, risc_code_size;
1709 uint16_t mb[MAILBOX_REGISTER_COUNT], i;
1710 int err = 0;
1711
1712 fw = qla1280_request_firmware(ha);
1713 if (IS_ERR(fw))
1714 return PTR_ERR(fw);
1715
1716 fw_data = (const __le16 *)&fw->data[0];
1717 ha->fwstart = __le16_to_cpu(fw_data[2]);
1718
1719 /* Load RISC code. */
1720 risc_address = ha->fwstart;
1721 fw_data = (const __le16 *)&fw->data[6];
1722 risc_code_size = (fw->size - 6) / 2;
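	/*
	 * Layout as consumed here: bytes 0-2 of the image carry the
	 * firmware version, word 2 (bytes 4-5) the RISC load address,
	 * and the code proper starts at byte offset 6.
	 */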
1723
1724 for (i = 0; i < risc_code_size; i++) {
1725 mb[0] = MBC_WRITE_RAM_WORD;
1726 mb[1] = risc_address + i;
1727 mb[2] = __le16_to_cpu(fw_data[i]);
1728
1729 err = qla1280_mailbox_command(ha, BIT_0 | BIT_1 | BIT_2, mb);
1730 if (err) {
1731 printk(KERN_ERR "scsi(%li): Failed to load firmware\n",
1732 ha->host_no);
1733 break;
1734 }
1735 }
1736
1737 return err;
1738}
1739
1740#define DUMP_IT_BACK 0 /* for debug of RISC loading */
1741static int
1742qla1280_load_firmware_dma(struct scsi_qla_host *ha)
1743{
1744 /* enter with host_lock acquired */
1745 const struct firmware *fw;
1746 const __le16 *fw_data;
1747 uint16_t risc_address, risc_code_size;
1748 uint16_t mb[MAILBOX_REGISTER_COUNT], cnt;
1749 int err = 0, num, i;
1750#if DUMP_IT_BACK
1751 uint8_t *sp, *tbuf;
1752 dma_addr_t p_tbuf;
1753
1754 tbuf = pci_alloc_consistent(ha->pdev, 8000, &p_tbuf);
1755 if (!tbuf)
1756 return -ENOMEM;
1757#endif
1758
1759 fw = qla1280_request_firmware(ha);
1760 if (IS_ERR(fw))
1761 return PTR_ERR(fw);
1762
1763 fw_data = (const __le16 *)&fw->data[0];
1764 ha->fwstart = __le16_to_cpu(fw_data[2]);
1765
1766 /* Load RISC code. */
1767 risc_address = ha->fwstart;
1768 fw_data = (const __le16 *)&fw->data[6];
1769 risc_code_size = (fw->size - 6) / 2;
1770
1771 dprintk(1, "%s: DMA RISC code (%i) words\n",
1772 __func__, risc_code_size);
1773
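	/*
	 * Download the image in chunks: each pass copies up to 1000 words
	 * (2000 bytes) into the request ring, which doubles as a bounce
	 * buffer here, and asks the RISC to MBC_LOAD_RAM it at the current
	 * risc_address.
	 */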
1774 num = 0;
1775 while (risc_code_size > 0) {
1776 int warn __attribute__((unused)) = 0;
1777
1778 cnt = 2000 >> 1;
1779
1780 if (cnt > risc_code_size)
1781 cnt = risc_code_size;
1782
1783 dprintk(2, "qla1280_setup_chip: loading risc @ =(0x%p),"
1784 "%d,%d(0x%x)\n",
1785 fw_data, cnt, num, risc_address);
1786 for(i = 0; i < cnt; i++)
1787 ((__le16 *)ha->request_ring)[i] = fw_data[i];
1788
1789 mb[0] = MBC_LOAD_RAM;
1790 mb[1] = risc_address;
1791 mb[4] = cnt;
1792 mb[3] = ha->request_dma & 0xffff;
1793 mb[2] = (ha->request_dma >> 16) & 0xffff;
1794 mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff;
1795 mb[6] = pci_dma_hi32(ha->request_dma) >> 16;
1796 dprintk(2, "%s: op=%d 0x%p = 0x%4x,0x%4x,0x%4x,0x%4x\n",
1797 __func__, mb[0],
1798 (void *)(long)ha->request_dma,
1799 mb[6], mb[7], mb[2], mb[3]);
1800 err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
1801 BIT_1 | BIT_0, mb);
1802 if (err) {
1803 printk(KERN_ERR "scsi(%li): Failed to load partial "
1804			       "segment of f/w\n", ha->host_no);
1805 goto out;
1806 }
1807
1808#if DUMP_IT_BACK
1809 mb[0] = MBC_DUMP_RAM;
1810 mb[1] = risc_address;
1811 mb[4] = cnt;
1812 mb[3] = p_tbuf & 0xffff;
1813 mb[2] = (p_tbuf >> 16) & 0xffff;
1814 mb[7] = pci_dma_hi32(p_tbuf) & 0xffff;
1815 mb[6] = pci_dma_hi32(p_tbuf) >> 16;
1816
1817 err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
1818 BIT_1 | BIT_0, mb);
1819 if (err) {
1820 printk(KERN_ERR
1821 "Failed to dump partial segment of f/w\n");
1822 goto out;
1823 }
1824 sp = (uint8_t *)ha->request_ring;
1825 for (i = 0; i < (cnt << 1); i++) {
1826 if (tbuf[i] != sp[i] && warn++ < 10) {
1827 printk(KERN_ERR "%s: FW compare error @ "
1828 "byte(0x%x) loop#=%x\n",
1829 __func__, i, num);
1830 printk(KERN_ERR "%s: FWbyte=%x "
1831 "FWfromChip=%x\n",
1832 __func__, sp[i], tbuf[i]);
1833 /*break; */
1834 }
1835 }
1836#endif
1837 risc_address += cnt;
1838 risc_code_size = risc_code_size - cnt;
1839 fw_data = fw_data + cnt;
1840 num++;
1841 }
1842
1843 out:
1844#if DUMP_IT_BACK
1845 pci_free_consistent(ha->pdev, 8000, tbuf, p_tbuf);
1846#endif
1847 return err;
1848}
1849
1850static int
1851qla1280_start_firmware(struct scsi_qla_host *ha)
1852{
1853 uint16_t mb[MAILBOX_REGISTER_COUNT];
1854 int err;
1855
1856 dprintk(1, "%s: Verifying checksum of loaded RISC code.\n",
1857 __func__);
1858
1859 /* Verify checksum of loaded RISC code. */
1860 mb[0] = MBC_VERIFY_CHECKSUM;
1861 /* mb[1] = ql12_risc_code_addr01; */
1862 mb[1] = ha->fwstart;
1863 err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
1864 if (err) {
1865 printk(KERN_ERR "scsi(%li): RISC checksum failed.\n", ha->host_no);
1866 return err;
1867 }
1868
1869 /* Start firmware execution. */
1870 dprintk(1, "%s: start firmware running.\n", __func__);
1871 mb[0] = MBC_EXECUTE_FIRMWARE;
1872 mb[1] = ha->fwstart;
1873 err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
1874 if (err) {
1875 printk(KERN_ERR "scsi(%li): Failed to start firmware\n",
1876 ha->host_no);
1877 }
1878
1879 return err;
1880}
1881
1882static int
1883qla1280_load_firmware(struct scsi_qla_host *ha)
1884{
1885 /* enter with host_lock taken */
1886 int err;
1887
1888 err = qla1280_chip_diag(ha);
1889 if (err)
1890 goto out;
1891 if (IS_ISP1040(ha))
1892 err = qla1280_load_firmware_pio(ha);
1893 else
1894 err = qla1280_load_firmware_dma(ha);
1895 if (err)
1896 goto out;
1897 err = qla1280_start_firmware(ha);
1898 out:
1899 return err;
1900}
1901
1902/*
1903 * Initialize rings
1904 *
1905 * Input:
1906 * ha = adapter block pointer.
1907 * ha->request_ring = request ring virtual address
1908 * ha->response_ring = response ring virtual address
1909 * ha->request_dma = request ring physical address
1910 * ha->response_dma = response ring physical address
1911 *
1912 * Returns:
1913 * 0 = success.
1914 */
1915static int
1916qla1280_init_rings(struct scsi_qla_host *ha)
1917{
1918 uint16_t mb[MAILBOX_REGISTER_COUNT];
1919 int status = 0;
1920
1921 ENTER("qla1280_init_rings");
1922
1923 /* Clear outstanding commands array. */
1924 memset(ha->outstanding_cmds, 0,
1925 sizeof(struct srb *) * MAX_OUTSTANDING_COMMANDS);
1926
1927 /* Initialize request queue. */
1928 ha->request_ring_ptr = ha->request_ring;
1929 ha->req_ring_index = 0;
1930 ha->req_q_cnt = REQUEST_ENTRY_CNT;
1931 /* mb[0] = MBC_INIT_REQUEST_QUEUE; */
1932 mb[0] = MBC_INIT_REQUEST_QUEUE_A64;
1933 mb[1] = REQUEST_ENTRY_CNT;
1934 mb[3] = ha->request_dma & 0xffff;
1935 mb[2] = (ha->request_dma >> 16) & 0xffff;
1936 mb[4] = 0;
1937 mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff;
1938 mb[6] = pci_dma_hi32(ha->request_dma) >> 16;
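	/*
	 * The ring base address is handed over 16 bits per mailbox:
	 * mb[3]/mb[2] hold the low 32 bits, mb[7]/mb[6] the high 32 bits
	 * (zero when the ring sits below 4GB).
	 */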
1939 if (!(status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_4 |
1940 BIT_3 | BIT_2 | BIT_1 | BIT_0,
1941 &mb[0]))) {
1942 /* Initialize response queue. */
1943 ha->response_ring_ptr = ha->response_ring;
1944 ha->rsp_ring_index = 0;
1945 /* mb[0] = MBC_INIT_RESPONSE_QUEUE; */
1946 mb[0] = MBC_INIT_RESPONSE_QUEUE_A64;
1947 mb[1] = RESPONSE_ENTRY_CNT;
1948 mb[3] = ha->response_dma & 0xffff;
1949 mb[2] = (ha->response_dma >> 16) & 0xffff;
1950 mb[5] = 0;
1951 mb[7] = pci_dma_hi32(ha->response_dma) & 0xffff;
1952 mb[6] = pci_dma_hi32(ha->response_dma) >> 16;
1953 status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_5 |
1954 BIT_3 | BIT_2 | BIT_1 | BIT_0,
1955 &mb[0]);
1956 }
1957
1958 if (status)
1959 dprintk(2, "qla1280_init_rings: **** FAILED ****\n");
1960
1961 LEAVE("qla1280_init_rings");
1962 return status;
1963}
1964
1965static void
1966qla1280_print_settings(struct nvram *nv)
1967{
1968 dprintk(1, "qla1280 : initiator scsi id bus[0]=%d\n",
1969 nv->bus[0].config_1.initiator_id);
1970 dprintk(1, "qla1280 : initiator scsi id bus[1]=%d\n",
1971 nv->bus[1].config_1.initiator_id);
1972
1973 dprintk(1, "qla1280 : bus reset delay[0]=%d\n",
1974 nv->bus[0].bus_reset_delay);
1975 dprintk(1, "qla1280 : bus reset delay[1]=%d\n",
1976 nv->bus[1].bus_reset_delay);
1977
1978 dprintk(1, "qla1280 : retry count[0]=%d\n", nv->bus[0].retry_count);
1979 dprintk(1, "qla1280 : retry delay[0]=%d\n", nv->bus[0].retry_delay);
1980 dprintk(1, "qla1280 : retry count[1]=%d\n", nv->bus[1].retry_count);
1981 dprintk(1, "qla1280 : retry delay[1]=%d\n", nv->bus[1].retry_delay);
1982
1983 dprintk(1, "qla1280 : async data setup time[0]=%d\n",
1984 nv->bus[0].config_2.async_data_setup_time);
1985 dprintk(1, "qla1280 : async data setup time[1]=%d\n",
1986 nv->bus[1].config_2.async_data_setup_time);
1987
1988 dprintk(1, "qla1280 : req/ack active negation[0]=%d\n",
1989 nv->bus[0].config_2.req_ack_active_negation);
1990 dprintk(1, "qla1280 : req/ack active negation[1]=%d\n",
1991 nv->bus[1].config_2.req_ack_active_negation);
1992
1993 dprintk(1, "qla1280 : data line active negation[0]=%d\n",
1994 nv->bus[0].config_2.data_line_active_negation);
1995 dprintk(1, "qla1280 : data line active negation[1]=%d\n",
1996 nv->bus[1].config_2.data_line_active_negation);
1997
1998 dprintk(1, "qla1280 : disable loading risc code=%d\n",
1999 nv->cntr_flags_1.disable_loading_risc_code);
2000
2001 dprintk(1, "qla1280 : enable 64bit addressing=%d\n",
2002 nv->cntr_flags_1.enable_64bit_addressing);
2003
2004 dprintk(1, "qla1280 : selection timeout limit[0]=%d\n",
2005 nv->bus[0].selection_timeout);
2006 dprintk(1, "qla1280 : selection timeout limit[1]=%d\n",
2007 nv->bus[1].selection_timeout);
2008
2009 dprintk(1, "qla1280 : max queue depth[0]=%d\n",
2010 nv->bus[0].max_queue_depth);
2011 dprintk(1, "qla1280 : max queue depth[1]=%d\n",
2012 nv->bus[1].max_queue_depth);
2013}
2014
2015static void
2016qla1280_set_target_defaults(struct scsi_qla_host *ha, int bus, int target)
2017{
2018 struct nvram *nv = &ha->nvram;
2019
2020 nv->bus[bus].target[target].parameter.renegotiate_on_error = 1;
2021 nv->bus[bus].target[target].parameter.auto_request_sense = 1;
2022 nv->bus[bus].target[target].parameter.tag_queuing = 1;
2023 nv->bus[bus].target[target].parameter.enable_sync = 1;
2024#if 1 /* Some SCSI Processors do not seem to like this */
2025 nv->bus[bus].target[target].parameter.enable_wide = 1;
2026#endif
2027 nv->bus[bus].target[target].execution_throttle =
2028 nv->bus[bus].max_queue_depth - 1;
2029 nv->bus[bus].target[target].parameter.parity_checking = 1;
2030 nv->bus[bus].target[target].parameter.disconnect_allowed = 1;
2031
2032 if (IS_ISP1x160(ha)) {
2033 nv->bus[bus].target[target].flags.flags1x160.device_enable = 1;
2034 nv->bus[bus].target[target].flags.flags1x160.sync_offset = 0x0e;
2035 nv->bus[bus].target[target].sync_period = 9;
2036 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 1;
2037 nv->bus[bus].target[target].ppr_1x160.flags.ppr_options = 2;
2038 nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width = 1;
2039 } else {
2040 nv->bus[bus].target[target].flags.flags1x80.device_enable = 1;
2041 nv->bus[bus].target[target].flags.flags1x80.sync_offset = 12;
2042 nv->bus[bus].target[target].sync_period = 10;
2043 }
2044}
2045
2046static void
2047qla1280_set_defaults(struct scsi_qla_host *ha)
2048{
2049 struct nvram *nv = &ha->nvram;
2050 int bus, target;
2051
2052 dprintk(1, "Using defaults for NVRAM: \n");
2053 memset(nv, 0, sizeof(struct nvram));
2054
2055 /* nv->cntr_flags_1.disable_loading_risc_code = 1; */
2056 nv->firmware_feature.f.enable_fast_posting = 1;
2057 nv->firmware_feature.f.disable_synchronous_backoff = 1;
2058 nv->termination.scsi_bus_0_control = 3;
2059 nv->termination.scsi_bus_1_control = 3;
2060 nv->termination.auto_term_support = 1;
2061
2062 /*
2063 * Set default FIFO magic - What appropriate values would be here
2064 * is unknown. This is what I have found testing with 12160s.
2065 *
2066 * Now, I would love the magic decoder ring for this one, the
2067 * header file provided by QLogic seems to be bogus or incomplete
2068 * at best.
2069 */
2070 nv->isp_config.burst_enable = 1;
2071 if (IS_ISP1040(ha))
2072 nv->isp_config.fifo_threshold |= 3;
2073 else
2074 nv->isp_config.fifo_threshold |= 4;
2075
2076 if (IS_ISP1x160(ha))
2077 nv->isp_parameter = 0x01; /* fast memory enable */
2078
2079 for (bus = 0; bus < MAX_BUSES; bus++) {
2080 nv->bus[bus].config_1.initiator_id = 7;
2081 nv->bus[bus].config_2.req_ack_active_negation = 1;
2082 nv->bus[bus].config_2.data_line_active_negation = 1;
2083 nv->bus[bus].selection_timeout = 250;
2084 nv->bus[bus].max_queue_depth = 32;
2085
2086 if (IS_ISP1040(ha)) {
2087 nv->bus[bus].bus_reset_delay = 3;
2088 nv->bus[bus].config_2.async_data_setup_time = 6;
2089 nv->bus[bus].retry_delay = 1;
2090 } else {
2091 nv->bus[bus].bus_reset_delay = 5;
2092 nv->bus[bus].config_2.async_data_setup_time = 8;
2093 }
2094
2095 for (target = 0; target < MAX_TARGETS; target++)
2096 qla1280_set_target_defaults(ha, bus, target);
2097 }
2098}
2099
2100static int
2101qla1280_config_target(struct scsi_qla_host *ha, int bus, int target)
2102{
2103 struct nvram *nv = &ha->nvram;
2104 uint16_t mb[MAILBOX_REGISTER_COUNT];
2105 int status, lun;
2106 uint16_t flag;
2107
2108 /* Set Target Parameters. */
2109 mb[0] = MBC_SET_TARGET_PARAMETERS;
2110 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
2111
2112 /*
2113 * Do not enable sync and ppr for the initial INQUIRY run. We
2114 * enable this later if we determine the target actually
2115 * supports it.
2116 */
2117 mb[2] = (TP_RENEGOTIATE | TP_AUTO_REQUEST_SENSE | TP_TAGGED_QUEUE
2118 | TP_WIDE | TP_PARITY | TP_DISCONNECT);
2119
2120 if (IS_ISP1x160(ha))
2121 mb[3] = nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8;
2122 else
2123 mb[3] = nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8;
2124 mb[3] |= nv->bus[bus].target[target].sync_period;
2125 status = qla1280_mailbox_command(ha, 0x0f, mb);
2126
2127 /* Save Tag queuing enable flag. */
2128 flag = (BIT_0 << target);
2129 if (nv->bus[bus].target[target].parameter.tag_queuing)
2130 ha->bus_settings[bus].qtag_enables |= flag;
2131
2132 /* Save Device enable flag. */
2133 if (IS_ISP1x160(ha)) {
2134 if (nv->bus[bus].target[target].flags.flags1x160.device_enable)
2135 ha->bus_settings[bus].device_enables |= flag;
2136 ha->bus_settings[bus].lun_disables |= 0;
2137 } else {
2138 if (nv->bus[bus].target[target].flags.flags1x80.device_enable)
2139 ha->bus_settings[bus].device_enables |= flag;
2140 /* Save LUN disable flag. */
2141 if (nv->bus[bus].target[target].flags.flags1x80.lun_disable)
2142 ha->bus_settings[bus].lun_disables |= flag;
2143 }
2144
2145 /* Set Device Queue Parameters. */
2146 for (lun = 0; lun < MAX_LUNS; lun++) {
2147 mb[0] = MBC_SET_DEVICE_QUEUE;
2148 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
2149 mb[1] |= lun;
2150 mb[2] = nv->bus[bus].max_queue_depth;
2151 mb[3] = nv->bus[bus].target[target].execution_throttle;
2152 status |= qla1280_mailbox_command(ha, 0x0f, mb);
2153 }
2154
2155 return status;
2156}
2157
2158static int
2159qla1280_config_bus(struct scsi_qla_host *ha, int bus)
2160{
2161 struct nvram *nv = &ha->nvram;
2162 uint16_t mb[MAILBOX_REGISTER_COUNT];
2163 int target, status;
2164
2165 /* SCSI Reset Disable. */
2166 ha->bus_settings[bus].disable_scsi_reset =
2167 nv->bus[bus].config_1.scsi_reset_disable;
2168
2169 /* Initiator ID. */
2170 ha->bus_settings[bus].id = nv->bus[bus].config_1.initiator_id;
2171 mb[0] = MBC_SET_INITIATOR_ID;
2172 mb[1] = bus ? ha->bus_settings[bus].id | BIT_7 :
2173 ha->bus_settings[bus].id;
2174 status = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
2175
2176 /* Reset Delay. */
2177 ha->bus_settings[bus].bus_reset_delay =
2178 nv->bus[bus].bus_reset_delay;
2179
2180 /* Command queue depth per device. */
2181 ha->bus_settings[bus].hiwat = nv->bus[bus].max_queue_depth - 1;
2182
2183 /* Set target parameters. */
2184 for (target = 0; target < MAX_TARGETS; target++)
2185 status |= qla1280_config_target(ha, bus, target);
2186
2187 return status;
2188}
2189
2190static int
2191qla1280_nvram_config(struct scsi_qla_host *ha)
2192{
2193 struct device_reg __iomem *reg = ha->iobase;
2194 struct nvram *nv = &ha->nvram;
2195 int bus, target, status = 0;
2196 uint16_t mb[MAILBOX_REGISTER_COUNT];
2197
2198 ENTER("qla1280_nvram_config");
2199
2200 if (ha->nvram_valid) {
2201 /* Always force AUTO sense for LINUX SCSI */
2202 for (bus = 0; bus < MAX_BUSES; bus++)
2203 for (target = 0; target < MAX_TARGETS; target++) {
2204 nv->bus[bus].target[target].parameter.
2205 auto_request_sense = 1;
2206 }
2207 } else {
2208 qla1280_set_defaults(ha);
2209 }
2210
2211 qla1280_print_settings(nv);
2212
2213 /* Disable RISC load of firmware. */
2214 ha->flags.disable_risc_code_load =
2215 nv->cntr_flags_1.disable_loading_risc_code;
2216
2217 if (IS_ISP1040(ha)) {
2218 uint16_t hwrev, cfg1, cdma_conf, ddma_conf;
2219
2220		hwrev = RD_REG_WORD(&reg->cfg_0) & ISP_CFG0_HWMSK;
2221
2222		cfg1 = RD_REG_WORD(&reg->cfg_1) & ~(BIT_4 | BIT_5 | BIT_6);
2223		cdma_conf = RD_REG_WORD(&reg->cdma_cfg);
2224		ddma_conf = RD_REG_WORD(&reg->ddma_cfg);
2225
2226 /* Busted fifo, says mjacob. */
2227 if (hwrev != ISP_CFG0_1040A)
2228 cfg1 |= nv->isp_config.fifo_threshold << 4;
2229
2230 cfg1 |= nv->isp_config.burst_enable << 2;
2231		WRT_REG_WORD(&reg->cfg_1, cfg1);
2232
2233		WRT_REG_WORD(&reg->cdma_cfg, cdma_conf | CDMA_CONF_BENAB);
2234		WRT_REG_WORD(&reg->ddma_cfg, ddma_conf | DDMA_CONF_BENAB);
2235 } else {
2236 uint16_t cfg1, term;
2237
2238 /* Set ISP hardware DMA burst */
2239 cfg1 = nv->isp_config.fifo_threshold << 4;
2240 cfg1 |= nv->isp_config.burst_enable << 2;
2241 /* Enable DMA arbitration on dual channel controllers */
2242 if (ha->ports > 1)
2243 cfg1 |= BIT_13;
2244		WRT_REG_WORD(&reg->cfg_1, cfg1);
2245
2246 /* Set SCSI termination. */
2247		WRT_REG_WORD(&reg->gpio_enable,
2248 BIT_7 | BIT_3 | BIT_2 | BIT_1 | BIT_0);
2249 term = nv->termination.scsi_bus_1_control;
2250 term |= nv->termination.scsi_bus_0_control << 2;
2251 term |= nv->termination.auto_term_support << 7;
2252		RD_REG_WORD(&reg->id_l);	/* Flush PCI write */
2253		WRT_REG_WORD(&reg->gpio_data, term);
2254	}
2255	RD_REG_WORD(&reg->id_l);	/* Flush PCI write */
2256
2257 /* ISP parameter word. */
2258 mb[0] = MBC_SET_SYSTEM_PARAMETER;
2259 mb[1] = nv->isp_parameter;
2260 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
2261
2262 if (IS_ISP1x40(ha)) {
2263 /* clock rate - for qla1240 and older, only */
2264 mb[0] = MBC_SET_CLOCK_RATE;
2265 mb[1] = 40;
2266 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2267 }
2268
2269 /* Firmware feature word. */
2270 mb[0] = MBC_SET_FIRMWARE_FEATURES;
2271 mb[1] = nv->firmware_feature.f.enable_fast_posting;
2272 mb[1] |= nv->firmware_feature.f.report_lvd_bus_transition << 1;
2273 mb[1] |= nv->firmware_feature.f.disable_synchronous_backoff << 5;
2274#if defined(CONFIG_IA64_GENERIC) || defined (CONFIG_IA64_SGI_SN2)
2275 if (ia64_platform_is("sn2")) {
2276 printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA "
2277 "workaround\n", ha->host_no);
2278 mb[1] |= nv->firmware_feature.f.unused_9 << 9; /* XXX */
2279 }
2280#endif
2281 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2282
2283 /* Retry count and delay. */
2284 mb[0] = MBC_SET_RETRY_COUNT;
2285 mb[1] = nv->bus[0].retry_count;
2286 mb[2] = nv->bus[0].retry_delay;
2287 mb[6] = nv->bus[1].retry_count;
2288 mb[7] = nv->bus[1].retry_delay;
2289 status |= qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_2 |
2290 BIT_1 | BIT_0, &mb[0]);
2291
2292 /* ASYNC data setup time. */
2293 mb[0] = MBC_SET_ASYNC_DATA_SETUP;
2294 mb[1] = nv->bus[0].config_2.async_data_setup_time;
2295 mb[2] = nv->bus[1].config_2.async_data_setup_time;
2296 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
2297
2298 /* Active negation states. */
2299 mb[0] = MBC_SET_ACTIVE_NEGATION;
2300 mb[1] = 0;
2301 if (nv->bus[0].config_2.req_ack_active_negation)
2302 mb[1] |= BIT_5;
2303 if (nv->bus[0].config_2.data_line_active_negation)
2304 mb[1] |= BIT_4;
2305 mb[2] = 0;
2306 if (nv->bus[1].config_2.req_ack_active_negation)
2307 mb[2] |= BIT_5;
2308 if (nv->bus[1].config_2.data_line_active_negation)
2309 mb[2] |= BIT_4;
2310 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
2311
2312 mb[0] = MBC_SET_DATA_OVERRUN_RECOVERY;
2313 mb[1] = 2; /* Reset SCSI bus and return all outstanding IO */
2314 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2315
2316	/* PCI control: enable data and command DMA channel bursts */
2317 mb[0] = MBC_SET_PCI_CONTROL;
2318 mb[1] = BIT_1; /* Data DMA Channel Burst Enable */
2319 mb[2] = BIT_1; /* Command DMA Channel Burst Enable */
2320 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
2321
2322 mb[0] = MBC_SET_TAG_AGE_LIMIT;
2323 mb[1] = 8;
2324 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2325
2326 /* Selection timeout. */
2327 mb[0] = MBC_SET_SELECTION_TIMEOUT;
2328 mb[1] = nv->bus[0].selection_timeout;
2329 mb[2] = nv->bus[1].selection_timeout;
2330 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
2331
2332 for (bus = 0; bus < ha->ports; bus++)
2333 status |= qla1280_config_bus(ha, bus);
2334
2335 if (status)
2336 dprintk(2, "qla1280_nvram_config: **** FAILED ****\n");
2337
2338 LEAVE("qla1280_nvram_config");
2339 return status;
2340}
2341
2342/*
2343 * Get NVRAM data word
2344 * Calculates word position in NVRAM and calls request routine to
2345 * get the word from NVRAM.
2346 *
2347 * Input:
2348 * ha = adapter block pointer.
2349 * address = NVRAM word address.
2350 *
2351 * Returns:
2352 * data word.
2353 */
2354static uint16_t
2355qla1280_get_nvram_word(struct scsi_qla_host *ha, uint32_t address)
2356{
2357 uint32_t nv_cmd;
2358 uint16_t data;
2359
2360 nv_cmd = address << 16;
2361 nv_cmd |= NV_READ_OP;
2362
2363 data = le16_to_cpu(qla1280_nvram_request(ha, nv_cmd));
2364
2365 dprintk(8, "qla1280_get_nvram_word: exiting normally NVRAM data = "
2366 "0x%x", data);
2367
2368 return data;
2369}
2370
2371/*
2372 * NVRAM request
2373 * Sends read command to NVRAM and gets data from NVRAM.
2374 *
2375 * Input:
2376 * ha = adapter block pointer.
2377 * nv_cmd = Bit 26 = start bit
2378 * Bit 25, 24 = opcode
2379 * Bit 23-16 = address
2380 * Bit 15-0 = write data
2381 *
2382 * Returns:
2383 * data word.
2384 */
2385static uint16_t
2386qla1280_nvram_request(struct scsi_qla_host *ha, uint32_t nv_cmd)
2387{
2388 struct device_reg __iomem *reg = ha->iobase;
2389 int cnt;
2390 uint16_t data = 0;
2391 uint16_t reg_data;
2392
2393 /* Send command to NVRAM. */
2394
2395 nv_cmd <<= 5;
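	/* Left-justify the command so the start bit (bit 26) lands in
	 * bit 31; the loop below then clocks out start + opcode + address,
	 * 11 bits in all, MSB first. */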
2396 for (cnt = 0; cnt < 11; cnt++) {
2397 if (nv_cmd & BIT_31)
2398 qla1280_nv_write(ha, NV_DATA_OUT);
2399 else
2400 qla1280_nv_write(ha, 0);
2401 nv_cmd <<= 1;
2402 }
2403
2404 /* Read data from NVRAM. */
2405
2406 for (cnt = 0; cnt < 16; cnt++) {
2407		WRT_REG_WORD(&reg->nvram, (NV_SELECT | NV_CLOCK));
2408		RD_REG_WORD(&reg->id_l);	/* Flush PCI write */
2409		NVRAM_DELAY();
2410		data <<= 1;
2411		reg_data = RD_REG_WORD(&reg->nvram);
2412		if (reg_data & NV_DATA_IN)
2413			data |= BIT_0;
2414		WRT_REG_WORD(&reg->nvram, NV_SELECT);
2415		RD_REG_WORD(&reg->id_l);	/* Flush PCI write */
2416 NVRAM_DELAY();
2417 }
2418
2419 /* Deselect chip. */
2420
2421	WRT_REG_WORD(&reg->nvram, NV_DESELECT);
2422	RD_REG_WORD(&reg->id_l);	/* Flush PCI write */
2423 NVRAM_DELAY();
2424
2425 return data;
2426}
2427
2428static void
2429qla1280_nv_write(struct scsi_qla_host *ha, uint16_t data)
2430{
2431 struct device_reg __iomem *reg = ha->iobase;
2432
2433	WRT_REG_WORD(&reg->nvram, data | NV_SELECT);
2434	RD_REG_WORD(&reg->id_l);	/* Flush PCI write */
2435	NVRAM_DELAY();
2436	WRT_REG_WORD(&reg->nvram, data | NV_SELECT | NV_CLOCK);
2437	RD_REG_WORD(&reg->id_l);	/* Flush PCI write */
2438	NVRAM_DELAY();
2439	WRT_REG_WORD(&reg->nvram, data | NV_SELECT);
2440	RD_REG_WORD(&reg->id_l);	/* Flush PCI write */
2441	NVRAM_DELAY();
2442}
2443
2444/*
2445 * Mailbox Command
2446 *      Issues mailbox command and waits for completion.
2447 *
2448 * Input:
2449 * ha = adapter block pointer.
2450 * mr = mailbox registers to load.
2451 * mb = data pointer for mailbox registers.
2452 *
2453 * Output:
2454 * mb[MAILBOX_REGISTER_COUNT] = returned mailbox data.
2455 *
2456 * Returns:
2457 * 0 = success
2458 */
2459static int
2460qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
2461{
2462 struct device_reg __iomem *reg = ha->iobase;
2463 int status = 0;
2464 int cnt;
2465 uint16_t *optr, *iptr;
2466 uint16_t __iomem *mptr;
2467 uint16_t data;
2468 DECLARE_COMPLETION_ONSTACK(wait);
2469 struct timer_list timer;
2470
2471 ENTER("qla1280_mailbox_command");
2472
2473 if (ha->mailbox_wait) {
2474 printk(KERN_ERR "Warning mailbox wait already in use!\n");
2475 }
2476 ha->mailbox_wait = &wait;
2477
2478 /*
2479 * We really should start out by verifying that the mailbox is
2480	 * available before we start sending the command data
2481 */
2482 /* Load mailbox registers. */
2483	mptr = (uint16_t __iomem *) &reg->mailbox0;
2484 iptr = mb;
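	/* 'mr' is a bitmask selecting which of the outgoing mailbox
	 * registers to load: bit 0 is mailbox0, and the mask is shifted
	 * right as mptr walks up the register file. */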
2485 for (cnt = 0; cnt < MAILBOX_REGISTER_COUNT; cnt++) {
2486 if (mr & BIT_0) {
2487 WRT_REG_WORD(mptr, (*iptr));
2488 }
2489
2490 mr >>= 1;
2491 mptr++;
2492 iptr++;
2493 }
2494
2495 /* Issue set host interrupt command. */
2496
2497 /* set up a timer just in case we're really jammed */
2498 init_timer_on_stack(&timer);
2499 timer.expires = jiffies + 20*HZ;
2500 timer.data = (unsigned long)ha;
2501 timer.function = qla1280_mailbox_timeout;
2502 add_timer(&timer);
2503
2504 spin_unlock_irq(ha->host->host_lock);
2505	WRT_REG_WORD(&reg->host_cmd, HC_SET_HOST_INT);
2506	data = qla1280_debounce_register(&reg->istatus);
2507
2508 wait_for_completion(&wait);
2509 del_timer_sync(&timer);
2510
2511 spin_lock_irq(ha->host->host_lock);
2512
2513 ha->mailbox_wait = NULL;
2514
2515 /* Check for mailbox command timeout. */
2516 if (ha->mailbox_out[0] != MBS_CMD_CMP) {
2517 printk(KERN_WARNING "qla1280_mailbox_command: Command failed, "
2518 "mailbox0 = 0x%04x, mailbox_out0 = 0x%04x, istatus = "
2519 "0x%04x\n",
2520		       mb[0], ha->mailbox_out[0], RD_REG_WORD(&reg->istatus));
2521		printk(KERN_WARNING "m0 %04x, m1 %04x, m2 %04x, m3 %04x\n",
2522		       RD_REG_WORD(&reg->mailbox0), RD_REG_WORD(&reg->mailbox1),
2523		       RD_REG_WORD(&reg->mailbox2), RD_REG_WORD(&reg->mailbox3));
2524		printk(KERN_WARNING "m4 %04x, m5 %04x, m6 %04x, m7 %04x\n",
2525		       RD_REG_WORD(&reg->mailbox4), RD_REG_WORD(&reg->mailbox5),
2526		       RD_REG_WORD(&reg->mailbox6), RD_REG_WORD(&reg->mailbox7));
2527 status = 1;
2528 }
2529
2530 /* Load return mailbox registers. */
2531 optr = mb;
2532 iptr = (uint16_t *) &ha->mailbox_out[0];
2533 mr = MAILBOX_REGISTER_COUNT;
2534 memcpy(optr, iptr, MAILBOX_REGISTER_COUNT * sizeof(uint16_t));
2535
2536 if (ha->flags.reset_marker)
2537 qla1280_rst_aen(ha);
2538
2539 if (status)
2540 dprintk(2, "qla1280_mailbox_command: **** FAILED, mailbox0 = "
2541 "0x%x ****\n", mb[0]);
2542
2543 LEAVE("qla1280_mailbox_command");
2544 return status;
2545}
2546
2547/*
2548 * qla1280_poll
2549 * Polls ISP for interrupts.
2550 *
2551 * Input:
2552 * ha = adapter block pointer.
2553 */
2554static void
2555qla1280_poll(struct scsi_qla_host *ha)
2556{
2557 struct device_reg __iomem *reg = ha->iobase;
2558 uint16_t data;
2559 LIST_HEAD(done_q);
2560
2561 /* ENTER("qla1280_poll"); */
2562
2563 /* Check for pending interrupts. */
2564	data = RD_REG_WORD(&reg->istatus);
2565 if (data & RISC_INT)
2566 qla1280_isr(ha, &done_q);
2567
2568 if (!ha->mailbox_wait) {
2569 if (ha->flags.reset_marker)
2570 qla1280_rst_aen(ha);
2571 }
2572
2573 if (!list_empty(&done_q))
2574 qla1280_done(ha);
2575
2576 /* LEAVE("qla1280_poll"); */
2577}
2578
2579/*
2580 * qla1280_bus_reset
2581 * Issue SCSI bus reset.
2582 *
2583 * Input:
2584 * ha = adapter block pointer.
2585 * bus = SCSI bus number.
2586 *
2587 * Returns:
2588 * 0 = success
2589 */
2590static int
2591qla1280_bus_reset(struct scsi_qla_host *ha, int bus)
2592{
2593 uint16_t mb[MAILBOX_REGISTER_COUNT];
2594 uint16_t reset_delay;
2595 int status;
2596
2597 dprintk(3, "qla1280_bus_reset: entered\n");
2598
2599 if (qla1280_verbose)
2600 printk(KERN_INFO "scsi(%li:%i): Resetting SCSI BUS\n",
2601 ha->host_no, bus);
2602
2603 reset_delay = ha->bus_settings[bus].bus_reset_delay;
2604 mb[0] = MBC_BUS_RESET;
2605 mb[1] = reset_delay;
2606 mb[2] = (uint16_t) bus;
2607 status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
2608
2609 if (status) {
2610 if (ha->bus_settings[bus].failed_reset_count > 2)
2611 ha->bus_settings[bus].scsi_bus_dead = 1;
2612 ha->bus_settings[bus].failed_reset_count++;
2613 } else {
2614 spin_unlock_irq(ha->host->host_lock);
2615 ssleep(reset_delay);
2616 spin_lock_irq(ha->host->host_lock);
2617
2618 ha->bus_settings[bus].scsi_bus_dead = 0;
2619 ha->bus_settings[bus].failed_reset_count = 0;
2620 ha->bus_settings[bus].reset_marker = 0;
2621 /* Issue marker command. */
2622 qla1280_marker(ha, bus, 0, 0, MK_SYNC_ALL);
2623 }
2624
2625 /*
2626 * We should probably call qla1280_set_target_parameters()
2627 * here as well for all devices on the bus.
2628 */
2629
2630 if (status)
2631 dprintk(2, "qla1280_bus_reset: **** FAILED ****\n");
2632 else
2633 dprintk(3, "qla1280_bus_reset: exiting normally\n");
2634
2635 return status;
2636}
2637
2638/*
2639 * qla1280_device_reset
2640 * Issue bus device reset message to the target.
2641 *
2642 * Input:
2643 * ha = adapter block pointer.
2644 * bus = SCSI BUS number.
2645 * target = SCSI ID.
2646 *
2647 * Returns:
2648 * 0 = success
2649 */
2650static int
2651qla1280_device_reset(struct scsi_qla_host *ha, int bus, int target)
2652{
2653 uint16_t mb[MAILBOX_REGISTER_COUNT];
2654 int status;
2655
2656 ENTER("qla1280_device_reset");
2657
2658 mb[0] = MBC_ABORT_TARGET;
2659 mb[1] = (bus ? (target | BIT_7) : target) << 8;
2660 mb[2] = 1;
2661 status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
2662
2663 /* Issue marker command. */
2664 qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
2665
2666 if (status)
2667 dprintk(2, "qla1280_device_reset: **** FAILED ****\n");
2668
2669 LEAVE("qla1280_device_reset");
2670 return status;
2671}
2672
2673/*
2674 * qla1280_abort_command
2675 * Abort command aborts a specified IOCB.
2676 *
2677 * Input:
2678 * ha = adapter block pointer.
2679 *      sp = SRB structure pointer.
2680 *
2681 * Returns:
2682 * 0 = success
2683 */
2684static int
2685qla1280_abort_command(struct scsi_qla_host *ha, struct srb * sp, int handle)
2686{
2687 uint16_t mb[MAILBOX_REGISTER_COUNT];
2688 unsigned int bus, target, lun;
2689 int status;
2690
2691 ENTER("qla1280_abort_command");
2692
2693 bus = SCSI_BUS_32(sp->cmd);
2694 target = SCSI_TCN_32(sp->cmd);
2695 lun = SCSI_LUN_32(sp->cmd);
2696
2697 sp->flags |= SRB_ABORT_PENDING;
2698
2699 mb[0] = MBC_ABORT_COMMAND;
2700 mb[1] = (bus ? target | BIT_7 : target) << 8 | lun;
2701 mb[2] = handle >> 16;
2702 mb[3] = handle & 0xffff;
2703 status = qla1280_mailbox_command(ha, 0x0f, &mb[0]);
2704
2705 if (status) {
2706 dprintk(2, "qla1280_abort_command: **** FAILED ****\n");
2707 sp->flags &= ~SRB_ABORT_PENDING;
2708 }
2709
2710
2711 LEAVE("qla1280_abort_command");
2712 return status;
2713}
2714
2715/*
2716 * qla1280_reset_adapter
2717 * Reset adapter.
2718 *
2719 * Input:
2720 * ha = adapter block pointer.
2721 */
2722static void
2723qla1280_reset_adapter(struct scsi_qla_host *ha)
2724{
2725 struct device_reg __iomem *reg = ha->iobase;
2726
2727 ENTER("qla1280_reset_adapter");
2728
2729 /* Disable ISP chip */
2730 ha->flags.online = 0;
2731	WRT_REG_WORD(&reg->ictrl, ISP_RESET);
2732	WRT_REG_WORD(&reg->host_cmd,
2733		     HC_RESET_RISC | HC_RELEASE_RISC | HC_DISABLE_BIOS);
2734	RD_REG_WORD(&reg->id_l);	/* Flush PCI write */
2735
2736 LEAVE("qla1280_reset_adapter");
2737}
2738
2739/*
2740 * Issue marker command.
2741 * Function issues marker IOCB.
2742 *
2743 * Input:
2744 * ha = adapter block pointer.
2745 * bus = SCSI BUS number
2746 * id = SCSI ID
2747 * lun = SCSI LUN
2748 * type = marker modifier
2749 */
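/*
 * For reference, qla1280_bus_reset() above issues this with MK_SYNC_ALL
 * to resynchronize every target on a bus after a successful reset, while
 * qla1280_device_reset() uses MK_SYNC_ID to cover a single target.
 */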
2750static void
2751qla1280_marker(struct scsi_qla_host *ha, int bus, int id, int lun, u8 type)
2752{
2753 struct mrk_entry *pkt;
2754
2755 ENTER("qla1280_marker");
2756
2757 /* Get request packet. */
2758 if ((pkt = (struct mrk_entry *) qla1280_req_pkt(ha))) {
2759 pkt->entry_type = MARKER_TYPE;
2760 pkt->lun = (uint8_t) lun;
2761 pkt->target = (uint8_t) (bus ? (id | BIT_7) : id);
2762 pkt->modifier = type;
2763 pkt->entry_status = 0;
2764
2765 /* Issue command to ISP */
2766 qla1280_isp_cmd(ha);
2767 }
2768
2769 LEAVE("qla1280_marker");
2770}
2771
2772
2773/*
2774 * qla1280_64bit_start_scsi
2775 *	The start SCSI routine is responsible for building request packets
2776 *	on the request ring and modifying the ISP input pointer.
2777 *
2778 * Input:
2779 * ha = adapter block pointer.
2780 *	sp = SRB structure pointer.
2781 *
2782 * Returns:
2783 * 0 = success, was able to issue command.
2784 */
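/*
 * A 64-bit command IOCB carries 2 data segments and each continuation
 * IOCB another 5, which is what the req_cnt arithmetic below encodes.
 * Purely illustrative numbers: seg_cnt = 12 gives
 * req_cnt = 1 + (12 - 2) / 5 = 3 with no remainder, while seg_cnt = 13
 * leaves a remainder and therefore needs req_cnt = 4.
 */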
2785#ifdef QLA_64BIT_PTR
2786static int
2787qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
2788{
2789 struct device_reg __iomem *reg = ha->iobase;
2790 struct scsi_cmnd *cmd = sp->cmd;
2791 cmd_a64_entry_t *pkt;
2792 __le32 *dword_ptr;
2793 dma_addr_t dma_handle;
2794 int status = 0;
2795 int cnt;
2796 int req_cnt;
2797 int seg_cnt;
2798 u8 dir;
2799
2800 ENTER("qla1280_64bit_start_scsi:");
2801
2802 /* Calculate number of entries and segments required. */
2803 req_cnt = 1;
2804 seg_cnt = scsi_dma_map(cmd);
2805 if (seg_cnt > 0) {
2806 if (seg_cnt > 2) {
2807 req_cnt += (seg_cnt - 2) / 5;
2808 if ((seg_cnt - 2) % 5)
2809 req_cnt++;
2810 }
2811 } else if (seg_cnt < 0) {
2812 status = 1;
2813 goto out;
2814 }
2815
2816 if ((req_cnt + 2) >= ha->req_q_cnt) {
2817 /* Calculate number of free request entries. */
2818		cnt = RD_REG_WORD(&reg->mailbox4);
2819 if (ha->req_ring_index < cnt)
2820 ha->req_q_cnt = cnt - ha->req_ring_index;
2821 else
2822 ha->req_q_cnt =
2823 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
2824 }
2825
2826 dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n",
2827 ha->req_q_cnt, seg_cnt);
2828
2829 /* If room for request in request ring. */
2830 if ((req_cnt + 2) >= ha->req_q_cnt) {
2831 status = SCSI_MLQUEUE_HOST_BUSY;
2832 dprintk(2, "qla1280_start_scsi: in-ptr=0x%x req_q_cnt="
2833		"0x%x req_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt,
2834 req_cnt);
2835 goto out;
2836 }
2837
2838 /* Check for room in outstanding command list. */
2839 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS &&
2840 ha->outstanding_cmds[cnt] != NULL; cnt++);
2841
2842 if (cnt >= MAX_OUTSTANDING_COMMANDS) {
2843 status = SCSI_MLQUEUE_HOST_BUSY;
2844 dprintk(2, "qla1280_start_scsi: NO ROOM IN "
2845 "OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt);
2846 goto out;
2847 }
2848
2849 ha->outstanding_cmds[cnt] = sp;
2850 ha->req_q_cnt -= req_cnt;
2851 CMD_HANDLE(sp->cmd) = (unsigned char *)(unsigned long)(cnt + 1);
2852
2853	dprintk(2, "start: cmd=%p sp=%p CDB=%x, handle %lx\n", cmd, sp,
2854 cmd->cmnd[0], (long)CMD_HANDLE(sp->cmd));
2855 dprintk(2, " bus %i, target %i, lun %i\n",
2856 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
2857 qla1280_dump_buffer(2, cmd->cmnd, MAX_COMMAND_SIZE);
2858
2859 /*
2860 * Build command packet.
2861 */
2862 pkt = (cmd_a64_entry_t *) ha->request_ring_ptr;
2863
2864 pkt->entry_type = COMMAND_A64_TYPE;
2865 pkt->entry_count = (uint8_t) req_cnt;
2866 pkt->sys_define = (uint8_t) ha->req_ring_index;
2867 pkt->entry_status = 0;
2868 pkt->handle = cpu_to_le32(cnt);
2869
2870 /* Zero out remaining portion of packet. */
2871 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
2872
2873 /* Set ISP command timeout. */
2874 pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
2875
2876 /* Set device target ID and LUN */
2877 pkt->lun = SCSI_LUN_32(cmd);
2878 pkt->target = SCSI_BUS_32(cmd) ?
2879 (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd);
2880
2881 /* Enable simple tag queuing if device supports it. */
2882 if (cmd->device->simple_tags)
2883 pkt->control_flags |= cpu_to_le16(BIT_3);
2884
2885 /* Load SCSI command packet. */
2886 pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
2887 memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
2888 /* dprintk(1, "Build packet for command[0]=0x%x\n",pkt->scsi_cdb[0]); */
2889
2890 /* Set transfer direction. */
2891 dir = qla1280_data_direction(cmd);
2892 pkt->control_flags |= cpu_to_le16(dir);
2893
2894 /* Set total data segment count. */
2895 pkt->dseg_count = cpu_to_le16(seg_cnt);
2896
2897 /*
2898 * Load data segments.
2899 */
2900 if (seg_cnt) { /* If data transfer. */
2901 struct scatterlist *sg, *s;
2902 int remseg = seg_cnt;
2903
2904 sg = scsi_sglist(cmd);
2905
2906 /* Setup packet address segment pointer. */
2907 dword_ptr = (u32 *)&pkt->dseg_0_address;
2908
2909 /* Load command entry data segments. */
2910 for_each_sg(sg, s, seg_cnt, cnt) {
2911 if (cnt == 2)
2912 break;
2913
2914 dma_handle = sg_dma_address(s);
2915#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
2916 if (ha->flags.use_pci_vchannel)
2917 sn_pci_set_vchan(ha->pdev,
2918 (unsigned long *)&dma_handle,
2919 SCSI_BUS_32(cmd));
2920#endif
2921 *dword_ptr++ =
2922 cpu_to_le32(pci_dma_lo32(dma_handle));
2923 *dword_ptr++ =
2924 cpu_to_le32(pci_dma_hi32(dma_handle));
2925 *dword_ptr++ = cpu_to_le32(sg_dma_len(s));
2926 dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n",
2927 cpu_to_le32(pci_dma_hi32(dma_handle)),
2928 cpu_to_le32(pci_dma_lo32(dma_handle)),
2929				cpu_to_le32(sg_dma_len(s)));
2930 remseg--;
2931 }
2932 dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather "
2933 "command packet data - b %i, t %i, l %i \n",
2934 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd),
2935 SCSI_LUN_32(cmd));
2936 qla1280_dump_buffer(5, (char *)pkt,
2937 REQUEST_ENTRY_SIZE);
2938
2939 /*
2940 * Build continuation packets.
2941 */
2942 dprintk(3, "S/G Building Continuation...seg_cnt=0x%x "
2943 "remains\n", seg_cnt);
2944
2945 while (remseg > 0) {
2946 /* Update sg start */
2947 sg = s;
2948 /* Adjust ring index. */
2949 ha->req_ring_index++;
2950 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
2951 ha->req_ring_index = 0;
2952 ha->request_ring_ptr =
2953 ha->request_ring;
2954 } else
2955 ha->request_ring_ptr++;
2956
2957 pkt = (cmd_a64_entry_t *)ha->request_ring_ptr;
2958
2959 /* Zero out packet. */
2960 memset(pkt, 0, REQUEST_ENTRY_SIZE);
2961
2962 /* Load packet defaults. */
2963 ((struct cont_a64_entry *) pkt)->entry_type =
2964 CONTINUE_A64_TYPE;
2965 ((struct cont_a64_entry *) pkt)->entry_count = 1;
2966 ((struct cont_a64_entry *) pkt)->sys_define =
2967 (uint8_t)ha->req_ring_index;
2968 /* Setup packet address segment pointer. */
2969 dword_ptr =
2970 (u32 *)&((struct cont_a64_entry *) pkt)->dseg_0_address;
2971
2972 /* Load continuation entry data segments. */
2973 for_each_sg(sg, s, remseg, cnt) {
2974 if (cnt == 5)
2975 break;
2976 dma_handle = sg_dma_address(s);
2977#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
2978 if (ha->flags.use_pci_vchannel)
2979 sn_pci_set_vchan(ha->pdev,
2980 (unsigned long *)&dma_handle,
2981 SCSI_BUS_32(cmd));
2982#endif
2983 *dword_ptr++ =
2984 cpu_to_le32(pci_dma_lo32(dma_handle));
2985 *dword_ptr++ =
2986 cpu_to_le32(pci_dma_hi32(dma_handle));
2987 *dword_ptr++ =
2988 cpu_to_le32(sg_dma_len(s));
2989 dprintk(3, "S/G Segment Cont. phys_addr=%x %x, len=0x%x\n",
2990 cpu_to_le32(pci_dma_hi32(dma_handle)),
2991 cpu_to_le32(pci_dma_lo32(dma_handle)),
2992 cpu_to_le32(sg_dma_len(s)));
2993 }
2994 remseg -= cnt;
2995 dprintk(5, "qla1280_64bit_start_scsi: "
2996 "continuation packet data - b %i, t "
2997 "%i, l %i \n", SCSI_BUS_32(cmd),
2998 SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
2999 qla1280_dump_buffer(5, (char *)pkt,
3000 REQUEST_ENTRY_SIZE);
3001 }
3002 } else { /* No data transfer */
3003 dprintk(5, "qla1280_64bit_start_scsi: No data, command "
3004 "packet data - b %i, t %i, l %i \n",
3005 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
3006 qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE);
3007 }
3008 /* Adjust ring index. */
3009 ha->req_ring_index++;
3010 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3011 ha->req_ring_index = 0;
3012 ha->request_ring_ptr = ha->request_ring;
3013 } else
3014 ha->request_ring_ptr++;
3015
3016 /* Set chip new ring index. */
3017 dprintk(2,
3018 "qla1280_64bit_start_scsi: Wakeup RISC for pending command\n");
3019 sp->flags |= SRB_SENT;
3020 ha->actthreads++;
3021	WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
3022 /* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */
3023 mmiowb();
3024
3025 out:
3026 if (status)
3027 dprintk(2, "qla1280_64bit_start_scsi: **** FAILED ****\n");
3028 else
3029 dprintk(3, "qla1280_64bit_start_scsi: exiting normally\n");
3030
3031 return status;
3032}
3033#else /* !QLA_64BIT_PTR */
3034
3035/*
3036 * qla1280_32bit_start_scsi
3037 *	The start SCSI routine is responsible for building request packets
3038 *	on the request ring and modifying the ISP input pointer.
3039 *
3040 * The Qlogic firmware interface allows every queue slot to have a SCSI
3041 * command and up to 4 scatter/gather (SG) entries. If we need more
3042 * than 4 SG entries, then continuation entries are used that can
3043 * hold another 7 entries each. The start routine determines if there
3044 * are enough empty slots and then builds the combination of requests to
3045 * fulfill the OS request.
3046 *
3047 * Input:
3048 * ha = adapter block pointer.
3049 * sp = SCSI Request Block structure pointer.
3050 *
3051 * Returns:
3052 * 0 = success, was able to issue command.
3053 */
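/*
 * The same arithmetic for the 32-bit format (4 segments in the command
 * IOCB, 7 per continuation IOCB), with purely illustrative numbers:
 * seg_cnt = 11 gives req_cnt = 1 + (11 - 4) / 7 = 2, and seg_cnt = 12
 * leaves a remainder and rounds up to req_cnt = 3.
 */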
3054static int
3055qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
3056{
3057 struct device_reg __iomem *reg = ha->iobase;
3058 struct scsi_cmnd *cmd = sp->cmd;
3059 struct cmd_entry *pkt;
3060 __le32 *dword_ptr;
3061 int status = 0;
3062 int cnt;
3063 int req_cnt;
3064 int seg_cnt;
3065 u8 dir;
3066
3067 ENTER("qla1280_32bit_start_scsi");
3068
3069 dprintk(1, "32bit_start: cmd=%p sp=%p CDB=%x\n", cmd, sp,
3070 cmd->cmnd[0]);
3071
3072 /* Calculate number of entries and segments required. */
3073 req_cnt = 1;
3074 seg_cnt = scsi_dma_map(cmd);
3075	if (seg_cnt > 0) {
3076 /*
3077 * if greater than four sg entries then we need to allocate
3078 * continuation entries
3079 */
3080 if (seg_cnt > 4) {
3081 req_cnt += (seg_cnt - 4) / 7;
3082 if ((seg_cnt - 4) % 7)
3083 req_cnt++;
3084 }
3085 dprintk(3, "S/G Transfer cmd=%p seg_cnt=0x%x, req_cnt=%x\n",
3086 cmd, seg_cnt, req_cnt);
3087 } else if (seg_cnt < 0) {
3088 status = 1;
3089 goto out;
3090 }
3091
3092 if ((req_cnt + 2) >= ha->req_q_cnt) {
3093 /* Calculate number of free request entries. */
3094		cnt = RD_REG_WORD(&reg->mailbox4);
3095 if (ha->req_ring_index < cnt)
3096 ha->req_q_cnt = cnt - ha->req_ring_index;
3097 else
3098 ha->req_q_cnt =
3099 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
3100 }
3101
3102 dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n",
3103 ha->req_q_cnt, seg_cnt);
3104 /* If room for request in request ring. */
3105 if ((req_cnt + 2) >= ha->req_q_cnt) {
3106 status = SCSI_MLQUEUE_HOST_BUSY;
3107 dprintk(2, "qla1280_32bit_start_scsi: in-ptr=0x%x, "
3108 "req_q_cnt=0x%x, req_cnt=0x%x", ha->req_ring_index,
3109 ha->req_q_cnt, req_cnt);
3110 goto out;
3111 }
3112
3113 /* Check for empty slot in outstanding command list. */
3114 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS &&
3115 (ha->outstanding_cmds[cnt] != 0); cnt++) ;
3116
3117 if (cnt >= MAX_OUTSTANDING_COMMANDS) {
3118 status = SCSI_MLQUEUE_HOST_BUSY;
3119 dprintk(2, "qla1280_32bit_start_scsi: NO ROOM IN OUTSTANDING "
3120 "ARRAY, req_q_cnt=0x%x\n", ha->req_q_cnt);
3121 goto out;
3122 }
3123
3124 CMD_HANDLE(sp->cmd) = (unsigned char *) (unsigned long)(cnt + 1);
3125 ha->outstanding_cmds[cnt] = sp;
3126 ha->req_q_cnt -= req_cnt;
3127
3128 /*
3129 * Build command packet.
3130 */
3131 pkt = (struct cmd_entry *) ha->request_ring_ptr;
3132
3133 pkt->entry_type = COMMAND_TYPE;
3134 pkt->entry_count = (uint8_t) req_cnt;
3135 pkt->sys_define = (uint8_t) ha->req_ring_index;
3136 pkt->entry_status = 0;
3137 pkt->handle = cpu_to_le32(cnt);
3138
3139 /* Zero out remaining portion of packet. */
3140 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
3141
3142 /* Set ISP command timeout. */
3143 pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
3144
3145 /* Set device target ID and LUN */
3146 pkt->lun = SCSI_LUN_32(cmd);
3147 pkt->target = SCSI_BUS_32(cmd) ?
3148 (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd);
3149
3150 /* Enable simple tag queuing if device supports it. */
3151 if (cmd->device->simple_tags)
3152 pkt->control_flags |= cpu_to_le16(BIT_3);
3153
3154 /* Load SCSI command packet. */
3155 pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
3156 memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
3157
3158 /*dprintk(1, "Build packet for command[0]=0x%x\n",pkt->scsi_cdb[0]); */
3159 /* Set transfer direction. */
3160 dir = qla1280_data_direction(cmd);
3161 pkt->control_flags |= cpu_to_le16(dir);
3162
3163 /* Set total data segment count. */
3164 pkt->dseg_count = cpu_to_le16(seg_cnt);
3165
3166 /*
3167 * Load data segments.
3168 */
3169 if (seg_cnt) {
3170 struct scatterlist *sg, *s;
3171 int remseg = seg_cnt;
3172
3173 sg = scsi_sglist(cmd);
3174
3175 /* Setup packet address segment pointer. */
3176 dword_ptr = &pkt->dseg_0_address;
3177
3178 dprintk(3, "Building S/G data segments..\n");
3179 qla1280_dump_buffer(1, (char *)sg, 4 * 16);
3180
3181 /* Load command entry data segments. */
3182 for_each_sg(sg, s, seg_cnt, cnt) {
3183 if (cnt == 4)
3184 break;
3185 *dword_ptr++ =
3186 cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
3187 *dword_ptr++ = cpu_to_le32(sg_dma_len(s));
3188 dprintk(3, "S/G Segment phys_addr=0x%lx, len=0x%x\n",
3189 (pci_dma_lo32(sg_dma_address(s))),
3190 (sg_dma_len(s)));
3191 remseg--;
3192 }
3193 /*
3194 * Build continuation packets.
3195 */
3196 dprintk(3, "S/G Building Continuation"
3197 "...seg_cnt=0x%x remains\n", seg_cnt);
3198 while (remseg > 0) {
3199 /* Continue from end point */
3200 sg = s;
3201 /* Adjust ring index. */
3202 ha->req_ring_index++;
3203 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3204 ha->req_ring_index = 0;
3205 ha->request_ring_ptr =
3206 ha->request_ring;
3207 } else
3208 ha->request_ring_ptr++;
3209
3210 pkt = (struct cmd_entry *)ha->request_ring_ptr;
3211
3212 /* Zero out packet. */
3213 memset(pkt, 0, REQUEST_ENTRY_SIZE);
3214
3215 /* Load packet defaults. */
3216 ((struct cont_entry *) pkt)->
3217 entry_type = CONTINUE_TYPE;
3218 ((struct cont_entry *) pkt)->entry_count = 1;
3219
3220 ((struct cont_entry *) pkt)->sys_define =
3221 (uint8_t) ha->req_ring_index;
3222
3223 /* Setup packet address segment pointer. */
3224 dword_ptr =
3225 &((struct cont_entry *) pkt)->dseg_0_address;
3226
3227 /* Load continuation entry data segments. */
3228 for_each_sg(sg, s, remseg, cnt) {
3229 if (cnt == 7)
3230 break;
3231 *dword_ptr++ =
3232 cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
3233 *dword_ptr++ =
3234 cpu_to_le32(sg_dma_len(s));
3235 dprintk(1,
3236 "S/G Segment Cont. phys_addr=0x%x, "
3237 "len=0x%x\n",
3238 cpu_to_le32(pci_dma_lo32(sg_dma_address(s))),
3239 cpu_to_le32(sg_dma_len(s)));
3240 }
3241 remseg -= cnt;
3242 dprintk(5, "qla1280_32bit_start_scsi: "
3243 "continuation packet data - "
3244 "scsi(%i:%i:%i)\n", SCSI_BUS_32(cmd),
3245 SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
3246 qla1280_dump_buffer(5, (char *)pkt,
3247 REQUEST_ENTRY_SIZE);
3248 }
3249 } else { /* No data transfer at all */
3250 dprintk(5, "qla1280_32bit_start_scsi: No data, command "
3251 "packet data - \n");
3252 qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE);
3253 }
3254 dprintk(5, "qla1280_32bit_start_scsi: First IOCB block:\n");
3255 qla1280_dump_buffer(5, (char *)ha->request_ring_ptr,
3256 REQUEST_ENTRY_SIZE);
3257
3258 /* Adjust ring index. */
3259 ha->req_ring_index++;
3260 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3261 ha->req_ring_index = 0;
3262 ha->request_ring_ptr = ha->request_ring;
3263 } else
3264 ha->request_ring_ptr++;
3265
3266 /* Set chip new ring index. */
3267 dprintk(2, "qla1280_32bit_start_scsi: Wakeup RISC "
3268 "for pending command\n");
3269 sp->flags |= SRB_SENT;
3270 ha->actthreads++;
3271	WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
3272 /* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */
3273 mmiowb();
3274
3275out:
3276 if (status)
3277 dprintk(2, "qla1280_32bit_start_scsi: **** FAILED ****\n");
3278
3279 LEAVE("qla1280_32bit_start_scsi");
3280
3281 return status;
3282}
3283#endif
3284
3285/*
3286 * qla1280_req_pkt
3287 * Function is responsible for locking ring and
3288 * getting a zeroed out request packet.
3289 *
3290 * Input:
3291 * ha = adapter block pointer.
3292 *
3293 * Returns:
3294 * 0 = failed to get slot.
3295 */
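/*
 * The 30 second figure below is simply the loop bound times the
 * per-iteration delay: 15,000,000 iterations * udelay(2) ~= 30 seconds
 * of busy-waiting, with pending interrupts polled on every pass.
 */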
3296static request_t *
3297qla1280_req_pkt(struct scsi_qla_host *ha)
3298{
3299 struct device_reg __iomem *reg = ha->iobase;
3300 request_t *pkt = NULL;
3301 int cnt;
3302 uint32_t timer;
3303
3304 ENTER("qla1280_req_pkt");
3305
3306 /*
3307 * This can be called from interrupt context, damn it!!!
3308 */
3309 /* Wait for 30 seconds for slot. */
3310 for (timer = 15000000; timer; timer--) {
3311 if (ha->req_q_cnt > 0) {
3312 /* Calculate number of free request entries. */
3313			cnt = RD_REG_WORD(&reg->mailbox4);
3314 if (ha->req_ring_index < cnt)
3315 ha->req_q_cnt = cnt - ha->req_ring_index;
3316 else
3317 ha->req_q_cnt =
3318 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
3319 }
3320
3321 /* Found empty request ring slot? */
3322 if (ha->req_q_cnt > 0) {
3323 ha->req_q_cnt--;
3324 pkt = ha->request_ring_ptr;
3325
3326 /* Zero out packet. */
3327 memset(pkt, 0, REQUEST_ENTRY_SIZE);
3328
3329 /*
3330 * How can this be right when we have a ring
3331 * size of 512???
3332 */
3333 /* Set system defined field. */
3334 pkt->sys_define = (uint8_t) ha->req_ring_index;
3335
3336 /* Set entry count. */
3337 pkt->entry_count = 1;
3338
3339 break;
3340 }
3341
3342 udelay(2); /* 10 */
3343
3344 /* Check for pending interrupts. */
3345 qla1280_poll(ha);
3346 }
3347
3348 if (!pkt)
3349 dprintk(2, "qla1280_req_pkt: **** FAILED ****\n");
3350 else
3351 dprintk(3, "qla1280_req_pkt: exiting normally\n");
3352
3353 return pkt;
3354}
3355
3356/*
3357 * qla1280_isp_cmd
3358 * Function is responsible for modifying ISP input pointer.
3359 * Releases ring lock.
3360 *
3361 * Input:
3362 * ha = adapter block pointer.
3363 */
3364static void
3365qla1280_isp_cmd(struct scsi_qla_host *ha)
3366{
3367 struct device_reg __iomem *reg = ha->iobase;
3368
3369 ENTER("qla1280_isp_cmd");
3370
3371 dprintk(5, "qla1280_isp_cmd: IOCB data:\n");
3372 qla1280_dump_buffer(5, (char *)ha->request_ring_ptr,
3373 REQUEST_ENTRY_SIZE);
3374
3375 /* Adjust ring index. */
3376 ha->req_ring_index++;
3377 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3378 ha->req_ring_index = 0;
3379 ha->request_ring_ptr = ha->request_ring;
3380 } else
3381 ha->request_ring_ptr++;
3382
3383 /*
3384 * Update request index to mailbox4 (Request Queue In).
3385 * The mmiowb() ensures that this write is ordered with writes by other
3386 * CPUs. Without the mmiowb(), it is possible for the following:
3387 * CPUA posts write of index 5 to mailbox4
3388 * CPUA releases host lock
3389 * CPUB acquires host lock
3390 * CPUB posts write of index 6 to mailbox4
3391 * On PCI bus, order reverses and write of 6 posts, then index 5,
3392 * causing chip to issue full queue of stale commands
3393 * The mmiowb() prevents future writes from crossing the barrier.
3394 * See Documentation/DocBook/deviceiobook.tmpl for more information.
3395 */
3396	WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
3397 mmiowb();
3398
3399 LEAVE("qla1280_isp_cmd");
3400}
3401
3402/****************************************************************************/
3403/* Interrupt Service Routine. */
3404/****************************************************************************/
3405
3406/****************************************************************************
3407 * qla1280_isr
3408 * Calls I/O done on command completion.
3409 *
3410 * Input:
3411 * ha = adapter block pointer.
3412 * done_q = done queue.
3413 ****************************************************************************/
3414static void
3415qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q)
3416{
3417 struct device_reg __iomem *reg = ha->iobase;
3418 struct response *pkt;
3419 struct srb *sp = NULL;
3420 uint16_t mailbox[MAILBOX_REGISTER_COUNT];
3421 uint16_t *wptr;
3422 uint32_t index;
3423 u16 istatus;
3424
3425 ENTER("qla1280_isr");
3426
3427	istatus = RD_REG_WORD(&reg->istatus);
3428 if (!(istatus & (RISC_INT | PCI_INT)))
3429 return;
3430
3431 /* Save mailbox register 5 */
3432	mailbox[5] = RD_REG_WORD(&reg->mailbox5);
3433
3434 /* Check for mailbox interrupt. */
3435
3436	mailbox[0] = RD_REG_WORD_dmasync(&reg->semaphore);
3437
3438 if (mailbox[0] & BIT_0) {
3439 /* Get mailbox data. */
3440 /* dprintk(1, "qla1280_isr: In Get mailbox data \n"); */
3441
3442 wptr = &mailbox[0];
3443		*wptr++ = RD_REG_WORD(&reg->mailbox0);
3444		*wptr++ = RD_REG_WORD(&reg->mailbox1);
3445		*wptr = RD_REG_WORD(&reg->mailbox2);
3446 if (mailbox[0] != MBA_SCSI_COMPLETION) {
3447 wptr++;
3448			*wptr++ = RD_REG_WORD(&reg->mailbox3);
3449			*wptr++ = RD_REG_WORD(&reg->mailbox4);
3450			wptr++;
3451			*wptr++ = RD_REG_WORD(&reg->mailbox6);
3452			*wptr = RD_REG_WORD(&reg->mailbox7);
3453 }
3454
3455 /* Release mailbox registers. */
3456
3457		WRT_REG_WORD(&reg->semaphore, 0);
3458		WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
3459
3460 dprintk(5, "qla1280_isr: mailbox interrupt mailbox[0] = 0x%x",
3461 mailbox[0]);
3462
3463 /* Handle asynchronous event */
3464 switch (mailbox[0]) {
3465 case MBA_SCSI_COMPLETION: /* Response completion */
3466 dprintk(5, "qla1280_isr: mailbox SCSI response "
3467 "completion\n");
3468
3469 if (ha->flags.online) {
3470 /* Get outstanding command index. */
3471 index = mailbox[2] << 16 | mailbox[1];
3472
3473 /* Validate handle. */
3474 if (index < MAX_OUTSTANDING_COMMANDS)
3475 sp = ha->outstanding_cmds[index];
3476 else
3477 sp = NULL;
3478
3479 if (sp) {
3480 /* Free outstanding command slot. */
3481 ha->outstanding_cmds[index] = NULL;
3482
3483 /* Save ISP completion status */
3484 CMD_RESULT(sp->cmd) = 0;
3485 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3486
3487 /* Place block on done queue */
3488 list_add_tail(&sp->list, done_q);
3489 } else {
3490 /*
3491 * If we get here we have a real problem!
3492 */
3493 printk(KERN_WARNING
3494 "qla1280: ISP invalid handle\n");
3495 }
3496 }
3497 break;
3498
3499 case MBA_BUS_RESET: /* SCSI Bus Reset */
3500 ha->flags.reset_marker = 1;
3501 index = mailbox[6] & BIT_0;
3502 ha->bus_settings[index].reset_marker = 1;
3503
3504 printk(KERN_DEBUG "qla1280_isr(): index %i "
3505 "asynchronous BUS_RESET\n", index);
3506 break;
3507
3508 case MBA_SYSTEM_ERR: /* System Error */
3509 printk(KERN_WARNING
3510 "qla1280: ISP System Error - mbx1=%xh, mbx2="
3511 "%xh, mbx3=%xh\n", mailbox[1], mailbox[2],
3512 mailbox[3]);
3513 break;
3514
3515 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
3516 printk(KERN_WARNING
3517 "qla1280: ISP Request Transfer Error\n");
3518 break;
3519
3520 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
3521 printk(KERN_WARNING
3522 "qla1280: ISP Response Transfer Error\n");
3523 break;
3524
3525 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
3526 dprintk(2, "qla1280_isr: asynchronous WAKEUP_THRES\n");
3527 break;
3528
3529 case MBA_TIMEOUT_RESET: /* Execution Timeout Reset */
3530 dprintk(2,
3531 "qla1280_isr: asynchronous TIMEOUT_RESET\n");
3532 break;
3533
3534 case MBA_DEVICE_RESET: /* Bus Device Reset */
3535 printk(KERN_INFO "qla1280_isr(): asynchronous "
3536 "BUS_DEVICE_RESET\n");
3537
3538 ha->flags.reset_marker = 1;
3539 index = mailbox[6] & BIT_0;
3540 ha->bus_settings[index].reset_marker = 1;
3541 break;
3542
3543 case MBA_BUS_MODE_CHANGE:
3544 dprintk(2,
3545 "qla1280_isr: asynchronous BUS_MODE_CHANGE\n");
3546 break;
3547
3548 default:
3549 /* dprintk(1, "qla1280_isr: default case of switch MB \n"); */
3550 if (mailbox[0] < MBA_ASYNC_EVENT) {
3551 wptr = &mailbox[0];
3552 memcpy((uint16_t *) ha->mailbox_out, wptr,
3553 MAILBOX_REGISTER_COUNT *
3554 sizeof(uint16_t));
3555
3556 if(ha->mailbox_wait != NULL)
3557 complete(ha->mailbox_wait);
3558 }
3559 break;
3560 }
3561 } else {
3562		WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
3563 }
3564
3565 /*
3566 * We will receive interrupts during mailbox testing prior to
3567 * the card being marked online, hence the double check.
3568 */
3569 if (!(ha->flags.online && !ha->mailbox_wait)) {
3570 dprintk(2, "qla1280_isr: Response pointer Error\n");
3571 goto out;
3572 }
3573
3574 if (mailbox[5] >= RESPONSE_ENTRY_CNT)
3575 goto out;
3576
3577 while (ha->rsp_ring_index != mailbox[5]) {
3578 pkt = ha->response_ring_ptr;
3579
3580 dprintk(5, "qla1280_isr: ha->rsp_ring_index = 0x%x, mailbox[5]"
3581 " = 0x%x\n", ha->rsp_ring_index, mailbox[5]);
3582 dprintk(5,"qla1280_isr: response packet data\n");
3583 qla1280_dump_buffer(5, (char *)pkt, RESPONSE_ENTRY_SIZE);
3584
3585 if (pkt->entry_type == STATUS_TYPE) {
3586 if ((le16_to_cpu(pkt->scsi_status) & 0xff)
3587 || pkt->comp_status || pkt->entry_status) {
3588 dprintk(2, "qla1280_isr: ha->rsp_ring_index = "
3589 "0x%x mailbox[5] = 0x%x, comp_status "
3590 "= 0x%x, scsi_status = 0x%x\n",
3591 ha->rsp_ring_index, mailbox[5],
3592 le16_to_cpu(pkt->comp_status),
3593 le16_to_cpu(pkt->scsi_status));
3594 }
3595 } else {
3596 dprintk(2, "qla1280_isr: ha->rsp_ring_index = "
3597 "0x%x, mailbox[5] = 0x%x\n",
3598 ha->rsp_ring_index, mailbox[5]);
3599 dprintk(2, "qla1280_isr: response packet data\n");
3600 qla1280_dump_buffer(2, (char *)pkt,
3601 RESPONSE_ENTRY_SIZE);
3602 }
3603
3604 if (pkt->entry_type == STATUS_TYPE || pkt->entry_status) {
3605 dprintk(2, "status: Cmd %p, handle %i\n",
3606 ha->outstanding_cmds[pkt->handle]->cmd,
3607 pkt->handle);
3608 if (pkt->entry_type == STATUS_TYPE)
3609 qla1280_status_entry(ha, pkt, done_q);
3610 else
3611 qla1280_error_entry(ha, pkt, done_q);
3612 /* Adjust ring index. */
3613 ha->rsp_ring_index++;
3614 if (ha->rsp_ring_index == RESPONSE_ENTRY_CNT) {
3615 ha->rsp_ring_index = 0;
3616 ha->response_ring_ptr = ha->response_ring;
3617 } else
3618 ha->response_ring_ptr++;
3619			WRT_REG_WORD(&reg->mailbox5, ha->rsp_ring_index);
3620 }
3621 }
3622
3623 out:
3624 LEAVE("qla1280_isr");
3625}
3626
3627/*
3628 * qla1280_rst_aen
3629 * Processes asynchronous reset.
3630 *
3631 * Input:
3632 * ha = adapter block pointer.
3633 */
3634static void
3635qla1280_rst_aen(struct scsi_qla_host *ha)
3636{
3637 uint8_t bus;
3638
3639 ENTER("qla1280_rst_aen");
3640
3641 if (ha->flags.online && !ha->flags.reset_active &&
3642 !ha->flags.abort_isp_active) {
3643 ha->flags.reset_active = 1;
3644 while (ha->flags.reset_marker) {
3645 /* Issue marker command. */
3646 ha->flags.reset_marker = 0;
3647 for (bus = 0; bus < ha->ports &&
3648 !ha->flags.reset_marker; bus++) {
3649 if (ha->bus_settings[bus].reset_marker) {
3650 ha->bus_settings[bus].reset_marker = 0;
3651 qla1280_marker(ha, bus, 0, 0,
3652 MK_SYNC_ALL);
3653 }
3654 }
3655 }
3656 }
3657
3658 LEAVE("qla1280_rst_aen");
3659}
3660
3661
3662/*
3663 * qla1280_status_entry
3664 * Processes received ISP status entry.
3665 *
3666 * Input:
3667 * ha = adapter block pointer.
3668 * pkt = entry pointer.
3669 * done_q = done queue.
3670 */
3671static void
3672qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
3673 struct list_head *done_q)
3674{
3675 unsigned int bus, target, lun;
3676 int sense_sz;
3677 struct srb *sp;
3678 struct scsi_cmnd *cmd;
3679 uint32_t handle = le32_to_cpu(pkt->handle);
3680 uint16_t scsi_status = le16_to_cpu(pkt->scsi_status);
3681 uint16_t comp_status = le16_to_cpu(pkt->comp_status);
3682
3683 ENTER("qla1280_status_entry");
3684
3685 /* Validate handle. */
3686 if (handle < MAX_OUTSTANDING_COMMANDS)
3687 sp = ha->outstanding_cmds[handle];
3688 else
3689 sp = NULL;
3690
3691 if (!sp) {
3692 printk(KERN_WARNING "qla1280: Status Entry invalid handle\n");
3693 goto out;
3694 }
3695
3696 /* Free outstanding command slot. */
3697 ha->outstanding_cmds[handle] = NULL;
3698
3699 cmd = sp->cmd;
3700
3701 /* Generate LU queue on cntrl, target, LUN */
3702 bus = SCSI_BUS_32(cmd);
3703 target = SCSI_TCN_32(cmd);
3704 lun = SCSI_LUN_32(cmd);
3705
3706 if (comp_status || scsi_status) {
3707 dprintk(3, "scsi: comp_status = 0x%x, scsi_status = "
3708 "0x%x, handle = 0x%x\n", comp_status,
3709 scsi_status, handle);
3710 }
3711
3712 /* Target busy or queue full */
3713 if ((scsi_status & 0xFF) == SAM_STAT_TASK_SET_FULL ||
3714 (scsi_status & 0xFF) == SAM_STAT_BUSY) {
3715 CMD_RESULT(cmd) = scsi_status & 0xff;
3716 } else {
3717
3718 /* Save ISP completion status */
3719 CMD_RESULT(cmd) = qla1280_return_status(pkt, cmd);
3720
3721 if (scsi_status & SAM_STAT_CHECK_CONDITION) {
3722 if (comp_status != CS_ARS_FAILED) {
3723 uint16_t req_sense_length =
3724 le16_to_cpu(pkt->req_sense_length);
3725 if (req_sense_length < CMD_SNSLEN(cmd))
3726 sense_sz = req_sense_length;
3727 else
3728 /*
3729 * scsi_cmnd->sense_buffer is
3730 * 64 bytes, why only copy 63?
3731 * This looks wrong! /Jes
3732 */
3733 sense_sz = CMD_SNSLEN(cmd) - 1;
3734
3735 memcpy(cmd->sense_buffer,
3736 &pkt->req_sense_data, sense_sz);
3737 } else
3738 sense_sz = 0;
3739 memset(cmd->sense_buffer + sense_sz, 0,
3740 SCSI_SENSE_BUFFERSIZE - sense_sz);
3741
3742 dprintk(2, "qla1280_status_entry: Check "
3743 "condition Sense data, b %i, t %i, "
3744 "l %i\n", bus, target, lun);
3745 if (sense_sz)
3746 qla1280_dump_buffer(2,
3747 (char *)cmd->sense_buffer,
3748 sense_sz);
3749 }
3750 }
3751
3752 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3753
3754 /* Place command on done queue. */
3755 list_add_tail(&sp->list, done_q);
3756 out:
3757 LEAVE("qla1280_status_entry");
3758}
3759
3760/*
3761 * qla1280_error_entry
3762 * Processes error entry.
3763 *
3764 * Input:
3765 * ha = adapter block pointer.
3766 * pkt = entry pointer.
3767 * done_q = done queue.
3768 */
3769static void
3770qla1280_error_entry(struct scsi_qla_host *ha, struct response *pkt,
3771 struct list_head *done_q)
3772{
3773 struct srb *sp;
3774 uint32_t handle = le32_to_cpu(pkt->handle);
3775
3776 ENTER("qla1280_error_entry");
3777
3778 if (pkt->entry_status & BIT_3)
3779 dprintk(2, "qla1280_error_entry: BAD PAYLOAD flag error\n");
3780 else if (pkt->entry_status & BIT_2)
3781 dprintk(2, "qla1280_error_entry: BAD HEADER flag error\n");
3782 else if (pkt->entry_status & BIT_1)
3783 dprintk(2, "qla1280_error_entry: FULL flag error\n");
3784 else
3785 dprintk(2, "qla1280_error_entry: UNKNOWN flag error\n");
3786
3787 /* Validate handle. */
3788 if (handle < MAX_OUTSTANDING_COMMANDS)
3789 sp = ha->outstanding_cmds[handle];
3790 else
3791 sp = NULL;
3792
3793 if (sp) {
3794 /* Free outstanding command slot. */
3795 ha->outstanding_cmds[handle] = NULL;
3796
3797 /* Bad payload or header */
3798 if (pkt->entry_status & (BIT_3 + BIT_2)) {
3799 /* Bad payload or header, set error status. */
3800 /* CMD_RESULT(sp->cmd) = CS_BAD_PAYLOAD; */
3801 CMD_RESULT(sp->cmd) = DID_ERROR << 16;
3802 } else if (pkt->entry_status & BIT_1) { /* FULL flag */
3803 CMD_RESULT(sp->cmd) = DID_BUS_BUSY << 16;
3804 } else {
3805 /* Set error status. */
3806 CMD_RESULT(sp->cmd) = DID_ERROR << 16;
3807 }
3808
3809 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3810
3811 /* Place command on done queue. */
3812 list_add_tail(&sp->list, done_q);
3813 }
3814#ifdef QLA_64BIT_PTR
3815 else if (pkt->entry_type == COMMAND_A64_TYPE) {
3816		printk(KERN_WARNING "qla1280: Error Entry invalid handle\n");
3817 }
3818#endif
3819
3820 LEAVE("qla1280_error_entry");
3821}
3822
3823/*
3824 * qla1280_abort_isp
3825 * Resets ISP and aborts all outstanding commands.
3826 *
3827 * Input:
3828 * ha = adapter block pointer.
3829 *
3830 * Returns:
3831 * 0 = success
3832 */
3833static int
3834qla1280_abort_isp(struct scsi_qla_host *ha)
3835{
3836 struct device_reg __iomem *reg = ha->iobase;
3837 struct srb *sp;
3838 int status = 0;
3839 int cnt;
3840 int bus;
3841
3842 ENTER("qla1280_abort_isp");
3843
3844 if (ha->flags.abort_isp_active || !ha->flags.online)
3845 goto out;
3846
3847 ha->flags.abort_isp_active = 1;
3848
3849 /* Disable ISP interrupts. */
3850 qla1280_disable_intrs(ha);
3851	WRT_REG_WORD(&reg->host_cmd, HC_PAUSE_RISC);
3852	RD_REG_WORD(&reg->id_l);
3853
3854 printk(KERN_INFO "scsi(%li): dequeuing outstanding commands\n",
3855 ha->host_no);
3856 /* Dequeue all commands in outstanding command list. */
3857 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
3858 struct scsi_cmnd *cmd;
3859 sp = ha->outstanding_cmds[cnt];
3860 if (sp) {
3861 cmd = sp->cmd;
3862 CMD_RESULT(cmd) = DID_RESET << 16;
3863 CMD_HANDLE(cmd) = COMPLETED_HANDLE;
3864 ha->outstanding_cmds[cnt] = NULL;
3865 list_add_tail(&sp->list, &ha->done_q);
3866 }
3867 }
3868
3869 qla1280_done(ha);
3870
3871 status = qla1280_load_firmware(ha);
3872 if (status)
3873 goto out;
3874
3875 /* Setup adapter based on NVRAM parameters. */
3876 qla1280_nvram_config (ha);
3877
3878 status = qla1280_init_rings(ha);
3879 if (status)
3880 goto out;
3881
3882 /* Issue SCSI reset. */
3883 for (bus = 0; bus < ha->ports; bus++)
3884 qla1280_bus_reset(ha, bus);
3885
3886 ha->flags.abort_isp_active = 0;
3887 out:
3888 if (status) {
3889 printk(KERN_WARNING
3890		       "qla1280: ISP error recovery failed, board disabled\n");
3891 qla1280_reset_adapter(ha);
3892 dprintk(2, "qla1280_abort_isp: **** FAILED ****\n");
3893 }
3894
3895 LEAVE("qla1280_abort_isp");
3896 return status;
3897}
3898
3899
3900/*
3901 * qla1280_debounce_register
3902 * Debounce register.
3903 *
3904 * Input:
3905 * port = register address.
3906 *
3907 * Returns:
3908 * register value.
3909 */
3910static u16
3911qla1280_debounce_register(volatile u16 __iomem * addr)
3912{
3913 volatile u16 ret;
3914 volatile u16 ret2;
3915
3916 ret = RD_REG_WORD(addr);
3917 ret2 = RD_REG_WORD(addr);
3918
3919 if (ret == ret2)
3920 return ret;
3921
3922 do {
3923 cpu_relax();
3924 ret = RD_REG_WORD(addr);
3925 ret2 = RD_REG_WORD(addr);
3926 } while (ret != ret2);
3927
3928 return ret;
3929}
3930
3931
3932/************************************************************************
3933 * qla1280_check_for_dead_scsi_bus *
3934 * *
3935 * This routine checks for a dead SCSI bus *
3936 ************************************************************************/
3937#define SET_SXP_BANK 0x0100
3938#define SCSI_PHASE_INVALID 0x87FF
3939static int
3940qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *ha, unsigned int bus)
3941{
3942 uint16_t config_reg, scsi_control;
3943 struct device_reg __iomem *reg = ha->iobase;
3944
3945 if (ha->bus_settings[bus].scsi_bus_dead) {
3946		WRT_REG_WORD(&reg->host_cmd, HC_PAUSE_RISC);
3947		config_reg = RD_REG_WORD(&reg->cfg_1);
3948		WRT_REG_WORD(&reg->cfg_1, SET_SXP_BANK);
3949		scsi_control = RD_REG_WORD(&reg->scsiControlPins);
3950		WRT_REG_WORD(&reg->cfg_1, config_reg);
3951		WRT_REG_WORD(&reg->host_cmd, HC_RELEASE_RISC);
3952
3953 if (scsi_control == SCSI_PHASE_INVALID) {
3954 ha->bus_settings[bus].scsi_bus_dead = 1;
3955 return 1; /* bus is dead */
3956 } else {
3957 ha->bus_settings[bus].scsi_bus_dead = 0;
3958 ha->bus_settings[bus].failed_reset_count = 0;
3959 }
3960 }
3961 return 0; /* bus is not dead */
3962}
3963
3964static void
3965qla1280_get_target_parameters(struct scsi_qla_host *ha,
3966 struct scsi_device *device)
3967{
3968 uint16_t mb[MAILBOX_REGISTER_COUNT];
3969 int bus, target, lun;
3970
3971 bus = device->channel;
3972 target = device->id;
3973 lun = device->lun;
3974
3975
3976 mb[0] = MBC_GET_TARGET_PARAMETERS;
3977 mb[1] = (uint16_t) (bus ? target | BIT_7 : target);
3978 mb[1] <<= 8;
3979 qla1280_mailbox_command(ha, BIT_6 | BIT_3 | BIT_2 | BIT_1 | BIT_0,
3980 &mb[0]);
3981
3982 printk(KERN_INFO "scsi(%li:%d:%d:%d):", ha->host_no, bus, target, lun);
3983
3984 if (mb[3] != 0) {
3985 printk(" Sync: period %d, offset %d",
3986 (mb[3] & 0xff), (mb[3] >> 8));
3987 if (mb[2] & BIT_13)
3988 printk(", Wide");
3989 if ((mb[2] & BIT_5) && ((mb[6] >> 8) & 0xff) >= 2)
3990 printk(", DT");
3991 } else
3992 printk(" Async");
3993
3994 if (device->simple_tags)
3995 printk(", Tagged queuing: depth %d", device->queue_depth);
3996 printk("\n");
3997}
3998
3999
4000#if DEBUG_QLA1280
4001static void
4002__qla1280_dump_buffer(char *b, int size)
4003{
4004 int cnt;
4005 u8 c;
4006
4007 printk(KERN_DEBUG " 0 1 2 3 4 5 6 7 8 9 Ah "
4008 "Bh Ch Dh Eh Fh\n");
4009 printk(KERN_DEBUG "---------------------------------------------"
4010 "------------------\n");
4011
4012 for (cnt = 0; cnt < size;) {
4013 c = *b++;
4014
4015 printk("0x%02x", c);
4016 cnt++;
4017 if (!(cnt % 16))
4018 printk("\n");
4019 else
4020 printk(" ");
4021 }
4022 if (cnt % 16)
4023 printk("\n");
4024}
4025
4026/**************************************************************************
4027 * ql1280_print_scsi_cmd
4028 *
4029 **************************************************************************/
4030static void
4031__qla1280_print_scsi_cmd(struct scsi_cmnd *cmd)
4032{
4033 struct scsi_qla_host *ha;
4034 struct Scsi_Host *host = CMD_HOST(cmd);
4035 struct srb *sp;
4036 /* struct scatterlist *sg; */
4037
4038 int i;
4039 ha = (struct scsi_qla_host *)host->hostdata;
4040
4041 sp = (struct srb *)CMD_SP(cmd);
4042 printk("SCSI Command @= 0x%p, Handle=0x%p\n", cmd, CMD_HANDLE(cmd));
4043 printk(" chan=%d, target = 0x%02x, lun = 0x%02x, cmd_len = 0x%02x\n",
4044 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd),
4045 CMD_CDBLEN(cmd));
4046 printk(" CDB = ");
4047 for (i = 0; i < cmd->cmd_len; i++) {
4048 printk("0x%02x ", cmd->cmnd[i]);
4049 }
4050 printk(" seg_cnt =%d\n", scsi_sg_count(cmd));
4051 printk(" request buffer=0x%p, request buffer len=0x%x\n",
4052 scsi_sglist(cmd), scsi_bufflen(cmd));
4053 /* if (cmd->use_sg)
4054 {
4055 sg = (struct scatterlist *) cmd->request_buffer;
4056 printk(" SG buffer: \n");
4057 qla1280_dump_buffer(1, (char *)sg, (cmd->use_sg*sizeof(struct scatterlist)));
4058 } */
4059 printk(" tag=%d, transfersize=0x%x \n",
4060 cmd->tag, cmd->transfersize);
4061 printk(" SP=0x%p\n", CMD_SP(cmd));
4062 printk(" underflow size = 0x%x, direction=0x%x\n",
4063 cmd->underflow, cmd->sc_data_direction);
4064}
4065
4066/**************************************************************************
4067 * ql1280_dump_device
4068 *
4069 **************************************************************************/
4070static void
4071ql1280_dump_device(struct scsi_qla_host *ha)
4072{
4073
4074 struct scsi_cmnd *cp;
4075 struct srb *sp;
4076 int i;
4077
4078 printk(KERN_DEBUG "Outstanding Commands on controller:\n");
4079
4080 for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
4081 if ((sp = ha->outstanding_cmds[i]) == NULL)
4082 continue;
4083 if ((cp = sp->cmd) == NULL)
4084 continue;
4085 qla1280_print_scsi_cmd(1, cp);
4086 }
4087}
4088#endif
4089
4090
4091enum tokens {
4092 TOKEN_NVRAM,
4093 TOKEN_SYNC,
4094 TOKEN_WIDE,
4095 TOKEN_PPR,
4096 TOKEN_VERBOSE,
4097 TOKEN_DEBUG,
4098};
4099
4100struct setup_tokens {
4101 char *token;
4102 int val;
4103};
4104
4105static struct setup_tokens setup_token[] __initdata =
4106{
4107 { "nvram", TOKEN_NVRAM },
4108 { "sync", TOKEN_SYNC },
4109 { "wide", TOKEN_WIDE },
4110 { "ppr", TOKEN_PPR },
4111 { "verbose", TOKEN_VERBOSE },
4112 { "debug", TOKEN_DEBUG },
4113};
4114
4115
4116/**************************************************************************
4117 * qla1280_setup
4118 *
4119 * Handle boot parameters. This really needs to be changed so one
4120 * can specify per adapter parameters.
4121 **************************************************************************/
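/*
 * An illustrative options string for the parser below ("token:value"
 * pairs separated by ';', where the value may be "yes", "no" or a
 * number):
 *
 *	verbose:yes;nvram:no;sync:0x3
 *
 * enables verbose output, ignores the NVRAM settings and applies a
 * sync mask of 0x3.
 */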
4122static int __init
4123qla1280_setup(char *s)
4124{
4125 char *cp, *ptr;
4126 unsigned long val;
4127 int toke;
4128
4129 cp = s;
4130
4131 while (cp && (ptr = strchr(cp, ':'))) {
4132 ptr++;
4133 if (!strcmp(ptr, "yes")) {
4134 val = 0x10000;
4135 ptr += 3;
4136 } else if (!strcmp(ptr, "no")) {
4137 val = 0;
4138 ptr += 2;
4139 } else
4140 val = simple_strtoul(ptr, &ptr, 0);
4141
4142 switch ((toke = qla1280_get_token(cp))) {
4143 case TOKEN_NVRAM:
4144 if (!val)
4145 driver_setup.no_nvram = 1;
4146 break;
4147 case TOKEN_SYNC:
4148 if (!val)
4149 driver_setup.no_sync = 1;
4150 else if (val != 0x10000)
4151 driver_setup.sync_mask = val;
4152 break;
4153 case TOKEN_WIDE:
4154 if (!val)
4155 driver_setup.no_wide = 1;
4156 else if (val != 0x10000)
4157 driver_setup.wide_mask = val;
4158 break;
4159 case TOKEN_PPR:
4160 if (!val)
4161 driver_setup.no_ppr = 1;
4162 else if (val != 0x10000)
4163 driver_setup.ppr_mask = val;
4164 break;
4165 case TOKEN_VERBOSE:
4166 qla1280_verbose = val;
4167 break;
4168 default:
4169 printk(KERN_INFO "qla1280: unknown boot option %s\n",
4170 cp);
4171 }
4172
4173 cp = strchr(ptr, ';');
4174 if (cp)
4175 cp++;
4176 else {
4177 break;
4178 }
4179 }
4180 return 1;
4181}
4182
4183
4184static int __init
4185qla1280_get_token(char *str)
4186{
4187 char *sep;
4188 long ret = -1;
4189 int i;
4190
4191 sep = strchr(str, ':');
4192
4193 if (sep) {
4194 for (i = 0; i < ARRAY_SIZE(setup_token); i++) {
4195 if (!strncmp(setup_token[i].token, str, (sep - str))) {
4196 ret = setup_token[i].val;
4197 break;
4198 }
4199 }
4200 }
4201
4202 return ret;
4203}
4204
4205
4206static struct scsi_host_template qla1280_driver_template = {
4207 .module = THIS_MODULE,
4208 .proc_name = "qla1280",
4209 .name = "Qlogic ISP 1280/12160",
4210 .info = qla1280_info,
4211 .slave_configure = qla1280_slave_configure,
4212 .queuecommand = qla1280_queuecommand,
4213 .eh_abort_handler = qla1280_eh_abort,
4214 .eh_device_reset_handler= qla1280_eh_device_reset,
4215 .eh_bus_reset_handler = qla1280_eh_bus_reset,
4216 .eh_host_reset_handler = qla1280_eh_adapter_reset,
4217 .bios_param = qla1280_biosparam,
4218 .can_queue = 0xfffff,
4219 .this_id = -1,
4220 .sg_tablesize = SG_ALL,
4221 .cmd_per_lun = 1,
4222 .use_clustering = ENABLE_CLUSTERING,
4223};
4224
4225
4226static int
4227qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4228{
4229 int devnum = id->driver_data;
4230 struct qla_boards *bdp = &ql1280_board_tbl[devnum];
4231 struct Scsi_Host *host;
4232 struct scsi_qla_host *ha;
4233 int error = -ENODEV;
4234
4235 /* Bypass all AMI SUBSYS VENDOR IDs */
4236 if (pdev->subsystem_vendor == PCI_VENDOR_ID_AMI) {
4237 printk(KERN_INFO
4238 "qla1280: Skipping AMI SubSys Vendor ID Chip\n");
4239 goto error;
4240 }
4241
4242 printk(KERN_INFO "qla1280: %s found on PCI bus %i, dev %i\n",
4243 bdp->name, pdev->bus->number, PCI_SLOT(pdev->devfn));
4244
4245 if (pci_enable_device(pdev)) {
4246 printk(KERN_WARNING
4247		       "qla1280: Failed to enable PCI device, aborting.\n");
4248 goto error;
4249 }
4250
4251 pci_set_master(pdev);
4252
4253 error = -ENOMEM;
4254 host = scsi_host_alloc(&qla1280_driver_template, sizeof(*ha));
4255 if (!host) {
4256 printk(KERN_WARNING
4257 "qla1280: Failed to register host, aborting.\n");
4258 goto error_disable_device;
4259 }
4260
4261 ha = (struct scsi_qla_host *)host->hostdata;
4262 memset(ha, 0, sizeof(struct scsi_qla_host));
4263
4264 ha->pdev = pdev;
4265 ha->devnum = devnum; /* specifies microcode load address */
4266
4267#ifdef QLA_64BIT_PTR
4268 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
4269 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32))) {
4270 printk(KERN_WARNING "scsi(%li): Unable to set a "
4271 "suitable DMA mask - aborting\n", ha->host_no);
4272 error = -ENODEV;
4273 goto error_put_host;
4274 }
4275 } else
4276 dprintk(2, "scsi(%li): 64 Bit PCI Addressing Enabled\n",
4277 ha->host_no);
4278#else
4279 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32))) {
4280 printk(KERN_WARNING "scsi(%li): Unable to set a "
4281 "suitable DMA mask - aborting\n", ha->host_no);
4282 error = -ENODEV;
4283 goto error_put_host;
4284 }
4285#endif
4286
4287 ha->request_ring = pci_alloc_consistent(ha->pdev,
4288 ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
4289 &ha->request_dma);
4290 if (!ha->request_ring) {
4291 printk(KERN_INFO "qla1280: Failed to get request memory\n");
4292 goto error_put_host;
4293 }
4294
4295 ha->response_ring = pci_alloc_consistent(ha->pdev,
4296 ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
4297 &ha->response_dma);
4298 if (!ha->response_ring) {
4299 printk(KERN_INFO "qla1280: Failed to get response memory\n");
4300 goto error_free_request_ring;
4301 }
4302
4303 ha->ports = bdp->numPorts;
4304
4305 ha->host = host;
4306 ha->host_no = host->host_no;
4307
4308 host->irq = pdev->irq;
4309 host->max_channel = bdp->numPorts - 1;
4310 host->max_lun = MAX_LUNS - 1;
4311 host->max_id = MAX_TARGETS;
4312 host->max_sectors = 1024;
4313 host->unique_id = host->host_no;
4314
4315 error = -ENODEV;
4316
4317#if MEMORY_MAPPED_IO
4318 ha->mmpbase = pci_ioremap_bar(ha->pdev, 1);
4319 if (!ha->mmpbase) {
4320 printk(KERN_INFO "qla1280: Unable to map I/O memory\n");
4321 goto error_free_response_ring;
4322 }
4323
4324 host->base = (unsigned long)ha->mmpbase;
4325 ha->iobase = (struct device_reg __iomem *)ha->mmpbase;
4326#else
4327 host->io_port = pci_resource_start(ha->pdev, 0);
4328 if (!request_region(host->io_port, 0xff, "qla1280")) {
4329 printk(KERN_INFO "qla1280: Failed to reserve i/o region "
4330 "0x%04lx-0x%04lx - already in use\n",
4331 host->io_port, host->io_port + 0xff);
4332 goto error_free_response_ring;
4333 }
4334
4335 ha->iobase = (struct device_reg *)host->io_port;
4336#endif
4337
4338 INIT_LIST_HEAD(&ha->done_q);
4339
4340 /* Disable ISP interrupts. */
4341 qla1280_disable_intrs(ha);
4342
4343 if (request_irq(pdev->irq, qla1280_intr_handler, IRQF_SHARED,
4344 "qla1280", ha)) {
4345 printk("qla1280 : Failed to reserve interrupt %d already "
4346 "in use\n", pdev->irq);
4347 goto error_release_region;
4348 }
4349
4350	/* load the F/W, read parameters, and init the H/W */
4351 if (qla1280_initialize_adapter(ha)) {
4352 printk(KERN_INFO "qla1x160: Failed to initialize adapter\n");
4353 goto error_free_irq;
4354 }
4355
4356 /* set our host ID (need to do something about our two IDs) */
4357 host->this_id = ha->bus_settings[0].id;
4358
4359 pci_set_drvdata(pdev, host);
4360
4361 error = scsi_add_host(host, &pdev->dev);
4362 if (error)
4363 goto error_disable_adapter;
4364 scsi_scan_host(host);
4365
4366 return 0;
4367
4368 error_disable_adapter:
4369 qla1280_disable_intrs(ha);
4370 error_free_irq:
4371 free_irq(pdev->irq, ha);
4372 error_release_region:
4373#if MEMORY_MAPPED_IO
4374 iounmap(ha->mmpbase);
4375#else
4376 release_region(host->io_port, 0xff);
4377#endif
4378 error_free_response_ring:
4379 pci_free_consistent(ha->pdev,
4380 ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
4381 ha->response_ring, ha->response_dma);
4382 error_free_request_ring:
4383 pci_free_consistent(ha->pdev,
4384 ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
4385 ha->request_ring, ha->request_dma);
4386 error_put_host:
4387 scsi_host_put(host);
4388 error_disable_device:
4389 pci_disable_device(pdev);
4390 error:
4391 return error;
4392}
4393
4394
4395static void
4396qla1280_remove_one(struct pci_dev *pdev)
4397{
4398 struct Scsi_Host *host = pci_get_drvdata(pdev);
4399 struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
4400
4401 scsi_remove_host(host);
4402
4403 qla1280_disable_intrs(ha);
4404
4405 free_irq(pdev->irq, ha);
4406
4407#if MEMORY_MAPPED_IO
4408 iounmap(ha->mmpbase);
4409#else
4410 release_region(host->io_port, 0xff);
4411#endif
4412
4413 pci_free_consistent(ha->pdev,
4414 ((REQUEST_ENTRY_CNT + 1) * (sizeof(request_t))),
4415 ha->request_ring, ha->request_dma);
4416 pci_free_consistent(ha->pdev,
4417 ((RESPONSE_ENTRY_CNT + 1) * (sizeof(struct response))),
4418 ha->response_ring, ha->response_dma);
4419
4420 pci_disable_device(pdev);
4421
4422 scsi_host_put(host);
4423}
4424
4425static struct pci_driver qla1280_pci_driver = {
4426 .name = "qla1280",
4427 .id_table = qla1280_pci_tbl,
4428 .probe = qla1280_probe_one,
4429 .remove = qla1280_remove_one,
4430};
4431
4432static int __init
4433qla1280_init(void)
4434{
4435 if (sizeof(struct srb) > sizeof(struct scsi_pointer)) {
4436 printk(KERN_WARNING
4437 "qla1280: struct srb too big, aborting\n");
4438 return -EINVAL;
4439 }
4440
4441#ifdef MODULE
4442 /*
4443	 * If we are called as a module, the qla1280 pointer may not be NULL
4444	 * and will point to our bootup string, just like on the lilo
4445	 * command line. If not NULL, process this config string with
4446	 * qla1280_setup().
4447 *
4448 * Boot time Options
4449 * To add options at boot time add a line to your lilo.conf file like:
4450 * append="qla1280=verbose,max_tags:{{255,255,255,255},{255,255,255,255}}"
4451 * which will result in the first four devices on the first two
4452	 * controllers being set to a tagged queue depth of 255.
4453 */
4454 if (qla1280)
4455 qla1280_setup(qla1280);
4456#endif
4457
4458 return pci_register_driver(&qla1280_pci_driver);
4459}
4460
4461static void __exit
4462qla1280_exit(void)
4463{
4464 int i;
4465
4466 pci_unregister_driver(&qla1280_pci_driver);
4467 /* release any allocated firmware images */
4468 for (i = 0; i < QL_NUM_FW_IMAGES; i++) {
4469 release_firmware(qla1280_fw_tbl[i].fw);
4470 qla1280_fw_tbl[i].fw = NULL;
4471 }
4472}
4473
4474module_init(qla1280_init);
4475module_exit(qla1280_exit);
4476
4477MODULE_AUTHOR("Qlogic & Jes Sorensen");
4478MODULE_DESCRIPTION("Qlogic ISP SCSI (qla1x80/qla1x160) driver");
4479MODULE_LICENSE("GPL");
4480MODULE_FIRMWARE("qlogic/1040.bin");
4481MODULE_FIRMWARE("qlogic/1280.bin");
4482MODULE_FIRMWARE("qlogic/12160.bin");
4483MODULE_VERSION(QLA1280_VERSION);
4484
4485/*
4486 * Overrides for Emacs so that we almost follow Linus's tabbing style.
4487 * Emacs will notice this stuff at the end of the file and automatically
4488 * adjust the settings for this buffer only. This must remain at the end
4489 * of the file.
4490 * ---------------------------------------------------------------------------
4491 * Local variables:
4492 * c-basic-offset: 8
4493 * tab-width: 8
4494 * End:
4495 */