    1/*
    2 *    Disk Array driver for HP Smart Array SAS controllers
    3 *    Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
    4 *    Copyright 2016 Microsemi Corporation
    5 *    Copyright 2014-2015 PMC-Sierra, Inc.
    6 *    Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
    7 *
    8 *    This program is free software; you can redistribute it and/or modify
    9 *    it under the terms of the GNU General Public License as published by
   10 *    the Free Software Foundation; version 2 of the License.
   11 *
   12 *    This program is distributed in the hope that it will be useful,
   13 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
   14 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
   15 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
   16 *
   17 *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
   18 *
   19 */
   20
   21#include <linux/module.h>
   22#include <linux/interrupt.h>
   23#include <linux/types.h>
   24#include <linux/pci.h>
   25#include <linux/kernel.h>
   26#include <linux/slab.h>
   27#include <linux/delay.h>
   28#include <linux/fs.h>
   29#include <linux/timer.h>
   30#include <linux/init.h>
   31#include <linux/spinlock.h>
   32#include <linux/compat.h>
   33#include <linux/blktrace_api.h>
   34#include <linux/uaccess.h>
   35#include <linux/io.h>
   36#include <linux/dma-mapping.h>
   37#include <linux/completion.h>
   38#include <linux/moduleparam.h>
   39#include <scsi/scsi.h>
   40#include <scsi/scsi_cmnd.h>
   41#include <scsi/scsi_device.h>
   42#include <scsi/scsi_host.h>
   43#include <scsi/scsi_tcq.h>
   44#include <scsi/scsi_eh.h>
   45#include <scsi/scsi_transport_sas.h>
   46#include <scsi/scsi_dbg.h>
   47#include <linux/cciss_ioctl.h>
   48#include <linux/string.h>
   49#include <linux/bitmap.h>
   50#include <linux/atomic.h>
   51#include <linux/jiffies.h>
   52#include <linux/percpu-defs.h>
   53#include <linux/percpu.h>
   54#include <asm/unaligned.h>
   55#include <asm/div64.h>
   56#include "hpsa_cmd.h"
   57#include "hpsa.h"
   58
   59/*
   60 * HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.'
   61 * with an optional trailing '-' followed by a byte value (0-255).
   62 */
   63#define HPSA_DRIVER_VERSION "3.4.20-200"
   64#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
   65#define HPSA "hpsa"
   66
   67/* How long to wait for CISS doorbell communication */
   68#define CLEAR_EVENT_WAIT_INTERVAL 20	/* ms for each msleep() call */
   69#define MODE_CHANGE_WAIT_INTERVAL 10	/* ms for each msleep() call */
   70#define MAX_CLEAR_EVENT_WAIT 30000	/* times 20 ms = 600 s */
   71#define MAX_MODE_CHANGE_WAIT 2000	/* times 10 ms = 20 s */
   72#define MAX_IOCTL_CONFIG_WAIT 1000
   73
    74/* define how many times we will try a command because of bus resets */
   75#define MAX_CMD_RETRIES 3
   76/* How long to wait before giving up on a command */
   77#define HPSA_EH_PTRAID_TIMEOUT (240 * HZ)
   78
   79/* Embedded module documentation macros - see modules.h */
   80MODULE_AUTHOR("Hewlett-Packard Company");
   81MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
   82	HPSA_DRIVER_VERSION);
   83MODULE_VERSION(HPSA_DRIVER_VERSION);
   84MODULE_LICENSE("GPL");
   85MODULE_ALIAS("cciss");
   86
   87static int hpsa_simple_mode;
   88module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
   89MODULE_PARM_DESC(hpsa_simple_mode,
   90	"Use 'simple mode' rather than 'performant mode'");
   91
   92/* define the PCI info for the cards we can control */
   93static const struct pci_device_id hpsa_pci_device_id[] = {
   94	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3241},
   95	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3243},
   96	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3245},
   97	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3247},
   98	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3249},
   99	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324A},
  100	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324B},
  101	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3233},
  102	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3350},
  103	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3351},
  104	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3352},
  105	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3353},
  106	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3354},
  107	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3355},
  108	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3356},
  109	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103c, 0x1920},
  110	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1921},
  111	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1922},
  112	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1923},
  113	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1924},
  114	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103c, 0x1925},
  115	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1926},
  116	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1928},
  117	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1929},
  118	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BD},
  119	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BE},
  120	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BF},
  121	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C0},
  122	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C1},
  123	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C2},
  124	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C3},
  125	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C4},
  126	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C5},
  127	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C6},
  128	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C7},
  129	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C8},
  130	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C9},
  131	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CA},
  132	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CB},
  133	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CC},
  134	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CD},
  135	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CE},
  136	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580},
  137	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0581},
  138	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0582},
  139	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0583},
  140	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0584},
  141	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0585},
  142	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
  143	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
  144	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
  145	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
  146	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
  147	{PCI_VENDOR_ID_HP,     PCI_ANY_ID,	PCI_ANY_ID, PCI_ANY_ID,
  148		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
  149	{PCI_VENDOR_ID_COMPAQ,     PCI_ANY_ID,	PCI_ANY_ID, PCI_ANY_ID,
  150		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
  151	{0,}
  152};
  153
  154MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
  155
  156/*  board_id = Subsystem Device ID & Vendor ID
  157 *  product = Marketing Name for the board
  158 *  access = Address of the struct of function pointers
  159 */
  160static struct board_type products[] = {
  161	{0x40700E11, "Smart Array 5300", &SA5A_access},
  162	{0x40800E11, "Smart Array 5i", &SA5B_access},
  163	{0x40820E11, "Smart Array 532", &SA5B_access},
  164	{0x40830E11, "Smart Array 5312", &SA5B_access},
  165	{0x409A0E11, "Smart Array 641", &SA5A_access},
  166	{0x409B0E11, "Smart Array 642", &SA5A_access},
  167	{0x409C0E11, "Smart Array 6400", &SA5A_access},
  168	{0x409D0E11, "Smart Array 6400 EM", &SA5A_access},
  169	{0x40910E11, "Smart Array 6i", &SA5A_access},
  170	{0x3225103C, "Smart Array P600", &SA5A_access},
  171	{0x3223103C, "Smart Array P800", &SA5A_access},
  172	{0x3234103C, "Smart Array P400", &SA5A_access},
  173	{0x3235103C, "Smart Array P400i", &SA5A_access},
  174	{0x3211103C, "Smart Array E200i", &SA5A_access},
  175	{0x3212103C, "Smart Array E200", &SA5A_access},
  176	{0x3213103C, "Smart Array E200i", &SA5A_access},
  177	{0x3214103C, "Smart Array E200i", &SA5A_access},
  178	{0x3215103C, "Smart Array E200i", &SA5A_access},
  179	{0x3237103C, "Smart Array E500", &SA5A_access},
  180	{0x323D103C, "Smart Array P700m", &SA5A_access},
  181	{0x3241103C, "Smart Array P212", &SA5_access},
  182	{0x3243103C, "Smart Array P410", &SA5_access},
  183	{0x3245103C, "Smart Array P410i", &SA5_access},
  184	{0x3247103C, "Smart Array P411", &SA5_access},
  185	{0x3249103C, "Smart Array P812", &SA5_access},
  186	{0x324A103C, "Smart Array P712m", &SA5_access},
  187	{0x324B103C, "Smart Array P711m", &SA5_access},
  188	{0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
  189	{0x3350103C, "Smart Array P222", &SA5_access},
  190	{0x3351103C, "Smart Array P420", &SA5_access},
  191	{0x3352103C, "Smart Array P421", &SA5_access},
  192	{0x3353103C, "Smart Array P822", &SA5_access},
  193	{0x3354103C, "Smart Array P420i", &SA5_access},
  194	{0x3355103C, "Smart Array P220i", &SA5_access},
  195	{0x3356103C, "Smart Array P721m", &SA5_access},
  196	{0x1920103C, "Smart Array P430i", &SA5_access},
  197	{0x1921103C, "Smart Array P830i", &SA5_access},
  198	{0x1922103C, "Smart Array P430", &SA5_access},
  199	{0x1923103C, "Smart Array P431", &SA5_access},
  200	{0x1924103C, "Smart Array P830", &SA5_access},
  201	{0x1925103C, "Smart Array P831", &SA5_access},
  202	{0x1926103C, "Smart Array P731m", &SA5_access},
  203	{0x1928103C, "Smart Array P230i", &SA5_access},
  204	{0x1929103C, "Smart Array P530", &SA5_access},
  205	{0x21BD103C, "Smart Array P244br", &SA5_access},
  206	{0x21BE103C, "Smart Array P741m", &SA5_access},
  207	{0x21BF103C, "Smart HBA H240ar", &SA5_access},
  208	{0x21C0103C, "Smart Array P440ar", &SA5_access},
  209	{0x21C1103C, "Smart Array P840ar", &SA5_access},
  210	{0x21C2103C, "Smart Array P440", &SA5_access},
  211	{0x21C3103C, "Smart Array P441", &SA5_access},
  212	{0x21C4103C, "Smart Array", &SA5_access},
  213	{0x21C5103C, "Smart Array P841", &SA5_access},
  214	{0x21C6103C, "Smart HBA H244br", &SA5_access},
  215	{0x21C7103C, "Smart HBA H240", &SA5_access},
  216	{0x21C8103C, "Smart HBA H241", &SA5_access},
  217	{0x21C9103C, "Smart Array", &SA5_access},
  218	{0x21CA103C, "Smart Array P246br", &SA5_access},
  219	{0x21CB103C, "Smart Array P840", &SA5_access},
  220	{0x21CC103C, "Smart Array", &SA5_access},
  221	{0x21CD103C, "Smart Array", &SA5_access},
  222	{0x21CE103C, "Smart HBA", &SA5_access},
  223	{0x05809005, "SmartHBA-SA", &SA5_access},
  224	{0x05819005, "SmartHBA-SA 8i", &SA5_access},
  225	{0x05829005, "SmartHBA-SA 8i8e", &SA5_access},
  226	{0x05839005, "SmartHBA-SA 8e", &SA5_access},
  227	{0x05849005, "SmartHBA-SA 16i", &SA5_access},
  228	{0x05859005, "SmartHBA-SA 4i4e", &SA5_access},
  229	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
  230	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
  231	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
  232	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
  233	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
  234	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
  235};
  236
  237static struct scsi_transport_template *hpsa_sas_transport_template;
  238static int hpsa_add_sas_host(struct ctlr_info *h);
  239static void hpsa_delete_sas_host(struct ctlr_info *h);
  240static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
  241			struct hpsa_scsi_dev_t *device);
  242static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device);
  243static struct hpsa_scsi_dev_t
  244	*hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
  245		struct sas_rphy *rphy);
  246
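      /*
       * Sentinel scsi_cmnd pointers stored in CommandList->scsi_cmd to mark
       * whether a command slot is reserved for driver-internal use (busy)
       * or free (idle); see hpsa_is_cmd_idle() below.
       */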
  247#define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy)
  248static const struct scsi_cmnd hpsa_cmd_busy;
  249#define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle)
  250static const struct scsi_cmnd hpsa_cmd_idle;
  251static int number_of_controllers;
  252
  253static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
  254static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
  255static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd,
  256		      void __user *arg);
  257static int hpsa_passthru_ioctl(struct ctlr_info *h,
  258			       IOCTL_Command_struct *iocommand);
  259static int hpsa_big_passthru_ioctl(struct ctlr_info *h,
  260				   BIG_IOCTL_Command_struct *ioc);
  261
  262#ifdef CONFIG_COMPAT
  263static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd,
  264	void __user *arg);
  265#endif
  266
  267static void cmd_free(struct ctlr_info *h, struct CommandList *c);
  268static struct CommandList *cmd_alloc(struct ctlr_info *h);
  269static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
  270static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
  271					    struct scsi_cmnd *scmd);
  272static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
  273	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
  274	int cmd_type);
  275static void hpsa_free_cmd_pool(struct ctlr_info *h);
  276#define VPD_PAGE (1 << 8)
  277#define HPSA_SIMPLE_ERROR_BITS 0x03
  278
  279static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
  280static void hpsa_scan_start(struct Scsi_Host *);
  281static int hpsa_scan_finished(struct Scsi_Host *sh,
  282	unsigned long elapsed_time);
  283static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);
  284
  285static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
  286static int hpsa_slave_alloc(struct scsi_device *sdev);
  287static int hpsa_slave_configure(struct scsi_device *sdev);
  288static void hpsa_slave_destroy(struct scsi_device *sdev);
  289
  290static void hpsa_update_scsi_devices(struct ctlr_info *h);
  291static int check_for_unit_attention(struct ctlr_info *h,
  292	struct CommandList *c);
  293static void check_ioctl_unit_attention(struct ctlr_info *h,
  294	struct CommandList *c);
  295/* performant mode helper functions */
  296static void calc_bucket_map(int *bucket, int num_buckets,
  297	int nsgs, int min_blocks, u32 *bucket_map);
  298static void hpsa_free_performant_mode(struct ctlr_info *h);
  299static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
  300static inline u32 next_command(struct ctlr_info *h, u8 q);
  301static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
  302			       u32 *cfg_base_addr, u64 *cfg_base_addr_index,
  303			       u64 *cfg_offset);
  304static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
  305				    unsigned long *memory_bar);
  306static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id,
  307				bool *legacy_board);
  308static int wait_for_device_to_become_ready(struct ctlr_info *h,
  309					   unsigned char lunaddr[],
  310					   int reply_queue);
  311static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
  312				     int wait_for_ready);
  313static inline void finish_cmd(struct CommandList *c);
  314static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
  315#define BOARD_NOT_READY 0
  316#define BOARD_READY 1
  317static void hpsa_drain_accel_commands(struct ctlr_info *h);
  318static void hpsa_flush_cache(struct ctlr_info *h);
  319static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
  320	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
  321	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
  322static void hpsa_command_resubmit_worker(struct work_struct *work);
  323static u32 lockup_detected(struct ctlr_info *h);
  324static int detect_controller_lockup(struct ctlr_info *h);
  325static void hpsa_disable_rld_caching(struct ctlr_info *h);
  326static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
  327	struct ReportExtendedLUNdata *buf, int bufsize);
  328static bool hpsa_vpd_page_supported(struct ctlr_info *h,
  329	unsigned char scsi3addr[], u8 page);
  330static int hpsa_luns_changed(struct ctlr_info *h);
  331static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
  332			       struct hpsa_scsi_dev_t *dev,
  333			       unsigned char *scsi3addr);
  334
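      /*
       * The driver stores a pointer to its ctlr_info in the Scsi_Host
       * private data area; these helpers recover it from a scsi_device
       * or a Scsi_Host.
       */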
  335static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
  336{
  337	unsigned long *priv = shost_priv(sdev->host);
  338	return (struct ctlr_info *) *priv;
  339}
  340
  341static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
  342{
  343	unsigned long *priv = shost_priv(sh);
  344	return (struct ctlr_info *) *priv;
  345}
  346
  347static inline bool hpsa_is_cmd_idle(struct CommandList *c)
  348{
  349	return c->scsi_cmd == SCSI_CMD_IDLE;
  350}
  351
  352/* extract sense key, asc, and ascq from sense data.  -1 means invalid. */
  353static void decode_sense_data(const u8 *sense_data, int sense_data_len,
  354			u8 *sense_key, u8 *asc, u8 *ascq)
  355{
  356	struct scsi_sense_hdr sshdr;
  357	bool rc;
  358
  359	*sense_key = -1;
  360	*asc = -1;
  361	*ascq = -1;
  362
  363	if (sense_data_len < 1)
  364		return;
  365
  366	rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
  367	if (rc) {
  368		*sense_key = sshdr.sense_key;
  369		*asc = sshdr.asc;
  370		*ascq = sshdr.ascq;
  371	}
  372}
  373
  374static int check_for_unit_attention(struct ctlr_info *h,
  375	struct CommandList *c)
  376{
  377	u8 sense_key, asc, ascq;
  378	int sense_len;
  379
  380	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
  381		sense_len = sizeof(c->err_info->SenseInfo);
  382	else
  383		sense_len = c->err_info->SenseLen;
  384
  385	decode_sense_data(c->err_info->SenseInfo, sense_len,
  386				&sense_key, &asc, &ascq);
  387	if (sense_key != UNIT_ATTENTION || asc == 0xff)
  388		return 0;
  389
  390	switch (asc) {
  391	case STATE_CHANGED:
  392		dev_warn(&h->pdev->dev,
  393			"%s: a state change detected, command retried\n",
  394			h->devname);
  395		break;
  396	case LUN_FAILED:
  397		dev_warn(&h->pdev->dev,
  398			"%s: LUN failure detected\n", h->devname);
  399		break;
  400	case REPORT_LUNS_CHANGED:
  401		dev_warn(&h->pdev->dev,
  402			"%s: report LUN data changed\n", h->devname);
  403	/*
  404	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
  405	 * target (array) devices.
  406	 */
  407		break;
  408	case POWER_OR_RESET:
  409		dev_warn(&h->pdev->dev,
  410			"%s: a power on or device reset detected\n",
  411			h->devname);
  412		break;
  413	case UNIT_ATTENTION_CLEARED:
  414		dev_warn(&h->pdev->dev,
  415			"%s: unit attention cleared by another initiator\n",
  416			h->devname);
  417		break;
  418	default:
  419		dev_warn(&h->pdev->dev,
  420			"%s: unknown unit attention detected\n",
  421			h->devname);
  422		break;
  423	}
  424	return 1;
  425}
  426
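      /*
       * Return 1 if the command completed with a SCSI BUSY or TASK SET FULL
       * status (a candidate for retry), 0 otherwise.
       */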
  427static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
  428{
  429	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
  430		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
  431		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
  432		return 0;
   433	dev_warn(&h->pdev->dev, HPSA " device busy");
  434	return 1;
  435}
  436
  437static u32 lockup_detected(struct ctlr_info *h);
  438static ssize_t host_show_lockup_detected(struct device *dev,
  439		struct device_attribute *attr, char *buf)
  440{
  441	int ld;
  442	struct ctlr_info *h;
  443	struct Scsi_Host *shost = class_to_shost(dev);
  444
  445	h = shost_to_hba(shost);
  446	ld = lockup_detected(h);
  447
  448	return sprintf(buf, "ld=%d\n", ld);
  449}
  450
  451static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
  452					 struct device_attribute *attr,
  453					 const char *buf, size_t count)
  454{
  455	int status, len;
  456	struct ctlr_info *h;
  457	struct Scsi_Host *shost = class_to_shost(dev);
  458	char tmpbuf[10];
  459
  460	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
  461		return -EACCES;
  462	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
  463	strncpy(tmpbuf, buf, len);
  464	tmpbuf[len] = '\0';
  465	if (sscanf(tmpbuf, "%d", &status) != 1)
  466		return -EINVAL;
  467	h = shost_to_hba(shost);
  468	h->acciopath_status = !!status;
  469	dev_warn(&h->pdev->dev,
  470		"hpsa: HP SSD Smart Path %s via sysfs update.\n",
  471		h->acciopath_status ? "enabled" : "disabled");
  472	return count;
  473}
  474
  475static ssize_t host_store_raid_offload_debug(struct device *dev,
  476					 struct device_attribute *attr,
  477					 const char *buf, size_t count)
  478{
  479	int debug_level, len;
  480	struct ctlr_info *h;
  481	struct Scsi_Host *shost = class_to_shost(dev);
  482	char tmpbuf[10];
  483
  484	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
  485		return -EACCES;
  486	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
  487	strncpy(tmpbuf, buf, len);
  488	tmpbuf[len] = '\0';
  489	if (sscanf(tmpbuf, "%d", &debug_level) != 1)
  490		return -EINVAL;
  491	if (debug_level < 0)
  492		debug_level = 0;
  493	h = shost_to_hba(shost);
  494	h->raid_offload_debug = debug_level;
  495	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
  496		h->raid_offload_debug);
  497	return count;
  498}
  499
  500static ssize_t host_store_rescan(struct device *dev,
  501				 struct device_attribute *attr,
  502				 const char *buf, size_t count)
  503{
  504	struct ctlr_info *h;
  505	struct Scsi_Host *shost = class_to_shost(dev);
  506	h = shost_to_hba(shost);
  507	hpsa_scan_start(h->scsi_host);
  508	return count;
  509}
  510
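      /*
       * Disable ioaccel (HP SSD Smart Path) offload for this device so that
       * subsequent I/O is routed through the normal RAID path.
       */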
  511static void hpsa_turn_off_ioaccel_for_device(struct hpsa_scsi_dev_t *device)
  512{
  513	device->offload_enabled = 0;
  514	device->offload_to_be_enabled = 0;
  515}
  516
  517static ssize_t host_show_firmware_revision(struct device *dev,
  518	     struct device_attribute *attr, char *buf)
  519{
  520	struct ctlr_info *h;
  521	struct Scsi_Host *shost = class_to_shost(dev);
  522	unsigned char *fwrev;
  523
  524	h = shost_to_hba(shost);
  525	if (!h->hba_inquiry_data)
  526		return 0;
  527	fwrev = &h->hba_inquiry_data[32];
  528	return snprintf(buf, 20, "%c%c%c%c\n",
  529		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
  530}
  531
  532static ssize_t host_show_commands_outstanding(struct device *dev,
  533	     struct device_attribute *attr, char *buf)
  534{
  535	struct Scsi_Host *shost = class_to_shost(dev);
  536	struct ctlr_info *h = shost_to_hba(shost);
  537
  538	return snprintf(buf, 20, "%d\n",
  539			atomic_read(&h->commands_outstanding));
  540}
  541
  542static ssize_t host_show_transport_mode(struct device *dev,
  543	struct device_attribute *attr, char *buf)
  544{
  545	struct ctlr_info *h;
  546	struct Scsi_Host *shost = class_to_shost(dev);
  547
  548	h = shost_to_hba(shost);
  549	return snprintf(buf, 20, "%s\n",
  550		h->transMethod & CFGTBL_Trans_Performant ?
  551			"performant" : "simple");
  552}
  553
  554static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
  555	struct device_attribute *attr, char *buf)
  556{
  557	struct ctlr_info *h;
  558	struct Scsi_Host *shost = class_to_shost(dev);
  559
  560	h = shost_to_hba(shost);
  561	return snprintf(buf, 30, "HP SSD Smart Path %s\n",
  562		(h->acciopath_status == 1) ?  "enabled" : "disabled");
  563}
  564
  565/* List of controllers which cannot be hard reset on kexec with reset_devices */
  566static u32 unresettable_controller[] = {
  567	0x324a103C, /* Smart Array P712m */
  568	0x324b103C, /* Smart Array P711m */
  569	0x3223103C, /* Smart Array P800 */
  570	0x3234103C, /* Smart Array P400 */
  571	0x3235103C, /* Smart Array P400i */
  572	0x3211103C, /* Smart Array E200i */
  573	0x3212103C, /* Smart Array E200 */
  574	0x3213103C, /* Smart Array E200i */
  575	0x3214103C, /* Smart Array E200i */
  576	0x3215103C, /* Smart Array E200i */
  577	0x3237103C, /* Smart Array E500 */
  578	0x323D103C, /* Smart Array P700m */
  579	0x40800E11, /* Smart Array 5i */
  580	0x409C0E11, /* Smart Array 6400 */
  581	0x409D0E11, /* Smart Array 6400 EM */
  582	0x40700E11, /* Smart Array 5300 */
  583	0x40820E11, /* Smart Array 532 */
  584	0x40830E11, /* Smart Array 5312 */
  585	0x409A0E11, /* Smart Array 641 */
  586	0x409B0E11, /* Smart Array 642 */
  587	0x40910E11, /* Smart Array 6i */
  588};
  589
  590/* List of controllers which cannot even be soft reset */
  591static u32 soft_unresettable_controller[] = {
  592	0x40800E11, /* Smart Array 5i */
  593	0x40700E11, /* Smart Array 5300 */
  594	0x40820E11, /* Smart Array 532 */
  595	0x40830E11, /* Smart Array 5312 */
  596	0x409A0E11, /* Smart Array 641 */
  597	0x409B0E11, /* Smart Array 642 */
  598	0x40910E11, /* Smart Array 6i */
  599	/* Exclude 640x boards.  These are two pci devices in one slot
  600	 * which share a battery backed cache module.  One controls the
  601	 * cache, the other accesses the cache through the one that controls
  602	 * it.  If we reset the one controlling the cache, the other will
  603	 * likely not be happy.  Just forbid resetting this conjoined mess.
  604	 * The 640x isn't really supported by hpsa anyway.
  605	 */
  606	0x409C0E11, /* Smart Array 6400 */
  607	0x409D0E11, /* Smart Array 6400 EM */
  608};
  609
  610static int board_id_in_array(u32 a[], int nelems, u32 board_id)
  611{
  612	int i;
  613
  614	for (i = 0; i < nelems; i++)
  615		if (a[i] == board_id)
  616			return 1;
  617	return 0;
  618}
  619
  620static int ctlr_is_hard_resettable(u32 board_id)
  621{
  622	return !board_id_in_array(unresettable_controller,
  623			ARRAY_SIZE(unresettable_controller), board_id);
  624}
  625
  626static int ctlr_is_soft_resettable(u32 board_id)
  627{
  628	return !board_id_in_array(soft_unresettable_controller,
  629			ARRAY_SIZE(soft_unresettable_controller), board_id);
  630}
  631
  632static int ctlr_is_resettable(u32 board_id)
  633{
  634	return ctlr_is_hard_resettable(board_id) ||
  635		ctlr_is_soft_resettable(board_id);
  636}
  637
  638static ssize_t host_show_resettable(struct device *dev,
  639	struct device_attribute *attr, char *buf)
  640{
  641	struct ctlr_info *h;
  642	struct Scsi_Host *shost = class_to_shost(dev);
  643
  644	h = shost_to_hba(shost);
  645	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
  646}
  647
  648static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
  649{
  650	return (scsi3addr[3] & 0xC0) == 0x40;
  651}
  652
  653static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
  654	"1(+0)ADM", "UNKNOWN", "PHYS DRV"
  655};
  656#define HPSA_RAID_0	0
  657#define HPSA_RAID_4	1
  658#define HPSA_RAID_1	2	/* also used for RAID 10 */
  659#define HPSA_RAID_5	3	/* also used for RAID 50 */
  660#define HPSA_RAID_51	4
  661#define HPSA_RAID_6	5	/* also used for RAID 60 */
  662#define HPSA_RAID_ADM	6	/* also used for RAID 1+0 ADM */
  663#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 2)
  664#define PHYSICAL_DRIVE (ARRAY_SIZE(raid_label) - 1)
  665
  666static inline bool is_logical_device(struct hpsa_scsi_dev_t *device)
  667{
  668	return !device->physical_device;
  669}
  670
  671static ssize_t raid_level_show(struct device *dev,
  672	     struct device_attribute *attr, char *buf)
  673{
  674	ssize_t l = 0;
  675	unsigned char rlevel;
  676	struct ctlr_info *h;
  677	struct scsi_device *sdev;
  678	struct hpsa_scsi_dev_t *hdev;
  679	unsigned long flags;
  680
  681	sdev = to_scsi_device(dev);
  682	h = sdev_to_hba(sdev);
  683	spin_lock_irqsave(&h->lock, flags);
  684	hdev = sdev->hostdata;
  685	if (!hdev) {
  686		spin_unlock_irqrestore(&h->lock, flags);
  687		return -ENODEV;
  688	}
  689
  690	/* Is this even a logical drive? */
  691	if (!is_logical_device(hdev)) {
  692		spin_unlock_irqrestore(&h->lock, flags);
  693		l = snprintf(buf, PAGE_SIZE, "N/A\n");
  694		return l;
  695	}
  696
  697	rlevel = hdev->raid_level;
  698	spin_unlock_irqrestore(&h->lock, flags);
  699	if (rlevel > RAID_UNKNOWN)
  700		rlevel = RAID_UNKNOWN;
  701	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
  702	return l;
  703}
  704
  705static ssize_t lunid_show(struct device *dev,
  706	     struct device_attribute *attr, char *buf)
  707{
  708	struct ctlr_info *h;
  709	struct scsi_device *sdev;
  710	struct hpsa_scsi_dev_t *hdev;
  711	unsigned long flags;
  712	unsigned char lunid[8];
  713
  714	sdev = to_scsi_device(dev);
  715	h = sdev_to_hba(sdev);
  716	spin_lock_irqsave(&h->lock, flags);
  717	hdev = sdev->hostdata;
  718	if (!hdev) {
  719		spin_unlock_irqrestore(&h->lock, flags);
  720		return -ENODEV;
  721	}
  722	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
  723	spin_unlock_irqrestore(&h->lock, flags);
  724	return snprintf(buf, 20, "0x%8phN\n", lunid);
  725}
  726
  727static ssize_t unique_id_show(struct device *dev,
  728	     struct device_attribute *attr, char *buf)
  729{
  730	struct ctlr_info *h;
  731	struct scsi_device *sdev;
  732	struct hpsa_scsi_dev_t *hdev;
  733	unsigned long flags;
  734	unsigned char sn[16];
  735
  736	sdev = to_scsi_device(dev);
  737	h = sdev_to_hba(sdev);
  738	spin_lock_irqsave(&h->lock, flags);
  739	hdev = sdev->hostdata;
  740	if (!hdev) {
  741		spin_unlock_irqrestore(&h->lock, flags);
  742		return -ENODEV;
  743	}
  744	memcpy(sn, hdev->device_id, sizeof(sn));
  745	spin_unlock_irqrestore(&h->lock, flags);
  746	return snprintf(buf, 16 * 2 + 2,
  747			"%02X%02X%02X%02X%02X%02X%02X%02X"
  748			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
  749			sn[0], sn[1], sn[2], sn[3],
  750			sn[4], sn[5], sn[6], sn[7],
  751			sn[8], sn[9], sn[10], sn[11],
  752			sn[12], sn[13], sn[14], sn[15]);
  753}
  754
  755static ssize_t sas_address_show(struct device *dev,
  756	      struct device_attribute *attr, char *buf)
  757{
  758	struct ctlr_info *h;
  759	struct scsi_device *sdev;
  760	struct hpsa_scsi_dev_t *hdev;
  761	unsigned long flags;
  762	u64 sas_address;
  763
  764	sdev = to_scsi_device(dev);
  765	h = sdev_to_hba(sdev);
  766	spin_lock_irqsave(&h->lock, flags);
  767	hdev = sdev->hostdata;
  768	if (!hdev || is_logical_device(hdev) || !hdev->expose_device) {
  769		spin_unlock_irqrestore(&h->lock, flags);
  770		return -ENODEV;
  771	}
  772	sas_address = hdev->sas_address;
  773	spin_unlock_irqrestore(&h->lock, flags);
  774
  775	return snprintf(buf, PAGE_SIZE, "0x%016llx\n", sas_address);
  776}
  777
  778static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
  779	     struct device_attribute *attr, char *buf)
  780{
  781	struct ctlr_info *h;
  782	struct scsi_device *sdev;
  783	struct hpsa_scsi_dev_t *hdev;
  784	unsigned long flags;
  785	int offload_enabled;
  786
  787	sdev = to_scsi_device(dev);
  788	h = sdev_to_hba(sdev);
  789	spin_lock_irqsave(&h->lock, flags);
  790	hdev = sdev->hostdata;
  791	if (!hdev) {
  792		spin_unlock_irqrestore(&h->lock, flags);
  793		return -ENODEV;
  794	}
  795	offload_enabled = hdev->offload_enabled;
  796	spin_unlock_irqrestore(&h->lock, flags);
  797
  798	if (hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC)
  799		return snprintf(buf, 20, "%d\n", offload_enabled);
  800	else
  801		return snprintf(buf, 40, "%s\n",
  802				"Not applicable for a controller");
  803}
  804
  805#define MAX_PATHS 8
  806static ssize_t path_info_show(struct device *dev,
  807	     struct device_attribute *attr, char *buf)
  808{
  809	struct ctlr_info *h;
  810	struct scsi_device *sdev;
  811	struct hpsa_scsi_dev_t *hdev;
  812	unsigned long flags;
  813	int i;
  814	int output_len = 0;
  815	u8 box;
  816	u8 bay;
  817	u8 path_map_index = 0;
  818	char *active;
  819	unsigned char phys_connector[2];
  820
  821	sdev = to_scsi_device(dev);
  822	h = sdev_to_hba(sdev);
  823	spin_lock_irqsave(&h->devlock, flags);
  824	hdev = sdev->hostdata;
  825	if (!hdev) {
  826		spin_unlock_irqrestore(&h->devlock, flags);
  827		return -ENODEV;
  828	}
  829
  830	bay = hdev->bay;
  831	for (i = 0; i < MAX_PATHS; i++) {
  832		path_map_index = 1<<i;
  833		if (i == hdev->active_path_index)
  834			active = "Active";
  835		else if (hdev->path_map & path_map_index)
  836			active = "Inactive";
  837		else
  838			continue;
  839
  840		output_len += scnprintf(buf + output_len,
  841				PAGE_SIZE - output_len,
  842				"[%d:%d:%d:%d] %20.20s ",
  843				h->scsi_host->host_no,
  844				hdev->bus, hdev->target, hdev->lun,
  845				scsi_device_type(hdev->devtype));
  846
  847		if (hdev->devtype == TYPE_RAID || is_logical_device(hdev)) {
  848			output_len += scnprintf(buf + output_len,
  849						PAGE_SIZE - output_len,
  850						"%s\n", active);
  851			continue;
  852		}
  853
  854		box = hdev->box[i];
  855		memcpy(&phys_connector, &hdev->phys_connector[i],
  856			sizeof(phys_connector));
  857		if (phys_connector[0] < '0')
  858			phys_connector[0] = '0';
  859		if (phys_connector[1] < '0')
  860			phys_connector[1] = '0';
  861		output_len += scnprintf(buf + output_len,
  862				PAGE_SIZE - output_len,
  863				"PORT: %.2s ",
  864				phys_connector);
  865		if ((hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC) &&
  866			hdev->expose_device) {
  867			if (box == 0 || box == 0xFF) {
  868				output_len += scnprintf(buf + output_len,
  869					PAGE_SIZE - output_len,
  870					"BAY: %hhu %s\n",
  871					bay, active);
  872			} else {
  873				output_len += scnprintf(buf + output_len,
  874					PAGE_SIZE - output_len,
  875					"BOX: %hhu BAY: %hhu %s\n",
  876					box, bay, active);
  877			}
  878		} else if (box != 0 && box != 0xFF) {
  879			output_len += scnprintf(buf + output_len,
  880				PAGE_SIZE - output_len, "BOX: %hhu %s\n",
  881				box, active);
  882		} else
  883			output_len += scnprintf(buf + output_len,
  884				PAGE_SIZE - output_len, "%s\n", active);
  885	}
  886
  887	spin_unlock_irqrestore(&h->devlock, flags);
  888	return output_len;
  889}
  890
  891static ssize_t host_show_ctlr_num(struct device *dev,
  892	struct device_attribute *attr, char *buf)
  893{
  894	struct ctlr_info *h;
  895	struct Scsi_Host *shost = class_to_shost(dev);
  896
  897	h = shost_to_hba(shost);
  898	return snprintf(buf, 20, "%d\n", h->ctlr);
  899}
  900
  901static ssize_t host_show_legacy_board(struct device *dev,
  902	struct device_attribute *attr, char *buf)
  903{
  904	struct ctlr_info *h;
  905	struct Scsi_Host *shost = class_to_shost(dev);
  906
  907	h = shost_to_hba(shost);
  908	return snprintf(buf, 20, "%d\n", h->legacy_board ? 1 : 0);
  909}
  910
  911static DEVICE_ATTR_RO(raid_level);
  912static DEVICE_ATTR_RO(lunid);
  913static DEVICE_ATTR_RO(unique_id);
  914static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
  915static DEVICE_ATTR_RO(sas_address);
  916static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
  917			host_show_hp_ssd_smart_path_enabled, NULL);
  918static DEVICE_ATTR_RO(path_info);
  919static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
  920		host_show_hp_ssd_smart_path_status,
  921		host_store_hp_ssd_smart_path_status);
  922static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
  923			host_store_raid_offload_debug);
  924static DEVICE_ATTR(firmware_revision, S_IRUGO,
  925	host_show_firmware_revision, NULL);
  926static DEVICE_ATTR(commands_outstanding, S_IRUGO,
  927	host_show_commands_outstanding, NULL);
  928static DEVICE_ATTR(transport_mode, S_IRUGO,
  929	host_show_transport_mode, NULL);
  930static DEVICE_ATTR(resettable, S_IRUGO,
  931	host_show_resettable, NULL);
  932static DEVICE_ATTR(lockup_detected, S_IRUGO,
  933	host_show_lockup_detected, NULL);
  934static DEVICE_ATTR(ctlr_num, S_IRUGO,
  935	host_show_ctlr_num, NULL);
  936static DEVICE_ATTR(legacy_board, S_IRUGO,
  937	host_show_legacy_board, NULL);
  938
  939static struct attribute *hpsa_sdev_attrs[] = {
  940	&dev_attr_raid_level.attr,
  941	&dev_attr_lunid.attr,
  942	&dev_attr_unique_id.attr,
  943	&dev_attr_hp_ssd_smart_path_enabled.attr,
  944	&dev_attr_path_info.attr,
  945	&dev_attr_sas_address.attr,
  946	NULL,
  947};
  948
  949ATTRIBUTE_GROUPS(hpsa_sdev);
  950
  951static struct attribute *hpsa_shost_attrs[] = {
  952	&dev_attr_rescan.attr,
  953	&dev_attr_firmware_revision.attr,
  954	&dev_attr_commands_outstanding.attr,
  955	&dev_attr_transport_mode.attr,
  956	&dev_attr_resettable.attr,
  957	&dev_attr_hp_ssd_smart_path_status.attr,
  958	&dev_attr_raid_offload_debug.attr,
  959	&dev_attr_lockup_detected.attr,
  960	&dev_attr_ctlr_num.attr,
  961	&dev_attr_legacy_board.attr,
  962	NULL,
  963};
  964
  965ATTRIBUTE_GROUPS(hpsa_shost);
  966
  967#define HPSA_NRESERVED_CMDS	(HPSA_CMDS_RESERVED_FOR_DRIVER +\
  968				 HPSA_MAX_CONCURRENT_PASSTHRUS)
  969
  970static const struct scsi_host_template hpsa_driver_template = {
  971	.module			= THIS_MODULE,
  972	.name			= HPSA,
  973	.proc_name		= HPSA,
  974	.queuecommand		= hpsa_scsi_queue_command,
  975	.scan_start		= hpsa_scan_start,
  976	.scan_finished		= hpsa_scan_finished,
  977	.change_queue_depth	= hpsa_change_queue_depth,
  978	.this_id		= -1,
  979	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
  980	.ioctl			= hpsa_ioctl,
  981	.slave_alloc		= hpsa_slave_alloc,
  982	.slave_configure	= hpsa_slave_configure,
  983	.slave_destroy		= hpsa_slave_destroy,
  984#ifdef CONFIG_COMPAT
  985	.compat_ioctl		= hpsa_compat_ioctl,
  986#endif
  987	.sdev_groups = hpsa_sdev_groups,
  988	.shost_groups = hpsa_shost_groups,
  989	.max_sectors = 2048,
  990	.no_write_same = 1,
  991};
  992
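      /*
       * Fetch the tag of the next completed command from reply queue q.
       * Simple and ioaccel1 transports delegate to the controller's
       * command_completed access method; performant mode consumes entries
       * from the host-memory reply ring, using the low toggle bit to detect
       * valid entries across wraparound.
       */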
  993static inline u32 next_command(struct ctlr_info *h, u8 q)
  994{
  995	u32 a;
  996	struct reply_queue_buffer *rq = &h->reply_queue[q];
  997
  998	if (h->transMethod & CFGTBL_Trans_io_accel1)
  999		return h->access.command_completed(h, q);
 1000
 1001	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
 1002		return h->access.command_completed(h, q);
 1003
 1004	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
 1005		a = rq->head[rq->current_entry];
 1006		rq->current_entry++;
 1007		atomic_dec(&h->commands_outstanding);
 1008	} else {
 1009		a = FIFO_EMPTY;
 1010	}
 1011	/* Check for wraparound */
 1012	if (rq->current_entry == h->max_commands) {
 1013		rq->current_entry = 0;
 1014		rq->wraparound ^= 1;
 1015	}
 1016	return a;
 1017}
 1018
 1019/*
 1020 * There are some special bits in the bus address of the
 1021 * command that we have to set for the controller to know
 1022 * how to process the command:
 1023 *
 1024 * Normal performant mode:
 1025 * bit 0: 1 means performant mode, 0 means simple mode.
 1026 * bits 1-3 = block fetch table entry
 1027 * bits 4-6 = command type (== 0)
 1028 *
 1029 * ioaccel1 mode:
 1030 * bit 0 = "performant mode" bit.
 1031 * bits 1-3 = block fetch table entry
 1032 * bits 4-6 = command type (== 110)
 1033 * (command type is needed because ioaccel1 mode
 1034 * commands are submitted through the same register as normal
 1035 * mode commands, so this is how the controller knows whether
 1036 * the command is normal mode or ioaccel1 mode.)
 1037 *
 1038 * ioaccel2 mode:
 1039 * bit 0 = "performant mode" bit.
 1040 * bits 1-4 = block fetch table entry (note extra bit)
 1041 * bits 4-6 = not needed, because ioaccel2 mode has
 1042 * a separate special register for submitting commands.
 1043 */
 1044
 1045/*
  1046 * set_performant_mode: Modify the tag for cciss performant mode:
  1047 * set bit 0 for the pull model, bits 3-1 for the block fetch
  1048 * register number.
 1049 */
 1050#define DEFAULT_REPLY_QUEUE (-1)
 1051static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
 1052					int reply_queue)
 1053{
 1054	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
 1055		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
 1056		if (unlikely(!h->msix_vectors))
 1057			return;
 1058		c->Header.ReplyQueue = reply_queue;
 1059	}
 1060}
 1061
 1062static void set_ioaccel1_performant_mode(struct ctlr_info *h,
 1063						struct CommandList *c,
 1064						int reply_queue)
 1065{
 1066	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
 1067
 1068	/*
 1069	 * Tell the controller to post the reply to the queue for this
 1070	 * processor.  This seems to give the best I/O throughput.
 1071	 */
 1072	cp->ReplyQueue = reply_queue;
 1073	/*
 1074	 * Set the bits in the address sent down to include:
 1075	 *  - performant mode bit (bit 0)
 1076	 *  - pull count (bits 1-3)
 1077	 *  - command type (bits 4-6)
 1078	 */
 1079	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
 1080					IOACCEL1_BUSADDR_CMDTYPE;
 1081}
 1082
 1083static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
 1084						struct CommandList *c,
 1085						int reply_queue)
 1086{
 1087	struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *)
 1088		&h->ioaccel2_cmd_pool[c->cmdindex];
 1089
 1090	/* Tell the controller to post the reply to the queue for this
 1091	 * processor.  This seems to give the best I/O throughput.
 1092	 */
 1093	cp->reply_queue = reply_queue;
 1094	/* Set the bits in the address sent down to include:
 1095	 *  - performant mode bit not used in ioaccel mode 2
 1096	 *  - pull count (bits 0-3)
 1097	 *  - command type isn't needed for ioaccel2
 1098	 */
 1099	c->busaddr |= h->ioaccel2_blockFetchTable[0];
 1100}
 1101
 1102static void set_ioaccel2_performant_mode(struct ctlr_info *h,
 1103						struct CommandList *c,
 1104						int reply_queue)
 1105{
 1106	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
 1107
 1108	/*
 1109	 * Tell the controller to post the reply to the queue for this
 1110	 * processor.  This seems to give the best I/O throughput.
 1111	 */
 1112	cp->reply_queue = reply_queue;
 1113	/*
 1114	 * Set the bits in the address sent down to include:
 1115	 *  - performant mode bit not used in ioaccel mode 2
 1116	 *  - pull count (bits 0-3)
 1117	 *  - command type isn't needed for ioaccel2
 1118	 */
 1119	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
 1120}
 1121
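      /*
       * A firmware flash is submitted as a BMIC WRITE with the
       * flash-firmware subcommand in CDB byte 6.
       */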
 1122static int is_firmware_flash_cmd(u8 *cdb)
 1123{
 1124	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
 1125}
 1126
 1127/*
 1128 * During firmware flash, the heartbeat register may not update as frequently
  1129 * as it should.  So we dial down lockup detection during firmware flash, and
 1130 * dial it back up when firmware flash completes.
 1131 */
 1132#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
 1133#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
 1134#define HPSA_EVENT_MONITOR_INTERVAL (15 * HZ)
 1135static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
 1136		struct CommandList *c)
 1137{
 1138	if (!is_firmware_flash_cmd(c->Request.CDB))
 1139		return;
 1140	atomic_inc(&h->firmware_flash_in_progress);
 1141	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
 1142}
 1143
 1144static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
 1145		struct CommandList *c)
 1146{
 1147	if (is_firmware_flash_cmd(c->Request.CDB) &&
 1148		atomic_dec_and_test(&h->firmware_flash_in_progress))
 1149		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
 1150}
 1151
 1152static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
 1153	struct CommandList *c, int reply_queue)
 1154{
 1155	dial_down_lockup_detection_during_fw_flash(h, c);
 1156	atomic_inc(&h->commands_outstanding);
 1157	/*
 1158	 * Check to see if the command is being retried.
 1159	 */
 1160	if (c->device && !c->retry_pending)
 1161		atomic_inc(&c->device->commands_outstanding);
 1162
 1163	reply_queue = h->reply_map[raw_smp_processor_id()];
 1164	switch (c->cmd_type) {
 1165	case CMD_IOACCEL1:
 1166		set_ioaccel1_performant_mode(h, c, reply_queue);
 1167		writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
 1168		break;
 1169	case CMD_IOACCEL2:
 1170		set_ioaccel2_performant_mode(h, c, reply_queue);
 1171		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
 1172		break;
 1173	case IOACCEL2_TMF:
 1174		set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
 1175		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
 1176		break;
 1177	default:
 1178		set_performant_mode(h, c, reply_queue);
 1179		h->access.submit_command(h, c);
 1180	}
 1181}
 1182
 1183static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
 1184{
 1185	__enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
 1186}
 1187
 1188static inline int is_hba_lunid(unsigned char scsi3addr[])
 1189{
 1190	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
 1191}
 1192
 1193static inline int is_scsi_rev_5(struct ctlr_info *h)
 1194{
 1195	if (!h->hba_inquiry_data)
 1196		return 0;
 1197	if ((h->hba_inquiry_data[2] & 0x07) == 5)
 1198		return 1;
 1199	return 0;
 1200}
 1201
 1202static int hpsa_find_target_lun(struct ctlr_info *h,
 1203	unsigned char scsi3addr[], int bus, int *target, int *lun)
 1204{
 1205	/* finds an unused bus, target, lun for a new physical device
 1206	 * assumes h->devlock is held
 1207	 */
 1208	int i, found = 0;
 1209	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);
 1210
 1211	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);
 1212
 1213	for (i = 0; i < h->ndevices; i++) {
 1214		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
 1215			__set_bit(h->dev[i]->target, lun_taken);
 1216	}
 1217
 1218	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
 1219	if (i < HPSA_MAX_DEVICES) {
 1220		/* *bus = 1; */
 1221		*target = i;
 1222		*lun = 0;
 1223		found = 1;
 1224	}
 1225	return !found;
 1226}
 1227
 1228static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
 1229	struct hpsa_scsi_dev_t *dev, char *description)
 1230{
 1231#define LABEL_SIZE 25
 1232	char label[LABEL_SIZE];
 1233
 1234	if (h == NULL || h->pdev == NULL || h->scsi_host == NULL)
 1235		return;
 1236
 1237	switch (dev->devtype) {
 1238	case TYPE_RAID:
 1239		snprintf(label, LABEL_SIZE, "controller");
 1240		break;
 1241	case TYPE_ENCLOSURE:
 1242		snprintf(label, LABEL_SIZE, "enclosure");
 1243		break;
 1244	case TYPE_DISK:
 1245	case TYPE_ZBC:
 1246		if (dev->external)
 1247			snprintf(label, LABEL_SIZE, "external");
 1248		else if (!is_logical_dev_addr_mode(dev->scsi3addr))
 1249			snprintf(label, LABEL_SIZE, "%s",
 1250				raid_label[PHYSICAL_DRIVE]);
 1251		else
 1252			snprintf(label, LABEL_SIZE, "RAID-%s",
 1253				dev->raid_level > RAID_UNKNOWN ? "?" :
 1254				raid_label[dev->raid_level]);
 1255		break;
 1256	case TYPE_ROM:
 1257		snprintf(label, LABEL_SIZE, "rom");
 1258		break;
 1259	case TYPE_TAPE:
 1260		snprintf(label, LABEL_SIZE, "tape");
 1261		break;
 1262	case TYPE_MEDIUM_CHANGER:
 1263		snprintf(label, LABEL_SIZE, "changer");
 1264		break;
 1265	default:
 1266		snprintf(label, LABEL_SIZE, "UNKNOWN");
 1267		break;
 1268	}
 1269
 1270	dev_printk(level, &h->pdev->dev,
 1271			"scsi %d:%d:%d:%d: %s %s %.8s %.16s %s SSDSmartPathCap%c En%c Exp=%d\n",
 1272			h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
 1273			description,
 1274			scsi_device_type(dev->devtype),
 1275			dev->vendor,
 1276			dev->model,
 1277			label,
 1278			dev->offload_config ? '+' : '-',
 1279			dev->offload_to_be_enabled ? '+' : '-',
 1280			dev->expose_device);
 1281}
 1282
 1283/* Add an entry into h->dev[] array. */
 1284static int hpsa_scsi_add_entry(struct ctlr_info *h,
 1285		struct hpsa_scsi_dev_t *device,
 1286		struct hpsa_scsi_dev_t *added[], int *nadded)
 1287{
 1288	/* assumes h->devlock is held */
 1289	int n = h->ndevices;
 1290	int i;
 1291	unsigned char addr1[8], addr2[8];
 1292	struct hpsa_scsi_dev_t *sd;
 1293
 1294	if (n >= HPSA_MAX_DEVICES) {
 1295		dev_err(&h->pdev->dev, "too many devices, some will be "
 1296			"inaccessible.\n");
 1297		return -1;
 1298	}
 1299
 1300	/* physical devices do not have lun or target assigned until now. */
 1301	if (device->lun != -1)
 1302		/* Logical device, lun is already assigned. */
 1303		goto lun_assigned;
 1304
  1305	/* If this device is a non-zero lun of a multi-lun device,
 1306	 * byte 4 of the 8-byte LUN addr will contain the logical
 1307	 * unit no, zero otherwise.
 1308	 */
 1309	if (device->scsi3addr[4] == 0) {
 1310		/* This is not a non-zero lun of a multi-lun device */
 1311		if (hpsa_find_target_lun(h, device->scsi3addr,
 1312			device->bus, &device->target, &device->lun) != 0)
 1313			return -1;
 1314		goto lun_assigned;
 1315	}
 1316
 1317	/* This is a non-zero lun of a multi-lun device.
 1318	 * Search through our list and find the device which
  1319	 * has the same 8 byte LUN address, except for bytes 4 and 5.
 1320	 * Assign the same bus and target for this new LUN.
 1321	 * Use the logical unit number from the firmware.
 1322	 */
 1323	memcpy(addr1, device->scsi3addr, 8);
 1324	addr1[4] = 0;
 1325	addr1[5] = 0;
 1326	for (i = 0; i < n; i++) {
 1327		sd = h->dev[i];
 1328		memcpy(addr2, sd->scsi3addr, 8);
 1329		addr2[4] = 0;
 1330		addr2[5] = 0;
  1331		/* differ only in bytes 4 and 5? */
 1332		if (memcmp(addr1, addr2, 8) == 0) {
 1333			device->bus = sd->bus;
 1334			device->target = sd->target;
 1335			device->lun = device->scsi3addr[4];
 1336			break;
 1337		}
 1338	}
 1339	if (device->lun == -1) {
 1340		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
 1341			" suspect firmware bug or unsupported hardware "
 1342			"configuration.\n");
 1343		return -1;
 1344	}
 1345
 1346lun_assigned:
 1347
 1348	h->dev[n] = device;
 1349	h->ndevices++;
 1350	added[*nadded] = device;
 1351	(*nadded)++;
 1352	hpsa_show_dev_msg(KERN_INFO, h, device,
 1353		device->expose_device ? "added" : "masked");
 1354	return 0;
 1355}
 1356
 1357/*
 1358 * Called during a scan operation.
 1359 *
 1360 * Update an entry in h->dev[] array.
 1361 */
 1362static void hpsa_scsi_update_entry(struct ctlr_info *h,
 1363	int entry, struct hpsa_scsi_dev_t *new_entry)
 1364{
 1365	/* assumes h->devlock is held */
 1366	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
 1367
 1368	/* Raid level changed. */
 1369	h->dev[entry]->raid_level = new_entry->raid_level;
 1370
 1371	/*
  1372	 * ioaccel_handle may have changed for a dual domain disk
 1373	 */
 1374	h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
 1375
 1376	/* Raid offload parameters changed.  Careful about the ordering. */
 1377	if (new_entry->offload_config && new_entry->offload_to_be_enabled) {
 1378		/*
 1379		 * if drive is newly offload_enabled, we want to copy the
 1380		 * raid map data first.  If previously offload_enabled and
 1381		 * offload_config were set, raid map data had better be
 1382		 * the same as it was before. If raid map data has changed
 1383		 * then it had better be the case that
 1384		 * h->dev[entry]->offload_enabled is currently 0.
 1385		 */
 1386		h->dev[entry]->raid_map = new_entry->raid_map;
 1387		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
 1388	}
 1389	if (new_entry->offload_to_be_enabled) {
 1390		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
 1391		wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */
 1392	}
 1393	h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
 1394	h->dev[entry]->offload_config = new_entry->offload_config;
 1395	h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
 1396	h->dev[entry]->queue_depth = new_entry->queue_depth;
 1397
 1398	/*
 1399	 * We can turn off ioaccel offload now, but need to delay turning
 1400	 * ioaccel on until we can update h->dev[entry]->phys_disk[], but we
 1401	 * can't do that until all the devices are updated.
 1402	 */
 1403	h->dev[entry]->offload_to_be_enabled = new_entry->offload_to_be_enabled;
 1404
 1405	/*
 1406	 * turn ioaccel off immediately if told to do so.
 1407	 */
 1408	if (!new_entry->offload_to_be_enabled)
 1409		h->dev[entry]->offload_enabled = 0;
 1410
 1411	hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
 1412}
 1413
 1414/* Replace an entry from h->dev[] array. */
 1415static void hpsa_scsi_replace_entry(struct ctlr_info *h,
 1416	int entry, struct hpsa_scsi_dev_t *new_entry,
 1417	struct hpsa_scsi_dev_t *added[], int *nadded,
 1418	struct hpsa_scsi_dev_t *removed[], int *nremoved)
 1419{
 1420	/* assumes h->devlock is held */
 1421	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
 1422	removed[*nremoved] = h->dev[entry];
 1423	(*nremoved)++;
 1424
 1425	/*
 1426	 * New physical devices won't have target/lun assigned yet
 1427	 * so we need to preserve the values in the slot we are replacing.
 1428	 */
 1429	if (new_entry->target == -1) {
 1430		new_entry->target = h->dev[entry]->target;
 1431		new_entry->lun = h->dev[entry]->lun;
 1432	}
 1433
 1434	h->dev[entry] = new_entry;
 1435	added[*nadded] = new_entry;
 1436	(*nadded)++;
 1437
 1438	hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
 1439}
 1440
 1441/* Remove an entry from h->dev[] array. */
 1442static void hpsa_scsi_remove_entry(struct ctlr_info *h, int entry,
 1443	struct hpsa_scsi_dev_t *removed[], int *nremoved)
 1444{
 1445	/* assumes h->devlock is held */
 1446	int i;
 1447	struct hpsa_scsi_dev_t *sd;
 1448
 1449	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
 1450
 1451	sd = h->dev[entry];
 1452	removed[*nremoved] = h->dev[entry];
 1453	(*nremoved)++;
 1454
 1455	for (i = entry; i < h->ndevices-1; i++)
 1456		h->dev[i] = h->dev[i+1];
 1457	h->ndevices--;
 1458	hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
 1459}
 1460
 1461#define SCSI3ADDR_EQ(a, b) ( \
 1462	(a)[7] == (b)[7] && \
 1463	(a)[6] == (b)[6] && \
 1464	(a)[5] == (b)[5] && \
 1465	(a)[4] == (b)[4] && \
 1466	(a)[3] == (b)[3] && \
 1467	(a)[2] == (b)[2] && \
 1468	(a)[1] == (b)[1] && \
 1469	(a)[0] == (b)[0])
 1470
 1471static void fixup_botched_add(struct ctlr_info *h,
 1472	struct hpsa_scsi_dev_t *added)
 1473{
 1474	/* called when scsi_add_device fails in order to re-adjust
 1475	 * h->dev[] to match the mid layer's view.
 1476	 */
 1477	unsigned long flags;
 1478	int i, j;
 1479
 1480	spin_lock_irqsave(&h->lock, flags);
 1481	for (i = 0; i < h->ndevices; i++) {
 1482		if (h->dev[i] == added) {
 1483			for (j = i; j < h->ndevices-1; j++)
 1484				h->dev[j] = h->dev[j+1];
 1485			h->ndevices--;
 1486			break;
 1487		}
 1488	}
 1489	spin_unlock_irqrestore(&h->lock, flags);
 1490	kfree(added);
 1491}
 1492
 1493static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
 1494	struct hpsa_scsi_dev_t *dev2)
 1495{
 1496	/* we compare everything except lun and target as these
 1497	 * are not yet assigned.  Compare parts likely
 1498	 * to differ first
 1499	 */
 1500	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
 1501		sizeof(dev1->scsi3addr)) != 0)
 1502		return 0;
 1503	if (memcmp(dev1->device_id, dev2->device_id,
 1504		sizeof(dev1->device_id)) != 0)
 1505		return 0;
 1506	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
 1507		return 0;
 1508	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
 1509		return 0;
 1510	if (dev1->devtype != dev2->devtype)
 1511		return 0;
 1512	if (dev1->bus != dev2->bus)
 1513		return 0;
 1514	return 1;
 1515}
 1516
 1517static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
 1518	struct hpsa_scsi_dev_t *dev2)
 1519{
 1520	/* Device attributes that can change, but don't mean
 1521	 * that the device is a different device, nor that the OS
 1522	 * needs to be told anything about the change.
 1523	 */
 1524	if (dev1->raid_level != dev2->raid_level)
 1525		return 1;
 1526	if (dev1->offload_config != dev2->offload_config)
 1527		return 1;
 1528	if (dev1->offload_to_be_enabled != dev2->offload_to_be_enabled)
 1529		return 1;
 1530	if (!is_logical_dev_addr_mode(dev1->scsi3addr))
 1531		if (dev1->queue_depth != dev2->queue_depth)
 1532			return 1;
 1533	/*
 1534	 * This can happen for dual domain devices. An active
 1535	 * path change causes the ioaccel handle to change
 1536	 *
 1537	 * for example note the handle differences between p0 and p1
 1538	 * Device                    WWN               ,WWN hash,Handle
 1539	 * D016 p0|0x3 [02]P2E:01:01,0x5000C5005FC4DACA,0x9B5616,0x01030003
 1540	 *	p1                   0x5000C5005FC4DAC9,0x6798C0,0x00040004
 1541	 */
 1542	if (dev1->ioaccel_handle != dev2->ioaccel_handle)
 1543		return 1;
 1544	return 0;
 1545}
 1546
 1547/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 1548 * and return needle location in *index.  If scsi3addr matches, but not
 1549 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 1550 * location in *index.
 1551 * In the case of a minor device attribute change, such as RAID level, just
 1552 * return DEVICE_UPDATED, along with the updated device's location in index.
 1553 * If needle not found, return DEVICE_NOT_FOUND.
 1554 */
 1555static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
 1556	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
 1557	int *index)
 1558{
 1559	int i;
 1560#define DEVICE_NOT_FOUND 0
 1561#define DEVICE_CHANGED 1
 1562#define DEVICE_SAME 2
 1563#define DEVICE_UPDATED 3
 1564	if (needle == NULL)
 1565		return DEVICE_NOT_FOUND;
 1566
 1567	for (i = 0; i < haystack_size; i++) {
 1568		if (haystack[i] == NULL) /* previously removed. */
 1569			continue;
 1570		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
 1571			*index = i;
 1572			if (device_is_the_same(needle, haystack[i])) {
 1573				if (device_updated(needle, haystack[i]))
 1574					return DEVICE_UPDATED;
 1575				return DEVICE_SAME;
 1576			} else {
 1577				/* Keep offline devices offline */
 1578				if (needle->volume_offline)
 1579					return DEVICE_NOT_FOUND;
 1580				return DEVICE_CHANGED;
 1581			}
 1582		}
 1583	}
 1584	*index = -1;
 1585	return DEVICE_NOT_FOUND;
 1586}
 1587
 1588static void hpsa_monitor_offline_device(struct ctlr_info *h,
 1589					unsigned char scsi3addr[])
 1590{
 1591	struct offline_device_entry *device;
 1592	unsigned long flags;
 1593
 1594	/* Check to see if device is already on the list */
 1595	spin_lock_irqsave(&h->offline_device_lock, flags);
 1596	list_for_each_entry(device, &h->offline_device_list, offline_list) {
 1597		if (memcmp(device->scsi3addr, scsi3addr,
 1598			sizeof(device->scsi3addr)) == 0) {
 1599			spin_unlock_irqrestore(&h->offline_device_lock, flags);
 1600			return;
 1601		}
 1602	}
 1603	spin_unlock_irqrestore(&h->offline_device_lock, flags);
 1604
 1605	/* Device is not on the list, add it. */
 1606	device = kmalloc(sizeof(*device), GFP_KERNEL);
 1607	if (!device)
 1608		return;
 1609
 1610	memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
 1611	spin_lock_irqsave(&h->offline_device_lock, flags);
 1612	list_add_tail(&device->offline_list, &h->offline_device_list);
 1613	spin_unlock_irqrestore(&h->offline_device_lock, flags);
 1614}
 1615
 1616/* Print a message explaining various offline volume states */
 1617static void hpsa_show_volume_status(struct ctlr_info *h,
 1618	struct hpsa_scsi_dev_t *sd)
 1619{
 1620	if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
 1621		dev_info(&h->pdev->dev,
 1622			"C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
 1623			h->scsi_host->host_no,
 1624			sd->bus, sd->target, sd->lun);
 1625	switch (sd->volume_offline) {
 1626	case HPSA_LV_OK:
 1627		break;
 1628	case HPSA_LV_UNDERGOING_ERASE:
 1629		dev_info(&h->pdev->dev,
 1630			"C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
 1631			h->scsi_host->host_no,
 1632			sd->bus, sd->target, sd->lun);
 1633		break;
 1634	case HPSA_LV_NOT_AVAILABLE:
 1635		dev_info(&h->pdev->dev,
 1636			"C%d:B%d:T%d:L%d Volume is waiting for transforming volume.\n",
 1637			h->scsi_host->host_no,
 1638			sd->bus, sd->target, sd->lun);
 1639		break;
 1640	case HPSA_LV_UNDERGOING_RPI:
 1641		dev_info(&h->pdev->dev,
 1642			"C%d:B%d:T%d:L%d Volume is undergoing rapid parity init.\n",
 1643			h->scsi_host->host_no,
 1644			sd->bus, sd->target, sd->lun);
 1645		break;
 1646	case HPSA_LV_PENDING_RPI:
 1647		dev_info(&h->pdev->dev,
 1648			"C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
 1649			h->scsi_host->host_no,
 1650			sd->bus, sd->target, sd->lun);
 1651		break;
 1652	case HPSA_LV_ENCRYPTED_NO_KEY:
 1653		dev_info(&h->pdev->dev,
 1654			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
 1655			h->scsi_host->host_no,
 1656			sd->bus, sd->target, sd->lun);
 1657		break;
 1658	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
 1659		dev_info(&h->pdev->dev,
 1660			"C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
 1661			h->scsi_host->host_no,
 1662			sd->bus, sd->target, sd->lun);
 1663		break;
 1664	case HPSA_LV_UNDERGOING_ENCRYPTION:
 1665		dev_info(&h->pdev->dev,
 1666			"C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
 1667			h->scsi_host->host_no,
 1668			sd->bus, sd->target, sd->lun);
 1669		break;
 1670	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
 1671		dev_info(&h->pdev->dev,
 1672			"C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
 1673			h->scsi_host->host_no,
 1674			sd->bus, sd->target, sd->lun);
 1675		break;
 1676	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
 1677		dev_info(&h->pdev->dev,
 1678			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
 1679			h->scsi_host->host_no,
 1680			sd->bus, sd->target, sd->lun);
 1681		break;
 1682	case HPSA_LV_PENDING_ENCRYPTION:
 1683		dev_info(&h->pdev->dev,
 1684			"C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
 1685			h->scsi_host->host_no,
 1686			sd->bus, sd->target, sd->lun);
 1687		break;
 1688	case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
 1689		dev_info(&h->pdev->dev,
 1690			"C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
 1691			h->scsi_host->host_no,
 1692			sd->bus, sd->target, sd->lun);
 1693		break;
 1694	}
 1695}
 1696
 1697/*
 1698 * Figure the list of physical drive pointers for a logical drive with
 1699 * raid offload configured.
 1700 */
 1701static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
 1702				struct hpsa_scsi_dev_t *dev[], int ndevices,
 1703				struct hpsa_scsi_dev_t *logical_drive)
 1704{
 1705	struct raid_map_data *map = &logical_drive->raid_map;
 1706	struct raid_map_disk_data *dd = &map->data[0];
 1707	int i, j;
 1708	int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
 1709				le16_to_cpu(map->metadata_disks_per_row);
 1710	int nraid_map_entries = le16_to_cpu(map->row_cnt) *
 1711				le16_to_cpu(map->layout_map_count) *
 1712				total_disks_per_row;
 1713	int nphys_disk = le16_to_cpu(map->layout_map_count) *
 1714				total_disks_per_row;
 1715	int qdepth;
 1716
 1717	if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
 1718		nraid_map_entries = RAID_MAP_MAX_ENTRIES;
 1719
 1720	logical_drive->nphysical_disks = nraid_map_entries;
 1721
 1722	qdepth = 0;
 1723	for (i = 0; i < nraid_map_entries; i++) {
 1724		logical_drive->phys_disk[i] = NULL;
 1725		if (!logical_drive->offload_config)
 1726			continue;
 1727		for (j = 0; j < ndevices; j++) {
 1728			if (dev[j] == NULL)
 1729				continue;
 1730			if (dev[j]->devtype != TYPE_DISK &&
 1731			    dev[j]->devtype != TYPE_ZBC)
 1732				continue;
 1733			if (is_logical_device(dev[j]))
 1734				continue;
 1735			if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
 1736				continue;
 1737
 1738			logical_drive->phys_disk[i] = dev[j];
 1739			if (i < nphys_disk)
 1740				qdepth = min(h->nr_cmds, qdepth +
 1741				    logical_drive->phys_disk[i]->queue_depth);
 1742			break;
 1743		}
 1744
 1745		/*
 1746		 * This can happen if a physical drive is removed and
 1747		 * the logical drive is degraded.  In that case, the RAID
 1748		 * map data will refer to a physical disk which isn't actually
 1749		 * present.  And in that case offload_enabled should already
 1750		 * be 0, but we'll turn it off here just in case
 1751		 */
 1752		if (!logical_drive->phys_disk[i]) {
 1753			dev_warn(&h->pdev->dev,
 1754				"%s: [%d:%d:%d:%d] A phys disk component of LV is missing, turning off offload_enabled for LV.\n",
 1755				__func__,
 1756				h->scsi_host->host_no, logical_drive->bus,
 1757				logical_drive->target, logical_drive->lun);
 1758			hpsa_turn_off_ioaccel_for_device(logical_drive);
 1759			logical_drive->queue_depth = 8;
 1760		}
 1761	}
 1762	if (nraid_map_entries)
 1763		/*
 1764		 * This is correct for reads, too high for full stripe writes,
 1765		 * way too high for partial stripe writes
 1766		 */
 1767		logical_drive->queue_depth = qdepth;
 1768	else {
 1769		if (logical_drive->external)
 1770			logical_drive->queue_depth = EXTERNAL_QD;
 1771		else
 1772			logical_drive->queue_depth = h->nr_cmds;
 1773	}
 1774}
 1775
 1776static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
 1777				struct hpsa_scsi_dev_t *dev[], int ndevices)
 1778{
 1779	int i;
 1780
 1781	for (i = 0; i < ndevices; i++) {
 1782		if (dev[i] == NULL)
 1783			continue;
 1784		if (dev[i]->devtype != TYPE_DISK &&
 1785		    dev[i]->devtype != TYPE_ZBC)
 1786			continue;
 1787		if (!is_logical_device(dev[i]))
 1788			continue;
 1789
 1790		/*
 1791		 * If offload is currently enabled, the RAID map and
 1792		 * phys_disk[] assignment *better* not be changing
  1793	 * because we would be changing ioaccel phys_disk[] pointers
  1794	 * on an ioaccel volume that is processing I/O requests.
 1795		 *
 1796		 * If an ioaccel volume status changed, initially because it was
 1797		 * re-configured and thus underwent a transformation, or
 1798		 * a drive failed, we would have received a state change
 1799		 * request and ioaccel should have been turned off. When the
 1800		 * transformation completes, we get another state change
 1801		 * request to turn ioaccel back on. In this case, we need
 1802		 * to update the ioaccel information.
 1803		 *
 1804		 * Thus: If it is not currently enabled, but will be after
 1805		 * the scan completes, make sure the ioaccel pointers
 1806		 * are up to date.
 1807		 */
 1808
 1809		if (!dev[i]->offload_enabled && dev[i]->offload_to_be_enabled)
 1810			hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
 1811	}
 1812}
 1813
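/*
 * Expose a newly discovered device to the upper layers: logical (RAID)
 * volumes are registered with the SCSI mid layer via scsi_add_device(),
 * physical (HBA) devices are registered with the SAS transport layer.
 */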
 1814static int hpsa_add_device(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
 1815{
 1816	int rc = 0;
 1817
 1818	if (!h->scsi_host)
 1819		return 1;
 1820
 1821	if (is_logical_device(device)) /* RAID */
 1822		rc = scsi_add_device(h->scsi_host, device->bus,
 1823					device->target, device->lun);
 1824	else /* HBA */
 1825		rc = hpsa_add_sas_device(h->sas_host, device);
 1826
 1827	return rc;
 1828}
 1829
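/*
 * Count the commands in the command pool that are still outstanding
 * (i.e. not idle) against the given device.
 */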
 1830static int hpsa_find_outstanding_commands_for_dev(struct ctlr_info *h,
 1831						struct hpsa_scsi_dev_t *dev)
 1832{
 1833	int i;
 1834	int count = 0;
 1835
 1836	for (i = 0; i < h->nr_cmds; i++) {
 1837		struct CommandList *c = h->cmd_pool + i;
 1838		int refcount = atomic_inc_return(&c->refcount);
 1839
 1840		if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev,
 1841				dev->scsi3addr)) {
 1842			unsigned long flags;
 1843
 1844			spin_lock_irqsave(&h->lock, flags);	/* Implied MB */
 1845			if (!hpsa_is_cmd_idle(c))
 1846				++count;
 1847			spin_unlock_irqrestore(&h->lock, flags);
 1848		}
 1849
 1850		cmd_free(h, c);
 1851	}
 1852
 1853	return count;
 1854}
 1855
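/*
 * Poll once per second until the device's outstanding commands have
 * drained, giving up after NUM_WAIT polls (a longer limit is used for
 * external devices).
 */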
 1856#define NUM_WAIT 20
 1857static void hpsa_wait_for_outstanding_commands_for_dev(struct ctlr_info *h,
 1858						struct hpsa_scsi_dev_t *device)
 1859{
 1860	int cmds = 0;
 1861	int waits = 0;
 1862	int num_wait = NUM_WAIT;
 1863
 1864	if (device->external)
 1865		num_wait = HPSA_EH_PTRAID_TIMEOUT;
 1866
 1867	while (1) {
 1868		cmds = hpsa_find_outstanding_commands_for_dev(h, device);
 1869		if (cmds == 0)
 1870			break;
 1871		if (++waits > num_wait)
 1872			break;
 1873		msleep(1000);
 1874	}
 1875
 1876	if (waits > num_wait) {
 1877		dev_warn(&h->pdev->dev,
 1878			"%s: removing device [%d:%d:%d:%d] with %d outstanding commands!\n",
 1879			__func__,
 1880			h->scsi_host->host_no,
 1881			device->bus, device->target, device->lun, cmds);
 1882	}
 1883}
 1884
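/*
 * Mark a device as removed, let its outstanding commands drain, then
 * detach it from the SCSI mid layer (RAID) or the SAS transport (HBA).
 */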
 1885static void hpsa_remove_device(struct ctlr_info *h,
 1886			struct hpsa_scsi_dev_t *device)
 1887{
 1888	struct scsi_device *sdev = NULL;
 1889
 1890	if (!h->scsi_host)
 1891		return;
 1892
 1893	/*
 1894	 * Allow for commands to drain
 1895	 */
 1896	device->removed = 1;
 1897	hpsa_wait_for_outstanding_commands_for_dev(h, device);
 1898
 1899	if (is_logical_device(device)) { /* RAID */
 1900		sdev = scsi_device_lookup(h->scsi_host, device->bus,
 1901						device->target, device->lun);
 1902		if (sdev) {
 1903			scsi_remove_device(sdev);
 1904			scsi_device_put(sdev);
 1905		} else {
 1906			/*
 1907			 * We don't expect to get here.  Future commands
 1908			 * to this device will get a selection timeout as
 1909			 * if the device were gone.
 1910			 */
 1911			hpsa_show_dev_msg(KERN_WARNING, h, device,
 1912					"didn't find device for removal.");
 1913		}
 1914	} else { /* HBA */
 1915
 1916		hpsa_remove_sas_device(device);
 1917	}
 1918}
 1919
 1920static void adjust_hpsa_scsi_table(struct ctlr_info *h,
 1921	struct hpsa_scsi_dev_t *sd[], int nsds)
 1922{
 1923	/* sd contains scsi3 addresses and devtypes, and inquiry
 1924	 * data.  This function takes what's in sd to be the current
 1925	 * reality and updates h->dev[] to reflect that reality.
 1926	 */
 1927	int i, entry, device_change, changes = 0;
 1928	struct hpsa_scsi_dev_t *csd;
 1929	unsigned long flags;
 1930	struct hpsa_scsi_dev_t **added, **removed;
 1931	int nadded, nremoved;
 1932
 1933	/*
  1934	 * A reset can cause a device's status to change;
 1935	 * re-schedule the scan to see what happened.
 1936	 */
 1937	spin_lock_irqsave(&h->reset_lock, flags);
 1938	if (h->reset_in_progress) {
 1939		h->drv_req_rescan = 1;
 1940		spin_unlock_irqrestore(&h->reset_lock, flags);
 1941		return;
 1942	}
 1943	spin_unlock_irqrestore(&h->reset_lock, flags);
 1944
 1945	added = kcalloc(HPSA_MAX_DEVICES, sizeof(*added), GFP_KERNEL);
 1946	removed = kcalloc(HPSA_MAX_DEVICES, sizeof(*removed), GFP_KERNEL);
 1947
 1948	if (!added || !removed) {
 1949		dev_warn(&h->pdev->dev, "out of memory in "
 1950			"adjust_hpsa_scsi_table\n");
 1951		goto free_and_out;
 1952	}
 1953
 1954	spin_lock_irqsave(&h->devlock, flags);
 1955
 1956	/* find any devices in h->dev[] that are not in
 1957	 * sd[] and remove them from h->dev[], and for any
 1958	 * devices which have changed, remove the old device
 1959	 * info and add the new device info.
 1960	 * If minor device attributes change, just update
 1961	 * the existing device structure.
 1962	 */
 1963	i = 0;
 1964	nremoved = 0;
 1965	nadded = 0;
 1966	while (i < h->ndevices) {
 1967		csd = h->dev[i];
 1968		device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
 1969		if (device_change == DEVICE_NOT_FOUND) {
 1970			changes++;
 1971			hpsa_scsi_remove_entry(h, i, removed, &nremoved);
 1972			continue; /* remove ^^^, hence i not incremented */
 1973		} else if (device_change == DEVICE_CHANGED) {
 1974			changes++;
 1975			hpsa_scsi_replace_entry(h, i, sd[entry],
 1976				added, &nadded, removed, &nremoved);
 1977			/* Set it to NULL to prevent it from being freed
 1978			 * at the bottom of hpsa_update_scsi_devices()
 1979			 */
 1980			sd[entry] = NULL;
 1981		} else if (device_change == DEVICE_UPDATED) {
 1982			hpsa_scsi_update_entry(h, i, sd[entry]);
 1983		}
 1984		i++;
 1985	}
 1986
 1987	/* Now, make sure every device listed in sd[] is also
 1988	 * listed in h->dev[], adding them if they aren't found
 1989	 */
 1990
 1991	for (i = 0; i < nsds; i++) {
 1992		if (!sd[i]) /* if already added above. */
 1993			continue;
 1994
 1995		/* Don't add devices which are NOT READY, FORMAT IN PROGRESS
 1996		 * as the SCSI mid-layer does not handle such devices well.
 1997		 * It relentlessly loops sending TUR at 3Hz, then READ(10)
 1998		 * at 160Hz, and prevents the system from coming up.
 1999		 */
 2000		if (sd[i]->volume_offline) {
 2001			hpsa_show_volume_status(h, sd[i]);
 2002			hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
 2003			continue;
 2004		}
 2005
 2006		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
 2007					h->ndevices, &entry);
 2008		if (device_change == DEVICE_NOT_FOUND) {
 2009			changes++;
 2010			if (hpsa_scsi_add_entry(h, sd[i], added, &nadded) != 0)
 2011				break;
 2012			sd[i] = NULL; /* prevent from being freed later. */
 2013		} else if (device_change == DEVICE_CHANGED) {
 2014			/* should never happen... */
 2015			changes++;
 2016			dev_warn(&h->pdev->dev,
 2017				"device unexpectedly changed.\n");
 2018			/* but if it does happen, we just ignore that device */
 2019		}
 2020	}
 2021	hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);
 2022
 2023	/*
 2024	 * Now that h->dev[]->phys_disk[] is coherent, we can enable
 2025	 * any logical drives that need it enabled.
 2026	 *
 2027	 * The raid map should be current by now.
 2028	 *
 2029	 * We are updating the device list used for I/O requests.
 2030	 */
 2031	for (i = 0; i < h->ndevices; i++) {
 2032		if (h->dev[i] == NULL)
 2033			continue;
 2034		h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled;
 2035	}
 2036
 2037	spin_unlock_irqrestore(&h->devlock, flags);
 2038
 2039	/* Monitor devices which are in one of several NOT READY states to be
 2040	 * brought online later. This must be done without holding h->devlock,
 2041	 * so don't touch h->dev[]
 2042	 */
 2043	for (i = 0; i < nsds; i++) {
 2044		if (!sd[i]) /* if already added above. */
 2045			continue;
 2046		if (sd[i]->volume_offline)
 2047			hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
 2048	}
 2049
  2050	/* Don't notify the scsi mid layer of any changes the first time
  2051	 * through (or if there are no changes); scsi_scan_host() will do it
  2052	 * later, on the initial scan.
  2053	 */
 2054	if (!changes)
 2055		goto free_and_out;
 2056
 2057	/* Notify scsi mid layer of any removed devices */
 2058	for (i = 0; i < nremoved; i++) {
 2059		if (removed[i] == NULL)
 2060			continue;
 2061		if (removed[i]->expose_device)
 2062			hpsa_remove_device(h, removed[i]);
 2063		kfree(removed[i]);
 2064		removed[i] = NULL;
 2065	}
 2066
 2067	/* Notify scsi mid layer of any added devices */
 2068	for (i = 0; i < nadded; i++) {
 2069		int rc = 0;
 2070
 2071		if (added[i] == NULL)
 2072			continue;
 2073		if (!(added[i]->expose_device))
 2074			continue;
 2075		rc = hpsa_add_device(h, added[i]);
 2076		if (!rc)
 2077			continue;
 2078		dev_warn(&h->pdev->dev,
 2079			"addition failed %d, device not added.", rc);
 2080		/* now we have to remove it from h->dev,
 2081		 * since it didn't get added to scsi mid layer
 2082		 */
 2083		fixup_botched_add(h, added[i]);
 2084		h->drv_req_rescan = 1;
 2085	}
 2086
 2087free_and_out:
 2088	kfree(added);
 2089	kfree(removed);
 2090}
 2091
 2092/*
 2093 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
  2094	 * Assumes h->devlock is held.
 2095 */
 2096static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
 2097	int bus, int target, int lun)
 2098{
 2099	int i;
 2100	struct hpsa_scsi_dev_t *sd;
 2101
 2102	for (i = 0; i < h->ndevices; i++) {
 2103		sd = h->dev[i];
 2104		if (sd->bus == bus && sd->target == target && sd->lun == lun)
 2105			return sd;
 2106	}
 2107	return NULL;
 2108}
 2109
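/*
 * Mid layer slave_alloc hook: find the matching hpsa_scsi_dev_t (by SAS
 * rphy for physical devices, otherwise by bus/target/lun) and hang it
 * off sdev->hostdata if the device is exposed.
 */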
 2110static int hpsa_slave_alloc(struct scsi_device *sdev)
 2111{
 2112	struct hpsa_scsi_dev_t *sd = NULL;
 2113	unsigned long flags;
 2114	struct ctlr_info *h;
 2115
 2116	h = sdev_to_hba(sdev);
 2117	spin_lock_irqsave(&h->devlock, flags);
 2118	if (sdev_channel(sdev) == HPSA_PHYSICAL_DEVICE_BUS) {
 2119		struct scsi_target *starget;
 2120		struct sas_rphy *rphy;
 2121
 2122		starget = scsi_target(sdev);
 2123		rphy = target_to_rphy(starget);
 2124		sd = hpsa_find_device_by_sas_rphy(h, rphy);
 2125		if (sd) {
 2126			sd->target = sdev_id(sdev);
 2127			sd->lun = sdev->lun;
 2128		}
 2129	}
 2130	if (!sd)
 2131		sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
 2132					sdev_id(sdev), sdev->lun);
 2133
 2134	if (sd && sd->expose_device) {
 2135		atomic_set(&sd->ioaccel_cmds_out, 0);
 2136		sdev->hostdata = sd;
 2137	} else
 2138		sdev->hostdata = NULL;
 2139	spin_unlock_irqrestore(&h->devlock, flags);
 2140	return 0;
 2141}
 2142
 2143/* configure scsi device based on internal per-device structure */
 2144#define CTLR_TIMEOUT (120 * HZ)
 2145static int hpsa_slave_configure(struct scsi_device *sdev)
 2146{
 2147	struct hpsa_scsi_dev_t *sd;
 2148	int queue_depth;
 2149
 2150	sd = sdev->hostdata;
 2151	sdev->no_uld_attach = !sd || !sd->expose_device;
 2152
 2153	if (sd) {
 2154		sd->was_removed = 0;
 2155		queue_depth = sd->queue_depth != 0 ?
 2156				sd->queue_depth : sdev->host->can_queue;
 2157		if (sd->external) {
 2158			queue_depth = EXTERNAL_QD;
 2159			sdev->eh_timeout = HPSA_EH_PTRAID_TIMEOUT;
 2160			blk_queue_rq_timeout(sdev->request_queue,
 2161						HPSA_EH_PTRAID_TIMEOUT);
 2162		}
 2163		if (is_hba_lunid(sd->scsi3addr)) {
 2164			sdev->eh_timeout = CTLR_TIMEOUT;
 2165			blk_queue_rq_timeout(sdev->request_queue, CTLR_TIMEOUT);
 2166		}
 2167	} else {
 2168		queue_depth = sdev->host->can_queue;
 2169	}
 2170
 2171	scsi_change_queue_depth(sdev, queue_depth);
 2172
 2173	return 0;
 2174}
 2175
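/*
 * Mid layer slave_destroy hook: flag the device so that late completions
 * are dropped, since the SCSI command has already been cleaned up in SML.
 */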
 2176static void hpsa_slave_destroy(struct scsi_device *sdev)
 2177{
 2178	struct hpsa_scsi_dev_t *hdev = NULL;
 2179
 2180	hdev = sdev->hostdata;
 2181
 2182	if (hdev)
 2183		hdev->was_removed = 1;
 2184}
 2185
 2186static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
 2187{
 2188	int i;
 2189
 2190	if (!h->ioaccel2_cmd_sg_list)
 2191		return;
 2192	for (i = 0; i < h->nr_cmds; i++) {
 2193		kfree(h->ioaccel2_cmd_sg_list[i]);
 2194		h->ioaccel2_cmd_sg_list[i] = NULL;
 2195	}
 2196	kfree(h->ioaccel2_cmd_sg_list);
 2197	h->ioaccel2_cmd_sg_list = NULL;
 2198}
 2199
 2200static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
 2201{
 2202	int i;
 2203
 2204	if (h->chainsize <= 0)
 2205		return 0;
 2206
 2207	h->ioaccel2_cmd_sg_list =
 2208		kcalloc(h->nr_cmds, sizeof(*h->ioaccel2_cmd_sg_list),
 2209					GFP_KERNEL);
 2210	if (!h->ioaccel2_cmd_sg_list)
 2211		return -ENOMEM;
 2212	for (i = 0; i < h->nr_cmds; i++) {
 2213		h->ioaccel2_cmd_sg_list[i] =
 2214			kmalloc_array(h->maxsgentries,
 2215				      sizeof(*h->ioaccel2_cmd_sg_list[i]),
 2216				      GFP_KERNEL);
 2217		if (!h->ioaccel2_cmd_sg_list[i])
 2218			goto clean;
 2219	}
 2220	return 0;
 2221
 2222clean:
 2223	hpsa_free_ioaccel2_sg_chain_blocks(h);
 2224	return -ENOMEM;
 2225}
 2226
 2227static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
 2228{
 2229	int i;
 2230
 2231	if (!h->cmd_sg_list)
 2232		return;
 2233	for (i = 0; i < h->nr_cmds; i++) {
 2234		kfree(h->cmd_sg_list[i]);
 2235		h->cmd_sg_list[i] = NULL;
 2236	}
 2237	kfree(h->cmd_sg_list);
 2238	h->cmd_sg_list = NULL;
 2239}
 2240
 2241static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
 2242{
 2243	int i;
 2244
 2245	if (h->chainsize <= 0)
 2246		return 0;
 2247
 2248	h->cmd_sg_list = kcalloc(h->nr_cmds, sizeof(*h->cmd_sg_list),
 2249				 GFP_KERNEL);
 2250	if (!h->cmd_sg_list)
 2251		return -ENOMEM;
 2252
 2253	for (i = 0; i < h->nr_cmds; i++) {
 2254		h->cmd_sg_list[i] = kmalloc_array(h->chainsize,
 2255						  sizeof(*h->cmd_sg_list[i]),
 2256						  GFP_KERNEL);
 2257		if (!h->cmd_sg_list[i])
 2258			goto clean;
 2259
 2260	}
 2261	return 0;
 2262
 2263clean:
 2264	hpsa_free_sg_chain_blocks(h);
 2265	return -ENOMEM;
 2266}
 2267
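/*
 * DMA-map the pre-allocated ioaccel2 SG chain block for this command and
 * record its bus address in the first SG element.  Returns -1 on failure.
 */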
 2268static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
 2269	struct io_accel2_cmd *cp, struct CommandList *c)
 2270{
 2271	struct ioaccel2_sg_element *chain_block;
 2272	u64 temp64;
 2273	u32 chain_size;
 2274
 2275	chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
 2276	chain_size = le32_to_cpu(cp->sg[0].length);
 2277	temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_size,
 2278				DMA_TO_DEVICE);
 2279	if (dma_mapping_error(&h->pdev->dev, temp64)) {
 2280		/* prevent subsequent unmapping */
 2281		cp->sg->address = 0;
 2282		return -1;
 2283	}
 2284	cp->sg->address = cpu_to_le64(temp64);
 2285	return 0;
 2286}
 2287
 2288static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
 2289	struct io_accel2_cmd *cp)
 2290{
 2291	struct ioaccel2_sg_element *chain_sg;
 2292	u64 temp64;
 2293	u32 chain_size;
 2294
 2295	chain_sg = cp->sg;
 2296	temp64 = le64_to_cpu(chain_sg->address);
 2297	chain_size = le32_to_cpu(cp->sg[0].length);
 2298	dma_unmap_single(&h->pdev->dev, temp64, chain_size, DMA_TO_DEVICE);
 2299}
 2300
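/*
 * DMA-map the chained SG block used when a command needs more SG entries
 * than fit in the command itself, and link it from the last embedded SG
 * descriptor.  Returns -1 if the mapping fails.
 */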
 2301static int hpsa_map_sg_chain_block(struct ctlr_info *h,
 2302	struct CommandList *c)
 2303{
 2304	struct SGDescriptor *chain_sg, *chain_block;
 2305	u64 temp64;
 2306	u32 chain_len;
 2307
 2308	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
 2309	chain_block = h->cmd_sg_list[c->cmdindex];
 2310	chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
 2311	chain_len = sizeof(*chain_sg) *
 2312		(le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
 2313	chain_sg->Len = cpu_to_le32(chain_len);
 2314	temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_len,
 2315				DMA_TO_DEVICE);
 2316	if (dma_mapping_error(&h->pdev->dev, temp64)) {
 2317		/* prevent subsequent unmapping */
 2318		chain_sg->Addr = cpu_to_le64(0);
 2319		return -1;
 2320	}
 2321	chain_sg->Addr = cpu_to_le64(temp64);
 2322	return 0;
 2323}
 2324
 2325static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
 2326	struct CommandList *c)
 2327{
 2328	struct SGDescriptor *chain_sg;
 2329
 2330	if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
 2331		return;
 2332
 2333	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
 2334	dma_unmap_single(&h->pdev->dev, le64_to_cpu(chain_sg->Addr),
 2335			le32_to_cpu(chain_sg->Len), DMA_TO_DEVICE);
 2336}
 2337
 2338
 2339/* Decode the various types of errors on ioaccel2 path.
 2340 * Return 1 for any error that should generate a RAID path retry.
 2341 * Return 0 for errors that don't require a RAID path retry.
 2342 */
 2343static int handle_ioaccel_mode2_error(struct ctlr_info *h,
 2344					struct CommandList *c,
 2345					struct scsi_cmnd *cmd,
 2346					struct io_accel2_cmd *c2,
 2347					struct hpsa_scsi_dev_t *dev)
 2348{
 2349	int data_len;
 2350	int retry = 0;
 2351	u32 ioaccel2_resid = 0;
 2352
 2353	switch (c2->error_data.serv_response) {
 2354	case IOACCEL2_SERV_RESPONSE_COMPLETE:
 2355		switch (c2->error_data.status) {
 2356		case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
 2357			if (cmd)
 2358				cmd->result = 0;
 2359			break;
 2360		case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
 2361			cmd->result |= SAM_STAT_CHECK_CONDITION;
 2362			if (c2->error_data.data_present !=
 2363					IOACCEL2_SENSE_DATA_PRESENT) {
 2364				memset(cmd->sense_buffer, 0,
 2365					SCSI_SENSE_BUFFERSIZE);
 2366				break;
 2367			}
 2368			/* copy the sense data */
 2369			data_len = c2->error_data.sense_data_len;
 2370			if (data_len > SCSI_SENSE_BUFFERSIZE)
 2371				data_len = SCSI_SENSE_BUFFERSIZE;
 2372			if (data_len > sizeof(c2->error_data.sense_data_buff))
 2373				data_len =
 2374					sizeof(c2->error_data.sense_data_buff);
 2375			memcpy(cmd->sense_buffer,
 2376				c2->error_data.sense_data_buff, data_len);
 2377			retry = 1;
 2378			break;
 2379		case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
 2380			retry = 1;
 2381			break;
 2382		case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
 2383			retry = 1;
 2384			break;
 2385		case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
 2386			retry = 1;
 2387			break;
 2388		case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
 2389			retry = 1;
 2390			break;
 2391		default:
 2392			retry = 1;
 2393			break;
 2394		}
 2395		break;
 2396	case IOACCEL2_SERV_RESPONSE_FAILURE:
 2397		switch (c2->error_data.status) {
 2398		case IOACCEL2_STATUS_SR_IO_ERROR:
 2399		case IOACCEL2_STATUS_SR_IO_ABORTED:
 2400		case IOACCEL2_STATUS_SR_OVERRUN:
 2401			retry = 1;
 2402			break;
 2403		case IOACCEL2_STATUS_SR_UNDERRUN:
 2404			cmd->result = (DID_OK << 16);		/* host byte */
 2405			ioaccel2_resid = get_unaligned_le32(
 2406						&c2->error_data.resid_cnt[0]);
 2407			scsi_set_resid(cmd, ioaccel2_resid);
 2408			break;
 2409		case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE:
 2410		case IOACCEL2_STATUS_SR_INVALID_DEVICE:
 2411		case IOACCEL2_STATUS_SR_IOACCEL_DISABLED:
 2412			/*
 2413			 * Did an HBA disk disappear? We will eventually
 2414			 * get a state change event from the controller but
 2415			 * in the meantime, we need to tell the OS that the
 2416			 * HBA disk is no longer there and stop I/O
 2417			 * from going down. This allows the potential re-insert
 2418			 * of the disk to get the same device node.
 2419			 */
 2420			if (dev->physical_device && dev->expose_device) {
 2421				cmd->result = DID_NO_CONNECT << 16;
 2422				dev->removed = 1;
 2423				h->drv_req_rescan = 1;
 2424				dev_warn(&h->pdev->dev,
 2425					"%s: device is gone!\n", __func__);
 2426			} else
 2427				/*
 2428				 * Retry by sending down the RAID path.
 2429				 * We will get an event from ctlr to
 2430				 * trigger rescan regardless.
 2431				 */
 2432				retry = 1;
 2433			break;
 2434		default:
 2435			retry = 1;
 2436		}
 2437		break;
 2438	case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
 2439		break;
 2440	case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
 2441		break;
 2442	case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
 2443		retry = 1;
 2444		break;
 2445	case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
 2446		break;
 2447	default:
 2448		retry = 1;
 2449		break;
 2450	}
 2451
 2452	if (dev->in_reset)
 2453		retry = 0;
 2454
 2455	return retry;	/* retry on raid path? */
 2456}
 2457
 2458static void hpsa_cmd_resolve_events(struct ctlr_info *h,
 2459		struct CommandList *c)
 2460{
 2461	struct hpsa_scsi_dev_t *dev = c->device;
 2462
 2463	/*
 2464	 * Reset c->scsi_cmd here so that the reset handler will know
 2465	 * this command has completed.  Then, check to see if the handler is
 2466	 * waiting for this command, and, if so, wake it.
 2467	 */
 2468	c->scsi_cmd = SCSI_CMD_IDLE;
 2469	mb();	/* Declare command idle before checking for pending events. */
 2470	if (dev) {
 2471		atomic_dec(&dev->commands_outstanding);
 2472		if (dev->in_reset &&
 2473			atomic_read(&dev->commands_outstanding) <= 0)
 2474			wake_up_all(&h->event_sync_wait_queue);
 2475	}
 2476}
 2477
 2478static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
 2479				      struct CommandList *c)
 2480{
 2481	hpsa_cmd_resolve_events(h, c);
 2482	cmd_tagged_free(h, c);
 2483}
 2484
 2485static void hpsa_cmd_free_and_done(struct ctlr_info *h,
 2486		struct CommandList *c, struct scsi_cmnd *cmd)
 2487{
 2488	hpsa_cmd_resolve_and_free(h, c);
 2489	if (cmd)
 2490		scsi_done(cmd);
 2491}
 2492
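/* Queue a command for resubmission down the RAID path via the resubmit workqueue. */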
 2493static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
 2494{
 2495	INIT_WORK(&c->work, hpsa_command_resubmit_worker);
 2496	queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
 2497}
 2498
 2499static void process_ioaccel2_completion(struct ctlr_info *h,
 2500		struct CommandList *c, struct scsi_cmnd *cmd,
 2501		struct hpsa_scsi_dev_t *dev)
 2502{
 2503	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
 2504
 2505	/* check for good status */
 2506	if (likely(c2->error_data.serv_response == 0 &&
 2507			c2->error_data.status == 0)) {
 2508		cmd->result = 0;
 2509		return hpsa_cmd_free_and_done(h, c, cmd);
 2510	}
 2511
 2512	/*
 2513	 * Any RAID offload error results in retry which will use
 2514	 * the normal I/O path so the controller can handle whatever is
 2515	 * wrong.
 2516	 */
 2517	if (is_logical_device(dev) &&
 2518		c2->error_data.serv_response ==
 2519			IOACCEL2_SERV_RESPONSE_FAILURE) {
 2520		if (c2->error_data.status ==
 2521			IOACCEL2_STATUS_SR_IOACCEL_DISABLED) {
 2522			hpsa_turn_off_ioaccel_for_device(dev);
 2523		}
 2524
 2525		if (dev->in_reset) {
 2526			cmd->result = DID_RESET << 16;
 2527			return hpsa_cmd_free_and_done(h, c, cmd);
 2528		}
 2529
 2530		return hpsa_retry_cmd(h, c);
 2531	}
 2532
 2533	if (handle_ioaccel_mode2_error(h, c, cmd, c2, dev))
 2534		return hpsa_retry_cmd(h, c);
 2535
 2536	return hpsa_cmd_free_and_done(h, c, cmd);
 2537}
 2538
 2539/* Returns 0 on success, < 0 otherwise. */
 2540static int hpsa_evaluate_tmf_status(struct ctlr_info *h,
 2541					struct CommandList *cp)
 2542{
 2543	u8 tmf_status = cp->err_info->ScsiStatus;
 2544
 2545	switch (tmf_status) {
 2546	case CISS_TMF_COMPLETE:
 2547		/*
 2548		 * CISS_TMF_COMPLETE never happens, instead,
 2549		 * ei->CommandStatus == 0 for this case.
 2550		 */
 2551	case CISS_TMF_SUCCESS:
 2552		return 0;
 2553	case CISS_TMF_INVALID_FRAME:
 2554	case CISS_TMF_NOT_SUPPORTED:
 2555	case CISS_TMF_FAILED:
 2556	case CISS_TMF_WRONG_LUN:
 2557	case CISS_TMF_OVERLAPPED_TAG:
 2558		break;
 2559	default:
 2560		dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n",
 2561				tmf_status);
 2562		break;
 2563	}
 2564	return -tmf_status;
 2565}
 2566
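/*
 * Main completion handler: undo the DMA mappings, translate the
 * controller's error information into SCSI mid layer result codes, and
 * either complete the command or retry it down the RAID path.
 */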
 2567static void complete_scsi_command(struct CommandList *cp)
 2568{
 2569	struct scsi_cmnd *cmd;
 2570	struct ctlr_info *h;
 2571	struct ErrorInfo *ei;
 2572	struct hpsa_scsi_dev_t *dev;
 2573	struct io_accel2_cmd *c2;
 2574
 2575	u8 sense_key;
 2576	u8 asc;      /* additional sense code */
 2577	u8 ascq;     /* additional sense code qualifier */
 2578	unsigned long sense_data_size;
 2579
 2580	ei = cp->err_info;
 2581	cmd = cp->scsi_cmd;
 2582	h = cp->h;
 2583
 2584	if (!cmd->device) {
 2585		cmd->result = DID_NO_CONNECT << 16;
 2586		return hpsa_cmd_free_and_done(h, cp, cmd);
 2587	}
 2588
 2589	dev = cmd->device->hostdata;
 2590	if (!dev) {
 2591		cmd->result = DID_NO_CONNECT << 16;
 2592		return hpsa_cmd_free_and_done(h, cp, cmd);
 2593	}
 2594	c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];
 2595
 2596	scsi_dma_unmap(cmd); /* undo the DMA mappings */
 2597	if ((cp->cmd_type == CMD_SCSI) &&
 2598		(le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
 2599		hpsa_unmap_sg_chain_block(h, cp);
 2600
 2601	if ((cp->cmd_type == CMD_IOACCEL2) &&
 2602		(c2->sg[0].chain_indicator == IOACCEL2_CHAIN))
 2603		hpsa_unmap_ioaccel2_sg_chain_block(h, c2);
 2604
 2605	cmd->result = (DID_OK << 16);		/* host byte */
 2606
 2607	/* SCSI command has already been cleaned up in SML */
 2608	if (dev->was_removed) {
 2609		hpsa_cmd_resolve_and_free(h, cp);
 2610		return;
 2611	}
 2612
 2613	if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) {
 2614		if (dev->physical_device && dev->expose_device &&
 2615			dev->removed) {
 2616			cmd->result = DID_NO_CONNECT << 16;
 2617			return hpsa_cmd_free_and_done(h, cp, cmd);
 2618		}
 2619		if (likely(cp->phys_disk != NULL))
 2620			atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
 2621	}
 2622
 2623	/*
 2624	 * We check for lockup status here as it may be set for
 2625	 * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by
  2626	 * fail_all_outstanding_cmds()
 2627	 */
 2628	if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
 2629		/* DID_NO_CONNECT will prevent a retry */
 2630		cmd->result = DID_NO_CONNECT << 16;
 2631		return hpsa_cmd_free_and_done(h, cp, cmd);
 2632	}
 2633
 2634	if (cp->cmd_type == CMD_IOACCEL2)
 2635		return process_ioaccel2_completion(h, cp, cmd, dev);
 2636
 2637	scsi_set_resid(cmd, ei->ResidualCnt);
 2638	if (ei->CommandStatus == 0)
 2639		return hpsa_cmd_free_and_done(h, cp, cmd);
 2640
 2641	/* For I/O accelerator commands, copy over some fields to the normal
 2642	 * CISS header used below for error handling.
 2643	 */
 2644	if (cp->cmd_type == CMD_IOACCEL1) {
 2645		struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
 2646		cp->Header.SGList = scsi_sg_count(cmd);
 2647		cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
 2648		cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
 2649			IOACCEL1_IOFLAGS_CDBLEN_MASK;
 2650		cp->Header.tag = c->tag;
 2651		memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
 2652		memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
 2653
 2654		/* Any RAID offload error results in retry which will use
 2655		 * the normal I/O path so the controller can handle whatever's
 2656		 * wrong.
 2657		 */
 2658		if (is_logical_device(dev)) {
 2659			if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
 2660				dev->offload_enabled = 0;
 2661			return hpsa_retry_cmd(h, cp);
 2662		}
 2663	}
 2664
 2665	/* an error has occurred */
 2666	switch (ei->CommandStatus) {
 2667
 2668	case CMD_TARGET_STATUS:
 2669		cmd->result |= ei->ScsiStatus;
 2670		/* copy the sense data */
 2671		if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
 2672			sense_data_size = SCSI_SENSE_BUFFERSIZE;
 2673		else
 2674			sense_data_size = sizeof(ei->SenseInfo);
 2675		if (ei->SenseLen < sense_data_size)
 2676			sense_data_size = ei->SenseLen;
 2677		memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
 2678		if (ei->ScsiStatus)
 2679			decode_sense_data(ei->SenseInfo, sense_data_size,
 2680				&sense_key, &asc, &ascq);
 2681		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
 2682			switch (sense_key) {
 2683			case ABORTED_COMMAND:
 2684				cmd->result |= DID_SOFT_ERROR << 16;
 2685				break;
 2686			case UNIT_ATTENTION:
 2687				if (asc == 0x3F && ascq == 0x0E)
 2688					h->drv_req_rescan = 1;
 2689				break;
 2690			case ILLEGAL_REQUEST:
 2691				if (asc == 0x25 && ascq == 0x00) {
 2692					dev->removed = 1;
 2693					cmd->result = DID_NO_CONNECT << 16;
 2694				}
 2695				break;
 2696			}
 2697			break;
 2698		}
 2699		/* Problem was not a check condition
 2700		 * Pass it up to the upper layers...
 2701		 */
 2702		if (ei->ScsiStatus) {
 2703			dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
 2704				"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
 2705				"Returning result: 0x%x\n",
 2706				cp, ei->ScsiStatus,
 2707				sense_key, asc, ascq,
 2708				cmd->result);
 2709		} else {  /* scsi status is zero??? How??? */
 2710			dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
  2711				"Returning no connection.\n", cp);
 2712
 2713			/* Ordinarily, this case should never happen,
 2714			 * but there is a bug in some released firmware
 2715			 * revisions that allows it to happen if, for
 2716			 * example, a 4100 backplane loses power and
 2717			 * the tape drive is in it.  We assume that
 2718			 * it's a fatal error of some kind because we
 2719			 * can't show that it wasn't. We will make it
 2720			 * look like selection timeout since that is
 2721			 * the most common reason for this to occur,
 2722			 * and it's severe enough.
 2723			 */
 2724
 2725			cmd->result = DID_NO_CONNECT << 16;
 2726		}
 2727		break;
 2728
 2729	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
 2730		break;
 2731	case CMD_DATA_OVERRUN:
 2732		dev_warn(&h->pdev->dev,
 2733			"CDB %16phN data overrun\n", cp->Request.CDB);
 2734		break;
 2735	case CMD_INVALID: {
 2736		/* print_bytes(cp, sizeof(*cp), 1, 0);
 2737		print_cmd(cp); */
 2738		/* We get CMD_INVALID if you address a non-existent device
 2739		 * instead of a selection timeout (no response).  You will
 2740		 * see this if you yank out a drive, then try to access it.
 2741		 * This is kind of a shame because it means that any other
 2742		 * CMD_INVALID (e.g. driver bug) will get interpreted as a
 2743		 * missing target. */
 2744		cmd->result = DID_NO_CONNECT << 16;
 2745	}
 2746		break;
 2747	case CMD_PROTOCOL_ERR:
 2748		cmd->result = DID_ERROR << 16;
 2749		dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
 2750				cp->Request.CDB);
 2751		break;
 2752	case CMD_HARDWARE_ERR:
 2753		cmd->result = DID_ERROR << 16;
 2754		dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
 2755			cp->Request.CDB);
 2756		break;
 2757	case CMD_CONNECTION_LOST:
 2758		cmd->result = DID_ERROR << 16;
 2759		dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
 2760			cp->Request.CDB);
 2761		break;
 2762	case CMD_ABORTED:
 2763		cmd->result = DID_ABORT << 16;
 2764		break;
 2765	case CMD_ABORT_FAILED:
 2766		cmd->result = DID_ERROR << 16;
 2767		dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
 2768			cp->Request.CDB);
 2769		break;
 2770	case CMD_UNSOLICITED_ABORT:
 2771		cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
 2772		dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
 2773			cp->Request.CDB);
 2774		break;
 2775	case CMD_TIMEOUT:
 2776		cmd->result = DID_TIME_OUT << 16;
 2777		dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
 2778			cp->Request.CDB);
 2779		break;
 2780	case CMD_UNABORTABLE:
 2781		cmd->result = DID_ERROR << 16;
 2782		dev_warn(&h->pdev->dev, "Command unabortable\n");
 2783		break;
 2784	case CMD_TMF_STATUS:
 2785		if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */
 2786			cmd->result = DID_ERROR << 16;
 2787		break;
 2788	case CMD_IOACCEL_DISABLED:
 2789		/* This only handles the direct pass-through case since RAID
 2790		 * offload is handled above.  Just attempt a retry.
 2791		 */
 2792		cmd->result = DID_SOFT_ERROR << 16;
 2793		dev_warn(&h->pdev->dev,
 2794				"cp %p had HP SSD Smart Path error\n", cp);
 2795		break;
 2796	default:
 2797		cmd->result = DID_ERROR << 16;
 2798		dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
 2799				cp, ei->CommandStatus);
 2800	}
 2801
 2802	return hpsa_cmd_free_and_done(h, cp, cmd);
 2803}
 2804
 2805static void hpsa_pci_unmap(struct pci_dev *pdev, struct CommandList *c,
 2806		int sg_used, enum dma_data_direction data_direction)
 2807{
 2808	int i;
 2809
 2810	for (i = 0; i < sg_used; i++)
 2811		dma_unmap_single(&pdev->dev, le64_to_cpu(c->SG[i].Addr),
 2812				le32_to_cpu(c->SG[i].Len),
 2813				data_direction);
 2814}
 2815
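/*
 * Map a single contiguous buffer into the command's first (and only) SG
 * entry.  Returns -1 if the DMA mapping fails.
 */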
 2816static int hpsa_map_one(struct pci_dev *pdev,
 2817		struct CommandList *cp,
 2818		unsigned char *buf,
 2819		size_t buflen,
 2820		enum dma_data_direction data_direction)
 2821{
 2822	u64 addr64;
 2823
 2824	if (buflen == 0 || data_direction == DMA_NONE) {
 2825		cp->Header.SGList = 0;
 2826		cp->Header.SGTotal = cpu_to_le16(0);
 2827		return 0;
 2828	}
 2829
 2830	addr64 = dma_map_single(&pdev->dev, buf, buflen, data_direction);
 2831	if (dma_mapping_error(&pdev->dev, addr64)) {
 2832		/* Prevent subsequent unmap of something never mapped */
 2833		cp->Header.SGList = 0;
 2834		cp->Header.SGTotal = cpu_to_le16(0);
 2835		return -1;
 2836	}
 2837	cp->SG[0].Addr = cpu_to_le64(addr64);
 2838	cp->SG[0].Len = cpu_to_le32(buflen);
 2839	cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
 2840	cp->Header.SGList = 1;   /* no. SGs contig in this cmd */
 2841	cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
 2842	return 0;
 2843}
 2844
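/*
 * Submit a driver-initiated command and wait for the controller to
 * complete it, optionally bounded by a timeout in milliseconds.
 */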
 2845#define NO_TIMEOUT ((unsigned long) -1)
 2846#define DEFAULT_TIMEOUT 30000 /* milliseconds */
 2847static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
 2848	struct CommandList *c, int reply_queue, unsigned long timeout_msecs)
 2849{
 2850	DECLARE_COMPLETION_ONSTACK(wait);
 2851
 2852	c->waiting = &wait;
 2853	__enqueue_cmd_and_start_io(h, c, reply_queue);
 2854	if (timeout_msecs == NO_TIMEOUT) {
 2855		/* TODO: get rid of this no-timeout thing */
 2856		wait_for_completion_io(&wait);
 2857		return IO_OK;
 2858	}
 2859	if (!wait_for_completion_io_timeout(&wait,
 2860					msecs_to_jiffies(timeout_msecs))) {
 2861		dev_warn(&h->pdev->dev, "Command timed out.\n");
 2862		return -ETIMEDOUT;
 2863	}
 2864	return IO_OK;
 2865}
 2866
 2867static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c,
 2868				   int reply_queue, unsigned long timeout_msecs)
 2869{
 2870	if (unlikely(lockup_detected(h))) {
 2871		c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
 2872		return IO_OK;
 2873	}
 2874	return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs);
 2875}
 2876
 2877static u32 lockup_detected(struct ctlr_info *h)
 2878{
 2879	int cpu;
 2880	u32 rc, *lockup_detected;
 2881
 2882	cpu = get_cpu();
 2883	lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
 2884	rc = *lockup_detected;
 2885	put_cpu();
 2886	return rc;
 2887}
 2888
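/*
 * Issue a driver-initiated command, retrying on unit attention or busy
 * status, backing off (doubling up to one second) after the first few
 * retries.
 */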
 2889#define MAX_DRIVER_CMD_RETRIES 25
 2890static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
 2891		struct CommandList *c, enum dma_data_direction data_direction,
 2892		unsigned long timeout_msecs)
 2893{
 2894	int backoff_time = 10, retry_count = 0;
 2895	int rc;
 2896
 2897	do {
 2898		memset(c->err_info, 0, sizeof(*c->err_info));
 2899		rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
 2900						  timeout_msecs);
 2901		if (rc)
 2902			break;
 2903		retry_count++;
 2904		if (retry_count > 3) {
 2905			msleep(backoff_time);
 2906			if (backoff_time < 1000)
 2907				backoff_time *= 2;
 2908		}
 2909	} while ((check_for_unit_attention(h, c) ||
 2910			check_for_busy(h, c)) &&
 2911			retry_count <= MAX_DRIVER_CMD_RETRIES);
 2912	hpsa_pci_unmap(h->pdev, c, 1, data_direction);
 2913	if (retry_count > MAX_DRIVER_CMD_RETRIES)
 2914		rc = -EIO;
 2915	return rc;
 2916}
 2917
 2918static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
 2919				struct CommandList *c)
 2920{
 2921	const u8 *cdb = c->Request.CDB;
 2922	const u8 *lun = c->Header.LUN.LunAddrBytes;
 2923
 2924	dev_warn(&h->pdev->dev, "%s: LUN:%8phN CDB:%16phN\n",
 2925		 txt, lun, cdb);
 2926}
 2927
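/* Log a human-readable explanation of why a driver-initiated command failed. */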
 2928static void hpsa_scsi_interpret_error(struct ctlr_info *h,
 2929			struct CommandList *cp)
 2930{
 2931	const struct ErrorInfo *ei = cp->err_info;
 2932	struct device *d = &cp->h->pdev->dev;
 2933	u8 sense_key, asc, ascq;
 2934	int sense_len;
 2935
 2936	switch (ei->CommandStatus) {
 2937	case CMD_TARGET_STATUS:
 2938		if (ei->SenseLen > sizeof(ei->SenseInfo))
 2939			sense_len = sizeof(ei->SenseInfo);
 2940		else
 2941			sense_len = ei->SenseLen;
 2942		decode_sense_data(ei->SenseInfo, sense_len,
 2943					&sense_key, &asc, &ascq);
 2944		hpsa_print_cmd(h, "SCSI status", cp);
 2945		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
 2946			dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n",
 2947				sense_key, asc, ascq);
 2948		else
 2949			dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus);
 2950		if (ei->ScsiStatus == 0)
 2951			dev_warn(d, "SCSI status is abnormally zero.  "
 2952			"(probably indicates selection timeout "
 2953			"reported incorrectly due to a known "
 2954			"firmware bug, circa July, 2001.)\n");
 2955		break;
 2956	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
 2957		break;
 2958	case CMD_DATA_OVERRUN:
 2959		hpsa_print_cmd(h, "overrun condition", cp);
 2960		break;
 2961	case CMD_INVALID: {
  2962		/* controller unfortunately reports SCSI passthrus
 2963		 * to non-existent targets as invalid commands.
 2964		 */
 2965		hpsa_print_cmd(h, "invalid command", cp);
 2966		dev_warn(d, "probably means device no longer present\n");
 2967		}
 2968		break;
 2969	case CMD_PROTOCOL_ERR:
 2970		hpsa_print_cmd(h, "protocol error", cp);
 2971		break;
 2972	case CMD_HARDWARE_ERR:
 2973		hpsa_print_cmd(h, "hardware error", cp);
 2974		break;
 2975	case CMD_CONNECTION_LOST:
 2976		hpsa_print_cmd(h, "connection lost", cp);
 2977		break;
 2978	case CMD_ABORTED:
 2979		hpsa_print_cmd(h, "aborted", cp);
 2980		break;
 2981	case CMD_ABORT_FAILED:
 2982		hpsa_print_cmd(h, "abort failed", cp);
 2983		break;
 2984	case CMD_UNSOLICITED_ABORT:
 2985		hpsa_print_cmd(h, "unsolicited abort", cp);
 2986		break;
 2987	case CMD_TIMEOUT:
 2988		hpsa_print_cmd(h, "timed out", cp);
 2989		break;
 2990	case CMD_UNABORTABLE:
 2991		hpsa_print_cmd(h, "unabortable", cp);
 2992		break;
 2993	case CMD_CTLR_LOCKUP:
 2994		hpsa_print_cmd(h, "controller lockup detected", cp);
 2995		break;
 2996	default:
 2997		hpsa_print_cmd(h, "unknown status", cp);
 2998		dev_warn(d, "Unknown command status %x\n",
 2999				ei->CommandStatus);
 3000	}
 3001}
 3002
 3003static int hpsa_do_receive_diagnostic(struct ctlr_info *h, u8 *scsi3addr,
 3004					u8 page, u8 *buf, size_t bufsize)
 3005{
 3006	int rc = IO_OK;
 3007	struct CommandList *c;
 3008	struct ErrorInfo *ei;
 3009
 3010	c = cmd_alloc(h);
 3011	if (fill_cmd(c, RECEIVE_DIAGNOSTIC, h, buf, bufsize,
 3012			page, scsi3addr, TYPE_CMD)) {
 3013		rc = -1;
 3014		goto out;
 3015	}
 3016	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
 3017			NO_TIMEOUT);
 3018	if (rc)
 3019		goto out;
 3020	ei = c->err_info;
 3021	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
 3022		hpsa_scsi_interpret_error(h, c);
 3023		rc = -1;
 3024	}
 3025out:
 3026	cmd_free(h, c);
 3027	return rc;
 3028}
 3029
 3030static u64 hpsa_get_enclosure_logical_identifier(struct ctlr_info *h,
 3031						u8 *scsi3addr)
 3032{
 3033	u8 *buf;
 3034	u64 sa = 0;
 3035	int rc = 0;
 3036
 3037	buf = kzalloc(1024, GFP_KERNEL);
 3038	if (!buf)
 3039		return 0;
 3040
 3041	rc = hpsa_do_receive_diagnostic(h, scsi3addr, RECEIVE_DIAGNOSTIC,
 3042					buf, 1024);
 3043
 3044	if (rc)
 3045		goto out;
 3046
 3047	sa = get_unaligned_be64(buf+12);
 3048
 3049out:
 3050	kfree(buf);
 3051	return sa;
 3052}
 3053
 3054static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
 3055			u16 page, unsigned char *buf,
 3056			unsigned char bufsize)
 3057{
 3058	int rc = IO_OK;
 3059	struct CommandList *c;
 3060	struct ErrorInfo *ei;
 3061
 3062	c = cmd_alloc(h);
 3063
 3064	if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
 3065			page, scsi3addr, TYPE_CMD)) {
 3066		rc = -1;
 3067		goto out;
 3068	}
 3069	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
 3070			NO_TIMEOUT);
 3071	if (rc)
 3072		goto out;
 3073	ei = c->err_info;
 3074	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
 3075		hpsa_scsi_interpret_error(h, c);
 3076		rc = -1;
 3077	}
 3078out:
 3079	cmd_free(h, c);
 3080	return rc;
 3081}
 3082
 3083static int hpsa_send_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
 3084	u8 reset_type, int reply_queue)
 3085{
 3086	int rc = IO_OK;
 3087	struct CommandList *c;
 3088	struct ErrorInfo *ei;
 3089
 3090	c = cmd_alloc(h);
 3091	c->device = dev;
 3092
 3093	/* fill_cmd can't fail here, no data buffer to map. */
 3094	(void) fill_cmd(c, reset_type, h, NULL, 0, 0, dev->scsi3addr, TYPE_MSG);
 3095	rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
 3096	if (rc) {
 3097		dev_warn(&h->pdev->dev, "Failed to send reset command\n");
 3098		goto out;
 3099	}
 3100	/* no unmap needed here because no data xfer. */
 3101
 3102	ei = c->err_info;
 3103	if (ei->CommandStatus != 0) {
 3104		hpsa_scsi_interpret_error(h, c);
 3105		rc = -1;
 3106	}
 3107out:
 3108	cmd_free(h, c);
 3109	return rc;
 3110}
 3111
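/*
 * Return true if a non-idle command is addressed to the given device,
 * whether it was issued on the RAID path or one of the ioaccel paths.
 */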
 3112static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
 3113			       struct hpsa_scsi_dev_t *dev,
 3114			       unsigned char *scsi3addr)
 3115{
 3116	int i;
 3117	bool match = false;
 3118	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
 3119	struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
 3120
 3121	if (hpsa_is_cmd_idle(c))
 3122		return false;
 3123
 3124	switch (c->cmd_type) {
 3125	case CMD_SCSI:
 3126	case CMD_IOCTL_PEND:
 3127		match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes,
 3128				sizeof(c->Header.LUN.LunAddrBytes));
 3129		break;
 3130
 3131	case CMD_IOACCEL1:
 3132	case CMD_IOACCEL2:
 3133		if (c->phys_disk == dev) {
 3134			/* HBA mode match */
 3135			match = true;
 3136		} else {
 3137			/* Possible RAID mode -- check each phys dev. */
 3138			/* FIXME:  Do we need to take out a lock here?  If
 3139			 * so, we could just call hpsa_get_pdisk_of_ioaccel2()
 3140			 * instead. */
 3141			for (i = 0; i < dev->nphysical_disks && !match; i++) {
 3142				/* FIXME: an alternate test might be
 3143				 *
 3144				 * match = dev->phys_disk[i]->ioaccel_handle
 3145				 *              == c2->scsi_nexus;      */
 3146				match = dev->phys_disk[i] == c->phys_disk;
 3147			}
 3148		}
 3149		break;
 3150
 3151	case IOACCEL2_TMF:
 3152		for (i = 0; i < dev->nphysical_disks && !match; i++) {
 3153			match = dev->phys_disk[i]->ioaccel_handle ==
 3154					le32_to_cpu(ac->it_nexus);
 3155		}
 3156		break;
 3157
 3158	case 0:		/* The command is in the middle of being initialized. */
 3159		match = false;
 3160		break;
 3161
 3162	default:
 3163		dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n",
 3164			c->cmd_type);
 3165		BUG();
 3166	}
 3167
 3168	return match;
 3169}
 3170
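/*
 * Serialize resets on reset_mutex, send the reset, then wait for the
 * device's outstanding commands to drain and for it to become ready again.
 */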
 3171static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
 3172	u8 reset_type, int reply_queue)
 3173{
 3174	int rc = 0;
 3175
 3176	/* We can really only handle one reset at a time */
 3177	if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) {
 3178		dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n");
 3179		return -EINTR;
 3180	}
 3181
 3182	rc = hpsa_send_reset(h, dev, reset_type, reply_queue);
 3183	if (!rc) {
 3184		/* incremented by sending the reset request */
 3185		atomic_dec(&dev->commands_outstanding);
 3186		wait_event(h->event_sync_wait_queue,
 3187			atomic_read(&dev->commands_outstanding) <= 0 ||
 3188			lockup_detected(h));
 3189	}
 3190
 3191	if (unlikely(lockup_detected(h))) {
 3192		dev_warn(&h->pdev->dev,
 3193			 "Controller lockup detected during reset wait\n");
 3194		rc = -ENODEV;
 3195	}
 3196
 3197	if (!rc)
 3198		rc = wait_for_device_to_become_ready(h, dev->scsi3addr, 0);
 3199
 3200	mutex_unlock(&h->reset_mutex);
 3201	return rc;
 3202}
 3203
 3204static void hpsa_get_raid_level(struct ctlr_info *h,
 3205	unsigned char *scsi3addr, unsigned char *raid_level)
 3206{
 3207	int rc;
 3208	unsigned char *buf;
 3209
 3210	*raid_level = RAID_UNKNOWN;
 3211	buf = kzalloc(64, GFP_KERNEL);
 3212	if (!buf)
 3213		return;
 3214
 3215	if (!hpsa_vpd_page_supported(h, scsi3addr,
 3216		HPSA_VPD_LV_DEVICE_GEOMETRY))
 3217		goto exit;
 3218
 3219	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
 3220		HPSA_VPD_LV_DEVICE_GEOMETRY, buf, 64);
 3221
 3222	if (rc == 0)
 3223		*raid_level = buf[8];
 3224	if (*raid_level > RAID_UNKNOWN)
 3225		*raid_level = RAID_UNKNOWN;
 3226exit:
 3227	kfree(buf);
 3228	return;
 3229}
 3230
 3231#define HPSA_MAP_DEBUG
 3232#ifdef HPSA_MAP_DEBUG
 3233static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
 3234				struct raid_map_data *map_buff)
 3235{
 3236	struct raid_map_disk_data *dd = &map_buff->data[0];
 3237	int map, row, col;
 3238	u16 map_cnt, row_cnt, disks_per_row;
 3239
 3240	if (rc != 0)
 3241		return;
 3242
 3243	/* Show details only if debugging has been activated. */
 3244	if (h->raid_offload_debug < 2)
 3245		return;
 3246
 3247	dev_info(&h->pdev->dev, "structure_size = %u\n",
 3248				le32_to_cpu(map_buff->structure_size));
 3249	dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
 3250			le32_to_cpu(map_buff->volume_blk_size));
 3251	dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
 3252			le64_to_cpu(map_buff->volume_blk_cnt));
 3253	dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
 3254			map_buff->phys_blk_shift);
 3255	dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
 3256			map_buff->parity_rotation_shift);
 3257	dev_info(&h->pdev->dev, "strip_size = %u\n",
 3258			le16_to_cpu(map_buff->strip_size));
 3259	dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
 3260			le64_to_cpu(map_buff->disk_starting_blk));
 3261	dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
 3262			le64_to_cpu(map_buff->disk_blk_cnt));
 3263	dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
 3264			le16_to_cpu(map_buff->data_disks_per_row));
 3265	dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
 3266			le16_to_cpu(map_buff->metadata_disks_per_row));
 3267	dev_info(&h->pdev->dev, "row_cnt = %u\n",
 3268			le16_to_cpu(map_buff->row_cnt));
 3269	dev_info(&h->pdev->dev, "layout_map_count = %u\n",
 3270			le16_to_cpu(map_buff->layout_map_count));
 3271	dev_info(&h->pdev->dev, "flags = 0x%x\n",
 3272			le16_to_cpu(map_buff->flags));
 3273	dev_info(&h->pdev->dev, "encryption = %s\n",
 3274			le16_to_cpu(map_buff->flags) &
 3275			RAID_MAP_FLAG_ENCRYPT_ON ?  "ON" : "OFF");
 3276	dev_info(&h->pdev->dev, "dekindex = %u\n",
 3277			le16_to_cpu(map_buff->dekindex));
 3278	map_cnt = le16_to_cpu(map_buff->layout_map_count);
 3279	for (map = 0; map < map_cnt; map++) {
 3280		dev_info(&h->pdev->dev, "Map%u:\n", map);
 3281		row_cnt = le16_to_cpu(map_buff->row_cnt);
 3282		for (row = 0; row < row_cnt; row++) {
 3283			dev_info(&h->pdev->dev, "  Row%u:\n", row);
 3284			disks_per_row =
 3285				le16_to_cpu(map_buff->data_disks_per_row);
 3286			for (col = 0; col < disks_per_row; col++, dd++)
 3287				dev_info(&h->pdev->dev,
 3288					"    D%02u: h=0x%04x xor=%u,%u\n",
 3289					col, dd->ioaccel_handle,
 3290					dd->xor_mult[0], dd->xor_mult[1]);
 3291			disks_per_row =
 3292				le16_to_cpu(map_buff->metadata_disks_per_row);
 3293			for (col = 0; col < disks_per_row; col++, dd++)
 3294				dev_info(&h->pdev->dev,
 3295					"    M%02u: h=0x%04x xor=%u,%u\n",
 3296					col, dd->ioaccel_handle,
 3297					dd->xor_mult[0], dd->xor_mult[1]);
 3298		}
 3299	}
 3300}
 3301#else
 3302static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
 3303			__attribute__((unused)) int rc,
 3304			__attribute__((unused)) struct raid_map_data *map_buff)
 3305{
 3306}
 3307#endif
 3308
 3309static int hpsa_get_raid_map(struct ctlr_info *h,
 3310	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
 3311{
 3312	int rc = 0;
 3313	struct CommandList *c;
 3314	struct ErrorInfo *ei;
 3315
 3316	c = cmd_alloc(h);
 3317
 3318	if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
 3319			sizeof(this_device->raid_map), 0,
 3320			scsi3addr, TYPE_CMD)) {
 3321		dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n");
 3322		cmd_free(h, c);
 3323		return -1;
 3324	}
 3325	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
 3326			NO_TIMEOUT);
 3327	if (rc)
 3328		goto out;
 3329	ei = c->err_info;
 3330	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
 3331		hpsa_scsi_interpret_error(h, c);
 3332		rc = -1;
 3333		goto out;
 3334	}
 3335	cmd_free(h, c);
 3336
 3337	/* @todo in the future, dynamically allocate RAID map memory */
 3338	if (le32_to_cpu(this_device->raid_map.structure_size) >
 3339				sizeof(this_device->raid_map)) {
 3340		dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
 3341		rc = -1;
 3342	}
 3343	hpsa_debug_map_buff(h, rc, &this_device->raid_map);
 3344	return rc;
 3345out:
 3346	cmd_free(h, c);
 3347	return rc;
 3348}
 3349
 3350static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h,
 3351		unsigned char scsi3addr[], u16 bmic_device_index,
 3352		struct bmic_sense_subsystem_info *buf, size_t bufsize)
 3353{
 3354	int rc = IO_OK;
 3355	struct CommandList *c;
 3356	struct ErrorInfo *ei;
 3357
 3358	c = cmd_alloc(h);
 3359
 3360	rc = fill_cmd(c, BMIC_SENSE_SUBSYSTEM_INFORMATION, h, buf, bufsize,
 3361		0, RAID_CTLR_LUNID, TYPE_CMD);
 3362	if (rc)
 3363		goto out;
 3364
 3365	c->Request.CDB[2] = bmic_device_index & 0xff;
 3366	c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
 3367
 3368	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
 3369			NO_TIMEOUT);
 3370	if (rc)
 3371		goto out;
 3372	ei = c->err_info;
 3373	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
 3374		hpsa_scsi_interpret_error(h, c);
 3375		rc = -1;
 3376	}
 3377out:
 3378	cmd_free(h, c);
 3379	return rc;
 3380}
 3381
 3382static int hpsa_bmic_id_controller(struct ctlr_info *h,
 3383	struct bmic_identify_controller *buf, size_t bufsize)
 3384{
 3385	int rc = IO_OK;
 3386	struct CommandList *c;
 3387	struct ErrorInfo *ei;
 3388
 3389	c = cmd_alloc(h);
 3390
 3391	rc = fill_cmd(c, BMIC_IDENTIFY_CONTROLLER, h, buf, bufsize,
 3392		0, RAID_CTLR_LUNID, TYPE_CMD);
 3393	if (rc)
 3394		goto out;
 3395
 3396	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
 3397			NO_TIMEOUT);
 3398	if (rc)
 3399		goto out;
 3400	ei = c->err_info;
 3401	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
 3402		hpsa_scsi_interpret_error(h, c);
 3403		rc = -1;
 3404	}
 3405out:
 3406	cmd_free(h, c);
 3407	return rc;
 3408}
 3409
 3410static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
 3411		unsigned char scsi3addr[], u16 bmic_device_index,
 3412		struct bmic_identify_physical_device *buf, size_t bufsize)
 3413{
 3414	int rc = IO_OK;
 3415	struct CommandList *c;
 3416	struct ErrorInfo *ei;
 3417
 3418	c = cmd_alloc(h);
 3419	rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
 3420		0, RAID_CTLR_LUNID, TYPE_CMD);
 3421	if (rc)
 3422		goto out;
 3423
 3424	c->Request.CDB[2] = bmic_device_index & 0xff;
 3425	c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
 3426
 3427	hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
 3428						NO_TIMEOUT);
 3429	ei = c->err_info;
 3430	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
 3431		hpsa_scsi_interpret_error(h, c);
 3432		rc = -1;
 3433	}
 3434out:
 3435	cmd_free(h, c);
 3436
 3437	return rc;
 3438}
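
/*
 * Editor's sketch, not part of the driver: the BMIC sense-subsystem-information
 * and identify-physical-device commands above both split the 16-bit
 * bmic_device_index across CDB[2] (low byte) and CDB[9] (high byte).  The
 * hypothetical helper below shows that encoding in isolation, e.g. index
 * 0x0123 yields CDB[2] = 0x23 and CDB[9] = 0x01.
 */
static inline void example_encode_bmic_index(u8 *cdb, u16 bmic_device_index)
{
	cdb[2] = bmic_device_index & 0xff;		/* low byte */
	cdb[9] = (bmic_device_index >> 8) & 0xff;	/* high byte */
}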
 3439
 3440/*
 3441 * get enclosure information
 3442 * struct ReportExtendedLUNdata *rlep - Used for BMIC drive number
 3443 * struct hpsa_scsi_dev_t *encl_dev - device entry for enclosure
 3444 * Uses id_physical_device to determine the box_index.
 3445 */
 3446static void hpsa_get_enclosure_info(struct ctlr_info *h,
 3447			unsigned char *scsi3addr,
 3448			struct ReportExtendedLUNdata *rlep, int rle_index,
 3449			struct hpsa_scsi_dev_t *encl_dev)
 3450{
 3451	int rc = -1;
 3452	struct CommandList *c = NULL;
 3453	struct ErrorInfo *ei = NULL;
 3454	struct bmic_sense_storage_box_params *bssbp = NULL;
 3455	struct bmic_identify_physical_device *id_phys = NULL;
 3456	struct ext_report_lun_entry *rle;
 3457	u16 bmic_device_index = 0;
 3458
 3459	if (rle_index < 0 || rle_index >= HPSA_MAX_PHYS_LUN)
 3460		return;
 3461
 3462	rle = &rlep->LUN[rle_index];
 3463
 3464	encl_dev->eli =
 3465		hpsa_get_enclosure_logical_identifier(h, scsi3addr);
 3466
 3467	bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]);
 3468
 3469	if (encl_dev->target == -1 || encl_dev->lun == -1) {
 3470		rc = IO_OK;
 3471		goto out;
 3472	}
 3473
 3474	if (bmic_device_index == 0xFF00 || MASKED_DEVICE(&rle->lunid[0])) {
 3475		rc = IO_OK;
 3476		goto out;
 3477	}
 3478
 3479	bssbp = kzalloc(sizeof(*bssbp), GFP_KERNEL);
 3480	if (!bssbp)
 3481		goto out;
 3482
 3483	id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
 3484	if (!id_phys)
 3485		goto out;
 3486
 3487	rc = hpsa_bmic_id_physical_device(h, scsi3addr, bmic_device_index,
 3488						id_phys, sizeof(*id_phys));
 3489	if (rc) {
 3490		dev_warn(&h->pdev->dev, "%s: id_phys failed %d bdi[0x%x]\n",
 3491			__func__, encl_dev->external, bmic_device_index);
 3492		goto out;
 3493	}
 3494
 3495	c = cmd_alloc(h);
 3496
 3497	rc = fill_cmd(c, BMIC_SENSE_STORAGE_BOX_PARAMS, h, bssbp,
 3498			sizeof(*bssbp), 0, RAID_CTLR_LUNID, TYPE_CMD);
 3499
 3500	if (rc)
 3501		goto out;
 3502
 3503	if (id_phys->phys_connector[1] == 'E')
 3504		c->Request.CDB[5] = id_phys->box_index;
 3505	else
 3506		c->Request.CDB[5] = 0;
 3507
 3508	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
 3509						NO_TIMEOUT);
 3510	if (rc)
 3511		goto out;
 3512
 3513	ei = c->err_info;
 3514	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
 3515		rc = -1;
 3516		goto out;
 3517	}
 3518
 3519	encl_dev->box[id_phys->active_path_number] = bssbp->phys_box_on_port;
 3520	memcpy(&encl_dev->phys_connector[id_phys->active_path_number],
 3521		bssbp->phys_connector, sizeof(bssbp->phys_connector));
 3522
 3523	rc = IO_OK;
 3524out:
 3525	kfree(bssbp);
 3526	kfree(id_phys);
 3527
 3528	if (c)
 3529		cmd_free(h, c);
 3530
 3531	if (rc != IO_OK)
 3532		hpsa_show_dev_msg(KERN_INFO, h, encl_dev,
 3533			"Error, could not get enclosure information");
 3534}
 3535
 3536static u64 hpsa_get_sas_address_from_report_physical(struct ctlr_info *h,
 3537						unsigned char *scsi3addr)
 3538{
 3539	struct ReportExtendedLUNdata *physdev;
 3540	u32 nphysicals;
 3541	u64 sa = 0;
 3542	int i;
 3543
 3544	physdev = kzalloc(sizeof(*physdev), GFP_KERNEL);
 3545	if (!physdev)
 3546		return 0;
 3547
 3548	if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
 3549		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
 3550		kfree(physdev);
 3551		return 0;
 3552	}
 3553	nphysicals = get_unaligned_be32(physdev->LUNListLength) / 24;
 3554
 3555	for (i = 0; i < nphysicals; i++)
 3556		if (!memcmp(&physdev->LUN[i].lunid[0], scsi3addr, 8)) {
 3557			sa = get_unaligned_be64(&physdev->LUN[i].wwid[0]);
 3558			break;
 3559		}
 3560
 3561	kfree(physdev);
 3562
 3563	return sa;
 3564}
 3565
 3566static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr,
 3567					struct hpsa_scsi_dev_t *dev)
 3568{
 3569	int rc;
 3570	u64 sa = 0;
 3571
 3572	if (is_hba_lunid(scsi3addr)) {
 3573		struct bmic_sense_subsystem_info *ssi;
 3574
 3575		ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
 3576		if (!ssi)
 3577			return;
 3578
 3579		rc = hpsa_bmic_sense_subsystem_information(h,
 3580					scsi3addr, 0, ssi, sizeof(*ssi));
 3581		if (rc == 0) {
 3582			sa = get_unaligned_be64(ssi->primary_world_wide_id);
 3583			h->sas_address = sa;
 3584		}
 3585
 3586		kfree(ssi);
 3587	} else
 3588		sa = hpsa_get_sas_address_from_report_physical(h, scsi3addr);
 3589
 3590	dev->sas_address = sa;
 3591}
 3592
 3593static void hpsa_ext_ctrl_present(struct ctlr_info *h,
 3594	struct ReportExtendedLUNdata *physdev)
 3595{
 3596	u32 nphysicals;
 3597	int i;
 3598
 3599	if (h->discovery_polling)
 3600		return;
 3601
 3602	nphysicals = (get_unaligned_be32(physdev->LUNListLength) / 24) + 1;
 3603
 3604	for (i = 0; i < nphysicals; i++) {
 3605		if (physdev->LUN[i].device_type ==
 3606			BMIC_DEVICE_TYPE_CONTROLLER
 3607			&& !is_hba_lunid(physdev->LUN[i].lunid)) {
 3608			dev_info(&h->pdev->dev,
 3609				"External controller present, activate discovery polling and disable rld caching\n");
 3610			hpsa_disable_rld_caching(h);
 3611			h->discovery_polling = 1;
 3612			break;
 3613		}
 3614	}
 3615}
 3616
 3617/* Check whether the device supports the given VPD inquiry page */
 3618static bool hpsa_vpd_page_supported(struct ctlr_info *h,
 3619	unsigned char scsi3addr[], u8 page)
 3620{
 3621	int rc;
 3622	int i;
 3623	int pages;
 3624	unsigned char *buf, bufsize;
 3625
 3626	buf = kzalloc(256, GFP_KERNEL);
 3627	if (!buf)
 3628		return false;
 3629
 3630	/* Get the size of the page list first */
 3631	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
 3632				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
 3633				buf, HPSA_VPD_HEADER_SZ);
 3634	if (rc != 0)
 3635		goto exit_unsupported;
 3636	pages = buf[3];
 3637	if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
 3638		bufsize = pages + HPSA_VPD_HEADER_SZ;
 3639	else
 3640		bufsize = 255;
 3641
 3642	/* Get the whole VPD page list */
 3643	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
 3644				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
 3645				buf, bufsize);
 3646	if (rc != 0)
 3647		goto exit_unsupported;
 3648
 3649	pages = buf[3];
 3650	for (i = 1; i <= pages; i++)
 3651		if (buf[3 + i] == page)
 3652			goto exit_supported;
 3653exit_unsupported:
 3654	kfree(buf);
 3655	return false;
 3656exit_supported:
 3657	kfree(buf);
 3658	return true;
 3659}
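
/*
 * Editor's sketch, not part of the driver: hpsa_vpd_page_supported() walks the
 * standard SCSI "Supported VPD Pages" page, where byte 3 holds the number of
 * page codes and the codes themselves start at byte 4.  The hypothetical
 * helper below shows just that parse step over an already-fetched buffer.
 */
static inline bool example_vpd_page_listed(const unsigned char *vpd0, u8 page)
{
	int i, pages = vpd0[3];

	for (i = 1; i <= pages; i++)
		if (vpd0[3 + i] == page)
			return true;
	return false;
}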
 3660
 3661/*
 3662 * Called during a scan operation.
 3663 * Sets ioaccel status on the new device list, not the existing device list
 3664 *
 3665 * The device list used during I/O will be updated later in
 3666 * adjust_hpsa_scsi_table.
 3667 */
 3668static void hpsa_get_ioaccel_status(struct ctlr_info *h,
 3669	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
 3670{
 3671	int rc;
 3672	unsigned char *buf;
 3673	u8 ioaccel_status;
 3674
 3675	this_device->offload_config = 0;
 3676	this_device->offload_enabled = 0;
 3677	this_device->offload_to_be_enabled = 0;
 3678
 3679	buf = kzalloc(64, GFP_KERNEL);
 3680	if (!buf)
 3681		return;
 3682	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
 3683		goto out;
 3684	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
 3685			VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
 3686	if (rc != 0)
 3687		goto out;
 3688
 3689#define IOACCEL_STATUS_BYTE 4
 3690#define OFFLOAD_CONFIGURED_BIT 0x01
 3691#define OFFLOAD_ENABLED_BIT 0x02
 3692	ioaccel_status = buf[IOACCEL_STATUS_BYTE];
 3693	this_device->offload_config =
 3694		!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
 3695	if (this_device->offload_config) {
 3696		bool offload_enabled =
 3697			!!(ioaccel_status & OFFLOAD_ENABLED_BIT);
 3698		/*
 3699		 * Check to see if offload can be enabled.
 3700		 */
 3701		if (offload_enabled) {
 3702			rc = hpsa_get_raid_map(h, scsi3addr, this_device);
 3703			if (rc) /* could not load raid_map */
 3704				goto out;
 3705			this_device->offload_to_be_enabled = 1;
 3706		}
 3707	}
 3708
 3709out:
 3710	kfree(buf);
 3711	return;
 3712}
 3713
 3714/* Get the device id from inquiry page 0x83 */
 3715static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
 3716	unsigned char *device_id, int index, int buflen)
 3717{
 3718	int rc;
 3719	unsigned char *buf;
 3720
 3721	/* Does controller have VPD for device id? */
 3722	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_DEVICE_ID))
 3723		return 1; /* not supported */
 3724
 3725	buf = kzalloc(64, GFP_KERNEL);
 3726	if (!buf)
 3727		return -ENOMEM;
 3728
 3729	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
 3730					HPSA_VPD_LV_DEVICE_ID, buf, 64);
 3731	if (rc == 0) {
 3732		if (buflen > 16)
 3733			buflen = 16;
 3734		memcpy(device_id, &buf[8], buflen);
 3735	}
 3736
 3737	kfree(buf);
 3738
 3739	return rc; /*0 - got id,  otherwise, didn't */
 3740}
 3741
 3742static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
 3743		void *buf, int bufsize,
 3744		int extended_response)
 3745{
 3746	int rc = IO_OK;
 3747	struct CommandList *c;
 3748	unsigned char scsi3addr[8];
 3749	struct ErrorInfo *ei;
 3750
 3751	c = cmd_alloc(h);
 3752
 3753	/* address the controller */
 3754	memset(scsi3addr, 0, sizeof(scsi3addr));
 3755	if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
 3756		buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
 3757		rc = -EAGAIN;
 3758		goto out;
 3759	}
 3760	if (extended_response)
 3761		c->Request.CDB[1] = extended_response;
 3762	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
 3763			NO_TIMEOUT);
 3764	if (rc)
 3765		goto out;
 3766	ei = c->err_info;
 3767	if (ei->CommandStatus != 0 &&
 3768	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
 3769		hpsa_scsi_interpret_error(h, c);
 3770		rc = -EIO;
 3771	} else {
 3772		struct ReportLUNdata *rld = buf;
 3773
 3774		if (rld->extended_response_flag != extended_response) {
 3775			if (!h->legacy_board) {
 3776				dev_err(&h->pdev->dev,
 3777					"report luns requested format %u, got %u\n",
 3778					extended_response,
 3779					rld->extended_response_flag);
 3780				rc = -EINVAL;
 3781			} else
 3782				rc = -EOPNOTSUPP;
 3783		}
 3784	}
 3785out:
 3786	cmd_free(h, c);
 3787	return rc;
 3788}
 3789
 3790static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
 3791		struct ReportExtendedLUNdata *buf, int bufsize)
 3792{
 3793	int rc;
 3794	struct ReportLUNdata *lbuf;
 3795
 3796	rc = hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
 3797				      HPSA_REPORT_PHYS_EXTENDED);
 3798	if (!rc || rc != -EOPNOTSUPP)
 3799		return rc;
 3800
 3801	/* REPORT PHYS EXTENDED is not supported */
 3802	lbuf = kzalloc(sizeof(*lbuf), GFP_KERNEL);
 3803	if (!lbuf)
 3804		return -ENOMEM;
 3805
 3806	rc = hpsa_scsi_do_report_luns(h, 0, lbuf, sizeof(*lbuf), 0);
 3807	if (!rc) {
 3808		int i;
 3809		u32 nphys;
 3810
 3811		/* Copy ReportLUNdata header */
 3812		memcpy(buf, lbuf, 8);
 3813		nphys = be32_to_cpu(*((__be32 *)lbuf->LUNListLength)) / 8;
 3814		for (i = 0; i < nphys; i++)
 3815			memcpy(buf->LUN[i].lunid, lbuf->LUN[i], 8);
 3816	}
 3817	kfree(lbuf);
 3818	return rc;
 3819}
 3820
 3821static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
 3822		struct ReportLUNdata *buf, int bufsize)
 3823{
 3824	return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
 3825}
 3826
 3827static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
 3828	int bus, int target, int lun)
 3829{
 3830	device->bus = bus;
 3831	device->target = target;
 3832	device->lun = lun;
 3833}
 3834
 3835/* Use VPD inquiry to get details of volume status */
 3836static int hpsa_get_volume_status(struct ctlr_info *h,
 3837					unsigned char scsi3addr[])
 3838{
 3839	int rc;
 3840	int status;
 3841	int size;
 3842	unsigned char *buf;
 3843
 3844	buf = kzalloc(64, GFP_KERNEL);
 3845	if (!buf)
 3846		return HPSA_VPD_LV_STATUS_UNSUPPORTED;
 3847
 3848	/* Does controller have VPD for logical volume status? */
 3849	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
 3850		goto exit_failed;
 3851
 3852	/* Get the size of the VPD return buffer */
 3853	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
 3854					buf, HPSA_VPD_HEADER_SZ);
 3855	if (rc != 0)
 3856		goto exit_failed;
 3857	size = buf[3];
 3858
 3859	/* Now get the whole VPD buffer */
 3860	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
 3861					buf, size + HPSA_VPD_HEADER_SZ);
 3862	if (rc != 0)
 3863		goto exit_failed;
 3864	status = buf[4]; /* status byte */
 3865
 3866	kfree(buf);
 3867	return status;
 3868exit_failed:
 3869	kfree(buf);
 3870	return HPSA_VPD_LV_STATUS_UNSUPPORTED;
 3871}
 3872
 3873/* Determine offline status of a volume.
 3874 * Return either:
 3875 *  0 (not offline)
 3876 *  0xff (offline for unknown reasons)
 3877 *  # (integer code indicating one of several NOT READY states
 3878 *     describing why a volume is to be kept offline)
 3879 */
 3880static unsigned char hpsa_volume_offline(struct ctlr_info *h,
 3881					unsigned char scsi3addr[])
 3882{
 3883	struct CommandList *c;
 3884	unsigned char *sense;
 3885	u8 sense_key, asc, ascq;
 3886	int sense_len;
 3887	int rc, ldstat = 0;
 3888#define ASC_LUN_NOT_READY 0x04
 3889#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
 3890#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
 3891
 3892	c = cmd_alloc(h);
 3893
 3894	(void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
 3895	rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
 3896					NO_TIMEOUT);
 3897	if (rc) {
 3898		cmd_free(h, c);
 3899		return HPSA_VPD_LV_STATUS_UNSUPPORTED;
 3900	}
 3901	sense = c->err_info->SenseInfo;
 3902	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
 3903		sense_len = sizeof(c->err_info->SenseInfo);
 3904	else
 3905		sense_len = c->err_info->SenseLen;
 3906	decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
 3907	cmd_free(h, c);
 3908
 3909	/* Determine the reason for not ready state */
 3910	ldstat = hpsa_get_volume_status(h, scsi3addr);
 3911
 3912	/* Keep volume offline in certain cases: */
 3913	switch (ldstat) {
 3914	case HPSA_LV_FAILED:
 3915	case HPSA_LV_UNDERGOING_ERASE:
 3916	case HPSA_LV_NOT_AVAILABLE:
 3917	case HPSA_LV_UNDERGOING_RPI:
 3918	case HPSA_LV_PENDING_RPI:
 3919	case HPSA_LV_ENCRYPTED_NO_KEY:
 3920	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
 3921	case HPSA_LV_UNDERGOING_ENCRYPTION:
 3922	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
 3923	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
 3924		return ldstat;
 3925	case HPSA_VPD_LV_STATUS_UNSUPPORTED:
 3926		/* If VPD status page isn't available,
 3927		 * use ASC/ASCQ to determine state
 3928		 */
 3929		if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
 3930			(ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
 3931			return ldstat;
 3932		break;
 3933	default:
 3934		break;
 3935	}
 3936	return HPSA_LV_OK;
 3937}
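
/*
 * Worked example (editor's note, not part of the driver): if the VPD LV status
 * page reports HPSA_LV_UNDERGOING_ERASE, hpsa_volume_offline() returns that
 * code and the volume is kept offline.  If the status page is unsupported, a
 * NOT READY sense with ASCQ 0x04 (format in progress) or ASCQ 0x02
 * (initializing command required) likewise keeps the volume offline;
 * otherwise HPSA_LV_OK is returned.
 */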
 3938
 3939static int hpsa_update_device_info(struct ctlr_info *h,
 3940	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
 3941	unsigned char *is_OBDR_device)
 3942{
 3943
 3944#define OBDR_SIG_OFFSET 43
 3945#define OBDR_TAPE_SIG "$DR-10"
 3946#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
 3947#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
 3948
 3949	unsigned char *inq_buff;
 3950	unsigned char *obdr_sig;
 3951	int rc = 0;
 3952
 3953	inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
 3954	if (!inq_buff) {
 3955		rc = -ENOMEM;
 3956		goto bail_out;
 3957	}
 3958
 3959	/* Do an inquiry to the device to see what it is. */
 3960	if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
 3961		(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
 3962		dev_err(&h->pdev->dev,
 3963			"%s: inquiry failed, device will be skipped.\n",
 3964			__func__);
 3965		rc = HPSA_INQUIRY_FAILED;
 3966		goto bail_out;
 3967	}
 3968
 3969	scsi_sanitize_inquiry_string(&inq_buff[8], 8);
 3970	scsi_sanitize_inquiry_string(&inq_buff[16], 16);
 3971
 3972	this_device->devtype = (inq_buff[0] & 0x1f);
 3973	memcpy(this_device->scsi3addr, scsi3addr, 8);
 3974	memcpy(this_device->vendor, &inq_buff[8],
 3975		sizeof(this_device->vendor));
 3976	memcpy(this_device->model, &inq_buff[16],
 3977		sizeof(this_device->model));
 3978	this_device->rev = inq_buff[2];
 3979	memset(this_device->device_id, 0,
 3980		sizeof(this_device->device_id));
 3981	if (hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
 3982		sizeof(this_device->device_id)) < 0) {
 3983		dev_err(&h->pdev->dev,
 3984			"hpsa%d: %s: can't get device id for [%d:%d:%d:%d]\t%s\t%.16s\n",
 3985			h->ctlr, __func__,
 3986			h->scsi_host->host_no,
 3987			this_device->bus, this_device->target,
 3988			this_device->lun,
 3989			scsi_device_type(this_device->devtype),
 3990			this_device->model);
 3991		rc = HPSA_LV_FAILED;
 3992		goto bail_out;
 3993	}
 3994
 3995	if ((this_device->devtype == TYPE_DISK ||
 3996		this_device->devtype == TYPE_ZBC) &&
 3997		is_logical_dev_addr_mode(scsi3addr)) {
 3998		unsigned char volume_offline;
 3999
 4000		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
 4001		if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
 4002			hpsa_get_ioaccel_status(h, scsi3addr, this_device);
 4003		volume_offline = hpsa_volume_offline(h, scsi3addr);
 4004		if (volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED &&
 4005		    h->legacy_board) {
 4006			/*
 4007			 * Legacy boards might not support volume status
 4008			 */
 4009			dev_info(&h->pdev->dev,
 4010				 "C0:T%d:L%d Volume status not available, assuming online.\n",
 4011				 this_device->target, this_device->lun);
 4012			volume_offline = 0;
 4013		}
 4014		this_device->volume_offline = volume_offline;
 4015		if (volume_offline == HPSA_LV_FAILED) {
 4016			rc = HPSA_LV_FAILED;
 4017			dev_err(&h->pdev->dev,
 4018				"%s: LV failed, device will be skipped.\n",
 4019				__func__);
 4020			goto bail_out;
 4021		}
 4022	} else {
 4023		this_device->raid_level = RAID_UNKNOWN;
 4024		this_device->offload_config = 0;
 4025		hpsa_turn_off_ioaccel_for_device(this_device);
 4026		this_device->hba_ioaccel_enabled = 0;
 4027		this_device->volume_offline = 0;
 4028		this_device->queue_depth = h->nr_cmds;
 4029	}
 4030
 4031	if (this_device->external)
 4032		this_device->queue_depth = EXTERNAL_QD;
 4033
 4034	if (is_OBDR_device) {
 4035		/* See if this is a One-Button-Disaster-Recovery device
 4036		 * by looking for "$DR-10" at offset 43 in inquiry data.
 4037		 */
 4038		obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
 4039		*is_OBDR_device = (this_device->devtype == TYPE_ROM &&
 4040					strncmp(obdr_sig, OBDR_TAPE_SIG,
 4041						OBDR_SIG_LEN) == 0);
 4042	}
 4043	kfree(inq_buff);
 4044	return 0;
 4045
 4046bail_out:
 4047	kfree(inq_buff);
 4048	return rc;
 4049}
 4050
 4051/*
 4052 * Helper function to assign bus, target, lun mapping of devices.
 4053 * Logical drive target and lun are assigned at this time, but
 4054 * physical device lun and target assignment are deferred (assigned
 4055 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
 4056*/
 4057static void figure_bus_target_lun(struct ctlr_info *h,
 4058	u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
 4059{
 4060	u32 lunid = get_unaligned_le32(lunaddrbytes);
 4061
 4062	if (!is_logical_dev_addr_mode(lunaddrbytes)) {
 4063		/* physical device, target and lun filled in later */
 4064		if (is_hba_lunid(lunaddrbytes)) {
 4065			int bus = HPSA_HBA_BUS;
 4066
 4067			if (!device->rev)
 4068				bus = HPSA_LEGACY_HBA_BUS;
 4069			hpsa_set_bus_target_lun(device,
 4070					bus, 0, lunid & 0x3fff);
 4071		} else
 4072			/* defer target, lun assignment for physical devices */
 4073			hpsa_set_bus_target_lun(device,
 4074					HPSA_PHYSICAL_DEVICE_BUS, -1, -1);
 4075		return;
 4076	}
 4077	/* It's a logical device */
 4078	if (device->external) {
 4079		hpsa_set_bus_target_lun(device,
 4080			HPSA_EXTERNAL_RAID_VOLUME_BUS, (lunid >> 16) & 0x3fff,
 4081			lunid & 0x00ff);
 4082		return;
 4083	}
 4084	hpsa_set_bus_target_lun(device, HPSA_RAID_VOLUME_BUS,
 4085				0, lunid & 0x3fff);
 4086}
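
/*
 * Worked example (editor's note, not part of the driver): for a volume marked
 * external with lunid 0x00230005, figure_bus_target_lun() assigns
 * bus = HPSA_EXTERNAL_RAID_VOLUME_BUS, target = (0x00230005 >> 16) & 0x3fff
 * = 0x23 and lun = 0x00230005 & 0x00ff = 0x05.  A local logical volume with
 * lunid 0x00000002 instead gets bus = HPSA_RAID_VOLUME_BUS, target = 0 and
 * lun = 2.
 */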
 4087
 4088static int  figure_external_status(struct ctlr_info *h, int raid_ctlr_position,
 4089	int i, int nphysicals, int nlocal_logicals)
 4090{
 4091	/* In report logicals, local logicals are listed first,
 4092	 * then any externals.
 4093	 */
 4094	int logicals_start = nphysicals + (raid_ctlr_position == 0);
 4095
 4096	if (i == raid_ctlr_position)
 4097		return 0;
 4098
 4099	if (i < logicals_start)
 4100		return 0;
 4101
 4102	/* i is in logicals range, but still within local logicals */
 4103	if ((i - nphysicals - (raid_ctlr_position == 0)) < nlocal_logicals)
 4104		return 0;
 4105
 4106	return 1; /* it's an external lun */
 4107}
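
/*
 * Worked example (editor's note, not part of the driver): with
 * raid_ctlr_position == 0, nphysicals == 4 and nlocal_logicals == 2,
 * logicals_start is 4 + 1 = 5.  Indices 5 and 6 fall within the two local
 * logicals and return 0, while index 7 and above are reported as external
 * luns and return 1.
 */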
 4108
 4109/*
 4110 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev,
 4111 * logdev.  The number of luns in physdev and logdev are returned in
 4112 * *nphysicals and *nlogicals, respectively.
 4113 * Returns 0 on success, -1 otherwise.
 4114 */
 4115static int hpsa_gather_lun_info(struct ctlr_info *h,
 4116	struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
 4117	struct ReportLUNdata *logdev, u32 *nlogicals)
 4118{
 4119	if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
 4120		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
 4121		return -1;
 4122	}
 4123	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
 4124	if (*nphysicals > HPSA_MAX_PHYS_LUN) {
 4125		dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
 4126			HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
 4127		*nphysicals = HPSA_MAX_PHYS_LUN;
 4128	}
 4129	if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
 4130		dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
 4131		return -1;
 4132	}
 4133	*nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
 4134	/* Reject Logicals in excess of our max capability. */
 4135	if (*nlogicals > HPSA_MAX_LUN) {
 4136		dev_warn(&h->pdev->dev,
 4137			"maximum logical LUNs (%d) exceeded.  "
 4138			"%d LUNs ignored.\n", HPSA_MAX_LUN,
 4139			*nlogicals - HPSA_MAX_LUN);
 4140		*nlogicals = HPSA_MAX_LUN;
 4141	}
 4142	if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
 4143		dev_warn(&h->pdev->dev,
 4144			"maximum logical + physical LUNs (%d) exceeded. "
 4145			"%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
 4146			*nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
 4147		*nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
 4148	}
 4149	return 0;
 4150}
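
/*
 * Worked example (editor's note, not part of the driver): the counts above are
 * the big-endian LUNListLength header divided by the per-entry size -- 24
 * bytes for an extended physical entry, 8 bytes for a plain logical entry.  A
 * LUNListLength of 96 in the physical report therefore means 4 physical
 * devices, and 24 in the logical report means 3 logical volumes.
 */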
 4151
 4152static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
 4153	int i, int nphysicals, int nlogicals,
 4154	struct ReportExtendedLUNdata *physdev_list,
 4155	struct ReportLUNdata *logdev_list)
 4156{
 4157	/* Helper function, figure out where the LUN ID info is coming from
 4158	 * given index i, lists of physical and logical devices, where in
 4159	 * the list the raid controller is supposed to appear (first or last)
 4160	 */
 4161
 4162	int logicals_start = nphysicals + (raid_ctlr_position == 0);
 4163	int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
 4164
 4165	if (i == raid_ctlr_position)
 4166		return RAID_CTLR_LUNID;
 4167
 4168	if (i < logicals_start)
 4169		return &physdev_list->LUN[i -
 4170				(raid_ctlr_position == 0)].lunid[0];
 4171
 4172	if (i < last_device)
 4173		return &logdev_list->LUN[i - nphysicals -
 4174			(raid_ctlr_position == 0)][0];
 4175	BUG();
 4176	return NULL;
 4177}
 4178
 4179/* get physical drive ioaccel handle and queue depth */
 4180static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
 4181		struct hpsa_scsi_dev_t *dev,
 4182		struct ReportExtendedLUNdata *rlep, int rle_index,
 4183		struct bmic_identify_physical_device *id_phys)
 4184{
 4185	int rc;
 4186	struct ext_report_lun_entry *rle;
 4187
 4188	if (rle_index < 0 || rle_index >= HPSA_MAX_PHYS_LUN)
 4189		return;
 4190
 4191	rle = &rlep->LUN[rle_index];
 4192
 4193	dev->ioaccel_handle = rle->ioaccel_handle;
 4194	if ((rle->device_flags & 0x08) && dev->ioaccel_handle)
 4195		dev->hba_ioaccel_enabled = 1;
 4196	memset(id_phys, 0, sizeof(*id_phys));
 4197	rc = hpsa_bmic_id_physical_device(h, &rle->lunid[0],
 4198			GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]), id_phys,
 4199			sizeof(*id_phys));
 4200	if (!rc)
 4201		/* Reserve space for FW operations */
 4202#define DRIVE_CMDS_RESERVED_FOR_FW 2
 4203#define DRIVE_QUEUE_DEPTH 7
 4204		dev->queue_depth =
 4205			le16_to_cpu(id_phys->current_queue_depth_limit) -
 4206				DRIVE_CMDS_RESERVED_FOR_FW;
 4207	else
 4208		dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
 4209}
 4210
 4211static void hpsa_get_path_info(struct hpsa_scsi_dev_t *this_device,
 4212	struct ReportExtendedLUNdata *rlep, int rle_index,
 4213	struct bmic_identify_physical_device *id_phys)
 4214{
 4215	struct ext_report_lun_entry *rle;
 4216
 4217	if (rle_index < 0 || rle_index >= HPSA_MAX_PHYS_LUN)
 4218		return;
 4219
 4220	rle = &rlep->LUN[rle_index];
 4221
 4222	if ((rle->device_flags & 0x08) && this_device->ioaccel_handle)
 4223		this_device->hba_ioaccel_enabled = 1;
 4224
 4225	memcpy(&this_device->active_path_index,
 4226		&id_phys->active_path_number,
 4227		sizeof(this_device->active_path_index));
 4228	memcpy(&this_device->path_map,
 4229		&id_phys->redundant_path_present_map,
 4230		sizeof(this_device->path_map));
 4231	memcpy(&this_device->box,
 4232		&id_phys->alternate_paths_phys_box_on_port,
 4233		sizeof(this_device->box));
 4234	memcpy(&this_device->phys_connector,
 4235		&id_phys->alternate_paths_phys_connector,
 4236		sizeof(this_device->phys_connector));
 4237	memcpy(&this_device->bay,
 4238		&id_phys->phys_bay_in_box,
 4239		sizeof(this_device->bay));
 4240}
 4241
 4242/* get number of local logical disks. */
 4243static int hpsa_set_local_logical_count(struct ctlr_info *h,
 4244	struct bmic_identify_controller *id_ctlr,
 4245	u32 *nlocals)
 4246{
 4247	int rc;
 4248
 4249	if (!id_ctlr) {
 4250		dev_warn(&h->pdev->dev, "%s: id_ctlr buffer is NULL.\n",
 4251			__func__);
 4252		return -ENOMEM;
 4253	}
 4254	memset(id_ctlr, 0, sizeof(*id_ctlr));
 4255	rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr));
 4256	if (!rc) {
 4257		if (id_ctlr->configured_logical_drive_count < 255)
 4258			*nlocals = id_ctlr->configured_logical_drive_count;
 4259		else
 4260			*nlocals = le16_to_cpu(
 4261					id_ctlr->extended_logical_unit_count);
 4262	} else
 4263		*nlocals = -1;
 4264	return rc;
 4265}
 4266
 4267static bool hpsa_is_disk_spare(struct ctlr_info *h, u8 *lunaddrbytes)
 4268{
 4269	struct bmic_identify_physical_device *id_phys;
 4270	bool is_spare = false;
 4271	int rc;
 4272
 4273	id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
 4274	if (!id_phys)
 4275		return false;
 4276
 4277	rc = hpsa_bmic_id_physical_device(h,
 4278					lunaddrbytes,
 4279					GET_BMIC_DRIVE_NUMBER(lunaddrbytes),
 4280					id_phys, sizeof(*id_phys));
 4281	if (rc == 0)
 4282		is_spare = (id_phys->more_flags >> 6) & 0x01;
 4283
 4284	kfree(id_phys);
 4285	return is_spare;
 4286}
 4287
 4288#define RPL_DEV_FLAG_NON_DISK                           0x1
 4289#define RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED  0x2
 4290#define RPL_DEV_FLAG_UNCONFIG_DISK                      0x4
 4291
 4292#define BMIC_DEVICE_TYPE_ENCLOSURE  6
 4293
 4294static bool hpsa_skip_device(struct ctlr_info *h, u8 *lunaddrbytes,
 4295				struct ext_report_lun_entry *rle)
 4296{
 4297	u8 device_flags;
 4298	u8 device_type;
 4299
 4300	if (!MASKED_DEVICE(lunaddrbytes))
 4301		return false;
 4302
 4303	device_flags = rle->device_flags;
 4304	device_type = rle->device_type;
 4305
 4306	if (device_flags & RPL_DEV_FLAG_NON_DISK) {
 4307		if (device_type == BMIC_DEVICE_TYPE_ENCLOSURE)
 4308			return false;
 4309		return true;
 4310	}
 4311
 4312	if (!(device_flags & RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED))
 4313		return false;
 4314
 4315	if (device_flags & RPL_DEV_FLAG_UNCONFIG_DISK)
 4316		return false;
 4317
 4318	/*
 4319	 * Spares may be spun down.  We do not want to
 4320	 * send an Inquiry to a RAID-set spare drive, as
 4321	 * that would spin it up; that is a
 4322	 * performance hit because I/O to the RAID device
 4323	 * stops while the spin-up occurs, which can take
 4324	 * over 50 seconds.
 4325	 */
 4326	if (hpsa_is_disk_spare(h, lunaddrbytes))
 4327		return true;
 4328
 4329	return false;
 4330}
 4331
 4332static void hpsa_update_scsi_devices(struct ctlr_info *h)
 4333{
 4334	/* The idea here is that we could get notified
 4335	 * that some devices have changed, so we do report
 4336	 * physical luns and report logical luns commands, and adjust
 4337	 * our list of devices accordingly.
 4338	 *
 4339	 * The scsi3addr's of devices won't change so long as the
 4340	 * adapter is not reset.  That means we can rescan and
 4341	 * tell which devices we already know about, vs. new
 4342	 * devices, vs. disappearing devices.
 4343	 */
 4344	struct ReportExtendedLUNdata *physdev_list = NULL;
 4345	struct ReportLUNdata *logdev_list = NULL;
 4346	struct bmic_identify_physical_device *id_phys = NULL;
 4347	struct bmic_identify_controller *id_ctlr = NULL;
 4348	u32 nphysicals = 0;
 4349	u32 nlogicals = 0;
 4350	u32 nlocal_logicals = 0;
 4351	u32 ndev_allocated = 0;
 4352	struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
 4353	int ncurrent = 0;
 4354	int i, ndevs_to_allocate;
 4355	int raid_ctlr_position;
 4356	bool physical_device;
 4357
 4358	currentsd = kcalloc(HPSA_MAX_DEVICES, sizeof(*currentsd), GFP_KERNEL);
 4359	physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
 4360	logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
 4361	tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
 4362	id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
 4363	id_ctlr = kzalloc(sizeof(*id_ctlr), GFP_KERNEL);
 4364
 4365	if (!currentsd || !physdev_list || !logdev_list ||
 4366		!tmpdevice || !id_phys || !id_ctlr) {
 4367		dev_err(&h->pdev->dev, "out of memory\n");
 4368		goto out;
 4369	}
 4370
 4371	h->drv_req_rescan = 0; /* cancel scheduled rescan - we're doing it. */
 4372
 4373	if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
 4374			logdev_list, &nlogicals)) {
 4375		h->drv_req_rescan = 1;
 4376		goto out;
 4377	}
 4378
 4379	/* Set number of local logicals (non PTRAID) */
 4380	if (hpsa_set_local_logical_count(h, id_ctlr, &nlocal_logicals)) {
 4381		dev_warn(&h->pdev->dev,
 4382			"%s: Can't determine number of local logical devices.\n",
 4383			__func__);
 4384	}
 4385
 4386	/* We might see up to the maximum number of logical and physical disks
 4387	 * plus external target devices, and a device for the local RAID
 4388	 * controller.
 4389	 */
 4390	ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
 4391
 4392	hpsa_ext_ctrl_present(h, physdev_list);
 4393
 4394	/* Allocate the per device structures */
 4395	for (i = 0; i < ndevs_to_allocate; i++) {
 4396		if (i >= HPSA_MAX_DEVICES) {
 4397			dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
 4398				"  %d devices ignored.\n", HPSA_MAX_DEVICES,
 4399				ndevs_to_allocate - HPSA_MAX_DEVICES);
 4400			break;
 4401		}
 4402
 4403		currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
 4404		if (!currentsd[i]) {
 4405			h->drv_req_rescan = 1;
 4406			goto out;
 4407		}
 4408		ndev_allocated++;
 4409	}
 4410
 4411	if (is_scsi_rev_5(h))
 4412		raid_ctlr_position = 0;
 4413	else
 4414		raid_ctlr_position = nphysicals + nlogicals;
 4415
 4416	/* adjust our table of devices */
 4417	for (i = 0; i < nphysicals + nlogicals + 1; i++) {
 4418		u8 *lunaddrbytes, is_OBDR = 0;
 4419		int rc = 0;
 4420		int phys_dev_index = i - (raid_ctlr_position == 0);
 4421		bool skip_device = false;
 4422
 4423		memset(tmpdevice, 0, sizeof(*tmpdevice));
 4424
 4425		physical_device = i < nphysicals + (raid_ctlr_position == 0);
 4426
 4427		/* Figure out where the LUN ID info is coming from */
 4428		lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
 4429			i, nphysicals, nlogicals, physdev_list, logdev_list);
 4430
 4431		/* Determine if this is a lun from an external target array */
 4432		tmpdevice->external =
 4433			figure_external_status(h, raid_ctlr_position, i,
 4434						nphysicals, nlocal_logicals);
 4435
 4436		/*
 4437		 * Skip over some devices such as a spare.
 4438		 */
 4439		if (phys_dev_index >= 0 && !tmpdevice->external &&
 4440			physical_device) {
 4441			skip_device = hpsa_skip_device(h, lunaddrbytes,
 4442					&physdev_list->LUN[phys_dev_index]);
 4443			if (skip_device)
 4444				continue;
 4445		}
 4446
 4447		/* Get device type, vendor, model, device id, raid_map */
 4448		rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
 4449							&is_OBDR);
 4450		if (rc == -ENOMEM) {
 4451			dev_warn(&h->pdev->dev,
 4452				"Out of memory, rescan deferred.\n");
 4453			h->drv_req_rescan = 1;
 4454			goto out;
 4455		}
 4456		if (rc) {
 4457			h->drv_req_rescan = 1;
 4458			continue;
 4459		}
 4460
 4461		figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
 4462		this_device = currentsd[ncurrent];
 4463
 4464		*this_device = *tmpdevice;
 4465		this_device->physical_device = physical_device;
 4466
 4467		/*
 4468		 * Expose all devices except for physical devices that
 4469		 * are masked.
 4470		 */
 4471		if (MASKED_DEVICE(lunaddrbytes) && this_device->physical_device)
 4472			this_device->expose_device = 0;
 4473		else
 4474			this_device->expose_device = 1;
 4475
 4476
 4477		/*
 4478		 * Get the SAS address for physical devices that are exposed.
 4479		 */
 4480		if (this_device->physical_device && this_device->expose_device)
 4481			hpsa_get_sas_address(h, lunaddrbytes, this_device);
 4482
 4483		switch (this_device->devtype) {
 4484		case TYPE_ROM:
 4485			/* We don't *really* support actual CD-ROM devices,
 4486			 * just "One Button Disaster Recovery" tape drive
 4487			 * which temporarily pretends to be a CD-ROM drive.
 4488			 * So we check that the device is really an OBDR tape
 4489			 * device by checking for "$DR-10" in bytes 43-48 of
 4490			 * the inquiry data.
 4491			 */
 4492			if (is_OBDR)
 4493				ncurrent++;
 4494			break;
 4495		case TYPE_DISK:
 4496		case TYPE_ZBC:
 4497			if (this_device->physical_device) {
 4498				/* The disk is in HBA mode. */
 4499				/* Never use RAID mapper in HBA mode. */
 4500				this_device->offload_enabled = 0;
 4501				hpsa_get_ioaccel_drive_info(h, this_device,
 4502					physdev_list, phys_dev_index, id_phys);
 4503				hpsa_get_path_info(this_device,
 4504					physdev_list, phys_dev_index, id_phys);
 4505			}
 4506			ncurrent++;
 4507			break;
 4508		case TYPE_TAPE:
 4509		case TYPE_MEDIUM_CHANGER:
 4510			ncurrent++;
 4511			break;
 4512		case TYPE_ENCLOSURE:
 4513			if (!this_device->external)
 4514				hpsa_get_enclosure_info(h, lunaddrbytes,
 4515						physdev_list, phys_dev_index,
 4516						this_device);
 4517			ncurrent++;
 4518			break;
 4519		case TYPE_RAID:
 4520			/* Only present the Smartarray HBA as a RAID controller.
 4521			 * If it's a RAID controller other than the HBA itself
 4522			 * (an external RAID controller, MSA500 or similar)
 4523			 * don't present it.
 4524			 */
 4525			if (!is_hba_lunid(lunaddrbytes))
 4526				break;
 4527			ncurrent++;
 4528			break;
 4529		default:
 4530			break;
 4531		}
 4532		if (ncurrent >= HPSA_MAX_DEVICES)
 4533			break;
 4534	}
 4535
 4536	if (h->sas_host == NULL) {
 4537		int rc = 0;
 4538
 4539		rc = hpsa_add_sas_host(h);
 4540		if (rc) {
 4541			dev_warn(&h->pdev->dev,
 4542				"Could not add sas host %d\n", rc);
 4543			goto out;
 4544		}
 4545	}
 4546
 4547	adjust_hpsa_scsi_table(h, currentsd, ncurrent);
 4548out:
 4549	kfree(tmpdevice);
 4550	for (i = 0; i < ndev_allocated; i++)
 4551		kfree(currentsd[i]);
 4552	kfree(currentsd);
 4553	kfree(physdev_list);
 4554	kfree(logdev_list);
 4555	kfree(id_ctlr);
 4556	kfree(id_phys);
 4557}
 4558
 4559static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
 4560				   struct scatterlist *sg)
 4561{
 4562	u64 addr64 = (u64) sg_dma_address(sg);
 4563	unsigned int len = sg_dma_len(sg);
 4564
 4565	desc->Addr = cpu_to_le64(addr64);
 4566	desc->Len = cpu_to_le32(len);
 4567	desc->Ext = 0;
 4568}
 4569
 4570/*
 4571 * hpsa_scatter_gather takes a struct scsi_cmnd (cmd), performs the PCI
 4572 * DMA mapping and fills in the scatter-gather entries of the
 4573 * hpsa command (cp).
 4574 */
 4575static int hpsa_scatter_gather(struct ctlr_info *h,
 4576		struct CommandList *cp,
 4577		struct scsi_cmnd *cmd)
 4578{
 4579	struct scatterlist *sg;
 4580	int use_sg, i, sg_limit, chained;
 4581	struct SGDescriptor *curr_sg;
 4582
 4583	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
 4584
 4585	use_sg = scsi_dma_map(cmd);
 4586	if (use_sg < 0)
 4587		return use_sg;
 4588
 4589	if (!use_sg)
 4590		goto sglist_finished;
 4591
 4592	/*
 4593	 * If the number of entries is greater than the max for a single list,
 4594	 * then we have a chained list; we will set up all but one entry in the
 4595	 * first list (the last entry is saved for link information);
 4596	 * otherwise, we don't have a chained list and we'll set up each of
 4597	 * the entries in the single list.
 4598	 */
 4599	curr_sg = cp->SG;
 4600	chained = use_sg > h->max_cmd_sg_entries;
 4601	sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg;
 4602	scsi_for_each_sg(cmd, sg, sg_limit, i) {
 4603		hpsa_set_sg_descriptor(curr_sg, sg);
 4604		curr_sg++;
 4605	}
 4606
 4607	if (chained) {
 4608		/*
 4609		 * Continue with the chained list.  Set curr_sg to the chained
 4610		 * list.  Modify the limit to the total count less the entries
 4611		 * we've already set up.  Resume the scan at the list entry
 4612		 * where the previous loop left off.
 4613		 */
 4614		curr_sg = h->cmd_sg_list[cp->cmdindex];
 4615		sg_limit = use_sg - sg_limit;
 4616		for_each_sg(sg, sg, sg_limit, i) {
 4617			hpsa_set_sg_descriptor(curr_sg, sg);
 4618			curr_sg++;
 4619		}
 4620	}
 4621
 4622	/* Back the pointer up to the last entry and mark it as "last". */
 4623	(curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST);
 4624
 4625	if (use_sg + chained > h->maxSG)
 4626		h->maxSG = use_sg + chained;
 4627
 4628	if (chained) {
 4629		cp->Header.SGList = h->max_cmd_sg_entries;
 4630		cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
 4631		if (hpsa_map_sg_chain_block(h, cp)) {
 4632			scsi_dma_unmap(cmd);
 4633			return -1;
 4634		}
 4635		return 0;
 4636	}
 4637
 4638sglist_finished:
 4639
 4640	cp->Header.SGList = (u8) use_sg;   /* no. SGs contig in this cmd */
 4641	cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
 4642	return 0;
 4643}
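
/*
 * Worked example (editor's note, not part of the driver): with
 * h->max_cmd_sg_entries == 32 and use_sg == 40, hpsa_scatter_gather() places
 * 31 entries in the embedded list (sg_limit = 32 - 1), uses the last embedded
 * slot for the chain descriptor set up by hpsa_map_sg_chain_block(), writes
 * the remaining 9 entries into the chained block and marks the final one with
 * HPSA_SG_LAST.  SGList is reported as 32 and SGTotal as use_sg + 1 = 41.
 */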
 4644
 4645static inline void warn_zero_length_transfer(struct ctlr_info *h,
 4646						u8 *cdb, int cdb_len,
 4647						const char *func)
 4648{
 4649	dev_warn(&h->pdev->dev,
 4650		 "%s: Blocking zero-length request: CDB:%*phN\n",
 4651		 func, cdb_len, cdb);
 4652}
 4653
 4654#define IO_ACCEL_INELIGIBLE 1
 4655/* zero-length transfers trigger hardware errors. */
 4656static bool is_zero_length_transfer(u8 *cdb)
 4657{
 4658	u32 block_cnt;
 4659
 4660	/* Block zero-length transfer sizes on certain commands. */
 4661	switch (cdb[0]) {
 4662	case READ_10:
 4663	case WRITE_10:
 4664	case VERIFY:		/* 0x2F */
 4665	case WRITE_VERIFY:	/* 0x2E */
 4666		block_cnt = get_unaligned_be16(&cdb[7]);
 4667		break;
 4668	case READ_12:
 4669	case WRITE_12:
 4670	case VERIFY_12: /* 0xAF */
 4671	case WRITE_VERIFY_12:	/* 0xAE */
 4672		block_cnt = get_unaligned_be32(&cdb[6]);
 4673		break;
 4674	case READ_16:
 4675	case WRITE_16:
 4676	case VERIFY_16:		/* 0x8F */
 4677		block_cnt = get_unaligned_be32(&cdb[10]);
 4678		break;
 4679	default:
 4680		return false;
 4681	}
 4682
 4683	return block_cnt == 0;
 4684}
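
/*
 * Worked example (editor's note, not part of the driver): a WRITE_10 whose
 * transfer-length bytes (CDB[7..8]) are both zero yields block_cnt == 0, so
 * the ioaccel submit paths log it via warn_zero_length_transfer() and return
 * IO_ACCEL_INELIGIBLE, leaving the request to the normal RAID path.
 */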
 4685
 4686static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
 4687{
 4688	int is_write = 0;
 4689	u32 block;
 4690	u32 block_cnt;
 4691
 4692	/* Perform some CDB fixups if needed using 10 byte reads/writes only */
 4693	switch (cdb[0]) {
 4694	case WRITE_6:
 4695	case WRITE_12:
 4696		is_write = 1;
 4697		fallthrough;
 4698	case READ_6:
 4699	case READ_12:
 4700		if (*cdb_len == 6) {
 4701			block = (((cdb[1] & 0x1F) << 16) |
 4702				(cdb[2] << 8) |
 4703				cdb[3]);
 4704			block_cnt = cdb[4];
 4705			if (block_cnt == 0)
 4706				block_cnt = 256;
 4707		} else {
 4708			BUG_ON(*cdb_len != 12);
 4709			block = get_unaligned_be32(&cdb[2]);
 4710			block_cnt = get_unaligned_be32(&cdb[6]);
 4711		}
 4712		if (block_cnt > 0xffff)
 4713			return IO_ACCEL_INELIGIBLE;
 4714
 4715		cdb[0] = is_write ? WRITE_10 : READ_10;
 4716		cdb[1] = 0;
 4717		cdb[2] = (u8) (block >> 24);
 4718		cdb[3] = (u8) (block >> 16);
 4719		cdb[4] = (u8) (block >> 8);
 4720		cdb[5] = (u8) (block);
 4721		cdb[6] = 0;
 4722		cdb[7] = (u8) (block_cnt >> 8);
 4723		cdb[8] = (u8) (block_cnt);
 4724		cdb[9] = 0;
 4725		*cdb_len = 10;
 4726		break;
 4727	}
 4728	return 0;
 4729}
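
/*
 * Worked example (editor's note, not part of the driver): a READ_6 CDB of
 * 08 00 12 34 08 00 (LBA 0x1234, 8 blocks) is rewritten by fixup_ioaccel_cdb()
 * into the 10-byte form 28 00 00 00 12 34 00 00 08 00, i.e. READ_10 with the
 * same LBA and transfer length.  A 6-byte count of 0 is first promoted to 256
 * blocks, and a 12-byte request whose count exceeds 0xffff is rejected as
 * IO_ACCEL_INELIGIBLE.
 */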
 4730
 4731static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
 4732	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
 4733	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
 4734{
 4735	struct scsi_cmnd *cmd = c->scsi_cmd;
 4736	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
 4737	unsigned int len;
 4738	unsigned int total_len = 0;
 4739	struct scatterlist *sg;
 4740	u64 addr64;
 4741	int use_sg, i;
 4742	struct SGDescriptor *curr_sg;
 4743	u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
 4744
 4745	/* TODO: implement chaining support */
 4746	if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
 4747		atomic_dec(&phys_disk->ioaccel_cmds_out);
 4748		return IO_ACCEL_INELIGIBLE;
 4749	}
 4750
 4751	BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
 4752
 4753	if (is_zero_length_transfer(cdb)) {
 4754		warn_zero_length_transfer(h, cdb, cdb_len, __func__);
 4755		atomic_dec(&phys_disk->ioaccel_cmds_out);
 4756		return IO_ACCEL_INELIGIBLE;
 4757	}
 4758
 4759	if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
 4760		atomic_dec(&phys_disk->ioaccel_cmds_out);
 4761		return IO_ACCEL_INELIGIBLE;
 4762	}
 4763
 4764	c->cmd_type = CMD_IOACCEL1;
 4765
 4766	/* Adjust the DMA address to point to the accelerated command buffer */
 4767	c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
 4768				(c->cmdindex * sizeof(*cp));
 4769	BUG_ON(c->busaddr & 0x0000007F);
 4770
 4771	use_sg = scsi_dma_map(cmd);
 4772	if (use_sg < 0) {
 4773		atomic_dec(&phys_disk->ioaccel_cmds_out);
 4774		return use_sg;
 4775	}
 4776
 4777	if (use_sg) {
 4778		curr_sg = cp->SG;
 4779		scsi_for_each_sg(cmd, sg, use_sg, i) {
 4780			addr64 = (u64) sg_dma_address(sg);
 4781			len  = sg_dma_len(sg);
 4782			total_len += len;
 4783			curr_sg->Addr = cpu_to_le64(addr64);
 4784			curr_sg->Len = cpu_to_le32(len);
 4785			curr_sg->Ext = cpu_to_le32(0);
 4786			curr_sg++;
 4787		}
 4788		(--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
 4789
 4790		switch (cmd->sc_data_direction) {
 4791		case DMA_TO_DEVICE:
 4792			control |= IOACCEL1_CONTROL_DATA_OUT;
 4793			break;
 4794		case DMA_FROM_DEVICE:
 4795			control |= IOACCEL1_CONTROL_DATA_IN;
 4796			break;
 4797		case DMA_NONE:
 4798			control |= IOACCEL1_CONTROL_NODATAXFER;
 4799			break;
 4800		default:
 4801			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
 4802			cmd->sc_data_direction);
 4803			BUG();
 4804			break;
 4805		}
 4806	} else {
 4807		control |= IOACCEL1_CONTROL_NODATAXFER;
 4808	}
 4809
 4810	c->Header.SGList = use_sg;
 4811	/* Fill out the command structure to submit */
 4812	cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
 4813	cp->transfer_len = cpu_to_le32(total_len);
 4814	cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
 4815			(cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
 4816	cp->control = cpu_to_le32(control);
 4817	memcpy(cp->CDB, cdb, cdb_len);
 4818	memcpy(cp->CISS_LUN, scsi3addr, 8);
 4819	/* Tag was already set at init time. */
 4820	enqueue_cmd_and_start_io(h, c);
 4821	return 0;
 4822}
 4823
 4824/*
 4825 * Queue a command directly to a device behind the controller using the
 4826 * I/O accelerator path.
 4827 */
 4828static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
 4829	struct CommandList *c)
 4830{
 4831	struct scsi_cmnd *cmd = c->scsi_cmd;
 4832	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
 4833
 4834	if (!dev)
 4835		return -1;
 4836
 4837	c->phys_disk = dev;
 4838
 4839	if (dev->in_reset)
 4840		return -1;
 4841
 4842	return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
 4843		cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
 4844}
 4845
 4846/*
 4847 * Set encryption parameters for the ioaccel2 request
 4848 */
 4849static void set_encrypt_ioaccel2(struct ctlr_info *h,
 4850	struct CommandList *c, struct io_accel2_cmd *cp)
 4851{
 4852	struct scsi_cmnd *cmd = c->scsi_cmd;
 4853	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
 4854	struct raid_map_data *map = &dev->raid_map;
 4855	u64 first_block;
 4856
 4857	/* Are we doing encryption on this device */
 4858	if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
 4859		return;
 4860	/* Set the data encryption key index. */
 4861	cp->dekindex = map->dekindex;
 4862
 4863	/* Set the encryption enable flag, encoded into direction field. */
 4864	cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
 4865
 4866	/* Set encryption tweak values based on logical block address.
 4867	 * If the block size is 512, the tweak value is the LBA.
 4868	 * For other block sizes, the tweak is (LBA * block size) / 512.
 4869	 */
 4870	switch (cmd->cmnd[0]) {
 4871	/* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
 4872	case READ_6:
 4873	case WRITE_6:
 4874		first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
 4875				(cmd->cmnd[2] << 8) |
 4876				cmd->cmnd[3]);
 4877		break;
 4878	case WRITE_10:
 4879	case READ_10:
 4880	/* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
 4881	case WRITE_12:
 4882	case READ_12:
 4883		first_block = get_unaligned_be32(&cmd->cmnd[2]);
 4884		break;
 4885	case WRITE_16:
 4886	case READ_16:
 4887		first_block = get_unaligned_be64(&cmd->cmnd[2]);
 4888		break;
 4889	default:
 4890		dev_err(&h->pdev->dev,
 4891			"ERROR: %s: size (0x%x) not supported for encryption\n",
 4892			__func__, cmd->cmnd[0]);
 4893		BUG();
 4894		break;
 4895	}
 4896
 4897	if (le32_to_cpu(map->volume_blk_size) != 512)
 4898		first_block = first_block *
 4899				le32_to_cpu(map->volume_blk_size)/512;
 4900
 4901	cp->tweak_lower = cpu_to_le32(first_block);
 4902	cp->tweak_upper = cpu_to_le32(first_block >> 32);
 4903}
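
/*
 * Worked example (editor's note, not part of the driver): for a volume with a
 * 4096-byte block size, a request starting at LBA 100 gets an encryption
 * tweak of 100 * 4096 / 512 = 800, so tweak_lower = 800 and tweak_upper = 0.
 * With a 512-byte block size the tweak is simply the LBA itself.
 */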
 4904
 4905static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
 4906	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
 4907	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
 4908{
 4909	struct scsi_cmnd *cmd = c->scsi_cmd;
 4910	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
 4911	struct ioaccel2_sg_element *curr_sg;
 4912	int use_sg, i;
 4913	struct scatterlist *sg;
 4914	u64 addr64;
 4915	u32 len;
 4916	u32 total_len = 0;
 4917
 4918	if (!cmd->device)
 4919		return -1;
 4920
 4921	if (!cmd->device->hostdata)
 4922		return -1;
 4923
 4924	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
 4925
 4926	if (is_zero_length_transfer(cdb)) {
 4927		warn_zero_length_transfer(h, cdb, cdb_len, __func__);
 4928		atomic_dec(&phys_disk->ioaccel_cmds_out);
 4929		return IO_ACCEL_INELIGIBLE;
 4930	}
 4931
 4932	if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
 4933		atomic_dec(&phys_disk->ioaccel_cmds_out);
 4934		return IO_ACCEL_INELIGIBLE;
 4935	}
 4936
 4937	c->cmd_type = CMD_IOACCEL2;
 4938	/* Adjust the DMA address to point to the accelerated command buffer */
 4939	c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
 4940				(c->cmdindex * sizeof(*cp));
 4941	BUG_ON(c->busaddr & 0x0000007F);
 4942
 4943	memset(cp, 0, sizeof(*cp));
 4944	cp->IU_type = IOACCEL2_IU_TYPE;
 4945
 4946	use_sg = scsi_dma_map(cmd);
 4947	if (use_sg < 0) {
 4948		atomic_dec(&phys_disk->ioaccel_cmds_out);
 4949		return use_sg;
 4950	}
 4951
 4952	if (use_sg) {
 4953		curr_sg = cp->sg;
 4954		if (use_sg > h->ioaccel_maxsg) {
 4955			addr64 = le64_to_cpu(
 4956				h->ioaccel2_cmd_sg_list[c->cmdindex]->address);
 4957			curr_sg->address = cpu_to_le64(addr64);
 4958			curr_sg->length = 0;
 4959			curr_sg->reserved[0] = 0;
 4960			curr_sg->reserved[1] = 0;
 4961			curr_sg->reserved[2] = 0;
 4962			curr_sg->chain_indicator = IOACCEL2_CHAIN;
 4963
 4964			curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
 4965		}
 4966		scsi_for_each_sg(cmd, sg, use_sg, i) {
 4967			addr64 = (u64) sg_dma_address(sg);
 4968			len  = sg_dma_len(sg);
 4969			total_len += len;
 4970			curr_sg->address = cpu_to_le64(addr64);
 4971			curr_sg->length = cpu_to_le32(len);
 4972			curr_sg->reserved[0] = 0;
 4973			curr_sg->reserved[1] = 0;
 4974			curr_sg->reserved[2] = 0;
 4975			curr_sg->chain_indicator = 0;
 4976			curr_sg++;
 4977		}
 4978
 4979		/*
 4980		 * Set the last s/g element bit
 4981		 */
 4982		(curr_sg - 1)->chain_indicator = IOACCEL2_LAST_SG;
 4983
 4984		switch (cmd->sc_data_direction) {
 4985		case DMA_TO_DEVICE:
 4986			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
 4987			cp->direction |= IOACCEL2_DIR_DATA_OUT;
 4988			break;
 4989		case DMA_FROM_DEVICE:
 4990			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
 4991			cp->direction |= IOACCEL2_DIR_DATA_IN;
 4992			break;
 4993		case DMA_NONE:
 4994			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
 4995			cp->direction |= IOACCEL2_DIR_NO_DATA;
 4996			break;
 4997		default:
 4998			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
 4999				cmd->sc_data_direction);
 5000			BUG();
 5001			break;
 5002		}
 5003	} else {
 5004		cp->direction &= ~IOACCEL2_DIRECTION_MASK;
 5005		cp->direction |= IOACCEL2_DIR_NO_DATA;
 5006	}
 5007
 5008	/* Set encryption parameters, if necessary */
 5009	set_encrypt_ioaccel2(h, c, cp);
 5010
 5011	cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
 5012	cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
 5013	memcpy(cp->cdb, cdb, sizeof(cp->cdb));
 5014
 5015	cp->data_len = cpu_to_le32(total_len);
 5016	cp->err_ptr = cpu_to_le64(c->busaddr +
 5017			offsetof(struct io_accel2_cmd, error_data));
 5018	cp->err_len = cpu_to_le32(sizeof(cp->error_data));
 5019
 5020	/* fill in sg elements */
 5021	if (use_sg > h->ioaccel_maxsg) {
 5022		cp->sg_count = 1;
 5023		cp->sg[0].length = cpu_to_le32(use_sg * sizeof(cp->sg[0]));
 5024		if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) {
 5025			atomic_dec(&phys_disk->ioaccel_cmds_out);
 5026			scsi_dma_unmap(cmd);
 5027			return -1;
 5028		}
 5029	} else
 5030		cp->sg_count = (u8) use_sg;
 5031
 5032	if (phys_disk->in_reset) {
 5033		cmd->result = DID_RESET << 16;
 5034		return -1;
 5035	}
 5036
 5037	enqueue_cmd_and_start_io(h, c);
 5038	return 0;
 5039}
 5040
 5041/*
 5042 * Queue a command to the correct I/O accelerator path.
 5043 */
 5044static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
 5045	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
 5046	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
 5047{
 5048	if (!c->scsi_cmd->device)
 5049		return -1;
 5050
 5051	if (!c->scsi_cmd->device->hostdata)
 5052		return -1;
 5053
 5054	if (phys_disk->in_reset)
 5055		return -1;
 5056
 5057	/* Try to honor the device's queue depth */
 5058	if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
 5059					phys_disk->queue_depth) {
 5060		atomic_dec(&phys_disk->ioaccel_cmds_out);
 5061		return IO_ACCEL_INELIGIBLE;
 5062	}
 5063	if (h->transMethod & CFGTBL_Trans_io_accel1)
 5064		return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
 5065						cdb, cdb_len, scsi3addr,
 5066						phys_disk);
 5067	else
 5068		return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
 5069						cdb, cdb_len, scsi3addr,
 5070						phys_disk);
 5071}
 5072
 5073static void raid_map_helper(struct raid_map_data *map,
 5074		int offload_to_mirror, u32 *map_index, u32 *current_group)
 5075{
 5076	if (offload_to_mirror == 0)  {
 5077		/* use physical disk in the first mirrored group. */
 5078		*map_index %= le16_to_cpu(map->data_disks_per_row);
 5079		return;
 5080	}
 5081	do {
 5082		/* determine mirror group that *map_index indicates */
 5083		*current_group = *map_index /
 5084			le16_to_cpu(map->data_disks_per_row);
 5085		if (offload_to_mirror == *current_group)
 5086			continue;
 5087		if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
 5088			/* select map index from next group */
 5089			*map_index += le16_to_cpu(map->data_disks_per_row);
 5090			(*current_group)++;
 5091		} else {
 5092			/* select map index from first group */
 5093			*map_index %= le16_to_cpu(map->data_disks_per_row);
 5094			*current_group = 0;
 5095		}
 5096	} while (offload_to_mirror != *current_group);
 5097}
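
/*
 * Worked example (editor's note, not part of the driver): for a map with
 * data_disks_per_row == 3 and layout_map_count == 3 (three mirror groups), an
 * initial map_index of 7 lies in group 7 / 3 = 2.  With offload_to_mirror == 0
 * raid_map_helper() reduces it to 7 % 3 = 1 (first group); with
 * offload_to_mirror == 1 it wraps to the first group and then advances one
 * group, ending at map_index 1 + 3 = 4.
 */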
 5098
 5099/*
 5100 * Attempt to perform offload RAID mapping for a logical volume I/O.
 5101 */
 5102static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
 5103	struct CommandList *c)
 5104{
 5105	struct scsi_cmnd *cmd = c->scsi_cmd;
 5106	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
 5107	struct raid_map_data *map = &dev->raid_map;
 5108	struct raid_map_disk_data *dd = &map->data[0];
 5109	int is_write = 0;
 5110	u32 map_index;
 5111	u64 first_block, last_block;
 5112	u32 block_cnt;
 5113	u32 blocks_per_row;
 5114	u64 first_row, last_row;
 5115	u32 first_row_offset, last_row_offset;
 5116	u32 first_column, last_column;
 5117	u64 r0_first_row, r0_last_row;
 5118	u32 r5or6_blocks_per_row;
 5119	u64 r5or6_first_row, r5or6_last_row;
 5120	u32 r5or6_first_row_offset, r5or6_last_row_offset;
 5121	u32 r5or6_first_column, r5or6_last_column;
 5122	u32 total_disks_per_row;
 5123	u32 stripesize;
 5124	u32 first_group, last_group, current_group;
 5125	u32 map_row;
 5126	u32 disk_handle;
 5127	u64 disk_block;
 5128	u32 disk_block_cnt;
 5129	u8 cdb[16];
 5130	u8 cdb_len;
 5131	u16 strip_size;
 5132#if BITS_PER_LONG == 32
 5133	u64 tmpdiv;
 5134#endif
 5135	int offload_to_mirror;
 5136
 5137	if (!dev)
 5138		return -1;
 5139
 5140	if (dev->in_reset)
 5141		return -1;
 5142
 5143	/* check for valid opcode, get LBA and block count */
 5144	switch (cmd->cmnd[0]) {
 5145	case WRITE_6:
 5146		is_write = 1;
 5147		fallthrough;
 5148	case READ_6:
 5149		first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
 5150				(cmd->cmnd[2] << 8) |
 5151				cmd->cmnd[3]);
 5152		block_cnt = cmd->cmnd[4];
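      		/* Per the SCSI spec, a transfer length of 0 in READ(6)/WRITE(6) means 256 blocks. */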
 5153		if (block_cnt == 0)
 5154			block_cnt = 256;
 5155		break;
 5156	case WRITE_10:
 5157		is_write = 1;
 5158		fallthrough;
 5159	case READ_10:
 5160		first_block =
 5161			(((u64) cmd->cmnd[2]) << 24) |
 5162			(((u64) cmd->cmnd[3]) << 16) |
 5163			(((u64) cmd->cmnd[4]) << 8) |
 5164			cmd->cmnd[5];
 5165		block_cnt =
 5166			(((u32) cmd->cmnd[7]) << 8) |
 5167			cmd->cmnd[8];
 5168		break;
 5169	case WRITE_12:
 5170		is_write = 1;
 5171		fallthrough;
 5172	case READ_12:
 5173		first_block =
 5174			(((u64) cmd->cmnd[2]) << 24) |
 5175			(((u64) cmd->cmnd[3]) << 16) |
 5176			(((u64) cmd->cmnd[4]) << 8) |
 5177			cmd->cmnd[5];
 5178		block_cnt =
 5179			(((u32) cmd->cmnd[6]) << 24) |
 5180			(((u32) cmd->cmnd[7]) << 16) |
 5181			(((u32) cmd->cmnd[8]) << 8) |
 5182		cmd->cmnd[9];
 5183		break;
 5184	case WRITE_16:
 5185		is_write = 1;
 5186		fallthrough;
 5187	case READ_16:
 5188		first_block =
 5189			(((u64) cmd->cmnd[2]) << 56) |
 5190			(((u64) cmd->cmnd[3]) << 48) |
 5191			(((u64) cmd->cmnd[4]) << 40) |
 5192			(((u64) cmd->cmnd[5]) << 32) |
 5193			(((u64) cmd->cmnd[6]) << 24) |
 5194			(((u64) cmd->cmnd[7]) << 16) |
 5195			(((u64) cmd->cmnd[8]) << 8) |
 5196			cmd->cmnd[9];
 5197		block_cnt =
 5198			(((u32) cmd->cmnd[10]) << 24) |
 5199			(((u32) cmd->cmnd[11]) << 16) |
 5200			(((u32) cmd->cmnd[12]) << 8) |
 5201			cmd->cmnd[13];
 5202		break;
 5203	default:
 5204		return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
 5205	}
 5206	last_block = first_block + block_cnt - 1;
 5207
 5208	/* check for write to non-RAID-0 */
 5209	if (is_write && dev->raid_level != 0)
 5210		return IO_ACCEL_INELIGIBLE;
 5211
 5212	/* check for invalid block or wraparound */
 5213	if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
 5214		last_block < first_block)
 5215		return IO_ACCEL_INELIGIBLE;
 5216
 5217	/* calculate stripe information for the request */
 5218	blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
 5219				le16_to_cpu(map->strip_size);
 5220	strip_size = le16_to_cpu(map->strip_size);
 5221#if BITS_PER_LONG == 32
 5222	tmpdiv = first_block;
 5223	(void) do_div(tmpdiv, blocks_per_row);
 5224	first_row = tmpdiv;
 5225	tmpdiv = last_block;
 5226	(void) do_div(tmpdiv, blocks_per_row);
 5227	last_row = tmpdiv;
 5228	first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
 5229	last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
 5230	tmpdiv = first_row_offset;
 5231	(void) do_div(tmpdiv, strip_size);
 5232	first_column = tmpdiv;
 5233	tmpdiv = last_row_offset;
 5234	(void) do_div(tmpdiv, strip_size);
 5235	last_column = tmpdiv;
 5236#else
 5237	first_row = first_block / blocks_per_row;
 5238	last_row = last_block / blocks_per_row;
 5239	first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
 5240	last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
 5241	first_column = first_row_offset / strip_size;
 5242	last_column = last_row_offset / strip_size;
 5243#endif
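      	/*
      	 * Example with made-up numbers: if data_disks_per_row = 3 and
      	 * strip_size = 128, then blocks_per_row = 384; an I/O starting at
      	 * block 500 gives first_row = 1, first_row_offset = 116 and
      	 * first_column = 0 (116 / 128).
      	 */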
 5244
 5245	/* if this isn't a single row/column then give to the controller */
 5246	if ((first_row != last_row) || (first_column != last_column))
 5247		return IO_ACCEL_INELIGIBLE;
 5248
 5249	/* proceeding with driver mapping */
 5250	total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
 5251				le16_to_cpu(map->metadata_disks_per_row);
 5252	map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
 5253				le16_to_cpu(map->row_cnt);
 5254	map_index = (map_row * total_disks_per_row) + first_column;
 5255
 5256	switch (dev->raid_level) {
 5257	case HPSA_RAID_0:
 5258		break; /* nothing special to do */
 5259	case HPSA_RAID_1:
 5260		/* Handles load balance across RAID 1 members.
 5261		 * (2-drive R1 and R10 with even # of drives.)
 5262		 * Appropriate for SSDs, not optimal for HDDs
 5263		 * Ensure we have the correct raid_map.
 5264		 */
 5265		if (le16_to_cpu(map->layout_map_count) != 2) {
 5266			hpsa_turn_off_ioaccel_for_device(dev);
 5267			return IO_ACCEL_INELIGIBLE;
 5268		}
 5269		if (dev->offload_to_mirror)
 5270			map_index += le16_to_cpu(map->data_disks_per_row);
 5271		dev->offload_to_mirror = !dev->offload_to_mirror;
 5272		break;
 5273	case HPSA_RAID_ADM:
  5274		/* Handles N-way mirrors (R1-ADM)
  5275		 * and R10 with # of drives divisible by 3.
 5276		 * Ensure we have the correct raid_map.
 5277		 */
 5278		if (le16_to_cpu(map->layout_map_count) != 3) {
 5279			hpsa_turn_off_ioaccel_for_device(dev);
 5280			return IO_ACCEL_INELIGIBLE;
 5281		}
 5282
 5283		offload_to_mirror = dev->offload_to_mirror;
 5284		raid_map_helper(map, offload_to_mirror,
 5285				&map_index, &current_group);
 5286		/* set mirror group to use next time */
 5287		offload_to_mirror =
 5288			(offload_to_mirror >=
 5289			le16_to_cpu(map->layout_map_count) - 1)
 5290			? 0 : offload_to_mirror + 1;
 5291		dev->offload_to_mirror = offload_to_mirror;
 5292		/* Avoid direct use of dev->offload_to_mirror within this
 5293		 * function since multiple threads might simultaneously
 5294		 * increment it beyond the range of dev->layout_map_count -1.
 5295		 */
 5296		break;
 5297	case HPSA_RAID_5:
 5298	case HPSA_RAID_6:
 5299		if (le16_to_cpu(map->layout_map_count) <= 1)
 5300			break;
 5301
 5302		/* Verify first and last block are in same RAID group */
 5303		r5or6_blocks_per_row =
 5304			le16_to_cpu(map->strip_size) *
 5305			le16_to_cpu(map->data_disks_per_row);
 5306		if (r5or6_blocks_per_row == 0) {
 5307			hpsa_turn_off_ioaccel_for_device(dev);
 5308			return IO_ACCEL_INELIGIBLE;
 5309		}
 5310		stripesize = r5or6_blocks_per_row *
 5311			le16_to_cpu(map->layout_map_count);
 5312#if BITS_PER_LONG == 32
 5313		tmpdiv = first_block;
 5314		first_group = do_div(tmpdiv, stripesize);
 5315		tmpdiv = first_group;
 5316		(void) do_div(tmpdiv, r5or6_blocks_per_row);
 5317		first_group = tmpdiv;
 5318		tmpdiv = last_block;
 5319		last_group = do_div(tmpdiv, stripesize);
 5320		tmpdiv = last_group;
 5321		(void) do_div(tmpdiv, r5or6_blocks_per_row);
 5322		last_group = tmpdiv;
 5323#else
 5324		first_group = (first_block % stripesize) / r5or6_blocks_per_row;
 5325		last_group = (last_block % stripesize) / r5or6_blocks_per_row;
 5326#endif
 5327		if (first_group != last_group)
 5328			return IO_ACCEL_INELIGIBLE;
 5329
 5330		/* Verify request is in a single row of RAID 5/6 */
 5331#if BITS_PER_LONG == 32
 5332		tmpdiv = first_block;
 5333		(void) do_div(tmpdiv, stripesize);
 5334		first_row = r5or6_first_row = r0_first_row = tmpdiv;
 5335		tmpdiv = last_block;
 5336		(void) do_div(tmpdiv, stripesize);
 5337		r5or6_last_row = r0_last_row = tmpdiv;
 5338#else
 5339		first_row = r5or6_first_row = r0_first_row =
 5340						first_block / stripesize;
 5341		r5or6_last_row = r0_last_row = last_block / stripesize;
 5342#endif
 5343		if (r5or6_first_row != r5or6_last_row)
 5344			return IO_ACCEL_INELIGIBLE;
 5345
 5346
 5347		/* Verify request is in a single column */
 5348#if BITS_PER_LONG == 32
 5349		tmpdiv = first_block;
 5350		first_row_offset = do_div(tmpdiv, stripesize);
 5351		tmpdiv = first_row_offset;
 5352		first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
 5353		r5or6_first_row_offset = first_row_offset;
 5354		tmpdiv = last_block;
 5355		r5or6_last_row_offset = do_div(tmpdiv, stripesize);
 5356		tmpdiv = r5or6_last_row_offset;
 5357		r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
 5358		tmpdiv = r5or6_first_row_offset;
 5359		(void) do_div(tmpdiv, map->strip_size);
 5360		first_column = r5or6_first_column = tmpdiv;
 5361		tmpdiv = r5or6_last_row_offset;
 5362		(void) do_div(tmpdiv, map->strip_size);
 5363		r5or6_last_column = tmpdiv;
 5364#else
 5365		first_row_offset = r5or6_first_row_offset =
 5366			(u32)((first_block % stripesize) %
 5367						r5or6_blocks_per_row);
 5368
 5369		r5or6_last_row_offset =
 5370			(u32)((last_block % stripesize) %
 5371						r5or6_blocks_per_row);
 5372
 5373		first_column = r5or6_first_column =
 5374			r5or6_first_row_offset / le16_to_cpu(map->strip_size);
 5375		r5or6_last_column =
 5376			r5or6_last_row_offset / le16_to_cpu(map->strip_size);
 5377#endif
 5378		if (r5or6_first_column != r5or6_last_column)
 5379			return IO_ACCEL_INELIGIBLE;
 5380
 5381		/* Request is eligible */
 5382		map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
 5383			le16_to_cpu(map->row_cnt);
 5384
 5385		map_index = (first_group *
 5386			(le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
 5387			(map_row * total_disks_per_row) + first_column;
 5388		break;
 5389	default:
 5390		return IO_ACCEL_INELIGIBLE;
 5391	}
 5392
 5393	if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
 5394		return IO_ACCEL_INELIGIBLE;
 5395
 5396	c->phys_disk = dev->phys_disk[map_index];
 5397	if (!c->phys_disk)
 5398		return IO_ACCEL_INELIGIBLE;
 5399
 5400	disk_handle = dd[map_index].ioaccel_handle;
 5401	disk_block = le64_to_cpu(map->disk_starting_blk) +
 5402			first_row * le16_to_cpu(map->strip_size) +
 5403			(first_row_offset - first_column *
 5404			le16_to_cpu(map->strip_size));
 5405	disk_block_cnt = block_cnt;
 5406
 5407	/* handle differing logical/physical block sizes */
 5408	if (map->phys_blk_shift) {
 5409		disk_block <<= map->phys_blk_shift;
 5410		disk_block_cnt <<= map->phys_blk_shift;
 5411	}
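      	/* The request was confined to a single strip above, so the count is
      	 * expected to still fit in 16 bits (as the 10-byte CDB below requires).
      	 */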
 5412	BUG_ON(disk_block_cnt > 0xffff);
 5413
 5414	/* build the new CDB for the physical disk I/O */
 5415	if (disk_block > 0xffffffff) {
 5416		cdb[0] = is_write ? WRITE_16 : READ_16;
 5417		cdb[1] = 0;
 5418		cdb[2] = (u8) (disk_block >> 56);
 5419		cdb[3] = (u8) (disk_block >> 48);
 5420		cdb[4] = (u8) (disk_block >> 40);
 5421		cdb[5] = (u8) (disk_block >> 32);
 5422		cdb[6] = (u8) (disk_block >> 24);
 5423		cdb[7] = (u8) (disk_block >> 16);
 5424		cdb[8] = (u8) (disk_block >> 8);
 5425		cdb[9] = (u8) (disk_block);
 5426		cdb[10] = (u8) (disk_block_cnt >> 24);
 5427		cdb[11] = (u8) (disk_block_cnt >> 16);
 5428		cdb[12] = (u8) (disk_block_cnt >> 8);
 5429		cdb[13] = (u8) (disk_block_cnt);
 5430		cdb[14] = 0;
 5431		cdb[15] = 0;
 5432		cdb_len = 16;
 5433	} else {
 5434		cdb[0] = is_write ? WRITE_10 : READ_10;
 5435		cdb[1] = 0;
 5436		cdb[2] = (u8) (disk_block >> 24);
 5437		cdb[3] = (u8) (disk_block >> 16);
 5438		cdb[4] = (u8) (disk_block >> 8);
 5439		cdb[5] = (u8) (disk_block);
 5440		cdb[6] = 0;
 5441		cdb[7] = (u8) (disk_block_cnt >> 8);
 5442		cdb[8] = (u8) (disk_block_cnt);
 5443		cdb[9] = 0;
 5444		cdb_len = 10;
 5445	}
 5446	return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
 5447						dev->scsi3addr,
 5448						dev->phys_disk[map_index]);
 5449}
 5450
 5451/*
 5452 * Submit commands down the "normal" RAID stack path
  5453 * All callers of hpsa_ciss_submit must check lockup_detected
  5454 * beforehand: before and (optionally) after calling cmd_alloc.
 5455 */
 5456static int hpsa_ciss_submit(struct ctlr_info *h,
 5457	struct CommandList *c, struct scsi_cmnd *cmd,
 5458	struct hpsa_scsi_dev_t *dev)
 5459{
 5460	cmd->host_scribble = (unsigned char *) c;
 5461	c->cmd_type = CMD_SCSI;
 5462	c->scsi_cmd = cmd;
 5463	c->Header.ReplyQueue = 0;  /* unused in simple mode */
 5464	memcpy(&c->Header.LUN.LunAddrBytes[0], &dev->scsi3addr[0], 8);
 5465	c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));
 5466
 5467	/* Fill in the request block... */
 5468
 5469	c->Request.Timeout = 0;
 5470	BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
 5471	c->Request.CDBLen = cmd->cmd_len;
 5472	memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
 5473	switch (cmd->sc_data_direction) {
 5474	case DMA_TO_DEVICE:
 5475		c->Request.type_attr_dir =
 5476			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
 5477		break;
 5478	case DMA_FROM_DEVICE:
 5479		c->Request.type_attr_dir =
 5480			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
 5481		break;
 5482	case DMA_NONE:
 5483		c->Request.type_attr_dir =
 5484			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
 5485		break;
 5486	case DMA_BIDIRECTIONAL:
 5487		/* This can happen if a buggy application does a scsi passthru
 5488		 * and sets both inlen and outlen to non-zero. ( see
 5489		 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
 5490		 */
 5491
 5492		c->Request.type_attr_dir =
 5493			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
 5494		/* This is technically wrong, and hpsa controllers should
 5495		 * reject it with CMD_INVALID, which is the most correct
 5496		 * response, but non-fibre backends appear to let it
 5497		 * slide by, and give the same results as if this field
 5498		 * were set correctly.  Either way is acceptable for
 5499		 * our purposes here.
 5500		 */
 5501
 5502		break;
 5503
 5504	default:
 5505		dev_err(&h->pdev->dev, "unknown data direction: %d\n",
 5506			cmd->sc_data_direction);
 5507		BUG();
 5508		break;
 5509	}
 5510
 5511	if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
 5512		hpsa_cmd_resolve_and_free(h, c);
 5513		return SCSI_MLQUEUE_HOST_BUSY;
 5514	}
 5515
 5516	if (dev->in_reset) {
 5517		hpsa_cmd_resolve_and_free(h, c);
 5518		return SCSI_MLQUEUE_HOST_BUSY;
 5519	}
 5520
 5521	c->device = dev;
 5522
 5523	enqueue_cmd_and_start_io(h, c);
 5524	/* the cmd'll come back via intr handler in complete_scsi_command()  */
 5525	return 0;
 5526}
 5527
 5528static void hpsa_cmd_init(struct ctlr_info *h, int index,
 5529				struct CommandList *c)
 5530{
 5531	dma_addr_t cmd_dma_handle, err_dma_handle;
 5532
  5533	/* Zero out all of the CommandList except the last field, refcount */
 5534	memset(c, 0, offsetof(struct CommandList, refcount));
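      	/* The tag encodes the pool index, so completions can look the command up directly. */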
 5535	c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT));
 5536	cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
 5537	c->err_info = h->errinfo_pool + index;
 5538	memset(c->err_info, 0, sizeof(*c->err_info));
 5539	err_dma_handle = h->errinfo_pool_dhandle
 5540	    + index * sizeof(*c->err_info);
 5541	c->cmdindex = index;
 5542	c->busaddr = (u32) cmd_dma_handle;
 5543	c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
 5544	c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
 5545	c->h = h;
 5546	c->scsi_cmd = SCSI_CMD_IDLE;
 5547}
 5548
 5549static void hpsa_preinitialize_commands(struct ctlr_info *h)
 5550{
 5551	int i;
 5552
 5553	for (i = 0; i < h->nr_cmds; i++) {
 5554		struct CommandList *c = h->cmd_pool + i;
 5555
 5556		hpsa_cmd_init(h, i, c);
 5557		atomic_set(&c->refcount, 0);
 5558	}
 5559}
 5560
 5561static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
 5562				struct CommandList *c)
 5563{
 5564	dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
 5565
 5566	BUG_ON(c->cmdindex != index);
 5567
 5568	memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
 5569	memset(c->err_info, 0, sizeof(*c->err_info));
 5570	c->busaddr = (u32) cmd_dma_handle;
 5571}
 5572
 5573static int hpsa_ioaccel_submit(struct ctlr_info *h,
 5574		struct CommandList *c, struct scsi_cmnd *cmd,
 5575		bool retry)
 5576{
 5577	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
 5578	int rc = IO_ACCEL_INELIGIBLE;
 5579
 5580	if (!dev)
 5581		return SCSI_MLQUEUE_HOST_BUSY;
 5582
 5583	if (dev->in_reset)
 5584		return SCSI_MLQUEUE_HOST_BUSY;
 5585
 5586	if (hpsa_simple_mode)
 5587		return IO_ACCEL_INELIGIBLE;
 5588
 5589	cmd->host_scribble = (unsigned char *) c;
 5590
 5591	if (dev->offload_enabled) {
 5592		hpsa_cmd_init(h, c->cmdindex, c); /* Zeroes out all fields */
 5593		c->cmd_type = CMD_SCSI;
 5594		c->scsi_cmd = cmd;
 5595		c->device = dev;
 5596		if (retry) /* Resubmit but do not increment device->commands_outstanding. */
 5597			c->retry_pending = true;
 5598		rc = hpsa_scsi_ioaccel_raid_map(h, c);
 5599		if (rc < 0)     /* scsi_dma_map failed. */
 5600			rc = SCSI_MLQUEUE_HOST_BUSY;
 5601	} else if (dev->hba_ioaccel_enabled) {
 5602		hpsa_cmd_init(h, c->cmdindex, c); /* Zeroes out all fields */
 5603		c->cmd_type = CMD_SCSI;
 5604		c->scsi_cmd = cmd;
 5605		c->device = dev;
 5606		if (retry) /* Resubmit but do not increment device->commands_outstanding. */
 5607			c->retry_pending = true;
 5608		rc = hpsa_scsi_ioaccel_direct_map(h, c);
 5609		if (rc < 0)     /* scsi_dma_map failed. */
 5610			rc = SCSI_MLQUEUE_HOST_BUSY;
 5611	}
 5612	return rc;
 5613}
 5614
 5615static void hpsa_command_resubmit_worker(struct work_struct *work)
 5616{
 5617	struct scsi_cmnd *cmd;
 5618	struct hpsa_scsi_dev_t *dev;
 5619	struct CommandList *c = container_of(work, struct CommandList, work);
 5620
 5621	cmd = c->scsi_cmd;
 5622	dev = cmd->device->hostdata;
 5623	if (!dev) {
 5624		cmd->result = DID_NO_CONNECT << 16;
 5625		return hpsa_cmd_free_and_done(c->h, c, cmd);
 5626	}
 5627
 5628	if (dev->in_reset) {
 5629		cmd->result = DID_RESET << 16;
 5630		return hpsa_cmd_free_and_done(c->h, c, cmd);
 5631	}
 5632
 5633	if (c->cmd_type == CMD_IOACCEL2) {
 5634		struct ctlr_info *h = c->h;
 5635		struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
 5636		int rc;
 5637
 5638		if (c2->error_data.serv_response ==
 5639				IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) {
 5640			/* Resubmit with the retry_pending flag set. */
 5641			rc = hpsa_ioaccel_submit(h, c, cmd, true);
 5642			if (rc == 0)
 5643				return;
 5644			if (rc == SCSI_MLQUEUE_HOST_BUSY) {
 5645				/*
 5646				 * If we get here, it means dma mapping failed.
 5647				 * Try again via scsi mid layer, which will
 5648				 * then get SCSI_MLQUEUE_HOST_BUSY.
 5649				 */
 5650				cmd->result = DID_IMM_RETRY << 16;
 5651				return hpsa_cmd_free_and_done(h, c, cmd);
 5652			}
 5653			/* else, fall thru and resubmit down CISS path */
 5654		}
 5655	}
 5656	hpsa_cmd_partial_init(c->h, c->cmdindex, c);
  5657	/*
  5658	 * We have not come in through queue_command here, so we can
  5659	 * set the retry_pending flag to mark this as a driver-initiated
  5660	 * retry attempt (i.e. not a SML-initiated retry).
  5661	 * In other words, we are submitting a driver-initiated retry.
  5662	 * Note: hpsa_ciss_submit does not zero out the command fields like
  5663	 *       ioaccel submit does.
  5664	 */
 5665	c->retry_pending = true;
 5666	if (hpsa_ciss_submit(c->h, c, cmd, dev)) {
 5667		/*
 5668		 * If we get here, it means dma mapping failed. Try
 5669		 * again via scsi mid layer, which will then get
 5670		 * SCSI_MLQUEUE_HOST_BUSY.
 5671		 *
 5672		 * hpsa_ciss_submit will have already freed c
 5673		 * if it encountered a dma mapping failure.
 5674		 */
 5675		cmd->result = DID_IMM_RETRY << 16;
 5676		scsi_done(cmd);
 5677	}
 5678}
 5679
 5680/* Running in struct Scsi_Host->host_lock less mode */
 5681static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
 5682{
 5683	struct ctlr_info *h;
 5684	struct hpsa_scsi_dev_t *dev;
 5685	struct CommandList *c;
 5686	int rc = 0;
 5687
 5688	/* Get the ptr to our adapter structure out of cmd->host. */
 5689	h = sdev_to_hba(cmd->device);
 5690
 5691	BUG_ON(scsi_cmd_to_rq(cmd)->tag < 0);
 5692
 5693	dev = cmd->device->hostdata;
 5694	if (!dev) {
 5695		cmd->result = DID_NO_CONNECT << 16;
 5696		scsi_done(cmd);
 5697		return 0;
 5698	}
 5699
 5700	if (dev->removed) {
 5701		cmd->result = DID_NO_CONNECT << 16;
 5702		scsi_done(cmd);
 5703		return 0;
 5704	}
 5705
 5706	if (unlikely(lockup_detected(h))) {
 5707		cmd->result = DID_NO_CONNECT << 16;
 5708		scsi_done(cmd);
 5709		return 0;
 5710	}
 5711
 5712	if (dev->in_reset)
 5713		return SCSI_MLQUEUE_DEVICE_BUSY;
 5714
 5715	c = cmd_tagged_alloc(h, cmd);
 5716	if (c == NULL)
 5717		return SCSI_MLQUEUE_DEVICE_BUSY;
 5718
 5719	/*
 5720	 * This is necessary because the SML doesn't zero out this field during
 5721	 * error recovery.
 5722	 */
 5723	cmd->result = 0;
 5724
 5725	/*
 5726	 * Call alternate submit routine for I/O accelerated commands.
 5727	 * Retries always go down the normal I/O path.
 5728	 * Note: If cmd->retries is non-zero, then this is a SML
 5729	 *       initiated retry and not a driver initiated retry.
 5730	 *       This command has been obtained from cmd_tagged_alloc
 5731	 *       and is therefore a brand-new command.
 5732	 */
 5733	if (likely(cmd->retries == 0 &&
 5734			!blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)) &&
 5735			h->acciopath_status)) {
 5736		/* Submit with the retry_pending flag unset. */
 5737		rc = hpsa_ioaccel_submit(h, c, cmd, false);
 5738		if (rc == 0)
 5739			return 0;
 5740		if (rc == SCSI_MLQUEUE_HOST_BUSY) {
 5741			hpsa_cmd_resolve_and_free(h, c);
 5742			return SCSI_MLQUEUE_HOST_BUSY;
 5743		}
 5744	}
 5745	return hpsa_ciss_submit(h, c, cmd, dev);
 5746}
 5747
 5748static void hpsa_scan_complete(struct ctlr_info *h)
 5749{
 5750	unsigned long flags;
 5751
 5752	spin_lock_irqsave(&h->scan_lock, flags);
 5753	h->scan_finished = 1;
 5754	wake_up(&h->scan_wait_queue);
 5755	spin_unlock_irqrestore(&h->scan_lock, flags);
 5756}
 5757
 5758static void hpsa_scan_start(struct Scsi_Host *sh)
 5759{
 5760	struct ctlr_info *h = shost_to_hba(sh);
 5761	unsigned long flags;
 5762
 5763	/*
 5764	 * Don't let rescans be initiated on a controller known to be locked
 5765	 * up.  If the controller locks up *during* a rescan, that thread is
 5766	 * probably hosed, but at least we can prevent new rescan threads from
 5767	 * piling up on a locked up controller.
 5768	 */
 5769	if (unlikely(lockup_detected(h)))
 5770		return hpsa_scan_complete(h);
 5771
 5772	/*
 5773	 * If a scan is already waiting to run, no need to add another
 5774	 */
 5775	spin_lock_irqsave(&h->scan_lock, flags);
 5776	if (h->scan_waiting) {
 5777		spin_unlock_irqrestore(&h->scan_lock, flags);
 5778		return;
 5779	}
 5780
 5781	spin_unlock_irqrestore(&h->scan_lock, flags);
 5782
 5783	/* wait until any scan already in progress is finished. */
 5784	while (1) {
 5785		spin_lock_irqsave(&h->scan_lock, flags);
 5786		if (h->scan_finished)
 5787			break;
 5788		h->scan_waiting = 1;
 5789		spin_unlock_irqrestore(&h->scan_lock, flags);
 5790		wait_event(h->scan_wait_queue, h->scan_finished);
 5791		/* Note: We don't need to worry about a race between this
 5792		 * thread and driver unload because the midlayer will
 5793		 * have incremented the reference count, so unload won't
 5794		 * happen if we're in here.
 5795		 */
 5796	}
 5797	h->scan_finished = 0; /* mark scan as in progress */
 5798	h->scan_waiting = 0;
 5799	spin_unlock_irqrestore(&h->scan_lock, flags);
 5800
 5801	if (unlikely(lockup_detected(h)))
 5802		return hpsa_scan_complete(h);
 5803
 5804	/*
 5805	 * Do the scan after a reset completion
 5806	 */
 5807	spin_lock_irqsave(&h->reset_lock, flags);
 5808	if (h->reset_in_progress) {
 5809		h->drv_req_rescan = 1;
 5810		spin_unlock_irqrestore(&h->reset_lock, flags);
 5811		hpsa_scan_complete(h);
 5812		return;
 5813	}
 5814	spin_unlock_irqrestore(&h->reset_lock, flags);
 5815
 5816	hpsa_update_scsi_devices(h);
 5817
 5818	hpsa_scan_complete(h);
 5819}
 5820
 5821static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
 5822{
 5823	struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;
 5824
 5825	if (!logical_drive)
 5826		return -ENODEV;
 5827
 5828	if (qdepth < 1)
 5829		qdepth = 1;
 5830	else if (qdepth > logical_drive->queue_depth)
 5831		qdepth = logical_drive->queue_depth;
 5832
 5833	return scsi_change_queue_depth(sdev, qdepth);
 5834}
 5835
 5836static int hpsa_scan_finished(struct Scsi_Host *sh,
 5837	unsigned long elapsed_time)
 5838{
 5839	struct ctlr_info *h = shost_to_hba(sh);
 5840	unsigned long flags;
 5841	int finished;
 5842
 5843	spin_lock_irqsave(&h->scan_lock, flags);
 5844	finished = h->scan_finished;
 5845	spin_unlock_irqrestore(&h->scan_lock, flags);
 5846	return finished;
 5847}
 5848
 5849static int hpsa_scsi_host_alloc(struct ctlr_info *h)
 5850{
 5851	struct Scsi_Host *sh;
 5852
 5853	sh = scsi_host_alloc(&hpsa_driver_template, sizeof(struct ctlr_info));
 5854	if (sh == NULL) {
 5855		dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
 5856		return -ENOMEM;
 5857	}
 5858
 5859	sh->io_port = 0;
 5860	sh->n_io_port = 0;
 5861	sh->this_id = -1;
 5862	sh->max_channel = 3;
 5863	sh->max_cmd_len = MAX_COMMAND_SIZE;
 5864	sh->max_lun = HPSA_MAX_LUN;
 5865	sh->max_id = HPSA_MAX_LUN;
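      	/* Leave HPSA_NRESERVED_CMDS slots free for driver-internal commands. */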
 5866	sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS;
 5867	sh->cmd_per_lun = sh->can_queue;
 5868	sh->sg_tablesize = h->maxsgentries;
 5869	sh->transportt = hpsa_sas_transport_template;
 5870	sh->hostdata[0] = (unsigned long) h;
 5871	sh->irq = pci_irq_vector(h->pdev, 0);
 5872	sh->unique_id = sh->irq;
 5873
 5874	h->scsi_host = sh;
 5875	return 0;
 5876}
 5877
 5878static int hpsa_scsi_add_host(struct ctlr_info *h)
 5879{
 5880	int rv;
 5881
 5882	rv = scsi_add_host(h->scsi_host, &h->pdev->dev);
 5883	if (rv) {
 5884		dev_err(&h->pdev->dev, "scsi_add_host failed\n");
 5885		return rv;
 5886	}
 5887	scsi_scan_host(h->scsi_host);
 5888	return 0;
 5889}
 5890
 5891/*
 5892 * The block layer has already gone to the trouble of picking out a unique,
 5893 * small-integer tag for this request.  We use an offset from that value as
 5894 * an index to select our command block.  (The offset allows us to reserve the
 5895 * low-numbered entries for our own uses.)
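       * For example, block-layer tag 0 selects cmd_pool entry HPSA_NRESERVED_CMDS.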
 5896 */
 5897static int hpsa_get_cmd_index(struct scsi_cmnd *scmd)
 5898{
 5899	int idx = scsi_cmd_to_rq(scmd)->tag;
 5900
 5901	if (idx < 0)
 5902		return idx;
 5903
 5904	/* Offset to leave space for internal cmds. */
  5905	return idx + HPSA_NRESERVED_CMDS;
 5906}
 5907
 5908/*
 5909 * Send a TEST_UNIT_READY command to the specified LUN using the specified
 5910 * reply queue; returns zero if the unit is ready, and non-zero otherwise.
 5911 */
 5912static int hpsa_send_test_unit_ready(struct ctlr_info *h,
 5913				struct CommandList *c, unsigned char lunaddr[],
 5914				int reply_queue)
 5915{
 5916	int rc;
 5917
 5918	/* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
 5919	(void) fill_cmd(c, TEST_UNIT_READY, h,
 5920			NULL, 0, 0, lunaddr, TYPE_CMD);
 5921	rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
 5922	if (rc)
 5923		return rc;
 5924	/* no unmap needed here because no data xfer. */
 5925
 5926	/* Check if the unit is already ready. */
 5927	if (c->err_info->CommandStatus == CMD_SUCCESS)
 5928		return 0;
 5929
 5930	/*
 5931	 * The first command sent after reset will receive "unit attention" to
 5932	 * indicate that the LUN has been reset...this is actually what we're
 5933	 * looking for (but, success is good too).
 5934	 */
 5935	if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
 5936		c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
 5937			(c->err_info->SenseInfo[2] == NO_SENSE ||
 5938			 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
 5939		return 0;
 5940
 5941	return 1;
 5942}
 5943
 5944/*
 5945 * Wait for a TEST_UNIT_READY command to complete, retrying as necessary;
 5946 * returns zero when the unit is ready, and non-zero when giving up.
 5947 */
 5948static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h,
 5949				struct CommandList *c,
 5950				unsigned char lunaddr[], int reply_queue)
 5951{
 5952	int rc;
 5953	int count = 0;
 5954	int waittime = 1; /* seconds */
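      	/* Retry up to HPSA_TUR_RETRY_LIMIT times, doubling the delay between attempts up to HPSA_MAX_WAIT_INTERVAL_SECS seconds. */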
 5955
 5956	/* Send test unit ready until device ready, or give up. */
 5957	for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) {
 5958
 5959		/*
 5960		 * Wait for a bit.  do this first, because if we send
 5961		 * the TUR right away, the reset will just abort it.
 5962		 */
 5963		msleep(1000 * waittime);
 5964
 5965		rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue);
 5966		if (!rc)
 5967			break;
 5968
 5969		/* Increase wait time with each try, up to a point. */
 5970		if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
 5971			waittime *= 2;
 5972
 5973		dev_warn(&h->pdev->dev,
 5974			 "waiting %d secs for device to become ready.\n",
 5975			 waittime);
 5976	}
 5977
 5978	return rc;
 5979}
 5980
 5981static int wait_for_device_to_become_ready(struct ctlr_info *h,
 5982					   unsigned char lunaddr[],
 5983					   int reply_queue)
 5984{
 5985	int first_queue;
 5986	int last_queue;
 5987	int rq;
 5988	int rc = 0;
 5989	struct CommandList *c;
 5990
 5991	c = cmd_alloc(h);
 5992
 5993	/*
 5994	 * If no specific reply queue was requested, then send the TUR
 5995	 * repeatedly, requesting a reply on each reply queue; otherwise execute
 5996	 * the loop exactly once using only the specified queue.
 5997	 */
 5998	if (reply_queue == DEFAULT_REPLY_QUEUE) {
 5999		first_queue = 0;
 6000		last_queue = h->nreply_queues - 1;
 6001	} else {
 6002		first_queue = reply_queue;
 6003		last_queue = reply_queue;
 6004	}
 6005
 6006	for (rq = first_queue; rq <= last_queue; rq++) {
 6007		rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq);
 6008		if (rc)
 6009			break;
 6010	}
 6011
 6012	if (rc)
 6013		dev_warn(&h->pdev->dev, "giving up on device.\n");
 6014	else
 6015		dev_warn(&h->pdev->dev, "device is ready.\n");
 6016
 6017	cmd_free(h, c);
 6018	return rc;
 6019}
 6020
 6021/* Need at least one of these error handlers to keep ../scsi/hosts.c from
 6022 * complaining.  Doing a host- or bus-reset can't do anything good here.
 6023 */
 6024static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
 6025{
 6026	int rc = SUCCESS;
 6027	int i;
 6028	struct ctlr_info *h;
 6029	struct hpsa_scsi_dev_t *dev = NULL;
 6030	u8 reset_type;
 6031	char msg[48];
 6032	unsigned long flags;
 6033
 6034	/* find the controller to which the command to be aborted was sent */
 6035	h = sdev_to_hba(scsicmd->device);
 6036	if (h == NULL) /* paranoia */
 6037		return FAILED;
 6038
 6039	spin_lock_irqsave(&h->reset_lock, flags);
 6040	h->reset_in_progress = 1;
 6041	spin_unlock_irqrestore(&h->reset_lock, flags);
 6042
 6043	if (lockup_detected(h)) {
 6044		rc = FAILED;
 6045		goto return_reset_status;
 6046	}
 6047
 6048	dev = scsicmd->device->hostdata;
 6049	if (!dev) {
 6050		dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__);
 6051		rc = FAILED;
 6052		goto return_reset_status;
 6053	}
 6054
 6055	if (dev->devtype == TYPE_ENCLOSURE) {
 6056		rc = SUCCESS;
 6057		goto return_reset_status;
 6058	}
 6059
 6060	/* if controller locked up, we can guarantee command won't complete */
 6061	if (lockup_detected(h)) {
 6062		snprintf(msg, sizeof(msg),
 6063			 "cmd %d RESET FAILED, lockup detected",
 6064			 hpsa_get_cmd_index(scsicmd));
 6065		hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
 6066		rc = FAILED;
 6067		goto return_reset_status;
 6068	}
 6069
 6070	/* this reset request might be the result of a lockup; check */
 6071	if (detect_controller_lockup(h)) {
 6072		snprintf(msg, sizeof(msg),
 6073			 "cmd %d RESET FAILED, new lockup detected",
 6074			 hpsa_get_cmd_index(scsicmd));
 6075		hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
 6076		rc = FAILED;
 6077		goto return_reset_status;
 6078	}
 6079
 6080	/* Do not attempt on controller */
 6081	if (is_hba_lunid(dev->scsi3addr)) {
 6082		rc = SUCCESS;
 6083		goto return_reset_status;
 6084	}
 6085
 6086	if (is_logical_dev_addr_mode(dev->scsi3addr))
 6087		reset_type = HPSA_DEVICE_RESET_MSG;
 6088	else
 6089		reset_type = HPSA_PHYS_TARGET_RESET;
 6090
 6091	sprintf(msg, "resetting %s",
 6092		reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ");
 6093	hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
 6094
 6095	/*
 6096	 * wait to see if any commands will complete before sending reset
 6097	 */
 6098	dev->in_reset = true; /* block any new cmds from OS for this device */
 6099	for (i = 0; i < 10; i++) {
 6100		if (atomic_read(&dev->commands_outstanding) > 0)
 6101			msleep(1000);
 6102		else
 6103			break;
 6104	}
 6105
 6106	/* send a reset to the SCSI LUN which the command was sent to */
 6107	rc = hpsa_do_reset(h, dev, reset_type, DEFAULT_REPLY_QUEUE);
 6108	if (rc == 0)
 6109		rc = SUCCESS;
 6110	else
 6111		rc = FAILED;
 6112
 6113	sprintf(msg, "reset %s %s",
 6114		reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ",
 6115		rc == SUCCESS ? "completed successfully" : "failed");
 6116	hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
 6117
 6118return_reset_status:
 6119	spin_lock_irqsave(&h->reset_lock, flags);
 6120	h->reset_in_progress = 0;
 6121	if (dev)
 6122		dev->in_reset = false;
 6123	spin_unlock_irqrestore(&h->reset_lock, flags);
 6124	return rc;
 6125}
 6126
 6127/*
 6128 * For operations with an associated SCSI command, a command block is allocated
 6129 * at init, and managed by cmd_tagged_alloc() and cmd_tagged_free() using the
 6130 * block request tag as an index into a table of entries.  cmd_tagged_free() is
 6131 * the complement, although cmd_free() may be called instead.
 6132 * This function is only called for new requests from queue_command.
 6133 */
 6134static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
 6135					    struct scsi_cmnd *scmd)
 6136{
 6137	int idx = hpsa_get_cmd_index(scmd);
 6138	struct CommandList *c = h->cmd_pool + idx;
 6139
 6140	if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) {
 6141		dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n",
 6142			idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1);
 6143		/* The index value comes from the block layer, so if it's out of
 6144		 * bounds, it's probably not our bug.
 6145		 */
 6146		BUG();
 6147	}
 6148
 6149	if (unlikely(!hpsa_is_cmd_idle(c))) {
 6150		/*
 6151		 * We expect that the SCSI layer will hand us a unique tag
 6152		 * value.  Thus, there should never be a collision here between
 6153		 * two requests...because if the selected command isn't idle
 6154		 * then someone is going to be very disappointed.
 6155		 */
 6156		if (idx != h->last_collision_tag) { /* Print once per tag */
 6157			dev_warn(&h->pdev->dev,
 6158				"%s: tag collision (tag=%d)\n", __func__, idx);
 6159			if (scmd)
 6160				scsi_print_command(scmd);
 6161			h->last_collision_tag = idx;
 6162		}
 6163		return NULL;
 6164	}
 6165
 6166	atomic_inc(&c->refcount);
 6167	hpsa_cmd_partial_init(h, idx, c);
 6168
 6169	/*
 6170	 * This is a new command obtained from queue_command so
 6171	 * there have not been any driver initiated retry attempts.
 6172	 */
 6173	c->retry_pending = false;
 6174
 6175	return c;
 6176}
 6177
 6178static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c)
 6179{
 6180	/*
 6181	 * Release our reference to the block.  We don't need to do anything
 6182	 * else to free it, because it is accessed by index.
 6183	 */
 6184	(void)atomic_dec(&c->refcount);
 6185}
 6186
 6187/*
 6188 * For operations that cannot sleep, a command block is allocated at init,
 6189 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
 6190 * which ones are free or in use.  Lock must be held when calling this.
 6191 * cmd_free() is the complement.
 6192 * This function never gives up and returns NULL.  If it hangs,
 6193 * another thread must call cmd_free() to free some tags.
 6194 */
 6195
 6196static struct CommandList *cmd_alloc(struct ctlr_info *h)
 6197{
 6198	struct CommandList *c;
 6199	int refcount, i;
 6200	int offset = 0;
 6201
 6202	/*
  6203	 * There is some *extremely* small but non-zero chance that
 6204	 * multiple threads could get in here, and one thread could
 6205	 * be scanning through the list of bits looking for a free
 6206	 * one, but the free ones are always behind him, and other
 6207	 * threads sneak in behind him and eat them before he can
 6208	 * get to them, so that while there is always a free one, a
 6209	 * very unlucky thread might be starved anyway, never able to
 6210	 * beat the other threads.  In reality, this happens so
 6211	 * infrequently as to be indistinguishable from never.
 6212	 *
 6213	 * Note that we start allocating commands before the SCSI host structure
 6214	 * is initialized.  Since the search starts at bit zero, this
 6215	 * all works, since we have at least one command structure available;
 6216	 * however, it means that the structures with the low indexes have to be
 6217	 * reserved for driver-initiated requests, while requests from the block
 6218	 * layer will use the higher indexes.
 6219	 */
 6220
 6221	for (;;) {
 6222		i = find_next_zero_bit(h->cmd_pool_bits,
 6223					HPSA_NRESERVED_CMDS,
 6224					offset);
 6225		if (unlikely(i >= HPSA_NRESERVED_CMDS)) {
 6226			offset = 0;
 6227			continue;
 6228		}
 6229		c = h->cmd_pool + i;
 6230		refcount = atomic_inc_return(&c->refcount);
 6231		if (unlikely(refcount > 1)) {
 6232			cmd_free(h, c); /* already in use */
 6233			offset = (i + 1) % HPSA_NRESERVED_CMDS;
 6234			continue;
 6235		}
 6236		set_bit(i, h->cmd_pool_bits);
 6237		break; /* it's ours now. */
 6238	}
 6239	hpsa_cmd_partial_init(h, i, c);
 6240	c->device = NULL;
 6241
 6242	/*
 6243	 * cmd_alloc is for "internal" commands and they are never
 6244	 * retried.
 6245	 */
 6246	c->retry_pending = false;
 6247
 6248	return c;
 6249}
 6250
 6251/*
 6252 * This is the complementary operation to cmd_alloc().  Note, however, in some
 6253 * corner cases it may also be used to free blocks allocated by
 6254 * cmd_tagged_alloc() in which case the ref-count decrement does the trick and
 6255 * the clear-bit is harmless.
 6256 */
 6257static void cmd_free(struct ctlr_info *h, struct CommandList *c)
 6258{
 6259	if (atomic_dec_and_test(&c->refcount)) {
 6260		int i;
 6261
 6262		i = c - h->cmd_pool;
 6263		clear_bit(i, h->cmd_pool_bits);
 6264	}
 6265}
 6266
 6267#ifdef CONFIG_COMPAT
 6268
 6269static int hpsa_ioctl32_passthru(struct scsi_device *dev, unsigned int cmd,
 6270	void __user *arg)
 6271{
 6272	struct ctlr_info *h = sdev_to_hba(dev);
 6273	IOCTL32_Command_struct __user *arg32 = arg;
 6274	IOCTL_Command_struct arg64;
 6275	int err;
 6276	u32 cp;
 6277
 6278	if (!arg)
 6279		return -EINVAL;
 6280
 6281	memset(&arg64, 0, sizeof(arg64));
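      	/* The 32-bit and native structs share the same layout up to the buf pointer, so copy that prefix and translate the compat pointer separately. */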
 6282	if (copy_from_user(&arg64, arg32, offsetof(IOCTL_Command_struct, buf)))
 6283		return -EFAULT;
 6284	if (get_user(cp, &arg32->buf))
 6285		return -EFAULT;
 6286	arg64.buf = compat_ptr(cp);
 6287
 6288	if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
 6289		return -EAGAIN;
 6290	err = hpsa_passthru_ioctl(h, &arg64);
 6291	atomic_inc(&h->passthru_cmds_avail);
 6292	if (err)
 6293		return err;
 6294	if (copy_to_user(&arg32->error_info, &arg64.error_info,
 6295			 sizeof(arg32->error_info)))
 6296		return -EFAULT;
 6297	return 0;
 6298}
 6299
 6300static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
 6301	unsigned int cmd, void __user *arg)
 6302{
 6303	struct ctlr_info *h = sdev_to_hba(dev);
 6304	BIG_IOCTL32_Command_struct __user *arg32 = arg;
 6305	BIG_IOCTL_Command_struct arg64;
 6306	int err;
 6307	u32 cp;
 6308
 6309	if (!arg)
 6310		return -EINVAL;
 6311	memset(&arg64, 0, sizeof(arg64));
 6312	if (copy_from_user(&arg64, arg32,
 6313			   offsetof(BIG_IOCTL32_Command_struct, buf)))
 6314		return -EFAULT;
 6315	if (get_user(cp, &arg32->buf))
 6316		return -EFAULT;
 6317	arg64.buf = compat_ptr(cp);
 6318
 6319	if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
 6320		return -EAGAIN;
 6321	err = hpsa_big_passthru_ioctl(h, &arg64);
 6322	atomic_inc(&h->passthru_cmds_avail);
 6323	if (err)
 6324		return err;
 6325	if (copy_to_user(&arg32->error_info, &arg64.error_info,
 6326			 sizeof(arg32->error_info)))
 6327		return -EFAULT;
 6328	return 0;
 6329}
 6330
 6331static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd,
 6332			     void __user *arg)
 6333{
 6334	switch (cmd) {
 6335	case CCISS_GETPCIINFO:
 6336	case CCISS_GETINTINFO:
 6337	case CCISS_SETINTINFO:
 6338	case CCISS_GETNODENAME:
 6339	case CCISS_SETNODENAME:
 6340	case CCISS_GETHEARTBEAT:
 6341	case CCISS_GETBUSTYPES:
 6342	case CCISS_GETFIRMVER:
 6343	case CCISS_GETDRIVVER:
 6344	case CCISS_REVALIDVOLS:
 6345	case CCISS_DEREGDISK:
 6346	case CCISS_REGNEWDISK:
 6347	case CCISS_REGNEWD:
 6348	case CCISS_RESCANDISK:
 6349	case CCISS_GETLUNINFO:
 6350		return hpsa_ioctl(dev, cmd, arg);
 6351
 6352	case CCISS_PASSTHRU32:
 6353		return hpsa_ioctl32_passthru(dev, cmd, arg);
 6354	case CCISS_BIG_PASSTHRU32:
 6355		return hpsa_ioctl32_big_passthru(dev, cmd, arg);
 6356
 6357	default:
 6358		return -ENOIOCTLCMD;
 6359	}
 6360}
 6361#endif
 6362
 6363static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
 6364{
 6365	struct hpsa_pci_info pciinfo;
 6366
 6367	if (!argp)
 6368		return -EINVAL;
 6369	pciinfo.domain = pci_domain_nr(h->pdev->bus);
 6370	pciinfo.bus = h->pdev->bus->number;
 6371	pciinfo.dev_fn = h->pdev->devfn;
 6372	pciinfo.board_id = h->board_id;
 6373	if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
 6374		return -EFAULT;
 6375	return 0;
 6376}
 6377
 6378static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
 6379{
 6380	DriverVer_type DriverVer;
 6381	unsigned char vmaj, vmin, vsubmin;
 6382	int rc;
 6383
 6384	rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
 6385		&vmaj, &vmin, &vsubmin);
 6386	if (rc != 3) {
 6387		dev_info(&h->pdev->dev, "driver version string '%s' "
 6388			"unrecognized.", HPSA_DRIVER_VERSION);
 6389		vmaj = 0;
 6390		vmin = 0;
 6391		vsubmin = 0;
 6392	}
 6393	DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
 6394	if (!argp)
 6395		return -EINVAL;
 6396	if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
 6397		return -EFAULT;
 6398	return 0;
 6399}
 6400
 6401static int hpsa_passthru_ioctl(struct ctlr_info *h,
 6402			       IOCTL_Command_struct *iocommand)
 6403{
 6404	struct CommandList *c;
 6405	char *buff = NULL;
 6406	u64 temp64;
 6407	int rc = 0;
 6408
 6409	if (!capable(CAP_SYS_RAWIO))
 6410		return -EPERM;
 6411	if ((iocommand->buf_size < 1) &&
 6412	    (iocommand->Request.Type.Direction != XFER_NONE)) {
 6413		return -EINVAL;
 6414	}
 6415	if (iocommand->buf_size > 0) {
 6416		buff = kmalloc(iocommand->buf_size, GFP_KERNEL);
 6417		if (buff == NULL)
 6418			return -ENOMEM;
 6419		if (iocommand->Request.Type.Direction & XFER_WRITE) {
 6420			/* Copy the data into the buffer we created */
 6421			if (copy_from_user(buff, iocommand->buf,
 6422				iocommand->buf_size)) {
 6423				rc = -EFAULT;
 6424				goto out_kfree;
 6425			}
 6426		} else {
 6427			memset(buff, 0, iocommand->buf_size);
 6428		}
 6429	}
 6430	c = cmd_alloc(h);
 6431
 6432	/* Fill in the command type */
 6433	c->cmd_type = CMD_IOCTL_PEND;
 6434	c->scsi_cmd = SCSI_CMD_BUSY;
 6435	/* Fill in Command Header */
 6436	c->Header.ReplyQueue = 0; /* unused in simple mode */
 6437	if (iocommand->buf_size > 0) {	/* buffer to fill */
 6438		c->Header.SGList = 1;
 6439		c->Header.SGTotal = cpu_to_le16(1);
 6440	} else	{ /* no buffers to fill */
 6441		c->Header.SGList = 0;
 6442		c->Header.SGTotal = cpu_to_le16(0);
 6443	}
 6444	memcpy(&c->Header.LUN, &iocommand->LUN_info, sizeof(c->Header.LUN));
 6445
 6446	/* Fill in Request block */
 6447	memcpy(&c->Request, &iocommand->Request,
 6448		sizeof(c->Request));
 6449
 6450	/* Fill in the scatter gather information */
 6451	if (iocommand->buf_size > 0) {
 6452		temp64 = dma_map_single(&h->pdev->dev, buff,
 6453			iocommand->buf_size, DMA_BIDIRECTIONAL);
 6454		if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
 6455			c->SG[0].Addr = cpu_to_le64(0);
 6456			c->SG[0].Len = cpu_to_le32(0);
 6457			rc = -ENOMEM;
 6458			goto out;
 6459		}
 6460		c->SG[0].Addr = cpu_to_le64(temp64);
 6461		c->SG[0].Len = cpu_to_le32(iocommand->buf_size);
 6462		c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
 6463	}
 6464	rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
 6465					NO_TIMEOUT);
 6466	if (iocommand->buf_size > 0)
 6467		hpsa_pci_unmap(h->pdev, c, 1, DMA_BIDIRECTIONAL);
 6468	check_ioctl_unit_attention(h, c);
 6469	if (rc) {
 6470		rc = -EIO;
 6471		goto out;
 6472	}
 6473
 6474	/* Copy the error information out */
 6475	memcpy(&iocommand->error_info, c->err_info,
 6476		sizeof(iocommand->error_info));
 6477	if ((iocommand->Request.Type.Direction & XFER_READ) &&
 6478		iocommand->buf_size > 0) {
 6479		/* Copy the data out of the buffer we created */
 6480		if (copy_to_user(iocommand->buf, buff, iocommand->buf_size)) {
 6481			rc = -EFAULT;
 6482			goto out;
 6483		}
 6484	}
 6485out:
 6486	cmd_free(h, c);
 6487out_kfree:
 6488	kfree(buff);
 6489	return rc;
 6490}
 6491
 6492static int hpsa_big_passthru_ioctl(struct ctlr_info *h,
 6493				   BIG_IOCTL_Command_struct *ioc)
 6494{
 6495	struct CommandList *c;
 6496	unsigned char **buff = NULL;
 6497	int *buff_size = NULL;
 6498	u64 temp64;
 6499	BYTE sg_used = 0;
 6500	int status = 0;
 6501	u32 left;
 6502	u32 sz;
 6503	BYTE __user *data_ptr;
 6504
 6505	if (!capable(CAP_SYS_RAWIO))
 6506		return -EPERM;
 6507
 6508	if ((ioc->buf_size < 1) &&
 6509	    (ioc->Request.Type.Direction != XFER_NONE))
 6510		return -EINVAL;
 6511	/* Check kmalloc limits  using all SGs */
 6512	if (ioc->malloc_size > MAX_KMALLOC_SIZE)
 6513		return -EINVAL;
 6514	if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD)
 6515		return -EINVAL;
 6516	buff = kcalloc(SG_ENTRIES_IN_CMD, sizeof(char *), GFP_KERNEL);
 6517	if (!buff) {
 6518		status = -ENOMEM;
 6519		goto cleanup1;
 6520	}
 6521	buff_size = kmalloc_array(SG_ENTRIES_IN_CMD, sizeof(int), GFP_KERNEL);
 6522	if (!buff_size) {
 6523		status = -ENOMEM;
 6524		goto cleanup1;
 6525	}
 6526	left = ioc->buf_size;
 6527	data_ptr = ioc->buf;
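      	/* Split the user buffer into chunks of at most malloc_size bytes, one kernel buffer per SG entry. */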
 6528	while (left) {
 6529		sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
 6530		buff_size[sg_used] = sz;
 6531		buff[sg_used] = kmalloc(sz, GFP_KERNEL);
 6532		if (buff[sg_used] == NULL) {
 6533			status = -ENOMEM;
 6534			goto cleanup1;
 6535		}
 6536		if (ioc->Request.Type.Direction & XFER_WRITE) {
 6537			if (copy_from_user(buff[sg_used], data_ptr, sz)) {
 6538				status = -EFAULT;
 6539				goto cleanup1;
 6540			}
 6541		} else
 6542			memset(buff[sg_used], 0, sz);
 6543		left -= sz;
 6544		data_ptr += sz;
 6545		sg_used++;
 6546	}
 6547	c = cmd_alloc(h);
 6548
 6549	c->cmd_type = CMD_IOCTL_PEND;
 6550	c->scsi_cmd = SCSI_CMD_BUSY;
 6551	c->Header.ReplyQueue = 0;
 6552	c->Header.SGList = (u8) sg_used;
 6553	c->Header.SGTotal = cpu_to_le16(sg_used);
 6554	memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
 6555	memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
 6556	if (ioc->buf_size > 0) {
 6557		int i;
 6558		for (i = 0; i < sg_used; i++) {
 6559			temp64 = dma_map_single(&h->pdev->dev, buff[i],
 6560				    buff_size[i], DMA_BIDIRECTIONAL);
 6561			if (dma_mapping_error(&h->pdev->dev,
 6562							(dma_addr_t) temp64)) {
 6563				c->SG[i].Addr = cpu_to_le64(0);
 6564				c->SG[i].Len = cpu_to_le32(0);
 6565				hpsa_pci_unmap(h->pdev, c, i,
 6566					DMA_BIDIRECTIONAL);
 6567				status = -ENOMEM;
 6568				goto cleanup0;
 6569			}
 6570			c->SG[i].Addr = cpu_to_le64(temp64);
 6571			c->SG[i].Len = cpu_to_le32(buff_size[i]);
 6572			c->SG[i].Ext = cpu_to_le32(0);
 6573		}
 6574		c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
 6575	}
 6576	status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
 6577						NO_TIMEOUT);
 6578	if (sg_used)
 6579		hpsa_pci_unmap(h->pdev, c, sg_used, DMA_BIDIRECTIONAL);
 6580	check_ioctl_unit_attention(h, c);
 6581	if (status) {
 6582		status = -EIO;
 6583		goto cleanup0;
 6584	}
 6585
 6586	/* Copy the error information out */
 6587	memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
 6588	if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
 6589		int i;
 6590
 6591		/* Copy the data out of the buffer we created */
 6592		BYTE __user *ptr = ioc->buf;
 6593		for (i = 0; i < sg_used; i++) {
 6594			if (copy_to_user(ptr, buff[i], buff_size[i])) {
 6595				status = -EFAULT;
 6596				goto cleanup0;
 6597			}
 6598			ptr += buff_size[i];
 6599		}
 6600	}
 6601	status = 0;
 6602cleanup0:
 6603	cmd_free(h, c);
 6604cleanup1:
 6605	if (buff) {
 6606		int i;
 6607
 6608		for (i = 0; i < sg_used; i++)
 6609			kfree(buff[i]);
 6610		kfree(buff);
 6611	}
 6612	kfree(buff_size);
 6613	return status;
 6614}
 6615
 6616static void check_ioctl_unit_attention(struct ctlr_info *h,
 6617	struct CommandList *c)
 6618{
 6619	if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
 6620			c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
 6621		(void) check_for_unit_attention(h, c);
 6622}
 6623
 6624/*
 6625 * ioctl
 6626 */
 6627static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd,
 6628		      void __user *argp)
 6629{
 6630	struct ctlr_info *h = sdev_to_hba(dev);
 6631	int rc;
 6632
 6633	switch (cmd) {
 6634	case CCISS_DEREGDISK:
 6635	case CCISS_REGNEWDISK:
 6636	case CCISS_REGNEWD:
 6637		hpsa_scan_start(h->scsi_host);
 6638		return 0;
 6639	case CCISS_GETPCIINFO:
 6640		return hpsa_getpciinfo_ioctl(h, argp);
 6641	case CCISS_GETDRIVVER:
 6642		return hpsa_getdrivver_ioctl(h, argp);
 6643	case CCISS_PASSTHRU: {
 6644		IOCTL_Command_struct iocommand;
 6645
 6646		if (!argp)
 6647			return -EINVAL;
 6648		if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
 6649			return -EFAULT;
 6650		if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
 6651			return -EAGAIN;
 6652		rc = hpsa_passthru_ioctl(h, &iocommand);
 6653		atomic_inc(&h->passthru_cmds_avail);
 6654		if (!rc && copy_to_user(argp, &iocommand, sizeof(iocommand)))
 6655			rc = -EFAULT;
 6656		return rc;
 6657	}
 6658	case CCISS_BIG_PASSTHRU: {
 6659		BIG_IOCTL_Command_struct ioc;
 6660		if (!argp)
 6661			return -EINVAL;
 6662		if (copy_from_user(&ioc, argp, sizeof(ioc)))
 6663			return -EFAULT;
 6664		if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
 6665			return -EAGAIN;
 6666		rc = hpsa_big_passthru_ioctl(h, &ioc);
 6667		atomic_inc(&h->passthru_cmds_avail);
 6668		if (!rc && copy_to_user(argp, &ioc, sizeof(ioc)))
 6669			rc = -EFAULT;
 6670		return rc;
 6671	}
 6672	default:
 6673		return -ENOTTY;
 6674	}
 6675}
 6676
 6677static void hpsa_send_host_reset(struct ctlr_info *h, u8 reset_type)
 6678{
 6679	struct CommandList *c;
 6680
 6681	c = cmd_alloc(h);
 6682
 6683	/* fill_cmd can't fail here, no data buffer to map */
 6684	(void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
 6685		RAID_CTLR_LUNID, TYPE_MSG);
 6686	c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
 6687	c->waiting = NULL;
 6688	enqueue_cmd_and_start_io(h, c);
  6689	/* Don't wait for completion; the reset won't complete.  Don't free
 6690	 * the command either.  This is the last command we will send before
 6691	 * re-initializing everything, so it doesn't matter and won't leak.
 6692	 */
 6693	return;
 6694}
 6695
 6696static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
 6697	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
 6698	int cmd_type)
 6699{
 6700	enum dma_data_direction dir = DMA_NONE;
 6701
 6702	c->cmd_type = CMD_IOCTL_PEND;
 6703	c->scsi_cmd = SCSI_CMD_BUSY;
 6704	c->Header.ReplyQueue = 0;
 6705	if (buff != NULL && size > 0) {
 6706		c->Header.SGList = 1;
 6707		c->Header.SGTotal = cpu_to_le16(1);
 6708	} else {
 6709		c->Header.SGList = 0;
 6710		c->Header.SGTotal = cpu_to_le16(0);
 6711	}
 6712	memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
 6713
 6714	if (cmd_type == TYPE_CMD) {
 6715		switch (cmd) {
 6716		case HPSA_INQUIRY:
 6717			/* are we trying to read a vital product page */
 6718			if (page_code & VPD_PAGE) {
 6719				c->Request.CDB[1] = 0x01;
 6720				c->Request.CDB[2] = (page_code & 0xff);
 6721			}
 6722			c->Request.CDBLen = 6;
 6723			c->Request.type_attr_dir =
 6724				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
 6725			c->Request.Timeout = 0;
 6726			c->Request.CDB[0] = HPSA_INQUIRY;
 6727			c->Request.CDB[4] = size & 0xFF;
 6728			break;
 6729		case RECEIVE_DIAGNOSTIC:
 6730			c->Request.CDBLen = 6;
 6731			c->Request.type_attr_dir =
 6732				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
 6733			c->Request.Timeout = 0;
 6734			c->Request.CDB[0] = cmd;
 6735			c->Request.CDB[1] = 1;
 6736			c->Request.CDB[2] = 1;
 6737			c->Request.CDB[3] = (size >> 8) & 0xFF;
 6738			c->Request.CDB[4] = size & 0xFF;
 6739			break;
 6740		case HPSA_REPORT_LOG:
 6741		case HPSA_REPORT_PHYS:
  6742			/* Talking to the controller, so it's a physical command:
  6743			   mode = 00, target = 0.  Nothing to write.
 6744			 */
 6745			c->Request.CDBLen = 12;
 6746			c->Request.type_attr_dir =
 6747				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
 6748			c->Request.Timeout = 0;
 6749			c->Request.CDB[0] = cmd;
 6750			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
 6751			c->Request.CDB[7] = (size >> 16) & 0xFF;
 6752			c->Request.CDB[8] = (size >> 8) & 0xFF;
 6753			c->Request.CDB[9] = size & 0xFF;
 6754			break;
 6755		case BMIC_SENSE_DIAG_OPTIONS:
 6756			c->Request.CDBLen = 16;
 6757			c->Request.type_attr_dir =
 6758				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
 6759			c->Request.Timeout = 0;
 6760			/* Spec says this should be BMIC_WRITE */
 6761			c->Request.CDB[0] = BMIC_READ;
 6762			c->Request.CDB[6] = BMIC_SENSE_DIAG_OPTIONS;
 6763			break;
 6764		case BMIC_SET_DIAG_OPTIONS:
 6765			c->Request.CDBLen = 16;
 6766			c->Request.type_attr_dir =
 6767					TYPE_ATTR_DIR(cmd_type,
 6768						ATTR_SIMPLE, XFER_WRITE);
 6769			c->Request.Timeout = 0;
 6770			c->Request.CDB[0] = BMIC_WRITE;
 6771			c->Request.CDB[6] = BMIC_SET_DIAG_OPTIONS;
 6772			break;
 6773		case HPSA_CACHE_FLUSH:
 6774			c->Request.CDBLen = 12;
 6775			c->Request.type_attr_dir =
 6776					TYPE_ATTR_DIR(cmd_type,
 6777						ATTR_SIMPLE, XFER_WRITE);
 6778			c->Request.Timeout = 0;
 6779			c->Request.CDB[0] = BMIC_WRITE;
 6780			c->Request.CDB[6] = BMIC_CACHE_FLUSH;
 6781			c->Request.CDB[7] = (size >> 8) & 0xFF;
 6782			c->Request.CDB[8] = size & 0xFF;
 6783			break;
 6784		case TEST_UNIT_READY:
 6785			c->Request.CDBLen = 6;
 6786			c->Request.type_attr_dir =
 6787				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
 6788			c->Request.Timeout = 0;
 6789			break;
 6790		case HPSA_GET_RAID_MAP:
 6791			c->Request.CDBLen = 12;
 6792			c->Request.type_attr_dir =
 6793				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
 6794			c->Request.Timeout = 0;
 6795			c->Request.CDB[0] = HPSA_CISS_READ;
 6796			c->Request.CDB[1] = cmd;
 6797			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
 6798			c->Request.CDB[7] = (size >> 16) & 0xFF;
 6799			c->Request.CDB[8] = (size >> 8) & 0xFF;
 6800			c->Request.CDB[9] = size & 0xFF;
 6801			break;
 6802		case BMIC_SENSE_CONTROLLER_PARAMETERS:
 6803			c->Request.CDBLen = 10;
 6804			c->Request.type_attr_dir =
 6805				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
 6806			c->Request.Timeout = 0;
 6807			c->Request.CDB[0] = BMIC_READ;
 6808			c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
 6809			c->Request.CDB[7] = (size >> 16) & 0xFF;
 6810			c->Request.CDB[8] = (size >> 8) & 0xFF;
 6811			break;
 6812		case BMIC_IDENTIFY_PHYSICAL_DEVICE:
 6813			c->Request.CDBLen = 10;
 6814			c->Request.type_attr_dir =
 6815				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
 6816			c->Request.Timeout = 0;
 6817			c->Request.CDB[0] = BMIC_READ;
 6818			c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
 6819			c->Request.CDB[7] = (size >> 16) & 0xFF;
 6820			c->Request.CDB[8] = (size >> 8) & 0XFF;
 6821			break;
 6822		case BMIC_SENSE_SUBSYSTEM_INFORMATION:
 6823			c->Request.CDBLen = 10;
 6824			c->Request.type_attr_dir =
 6825				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
 6826			c->Request.Timeout = 0;
 6827			c->Request.CDB[0] = BMIC_READ;
 6828			c->Request.CDB[6] = BMIC_SENSE_SUBSYSTEM_INFORMATION;
 6829			c->Request.CDB[7] = (size >> 16) & 0xFF;
 6830			c->Request.CDB[8] = (size >> 8) & 0XFF;
 6831			break;
 6832		case BMIC_SENSE_STORAGE_BOX_PARAMS:
 6833			c->Request.CDBLen = 10;
 6834			c->Request.type_attr_dir =
 6835				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
 6836			c->Request.Timeout = 0;
 6837			c->Request.CDB[0] = BMIC_READ;
 6838			c->Request.CDB[6] = BMIC_SENSE_STORAGE_BOX_PARAMS;
 6839			c->Request.CDB[7] = (size >> 16) & 0xFF;
 6840			c->Request.CDB[8] = (size >> 8) & 0xFF;
 6841			break;
 6842		case BMIC_IDENTIFY_CONTROLLER:
 6843			c->Request.CDBLen = 10;
 6844			c->Request.type_attr_dir =
 6845				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
 6846			c->Request.Timeout = 0;
 6847			c->Request.CDB[0] = BMIC_READ;
 6848			c->Request.CDB[1] = 0;
 6849			c->Request.CDB[2] = 0;
 6850			c->Request.CDB[3] = 0;
 6851			c->Request.CDB[4] = 0;
 6852			c->Request.CDB[5] = 0;
 6853			c->Request.CDB[6] = BMIC_IDENTIFY_CONTROLLER;
 6854			c->Request.CDB[7] = (size >> 16) & 0xFF;
 6855			c->Request.CDB[8] = (size >> 8) & 0xFF;
 6856			c->Request.CDB[9] = 0;
 6857			break;
 6858		default:
 6859		dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
 6860		BUG();
 6861		}
 6862	} else if (cmd_type == TYPE_MSG) {
 6863		switch (cmd) {
 6864
 6865		case  HPSA_PHYS_TARGET_RESET:
 6866			c->Request.CDBLen = 16;
 6867			c->Request.type_attr_dir =
 6868				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
 6869			c->Request.Timeout = 0; /* Don't time out */
 6870			memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
 6871			c->Request.CDB[0] = HPSA_RESET;
 6872			c->Request.CDB[1] = HPSA_TARGET_RESET_TYPE;
 6873			/* Physical target reset needs no control bytes 4-7*/
 6874			c->Request.CDB[4] = 0x00;
 6875			c->Request.CDB[5] = 0x00;
 6876			c->Request.CDB[6] = 0x00;
 6877			c->Request.CDB[7] = 0x00;
 6878			break;
 6879		case  HPSA_DEVICE_RESET_MSG:
 6880			c->Request.CDBLen = 16;
 6881			c->Request.type_attr_dir =
 6882				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
 6883			c->Request.Timeout = 0; /* Don't time out */
 6884			memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
 6885			c->Request.CDB[0] =  cmd;
 6886			c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
 6887			/* If bytes 4-7 are zero, it means reset the */
 6888			/* LunID device */
 6889			c->Request.CDB[4] = 0x00;
 6890			c->Request.CDB[5] = 0x00;
 6891			c->Request.CDB[6] = 0x00;
 6892			c->Request.CDB[7] = 0x00;
 6893			break;
 6894		default:
 6895			dev_warn(&h->pdev->dev, "unknown message type %d\n",
 6896				cmd);
 6897			BUG();
 6898		}
 6899	} else {
 6900		dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
 6901		BUG();
 6902	}
 6903
 6904	switch (GET_DIR(c->Request.type_attr_dir)) {
 6905	case XFER_READ:
 6906		dir = DMA_FROM_DEVICE;
 6907		break;
 6908	case XFER_WRITE:
 6909		dir = DMA_TO_DEVICE;
 6910		break;
 6911	case XFER_NONE:
 6912		dir = DMA_NONE;
 6913		break;
 6914	default:
 6915		dir = DMA_BIDIRECTIONAL;
 6916	}
 6917	if (hpsa_map_one(h->pdev, c, buff, size, dir))
 6918		return -1;
 6919	return 0;
 6920}
 6921
 6922/*
 6923 * Map (physical) PCI mem into (virtual) kernel space
 6924 */
 6925static void __iomem *remap_pci_mem(ulong base, ulong size)
 6926{
 6927	ulong page_base = ((ulong) base) & PAGE_MASK;
 6928	ulong page_offs = ((ulong) base) - page_base;
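 	/*
 	 * ioremap from the start of the containing page and add the offset
 	 * back into the returned pointer, so callers may pass a base address
 	 * that is not page aligned.
 	 */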
 6929	void __iomem *page_remapped = ioremap(page_base,
 6930		page_offs + size);
 6931
 6932	return page_remapped ? (page_remapped + page_offs) : NULL;
 6933}
 6934
 6935static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
 6936{
 6937	return h->access.command_completed(h, q);
 6938}
 6939
 6940static inline bool interrupt_pending(struct ctlr_info *h)
 6941{
 6942	return h->access.intr_pending(h);
 6943}
 6944
 6945static inline long interrupt_not_for_us(struct ctlr_info *h)
 6946{
 6947	return (h->access.intr_pending(h) == 0) ||
 6948		(h->interrupts_enabled == 0);
 6949}
 6950
 6951static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
 6952	u32 raw_tag)
 6953{
 6954	if (unlikely(tag_index >= h->nr_cmds)) {
 6955		dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
 6956		return 1;
 6957	}
 6958	return 0;
 6959}
 6960
 6961static inline void finish_cmd(struct CommandList *c)
 6962{
 6963	dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
 6964	if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
 6965			|| c->cmd_type == CMD_IOACCEL2))
 6966		complete_scsi_command(c);
 6967	else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF)
 6968		complete(c->waiting);
 6969}
 6970
 6971/* process completion of an indexed ("direct lookup") command */
 6972static inline void process_indexed_cmd(struct ctlr_info *h,
 6973	u32 raw_tag)
 6974{
 6975	u32 tag_index;
 6976	struct CommandList *c;
 6977
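 	/*
 	 * The tag carries the command's index in its upper bits; shifting out
 	 * the low DIRECT_LOOKUP_SHIFT bits yields an index straight into
 	 * h->cmd_pool, so no search is needed.
 	 */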
 6978	tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
 6979	if (!bad_tag(h, tag_index, raw_tag)) {
 6980		c = h->cmd_pool + tag_index;
 6981		finish_cmd(c);
 6982	}
 6983}
 6984
 6985/* Some controllers, like p400, will give us one interrupt
 6986 * after a soft reset, even if we turned interrupts off.
 6987 * Only need to check for this in the hpsa_xxx_discard_completions
 6988 * functions.
 6989 */
 6990static int ignore_bogus_interrupt(struct ctlr_info *h)
 6991{
 6992	if (likely(!reset_devices))
 6993		return 0;
 6994
 6995	if (likely(h->interrupts_enabled))
 6996		return 0;
 6997
 6998	dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
 6999		"(known firmware bug.)  Ignoring.\n");
 7000
 7001	return 1;
 7002}
 7003
 7004/*
 7005 * Convert &h->q[x] (passed to interrupt handlers) back to h.
 7006 * Relies on (h->q[x] == x) being true for x such that
 7007 * 0 <= x < MAX_REPLY_QUEUES.
 7008 */
 7009static struct ctlr_info *queue_to_hba(u8 *queue)
 7010{
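 	/*
 	 * h->q[i] holds i, so (queue - *queue) always points back at &h->q[0];
 	 * e.g. if *queue is 2 then queue is &h->q[2], and stepping back 2
 	 * bytes lands on &h->q[0].
 	 */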
 7011	return container_of((queue - *queue), struct ctlr_info, q[0]);
 7012}
 7013
 7014static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
 7015{
 7016	struct ctlr_info *h = queue_to_hba(queue);
 7017	u8 q = *(u8 *) queue;
 7018	u32 raw_tag;
 7019
 7020	if (ignore_bogus_interrupt(h))
 7021		return IRQ_NONE;
 7022
 7023	if (interrupt_not_for_us(h))
 7024		return IRQ_NONE;
 7025	h->last_intr_timestamp = get_jiffies_64();
 7026	while (interrupt_pending(h)) {
 7027		raw_tag = get_next_completion(h, q);
 7028		while (raw_tag != FIFO_EMPTY)
 7029			raw_tag = next_command(h, q);
 7030	}
 7031	return IRQ_HANDLED;
 7032}
 7033
 7034static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
 7035{
 7036	struct ctlr_info *h = queue_to_hba(queue);
 7037	u32 raw_tag;
 7038	u8 q = *(u8 *) queue;
 7039
 7040	if (ignore_bogus_interrupt(h))
 7041		return IRQ_NONE;
 7042
 7043	h->last_intr_timestamp = get_jiffies_64();
 7044	raw_tag = get_next_completion(h, q);
 7045	while (raw_tag != FIFO_EMPTY)
 7046		raw_tag = next_command(h, q);
 7047	return IRQ_HANDLED;
 7048}
 7049
 7050static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
 7051{
 7052	struct ctlr_info *h = queue_to_hba((u8 *) queue);
 7053	u32 raw_tag;
 7054	u8 q = *(u8 *) queue;
 7055
 7056	if (interrupt_not_for_us(h))
 7057		return IRQ_NONE;
 7058	h->last_intr_timestamp = get_jiffies_64();
 7059	while (interrupt_pending(h)) {
 7060		raw_tag = get_next_completion(h, q);
 7061		while (raw_tag != FIFO_EMPTY) {
 7062			process_indexed_cmd(h, raw_tag);
 7063			raw_tag = next_command(h, q);
 7064		}
 7065	}
 7066	return IRQ_HANDLED;
 7067}
 7068
 7069static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
 7070{
 7071	struct ctlr_info *h = queue_to_hba(queue);
 7072	u32 raw_tag;
 7073	u8 q = *(u8 *) queue;
 7074
 7075	h->last_intr_timestamp = get_jiffies_64();
 7076	raw_tag = get_next_completion(h, q);
 7077	while (raw_tag != FIFO_EMPTY) {
 7078		process_indexed_cmd(h, raw_tag);
 7079		raw_tag = next_command(h, q);
 7080	}
 7081	return IRQ_HANDLED;
 7082}
 7083
 7084/* Send a message CDB to the firmware. Careful, this only works
 7085 * in simple mode, not performant mode due to the tag lookup.
 7086 * We only ever use this immediately after a controller reset.
 7087 */
 7088static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
 7089			unsigned char type)
 7090{
 7091	struct Command {
 7092		struct CommandListHeader CommandHeader;
 7093		struct RequestBlock Request;
 7094		struct ErrDescriptor ErrorDescriptor;
 7095	};
 7096	struct Command *cmd;
 7097	static const size_t cmd_sz = sizeof(*cmd) +
 7098					sizeof(cmd->ErrorDescriptor);
 7099	dma_addr_t paddr64;
 7100	__le32 paddr32;
 7101	u32 tag;
 7102	void __iomem *vaddr;
 7103	int i, err;
 7104
 7105	vaddr = pci_ioremap_bar(pdev, 0);
 7106	if (vaddr == NULL)
 7107		return -ENOMEM;
 7108
 7109	/* The Inbound Post Queue only accepts 32-bit physical addresses for the
 7110	 * CCISS commands, so they must be allocated from the lower 4GiB of
 7111	 * memory.
 7112	 */
 7113	err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 7114	if (err) {
 7115		iounmap(vaddr);
 7116		return err;
 7117	}
 7118
 7119	cmd = dma_alloc_coherent(&pdev->dev, cmd_sz, &paddr64, GFP_KERNEL);
 7120	if (cmd == NULL) {
 7121		iounmap(vaddr);
 7122		return -ENOMEM;
 7123	}
 7124
 7125	/* This must fit, because of the 32-bit consistent DMA mask.  Also,
 7126	 * although there's no guarantee, we assume that the address is at
 7127	 * least 4-byte aligned (most likely, it's page-aligned).
 7128	 */
 7129	paddr32 = cpu_to_le32(paddr64);
 7130
 7131	cmd->CommandHeader.ReplyQueue = 0;
 7132	cmd->CommandHeader.SGList = 0;
 7133	cmd->CommandHeader.SGTotal = cpu_to_le16(0);
 7134	cmd->CommandHeader.tag = cpu_to_le64(paddr64);
 7135	memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
 7136
 7137	cmd->Request.CDBLen = 16;
 7138	cmd->Request.type_attr_dir =
 7139			TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
 7140	cmd->Request.Timeout = 0; /* Don't time out */
 7141	cmd->Request.CDB[0] = opcode;
 7142	cmd->Request.CDB[1] = type;
 7143	memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
 7144	cmd->ErrorDescriptor.Addr =
 7145			cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
 7146	cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));
 7147
 7148	writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);
 7149
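 	/*
 	 * The command's bus address doubles as its tag, so poll the outbound
 	 * reply register until the controller posts that address back
 	 * (masking off the low error/status bits).
 	 */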
 7150	for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
 7151		tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
 7152		if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
 7153			break;
 7154		msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
 7155	}
 7156
 7157	iounmap(vaddr);
 7158
 7159	/* we leak the DMA buffer here ... no choice since the controller could
 7160	 *  still complete the command.
 7161	 */
 7162	if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
 7163		dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
 7164			opcode, type);
 7165		return -ETIMEDOUT;
 7166	}
 7167
 7168	dma_free_coherent(&pdev->dev, cmd_sz, cmd, paddr64);
 7169
 7170	if (tag & HPSA_ERROR_BIT) {
 7171		dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
 7172			opcode, type);
 7173		return -EIO;
 7174	}
 7175
 7176	dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
 7177		opcode, type);
 7178	return 0;
 7179}
 7180
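 /* Message opcode 3 is the controller no-op, used after a reset to verify
  * that the controller is responding.
  */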
 7181#define hpsa_noop(p) hpsa_message(p, 3, 0)
 7182
 7183static int hpsa_controller_hard_reset(struct pci_dev *pdev,
 7184	void __iomem *vaddr, u32 use_doorbell)
 7185{
 7186
 7187	if (use_doorbell) {
 7188		/* For everything after the P600, the PCI power state method
 7189		 * of resetting the controller doesn't work, so we have this
 7190		 * other way using the doorbell register.
 7191		 */
 7192		dev_info(&pdev->dev, "using doorbell to reset controller\n");
 7193		writel(use_doorbell, vaddr + SA5_DOORBELL);
 7194
 7195		/* PMC hardware guys tell us we need a 10 second delay after
 7196		 * doorbell reset and before any attempt to talk to the board
 7197		 * at all to ensure that this actually works and doesn't fall
 7198		 * over in some weird corner cases.
 7199		 */
 7200		msleep(10000);
 7201	} else { /* Try to do it the PCI power state way */
 7202
 7203		/* Quoting from the Open CISS Specification: "The Power
 7204		 * Management Control/Status Register (CSR) controls the power
 7205		 * state of the device.  The normal operating state is D0,
 7206		 * CSR=00h.  The software off state is D3, CSR=03h.  To reset
 7207		 * the controller, place the interface device in D3 then to D0,
 7208		 * this causes a secondary PCI reset which will reset the
 7209		 * controller." */
 7210
 7211		int rc = 0;
 7212
 7213		dev_info(&pdev->dev, "using PCI PM to reset controller\n");
 7214
 7215		/* enter the D3hot power management state */
 7216		rc = pci_set_power_state(pdev, PCI_D3hot);
 7217		if (rc)
 7218			return rc;
 7219
 7220		msleep(500);
 7221
 7222		/* enter the D0 power management state */
 7223		rc = pci_set_power_state(pdev, PCI_D0);
 7224		if (rc)
 7225			return rc;
 7226
 7227		/*
 7228		 * The P600 requires a small delay when changing states.
 7229		 * Otherwise we may think the board did not reset and we bail.
 7230		 * This is for kdump only and is particular to the P600.
 7231		 */
 7232		msleep(500);
 7233	}
 7234	return 0;
 7235}
 7236
 7237static void init_driver_version(char *driver_version, int len)
 7238{
 7239	memset(driver_version, 0, len);
 7240	strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
 7241}
 7242
 7243static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
 7244{
 7245	char *driver_version;
 7246	int i, size = sizeof(cfgtable->driver_version);
 7247
 7248	driver_version = kmalloc(size, GFP_KERNEL);
 7249	if (!driver_version)
 7250		return -ENOMEM;
 7251
 7252	init_driver_version(driver_version, size);
 7253	for (i = 0; i < size; i++)
 7254		writeb(driver_version[i], &cfgtable->driver_version[i]);
 7255	kfree(driver_version);
 7256	return 0;
 7257}
 7258
 7259static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
 7260					  unsigned char *driver_ver)
 7261{
 7262	int i;
 7263
 7264	for (i = 0; i < sizeof(cfgtable->driver_version); i++)
 7265		driver_ver[i] = readb(&cfgtable->driver_version[i]);
 7266}
 7267
 7268static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
 7269{
 7270
 7271	char *driver_ver, *old_driver_ver;
 7272	int rc, size = sizeof(cfgtable->driver_version);
 7273
 7274	old_driver_ver = kmalloc_array(2, size, GFP_KERNEL);
 7275	if (!old_driver_ver)
 7276		return -ENOMEM;
 7277	driver_ver = old_driver_ver + size;
 7278
 7279	/* After a reset, the 32 bytes of "driver version" in the cfgtable
 7280	 * should have been changed, otherwise we know the reset failed.
 7281	 */
 7282	init_driver_version(old_driver_ver, size);
 7283	read_driver_ver_from_cfgtable(cfgtable, driver_ver);
 7284	rc = !memcmp(driver_ver, old_driver_ver, size);
 7285	kfree(old_driver_ver);
 7286	return rc;
 7287}
 7288/* This does a hard reset of the controller using PCI power management
 7289 * states or using the doorbell register.
 7290 */
 7291static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id)
 7292{
 7293	u64 cfg_offset;
 7294	u32 cfg_base_addr;
 7295	u64 cfg_base_addr_index;
 7296	void __iomem *vaddr;
 7297	unsigned long paddr;
 7298	u32 misc_fw_support;
 7299	int rc;
 7300	struct CfgTable __iomem *cfgtable;
 7301	u32 use_doorbell;
 7302	u16 command_register;
 7303
 7304	/* For controllers as old as the P600, this is very nearly
 7305	 * the same thing as
 7306	 *
 7307	 * pci_save_state(pci_dev);
 7308	 * pci_set_power_state(pci_dev, PCI_D3hot);
 7309	 * pci_set_power_state(pci_dev, PCI_D0);
 7310	 * pci_restore_state(pci_dev);
 7311	 *
 7312	 * For controllers newer than the P600, the pci power state
 7313	 * method of resetting doesn't work so we have another way
 7314	 * using the doorbell register.
 7315	 */
 7316
 7317	if (!ctlr_is_resettable(board_id)) {
 7318		dev_warn(&pdev->dev, "Controller not resettable\n");
 7319		return -ENODEV;
 7320	}
 7321
 7322	/* if controller is soft- but not hard resettable... */
 7323	if (!ctlr_is_hard_resettable(board_id))
 7324		return -ENOTSUPP; /* try soft reset later. */
 7325
 7326	/* Save the PCI command register */
 7327	pci_read_config_word(pdev, 4, &command_register);
 7328	pci_save_state(pdev);
 7329
 7330	/* find the first memory BAR, so we can find the cfg table */
 7331	rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
 7332	if (rc)
 7333		return rc;
 7334	vaddr = remap_pci_mem(paddr, 0x250);
 7335	if (!vaddr)
 7336		return -ENOMEM;
 7337
 7338	/* find cfgtable in order to check if reset via doorbell is supported */
 7339	rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
 7340					&cfg_base_addr_index, &cfg_offset);
 7341	if (rc)
 7342		goto unmap_vaddr;
 7343	cfgtable = remap_pci_mem(pci_resource_start(pdev,
 7344		       cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
 7345	if (!cfgtable) {
 7346		rc = -ENOMEM;
 7347		goto unmap_vaddr;
 7348	}
 7349	rc = write_driver_ver_to_cfgtable(cfgtable);
 7350	if (rc)
 7351		goto unmap_cfgtable;
 7352
 7353	/* If reset via doorbell register is supported, use that.
 7354	 * There are two such methods.  Favor the newest method.
 7355	 */
 7356	misc_fw_support = readl(&cfgtable->misc_fw_support);
 7357	use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
 7358	if (use_doorbell) {
 7359		use_doorbell = DOORBELL_CTLR_RESET2;
 7360	} else {
 7361		use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
 7362		if (use_doorbell) {
 7363			dev_warn(&pdev->dev,
 7364				"Soft reset not supported. Firmware update is required.\n");
 7365			rc = -ENOTSUPP; /* try soft reset */
 7366			goto unmap_cfgtable;
 7367		}
 7368	}
 7369
 7370	rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
 7371	if (rc)
 7372		goto unmap_cfgtable;
 7373
 7374	pci_restore_state(pdev);
 7375	pci_write_config_word(pdev, 4, command_register);
 7376
 7377	/* Some devices (notably the HP Smart Array 5i Controller)
 7378	   need a little pause here */
 7379	msleep(HPSA_POST_RESET_PAUSE_MSECS);
 7380
 7381	rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
 7382	if (rc) {
 7383		dev_warn(&pdev->dev,
 7384			"Failed waiting for board to become ready after hard reset\n");
 7385		goto unmap_cfgtable;
 7386	}
 7387
 7388	rc = controller_reset_failed(vaddr);
 7389	if (rc < 0)
 7390		goto unmap_cfgtable;
 7391	if (rc) {
 7392		dev_warn(&pdev->dev, "Unable to successfully reset "
 7393			"controller. Will try soft reset.\n");
 7394		rc = -ENOTSUPP;
 7395	} else {
 7396		dev_info(&pdev->dev, "board ready after hard reset.\n");
 7397	}
 7398
 7399unmap_cfgtable:
 7400	iounmap(cfgtable);
 7401
 7402unmap_vaddr:
 7403	iounmap(vaddr);
 7404	return rc;
 7405}
 7406
 7407/*
 7408 *  We cannot read the structure directly; for portability we must use
 7409 *  the I/O accessor functions.
 7410 *  This is for debug only.
 7411 */
 7412static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
 7413{
 7414#ifdef HPSA_DEBUG
 7415	int i;
 7416	char temp_name[17];
 7417
 7418	dev_info(dev, "Controller Configuration information\n");
 7419	dev_info(dev, "------------------------------------\n");
 7420	for (i = 0; i < 4; i++)
 7421		temp_name[i] = readb(&(tb->Signature[i]));
 7422	temp_name[4] = '\0';
 7423	dev_info(dev, "   Signature = %s\n", temp_name);
 7424	dev_info(dev, "   Spec Number = %d\n", readl(&(tb->SpecValence)));
 7425	dev_info(dev, "   Transport methods supported = 0x%x\n",
 7426	       readl(&(tb->TransportSupport)));
 7427	dev_info(dev, "   Transport methods active = 0x%x\n",
 7428	       readl(&(tb->TransportActive)));
 7429	dev_info(dev, "   Requested transport Method = 0x%x\n",
 7430	       readl(&(tb->HostWrite.TransportRequest)));
 7431	dev_info(dev, "   Coalesce Interrupt Delay = 0x%x\n",
 7432	       readl(&(tb->HostWrite.CoalIntDelay)));
 7433	dev_info(dev, "   Coalesce Interrupt Count = 0x%x\n",
 7434	       readl(&(tb->HostWrite.CoalIntCount)));
 7435	dev_info(dev, "   Max outstanding commands = %d\n",
 7436	       readl(&(tb->CmdsOutMax)));
 7437	dev_info(dev, "   Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
 7438	for (i = 0; i < 16; i++)
 7439		temp_name[i] = readb(&(tb->ServerName[i]));
 7440	temp_name[16] = '\0';
 7441	dev_info(dev, "   Server Name = %s\n", temp_name);
 7442	dev_info(dev, "   Heartbeat Counter = 0x%x\n\n\n",
 7443		readl(&(tb->HeartBeat)));
 7444#endif				/* HPSA_DEBUG */
 7445}
 7446
 7447static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
 7448{
 7449	int i, offset, mem_type, bar_type;
 7450
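 	/*
 	 * Walk the BARs, accumulating how much config space each one consumes
 	 * (4 bytes for I/O and 32-bit memory BARs, 8 bytes for 64-bit memory
 	 * BARs), until the accumulated offset matches the requested BAR
 	 * register address.
 	 */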
 7451	if (pci_bar_addr == PCI_BASE_ADDRESS_0)	/* looking for BAR zero? */
 7452		return 0;
 7453	offset = 0;
 7454	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
 7455		bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
 7456		if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
 7457			offset += 4;
 7458		else {
 7459			mem_type = pci_resource_flags(pdev, i) &
 7460			    PCI_BASE_ADDRESS_MEM_TYPE_MASK;
 7461			switch (mem_type) {
 7462			case PCI_BASE_ADDRESS_MEM_TYPE_32:
 7463			case PCI_BASE_ADDRESS_MEM_TYPE_1M:
 7464				offset += 4;	/* 32 bit */
 7465				break;
 7466			case PCI_BASE_ADDRESS_MEM_TYPE_64:
 7467				offset += 8;
 7468				break;
 7469			default:	/* reserved in PCI 2.2 */
 7470				dev_warn(&pdev->dev,
 7471				       "base address is invalid\n");
 7472				return -1;
 7473			}
 7474		}
 7475		if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
 7476			return i + 1;
 7477	}
 7478	return -1;
 7479}
 7480
 7481static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
 7482{
 7483	pci_free_irq_vectors(h->pdev);
 7484	h->msix_vectors = 0;
 7485}
 7486
 7487static void hpsa_setup_reply_map(struct ctlr_info *h)
 7488{
 7489	const struct cpumask *mask;
 7490	unsigned int queue, cpu;
 7491
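 	/*
 	 * Record which reply queue's MSI-X affinity mask covers each CPU, so
 	 * commands submitted on that CPU can target a reply queue whose
 	 * interrupt is local to it; if any queue lacks affinity info, fall
 	 * back to queue 0 for all CPUs.
 	 */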
 7492	for (queue = 0; queue < h->msix_vectors; queue++) {
 7493		mask = pci_irq_get_affinity(h->pdev, queue);
 7494		if (!mask)
 7495			goto fallback;
 7496
 7497		for_each_cpu(cpu, mask)
 7498			h->reply_map[cpu] = queue;
 7499	}
 7500	return;
 7501
 7502fallback:
 7503	for_each_possible_cpu(cpu)
 7504		h->reply_map[cpu] = 0;
 7505}
 7506
 7507/* If MSI/MSI-X is supported by the kernel we will try to enable it on
 7508 * controllers that are capable. If not, we use legacy INTx mode.
 7509 */
 7510static int hpsa_interrupt_mode(struct ctlr_info *h)
 7511{
 7512	unsigned int flags = PCI_IRQ_LEGACY;
 7513	int ret;
 7514
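 	/*
 	 * Strategy: try MSI-X with up to MAX_REPLY_QUEUES affinity-managed
 	 * vectors; on failure fall back to a single MSI vector, then to
 	 * legacy INTx.  Boards on the quirk list below skip MSI/MSI-X
 	 * entirely and use INTx.
 	 */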
 7515	/* Some boards advertise MSI but don't really support it */
 7516	switch (h->board_id) {
 7517	case 0x40700E11:
 7518	case 0x40800E11:
 7519	case 0x40820E11:
 7520	case 0x40830E11:
 7521		break;
 7522	default:
 7523		ret = pci_alloc_irq_vectors(h->pdev, 1, MAX_REPLY_QUEUES,
 7524				PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
 7525		if (ret > 0) {
 7526			h->msix_vectors = ret;
 7527			return 0;
 7528		}
 7529
 7530		flags |= PCI_IRQ_MSI;
 7531		break;
 7532	}
 7533
 7534	ret = pci_alloc_irq_vectors(h->pdev, 1, 1, flags);
 7535	if (ret < 0)
 7536		return ret;
 7537	return 0;
 7538}
 7539
 7540static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id,
 7541				bool *legacy_board)
 7542{
 7543	int i;
 7544	u32 subsystem_vendor_id, subsystem_device_id;
 7545
 7546	subsystem_vendor_id = pdev->subsystem_vendor;
 7547	subsystem_device_id = pdev->subsystem_device;
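 	/* The board ID packs the PCI subsystem device ID into the upper 16
 	 * bits and the subsystem vendor ID into the lower 16 bits.
 	 */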
 7548	*board_id = ((subsystem_device_id << 16) & 0xffff0000) |
 7549		    subsystem_vendor_id;
 7550
 7551	if (legacy_board)
 7552		*legacy_board = false;
 7553	for (i = 0; i < ARRAY_SIZE(products); i++)
 7554		if (*board_id == products[i].board_id) {
 7555			if (products[i].access != &SA5A_access &&
 7556			    products[i].access != &SA5B_access)
 7557				return i;
 7558			dev_warn(&pdev->dev,
 7559				 "legacy board ID: 0x%08x\n",
 7560				 *board_id);
 7561			if (legacy_board)
 7562				*legacy_board = true;
 7563			return i;
 7564		}
 7565
 7566	dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x\n", *board_id);
 7567	if (legacy_board)
 7568		*legacy_board = true;
 7569	return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
 7570}
 7571
 7572static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
 7573				    unsigned long *memory_bar)
 7574{
 7575	int i;
 7576
 7577	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
 7578		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
 7579			/* addressing mode bits already removed */
 7580			*memory_bar = pci_resource_start(pdev, i);
 7581			dev_dbg(&pdev->dev, "memory BAR = %lx\n",
 7582				*memory_bar);
 7583			return 0;
 7584		}
 7585	dev_warn(&pdev->dev, "no memory BAR found\n");
 7586	return -ENODEV;
 7587}
 7588
 7589static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
 7590				     int wait_for_ready)
 7591{
 7592	int i, iterations;
 7593	u32 scratchpad;
 7594	if (wait_for_ready)
 7595		iterations = HPSA_BOARD_READY_ITERATIONS;
 7596	else
 7597		iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
 7598
 7599	for (i = 0; i < iterations; i++) {
 7600		scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
 7601		if (wait_for_ready) {
 7602			if (scratchpad == HPSA_FIRMWARE_READY)
 7603				return 0;
 7604		} else {
 7605			if (scratchpad != HPSA_FIRMWARE_READY)
 7606				return 0;
 7607		}
 7608		msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
 7609	}
 7610	dev_warn(&pdev->dev, "board not ready, timed out.\n");
 7611	return -ENODEV;
 7612}
 7613
 7614static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
 7615			       u32 *cfg_base_addr, u64 *cfg_base_addr_index,
 7616			       u64 *cfg_offset)
 7617{
 7618	*cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
 7619	*cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
 7620	*cfg_base_addr &= (u32) 0x0000ffff;
 7621	*cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
 7622	if (*cfg_base_addr_index == -1) {
 7623		dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
 7624		return -ENODEV;
 7625	}
 7626	return 0;
 7627}
 7628
 7629static void hpsa_free_cfgtables(struct ctlr_info *h)
 7630{
 7631	if (h->transtable) {
 7632		iounmap(h->transtable);
 7633		h->transtable = NULL;
 7634	}
 7635	if (h->cfgtable) {
 7636		iounmap(h->cfgtable);
 7637		h->cfgtable = NULL;
 7638	}
 7639}
 7640
 7641/* Find and map CISS config table and transfer table;
 7642 * several items must be unmapped (freed) later.
 7643 */
 7644static int hpsa_find_cfgtables(struct ctlr_info *h)
 7645{
 7646	u64 cfg_offset;
 7647	u32 cfg_base_addr;
 7648	u64 cfg_base_addr_index;
 7649	u32 trans_offset;
 7650	int rc;
 7651
 7652	rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
 7653		&cfg_base_addr_index, &cfg_offset);
 7654	if (rc)
 7655		return rc;
 7656	h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
 7657		       cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
 7658	if (!h->cfgtable) {
 7659		dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
 7660		return -ENOMEM;
 7661	}
 7662	rc = write_driver_ver_to_cfgtable(h->cfgtable);
 7663	if (rc)
 7664		return rc;
 7665	/* Find performant mode table. */
 7666	trans_offset = readl(&h->cfgtable->TransMethodOffset);
 7667	h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
 7668				cfg_base_addr_index)+cfg_offset+trans_offset,
 7669				sizeof(*h->transtable));
 7670	if (!h->transtable) {
 7671		dev_err(&h->pdev->dev, "Failed mapping transfer table\n");
 7672		hpsa_free_cfgtables(h);
 7673		return -ENOMEM;
 7674	}
 7675	return 0;
 7676}
 7677
 7678static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
 7679{
 7680#define MIN_MAX_COMMANDS 16
 7681	BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS);
 7682
 7683	h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands);
 7684
 7685	/* Limit commands in memory limited kdump scenario. */
 7686	if (reset_devices && h->max_commands > 32)
 7687		h->max_commands = 32;
 7688
 7689	if (h->max_commands < MIN_MAX_COMMANDS) {
 7690		dev_warn(&h->pdev->dev,
 7691			"Controller reports max supported commands of %d. Using %d instead. Ensure that firmware is up to date.\n",
 7692			h->max_commands,
 7693			MIN_MAX_COMMANDS);
 7694		h->max_commands = MIN_MAX_COMMANDS;
 7695	}
 7696}
 7697
 7698/* If the controller reports that the total max sg entries is greater than 512,
 7699 * then we know that chained SG blocks work.  (Original smart arrays did not
 7700 * support chained SG blocks and would return zero for max sg entries.)
 7701 */
 7702static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
 7703{
 7704	return h->maxsgentries > 512;
 7705}
 7706
 7707/* Interrogate the hardware for some limits:
 7708 * max commands, max SG elements without chaining, and with chaining,
 7709 * SG chain block size, etc.
 7710 */
 7711static void hpsa_find_board_params(struct ctlr_info *h)
 7712{
 7713	hpsa_get_max_perf_mode_cmds(h);
 7714	h->nr_cmds = h->max_commands;
 7715	h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
 7716	h->fw_support = readl(&(h->cfgtable->misc_fw_support));
 7717	if (hpsa_supports_chained_sg_blocks(h)) {
 7718		/* Limit in-command s/g elements to 32 to save DMA'able memory. */
 7719		h->max_cmd_sg_entries = 32;
 7720		h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
 7721		h->maxsgentries--; /* save one for chain pointer */
 7722	} else {
 7723		/*
 7724		 * Original smart arrays supported at most 31 s/g entries
 7725		 * embedded inline in the command (trying to use more
 7726		 * would lock up the controller)
 7727		 */
 7728		h->max_cmd_sg_entries = 31;
 7729		h->maxsgentries = 31; /* default to traditional values */
 7730		h->chainsize = 0;
 7731	}
 7732
 7733	/* Find out what task management functions are supported and cache */
 7734	h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
 7735	if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
 7736		dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
 7737	if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
 7738		dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
 7739	if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags))
 7740		dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n");
 7741}
 7742
 7743static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
 7744{
 7745	if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
 7746		dev_err(&h->pdev->dev, "not a valid CISS config table\n");
 7747		return false;
 7748	}
 7749	return true;
 7750}
 7751
 7752static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
 7753{
 7754	u32 driver_support;
 7755
 7756	driver_support = readl(&(h->cfgtable->driver_support));
 7757	/* Need to enable prefetch in the SCSI core for 6400 in x86 */
 7758#ifdef CONFIG_X86
 7759	driver_support |= ENABLE_SCSI_PREFETCH;
 7760#endif
 7761	driver_support |= ENABLE_UNIT_ATTN;
 7762	writel(driver_support, &(h->cfgtable->driver_support));
 7763}
 7764
 7765/* Disable DMA prefetch for the P600.  Otherwise an ASIC bug may result
 7766 * in a prefetch beyond physical memory.
 7767 */
 7768static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
 7769{
 7770	u32 dma_prefetch;
 7771
 7772	if (h->board_id != 0x3225103C)
 7773		return;
 7774	dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
 7775	dma_prefetch |= 0x8000;
 7776	writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
 7777}
 7778
 7779static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
 7780{
 7781	int i;
 7782	u32 doorbell_value;
 7783	unsigned long flags;
 7784	/* wait until the clear_event_notify bit 6 is cleared by controller. */
 7785	for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) {
 7786		spin_lock_irqsave(&h->lock, flags);
 7787		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
 7788		spin_unlock_irqrestore(&h->lock, flags);
 7789		if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
 7790			goto done;
 7791		/* delay and try again */
 7792		msleep(CLEAR_EVENT_WAIT_INTERVAL);
 7793	}
 7794	return -ENODEV;
 7795done:
 7796	return 0;
 7797}
 7798
 7799static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
 7800{
 7801	int i;
 7802	u32 doorbell_value;
 7803	unsigned long flags;
 7804
 7805	/* Under certain very rare conditions, this can take a while.
 7806	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
 7807	 * as we enter this code.)
 7808	 */
 7809	for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
 7810		if (h->remove_in_progress)
 7811			goto done;
 7812		spin_lock_irqsave(&h->lock, flags);
 7813		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
 7814		spin_unlock_irqrestore(&h->lock, flags);
 7815		if (!(doorbell_value & CFGTBL_ChangeReq))
 7816			goto done;
 7817		/* delay and try again */
 7818		msleep(MODE_CHANGE_WAIT_INTERVAL);
 7819	}
 7820	return -ENODEV;
 7821done:
 7822	return 0;
 7823}
 7824
 7825/* return -ENODEV or other reason on error, 0 on success */
 7826static int hpsa_enter_simple_mode(struct ctlr_info *h)
 7827{
 7828	u32 trans_support;
 7829
 7830	trans_support = readl(&(h->cfgtable->TransportSupport));
 7831	if (!(trans_support & SIMPLE_MODE))
 7832		return -ENOTSUPP;
 7833
 7834	h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
 7835
 7836	/* Update the field, and then ring the doorbell */
 7837	writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
 7838	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
 7839	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
 7840	if (hpsa_wait_for_mode_change_ack(h))
 7841		goto error;
 7842	print_cfg_table(&h->pdev->dev, h->cfgtable);
 7843	if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
 7844		goto error;
 7845	h->transMethod = CFGTBL_Trans_Simple;
 7846	return 0;
 7847error:
 7848	dev_err(&h->pdev->dev, "failed to enter simple mode\n");
 7849	return -ENODEV;
 7850}
 7851
 7852/* free items allocated or mapped by hpsa_pci_init */
 7853static void hpsa_free_pci_init(struct ctlr_info *h)
 7854{
 7855	hpsa_free_cfgtables(h);			/* pci_init 4 */
 7856	iounmap(h->vaddr);			/* pci_init 3 */
 7857	h->vaddr = NULL;
 7858	hpsa_disable_interrupt_mode(h);		/* pci_init 2 */
 7859	/*
 7860	 * call pci_disable_device before pci_release_regions per
 7861	 * Documentation/driver-api/pci/pci.rst
 7862	 */
 7863	pci_disable_device(h->pdev);		/* pci_init 1 */
 7864	pci_release_regions(h->pdev);		/* pci_init 2 */
 7865}
 7866
 7867/* several items must be freed later */
 7868static int hpsa_pci_init(struct ctlr_info *h)
 7869{
 7870	int prod_index, err;
 7871	bool legacy_board;
 7872
 7873	prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id, &legacy_board);
 7874	if (prod_index < 0)
 7875		return prod_index;
 7876	h->product_name = products[prod_index].product_name;
 7877	h->access = *(products[prod_index].access);
 7878	h->legacy_board = legacy_board;
 7879	pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
 7880			       PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
 7881
 7882	err = pci_enable_device(h->pdev);
 7883	if (err) {
 7884		dev_err(&h->pdev->dev, "failed to enable PCI device\n");
 7885		pci_disable_device(h->pdev);
 7886		return err;
 7887	}
 7888
 7889	err = pci_request_regions(h->pdev, HPSA);
 7890	if (err) {
 7891		dev_err(&h->pdev->dev,
 7892			"failed to obtain PCI resources\n");
 7893		pci_disable_device(h->pdev);
 7894		return err;
 7895	}
 7896
 7897	pci_set_master(h->pdev);
 7898
 7899	err = hpsa_interrupt_mode(h);
 7900	if (err)
 7901		goto clean1;
 7902
 7903	/* setup mapping between CPU and reply queue */
 7904	hpsa_setup_reply_map(h);
 7905
 7906	err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
 7907	if (err)
 7908		goto clean2;	/* intmode+region, pci */
 7909	h->vaddr = remap_pci_mem(h->paddr, 0x250);
 7910	if (!h->vaddr) {
 7911		dev_err(&h->pdev->dev, "failed to remap PCI mem\n");
 7912		err = -ENOMEM;
 7913		goto clean2;	/* intmode+region, pci */
 7914	}
 7915	err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
 7916	if (err)
 7917		goto clean3;	/* vaddr, intmode+region, pci */
 7918	err = hpsa_find_cfgtables(h);
 7919	if (err)
 7920		goto clean3;	/* vaddr, intmode+region, pci */
 7921	hpsa_find_board_params(h);
 7922
 7923	if (!hpsa_CISS_signature_present(h)) {
 7924		err = -ENODEV;
 7925		goto clean4;	/* cfgtables, vaddr, intmode+region, pci */
 7926	}
 7927	hpsa_set_driver_support_bits(h);
 7928	hpsa_p600_dma_prefetch_quirk(h);
 7929	err = hpsa_enter_simple_mode(h);
 7930	if (err)
 7931		goto clean4;	/* cfgtables, vaddr, intmode+region, pci */
 7932	return 0;
 7933
 7934clean4:	/* cfgtables, vaddr, intmode+region, pci */
 7935	hpsa_free_cfgtables(h);
 7936clean3:	/* vaddr, intmode+region, pci */
 7937	iounmap(h->vaddr);
 7938	h->vaddr = NULL;
 7939clean2:	/* intmode+region, pci */
 7940	hpsa_disable_interrupt_mode(h);
 7941clean1:
 7942	/*
 7943	 * call pci_disable_device before pci_release_regions per
 7944	 * Documentation/driver-api/pci/pci.rst
 7945	 */
 7946	pci_disable_device(h->pdev);
 7947	pci_release_regions(h->pdev);
 7948	return err;
 7949}
 7950
 7951static void hpsa_hba_inquiry(struct ctlr_info *h)
 7952{
 7953	int rc;
 7954
 7955#define HBA_INQUIRY_BYTE_COUNT 64
 7956	h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
 7957	if (!h->hba_inquiry_data)
 7958		return;
 7959	rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
 7960		h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
 7961	if (rc != 0) {
 7962		kfree(h->hba_inquiry_data);
 7963		h->hba_inquiry_data = NULL;
 7964	}
 7965}
 7966
 7967static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id)
 7968{
 7969	int rc, i;
 7970	void __iomem *vaddr;
 7971
 7972	if (!reset_devices)
 7973		return 0;
 7974
 7975	/* The kdump kernel is loading and we don't know what state the
 7976	 * PCI interface is in. dev->enable_cnt is zero, so we call
 7977	 * enable+disable, wait a while, and switch it on.
 7978	 */
 7979	rc = pci_enable_device(pdev);
 7980	if (rc) {
 7981		dev_warn(&pdev->dev, "Failed to enable PCI device\n");
 7982		return -ENODEV;
 7983	}
 7984	pci_disable_device(pdev);
 7985	msleep(260);			/* a randomly chosen number */
 7986	rc = pci_enable_device(pdev);
 7987	if (rc) {
 7988		dev_warn(&pdev->dev, "failed to enable device.\n");
 7989		return -ENODEV;
 7990	}
 7991
 7992	pci_set_master(pdev);
 7993
 7994	vaddr = pci_ioremap_bar(pdev, 0);
 7995	if (vaddr == NULL) {
 7996		rc = -ENOMEM;
 7997		goto out_disable;
 7998	}
 7999	writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
 8000	iounmap(vaddr);
 8001
 8002	/* Reset the controller with a PCI power-cycle or via doorbell */
 8003	rc = hpsa_kdump_hard_reset_controller(pdev, board_id);
 8004
 8005	/* -ENOTSUPP here means we cannot reset the controller
 8006	 * but it's already (and still) up and running in
 8007	 * "performant mode".  Or, it might be 640x, which can't reset
 8008	 * due to concerns about shared bbwc between 6402/6404 pair.
 8009	 */
 8010	if (rc)
 8011		goto out_disable;
 8012
 8013	/* Now try to get the controller to respond to a no-op */
 8014	dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
 8015	for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
 8016		if (hpsa_noop(pdev) == 0)
 8017			break;
 8018		else
 8019			dev_warn(&pdev->dev, "no-op failed%s\n",
 8020					(i < HPSA_POST_RESET_NOOP_RETRIES - 1 ? "; re-trying" : ""));
 8021	}
 8022
 8023out_disable:
 8024
 8025	pci_disable_device(pdev);
 8026	return rc;
 8027}
 8028
 8029static void hpsa_free_cmd_pool(struct ctlr_info *h)
 8030{
 8031	bitmap_free(h->cmd_pool_bits);
 8032	h->cmd_pool_bits = NULL;
 8033	if (h->cmd_pool) {
 8034		dma_free_coherent(&h->pdev->dev,
 8035				h->nr_cmds * sizeof(struct CommandList),
 8036				h->cmd_pool,
 8037				h->cmd_pool_dhandle);
 8038		h->cmd_pool = NULL;
 8039		h->cmd_pool_dhandle = 0;
 8040	}
 8041	if (h->errinfo_pool) {
 8042		dma_free_coherent(&h->pdev->dev,
 8043				h->nr_cmds * sizeof(struct ErrorInfo),
 8044				h->errinfo_pool,
 8045				h->errinfo_pool_dhandle);
 8046		h->errinfo_pool = NULL;
 8047		h->errinfo_pool_dhandle = 0;
 8048	}
 8049}
 8050
 8051static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
 8052{
 8053	h->cmd_pool_bits = bitmap_zalloc(h->nr_cmds, GFP_KERNEL);
 8054	h->cmd_pool = dma_alloc_coherent(&h->pdev->dev,
 8055		    h->nr_cmds * sizeof(*h->cmd_pool),
 8056		    &h->cmd_pool_dhandle, GFP_KERNEL);
 8057	h->errinfo_pool = dma_alloc_coherent(&h->pdev->dev,
 8058		    h->nr_cmds * sizeof(*h->errinfo_pool),
 8059		    &h->errinfo_pool_dhandle, GFP_KERNEL);
 8060	if ((h->cmd_pool_bits == NULL)
 8061	    || (h->cmd_pool == NULL)
 8062	    || (h->errinfo_pool == NULL)) {
 8063		dev_err(&h->pdev->dev, "out of memory in %s\n", __func__);
 8064		goto clean_up;
 8065	}
 8066	hpsa_preinitialize_commands(h);
 8067	return 0;
 8068clean_up:
 8069	hpsa_free_cmd_pool(h);
 8070	return -ENOMEM;
 8071}
 8072
 8073/* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
 8074static void hpsa_free_irqs(struct ctlr_info *h)
 8075{
 8076	int i;
 8077	int irq_vector = 0;
 8078
 8079	if (hpsa_simple_mode)
 8080		irq_vector = h->intr_mode;
 8081
 8082	if (!h->msix_vectors || h->intr_mode != PERF_MODE_INT) {
 8083		/* Single reply queue, only one irq to free */
 8084		free_irq(pci_irq_vector(h->pdev, irq_vector),
 8085				&h->q[h->intr_mode]);
 8086		h->q[h->intr_mode] = 0;
 8087		return;
 8088	}
 8089
 8090	for (i = 0; i < h->msix_vectors; i++) {
 8091		free_irq(pci_irq_vector(h->pdev, i), &h->q[i]);
 8092		h->q[i] = 0;
 8093	}
 8094	for (; i < MAX_REPLY_QUEUES; i++)
 8095		h->q[i] = 0;
 8096}
 8097
 8098/* returns 0 on success; cleans up and returns -Enn on error */
 8099static int hpsa_request_irqs(struct ctlr_info *h,
 8100	irqreturn_t (*msixhandler)(int, void *),
 8101	irqreturn_t (*intxhandler)(int, void *))
 8102{
 8103	int rc, i;
 8104	int irq_vector = 0;
 8105
 8106	if (hpsa_simple_mode)
 8107		irq_vector = h->intr_mode;
 8108
 8109	/*
 8110	 * initialize h->q[x] = x so that interrupt handlers know which
 8111	 * queue to process.
 8112	 */
 8113	for (i = 0; i < MAX_REPLY_QUEUES; i++)
 8114		h->q[i] = (u8) i;
 8115
 8116	if (h->intr_mode == PERF_MODE_INT && h->msix_vectors > 0) {
 8117		/* If performant mode and MSI-X, use multiple reply queues */
 8118		for (i = 0; i < h->msix_vectors; i++) {
 8119			sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
 8120			rc = request_irq(pci_irq_vector(h->pdev, i), msixhandler,
 8121					0, h->intrname[i],
 8122					&h->q[i]);
 8123			if (rc) {
 8124				int j;
 8125
 8126				dev_err(&h->pdev->dev,
 8127					"failed to get irq %d for %s\n",
 8128				       pci_irq_vector(h->pdev, i), h->devname);
 8129				for (j = 0; j < i; j++) {
 8130					free_irq(pci_irq_vector(h->pdev, j), &h->q[j]);
 8131					h->q[j] = 0;
 8132				}
 8133				for (; j < MAX_REPLY_QUEUES; j++)
 8134					h->q[j] = 0;
 8135				return rc;
 8136			}
 8137		}
 8138	} else {
 8139		/* Use single reply pool */
 8140		if (h->msix_vectors > 0 || h->pdev->msi_enabled) {
 8141			sprintf(h->intrname[0], "%s-msi%s", h->devname,
 8142				h->msix_vectors ? "x" : "");
 8143			rc = request_irq(pci_irq_vector(h->pdev, irq_vector),
 8144				msixhandler, 0,
 8145				h->intrname[0],
 8146				&h->q[h->intr_mode]);
 8147		} else {
 8148			sprintf(h->intrname[h->intr_mode],
 8149				"%s-intx", h->devname);
 8150			rc = request_irq(pci_irq_vector(h->pdev, irq_vector),
 8151				intxhandler, IRQF_SHARED,
 8152				h->intrname[h->intr_mode],
 8153				&h->q[h->intr_mode]);
 8154		}
 8155	}
 8156	if (rc) {
 8157		dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
 8158		       pci_irq_vector(h->pdev, irq_vector), h->devname);
 8159		hpsa_free_irqs(h);
 8160		return -ENODEV;
 8161	}
 8162	return 0;
 8163}
 8164
 8165static int hpsa_kdump_soft_reset(struct ctlr_info *h)
 8166{
 8167	int rc;
 8168	hpsa_send_host_reset(h, HPSA_RESET_TYPE_CONTROLLER);
 8169
 8170	dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
 8171	rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY);
 8172	if (rc) {
 8173		dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
 8174		return rc;
 8175	}
 8176
 8177	dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
 8178	rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
 8179	if (rc) {
 8180		dev_warn(&h->pdev->dev, "Board failed to become ready "
 8181			"after soft reset.\n");
 8182		return rc;
 8183	}
 8184
 8185	return 0;
 8186}
 8187
 8188static void hpsa_free_reply_queues(struct ctlr_info *h)
 8189{
 8190	int i;
 8191
 8192	for (i = 0; i < h->nreply_queues; i++) {
 8193		if (!h->reply_queue[i].head)
 8194			continue;
 8195		dma_free_coherent(&h->pdev->dev,
 8196					h->reply_queue_size,
 8197					h->reply_queue[i].head,
 8198					h->reply_queue[i].busaddr);
 8199		h->reply_queue[i].head = NULL;
 8200		h->reply_queue[i].busaddr = 0;
 8201	}
 8202	h->reply_queue_size = 0;
 8203}
 8204
 8205static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
 8206{
 8207	hpsa_free_performant_mode(h);		/* init_one 7 */
 8208	hpsa_free_sg_chain_blocks(h);		/* init_one 6 */
 8209	hpsa_free_cmd_pool(h);			/* init_one 5 */
 8210	hpsa_free_irqs(h);			/* init_one 4 */
 8211	scsi_host_put(h->scsi_host);		/* init_one 3 */
 8212	h->scsi_host = NULL;			/* init_one 3 */
 8213	hpsa_free_pci_init(h);			/* init_one 2_5 */
 8214	free_percpu(h->lockup_detected);	/* init_one 2 */
 8215	h->lockup_detected = NULL;		/* init_one 2 */
 8216	if (h->resubmit_wq) {
 8217		destroy_workqueue(h->resubmit_wq);	/* init_one 1 */
 8218		h->resubmit_wq = NULL;
 8219	}
 8220	if (h->rescan_ctlr_wq) {
 8221		destroy_workqueue(h->rescan_ctlr_wq);
 8222		h->rescan_ctlr_wq = NULL;
 8223	}
 8224	if (h->monitor_ctlr_wq) {
 8225		destroy_workqueue(h->monitor_ctlr_wq);
 8226		h->monitor_ctlr_wq = NULL;
 8227	}
 8228
 8229	kfree(h);				/* init_one 1 */
 8230}
 8231
 8232/* Called when controller lockup detected. */
 8233static void fail_all_outstanding_cmds(struct ctlr_info *h)
 8234{
 8235	int i, refcount;
 8236	struct CommandList *c;
 8237	int failcount = 0;
 8238
 8239	flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */
 8240	for (i = 0; i < h->nr_cmds; i++) {
 8241		c = h->cmd_pool + i;
 8242		refcount = atomic_inc_return(&c->refcount);
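 		/*
 		 * A refcount greater than 1 means the command is still
 		 * outstanding (the extra reference is the one just taken
 		 * above), so complete it with CMD_CTLR_LOCKUP status.
 		 */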
 8243		if (refcount > 1) {
 8244			c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
 8245			finish_cmd(c);
 8246			atomic_dec(&h->commands_outstanding);
 8247			failcount++;
 8248		}
 8249		cmd_free(h, c);
 8250	}
 8251	dev_warn(&h->pdev->dev,
 8252		"failed %d commands in fail_all\n", failcount);
 8253}
 8254
 8255static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
 8256{
 8257	int cpu;
 8258
 8259	for_each_online_cpu(cpu) {
 8260		u32 *lockup_detected;
 8261		lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
 8262		*lockup_detected = value;
 8263	}
 8264	wmb(); /* be sure the per-cpu variables are out to memory */
 8265}
 8266
 8267static void controller_lockup_detected(struct ctlr_info *h)
 8268{
 8269	unsigned long flags;
 8270	u32 lockup_detected;
 8271
 8272	h->access.set_intr_mask(h, HPSA_INTR_OFF);
 8273	spin_lock_irqsave(&h->lock, flags);
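 	/*
 	 * On lockup the firmware is expected to have left a nonzero lockup
 	 * code in the scratchpad register; if it reads as zero, substitute
 	 * 0xffffffff so the per-cpu lockup flags still get set.
 	 */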
 8274	lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
 8275	if (!lockup_detected) {
 8276		/* no heartbeat, but controller gave us a zero. */
 8277		dev_warn(&h->pdev->dev,
 8278			"lockup detected after %d seconds but scratchpad register is zero\n",
 8279			h->heartbeat_sample_interval / HZ);
 8280		lockup_detected = 0xffffffff;
 8281	}
 8282	set_lockup_detected_for_all_cpus(h, lockup_detected);
 8283	spin_unlock_irqrestore(&h->lock, flags);
 8284	dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d seconds\n",
 8285			lockup_detected, h->heartbeat_sample_interval / HZ);
 8286	if (lockup_detected == 0xffff0000) {
 8287		dev_warn(&h->pdev->dev, "Telling controller to do a CHKPT\n");
 8288		writel(DOORBELL_GENERATE_CHKPT, h->vaddr + SA5_DOORBELL);
 8289	}
 8290	pci_disable_device(h->pdev);
 8291	fail_all_outstanding_cmds(h);
 8292}
 8293
 8294static int detect_controller_lockup(struct ctlr_info *h)
 8295{
 8296	u64 now;
 8297	u32 heartbeat;
 8298	unsigned long flags;
 8299
 8300	now = get_jiffies_64();
 8301	/* If we've received an interrupt recently, we're ok. */
 8302	if (time_after64(h->last_intr_timestamp +
 8303				(h->heartbeat_sample_interval), now))
 8304		return false;
 8305
 8306	/*
 8307	 * If we've already checked the heartbeat recently, we're ok.
 8308	 * This could happen if someone sends us a signal. We
 8309	 * otherwise don't care about signals in this thread.
 8310	 */
 8311	if (time_after64(h->last_heartbeat_timestamp +
 8312				(h->heartbeat_sample_interval), now))
 8313		return false;
 8314
 8315	/* If heartbeat has not changed since we last looked, we're not ok. */
 8316	spin_lock_irqsave(&h->lock, flags);
 8317	heartbeat = readl(&h->cfgtable->HeartBeat);
 8318	spin_unlock_irqrestore(&h->lock, flags);
 8319	if (h->last_heartbeat == heartbeat) {
 8320		controller_lockup_detected(h);
 8321		return true;
 8322	}
 8323
 8324	/* We're ok. */
 8325	h->last_heartbeat = heartbeat;
 8326	h->last_heartbeat_timestamp = now;
 8327	return false;
 8328}
 8329
 8330/*
 8331 * Set ioaccel status for all ioaccel volumes.
 8332 *
 8333 * Called from monitor controller worker (hpsa_event_monitor_worker)
 8334 *
 8335 * A Volume (or Volumes that comprise an Array set) may be undergoing a
 8336 * transformation, so we will be turning off ioaccel for all volumes that
 8337 * make up the Array.
 8338 */
 8339static void hpsa_set_ioaccel_status(struct ctlr_info *h)
 8340{
 8341	int rc;
 8342	int i;
 8343	u8 ioaccel_status;
 8344	unsigned char *buf;
 8345	struct hpsa_scsi_dev_t *device;
 8346
 8347	if (!h)
 8348		return;
 8349
 8350	buf = kmalloc(64, GFP_KERNEL);
 8351	if (!buf)
 8352		return;
 8353
 8354	/*
 8355	 * Run through current device list used during I/O requests.
 8356	 */
 8357	for (i = 0; i < h->ndevices; i++) {
 8358		int offload_to_be_enabled = 0;
 8359		int offload_config = 0;
 8360
 8361		device = h->dev[i];
 8362
 8363		if (!device)
 8364			continue;
 8365		if (!hpsa_vpd_page_supported(h, device->scsi3addr,
 8366						HPSA_VPD_LV_IOACCEL_STATUS))
 8367			continue;
 8368
 8369		memset(buf, 0, 64);
 8370
 8371		rc = hpsa_scsi_do_inquiry(h, device->scsi3addr,
 8372					VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS,
 8373					buf, 64);
 8374		if (rc != 0)
 8375			continue;
 8376
 8377		ioaccel_status = buf[IOACCEL_STATUS_BYTE];
 8378
 8379		/*
 8380		 * Check if offload is still configured on
 8381		 */
 8382		offload_config =
 8383				!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
 8384		/*
 8385		 * If offload is configured on, check to see if ioaccel
 8386		 * needs to be enabled.
 8387		 */
 8388		if (offload_config)
 8389			offload_to_be_enabled =
 8390				!!(ioaccel_status & OFFLOAD_ENABLED_BIT);
 8391
 8392		/*
 8393		 * If ioaccel is to be re-enabled, re-enable later during the
 8394		 * scan operation so the driver can get a fresh raidmap
 8395		 * before turning ioaccel back on.
 8396		 */
 8397		if (offload_to_be_enabled)
 8398			continue;
 8399
 8400		/*
 8401		 * Immediately turn off ioaccel for any volume the
 8402		 * controller tells us to. Some of the reasons could be:
 8403		 *    transformation - change to the LVs of an Array.
 8404		 *    degraded volume - component failure
 8405		 */
 8406		hpsa_turn_off_ioaccel_for_device(device);
 8407	}
 8408
 8409	kfree(buf);
 8410}
 8411
 8412static void hpsa_ack_ctlr_events(struct ctlr_info *h)
 8413{
 8414	char *event_type;
 8415
 8416	if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
 8417		return;
 8418
 8419	/* Ask the controller to clear the events we're handling. */
 8420	if ((h->transMethod & (CFGTBL_Trans_io_accel1
 8421			| CFGTBL_Trans_io_accel2)) &&
 8422		(h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
 8423		 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
 8424
 8425		if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
 8426			event_type = "state change";
 8427		if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
 8428			event_type = "configuration change";
 8429		/* Stop sending new RAID offload reqs via the IO accelerator */
 8430		scsi_block_requests(h->scsi_host);
 8431		hpsa_set_ioaccel_status(h);
 8432		hpsa_drain_accel_commands(h);
 8433		/* Set 'accelerator path config change' bit */
 8434		dev_warn(&h->pdev->dev,
 8435			"Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
 8436			h->events, event_type);
 8437		writel(h->events, &(h->cfgtable->clear_event_notify));
 8438		/* Set the "clear event notify field update" bit 6 */
 8439		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
 8440		/* Wait until ctlr clears 'clear event notify field', bit 6 */
 8441		hpsa_wait_for_clear_event_notify_ack(h);
 8442		scsi_unblock_requests(h->scsi_host);
 8443	} else {
 8444		/* Acknowledge controller notification events. */
 8445		writel(h->events, &(h->cfgtable->clear_event_notify));
 8446		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
 8447		hpsa_wait_for_clear_event_notify_ack(h);
 8448	}
 8449	return;
 8450}
 8451
 8452/* Check a register on the controller to see if there are configuration
 8453 * changes (added/changed/removed logical drives, etc.) which mean that
 8454 * we should rescan the controller for devices.
 8455 * Also check flag for driver-initiated rescan.
 8456 */
 8457static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
 8458{
 8459	if (h->drv_req_rescan) {
 8460		h->drv_req_rescan = 0;
 8461		return 1;
 8462	}
 8463
 8464	if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
 8465		return 0;
 8466
 8467	h->events = readl(&(h->cfgtable->event_notify));
 8468	return h->events & RESCAN_REQUIRED_EVENT_BITS;
 8469}
 8470
 8471/*
 8472 * Check if any of the offline devices have become ready
 8473 */
 8474static int hpsa_offline_devices_ready(struct ctlr_info *h)
 8475{
 8476	unsigned long flags;
 8477	struct offline_device_entry *d;
 8478	struct list_head *this, *tmp;
 8479
 8480	spin_lock_irqsave(&h->offline_device_lock, flags);
 8481	list_for_each_safe(this, tmp, &h->offline_device_list) {
 8482		d = list_entry(this, struct offline_device_entry,
 8483				offline_list);
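 		/*
 		 * Drop the lock across hpsa_volume_offline(), which issues
 		 * commands to the controller and may sleep, then retake it
 		 * before continuing the walk.
 		 */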
 8484		spin_unlock_irqrestore(&h->offline_device_lock, flags);
 8485		if (!hpsa_volume_offline(h, d->scsi3addr)) {
 8486			spin_lock_irqsave(&h->offline_device_lock, flags);
 8487			list_del(&d->offline_list);
 8488			spin_unlock_irqrestore(&h->offline_device_lock, flags);
 8489			return 1;
 8490		}
 8491		spin_lock_irqsave(&h->offline_device_lock, flags);
 8492	}
 8493	spin_unlock_irqrestore(&h->offline_device_lock, flags);
 8494	return 0;
 8495}
 8496
 8497static int hpsa_luns_changed(struct ctlr_info *h)
 8498{
 8499	int rc = 1; /* assume there are changes */
 8500	struct ReportLUNdata *logdev = NULL;
 8501
 8502	/* if we can't find out if lun data has changed,
 8503	 * assume that it has.
 8504	 */
 8505
 8506	if (!h->lastlogicals)
 8507		return rc;
 8508
 8509	logdev = kzalloc(sizeof(*logdev), GFP_KERNEL);
 8510	if (!logdev)
 8511		return rc;
 8512
 8513	if (hpsa_scsi_do_report_luns(h, 1, logdev, sizeof(*logdev), 0)) {
 8514		dev_warn(&h->pdev->dev,
 8515			"report luns failed, can't track lun changes.\n");
 8516		goto out;
 8517	}
 8518	if (memcmp(logdev, h->lastlogicals, sizeof(*logdev))) {
 8519		dev_info(&h->pdev->dev,
 8520			"Lun changes detected.\n");
 8521		memcpy(h->lastlogicals, logdev, sizeof(*logdev));
 8522		goto out;
 8523	} else
 8524		rc = 0; /* no changes detected. */
 8525out:
 8526	kfree(logdev);
 8527	return rc;
 8528}
 8529
 8530static void hpsa_perform_rescan(struct ctlr_info *h)
 8531{
 8532	struct Scsi_Host *sh = NULL;
 8533	unsigned long flags;
 8534
 8535	/*
 8536	 * Do the scan after the reset
 8537	 */
 8538	spin_lock_irqsave(&h->reset_lock, flags);
 8539	if (h->reset_in_progress) {
 8540		h->drv_req_rescan = 1;
 8541		spin_unlock_irqrestore(&h->reset_lock, flags);
 8542		return;
 8543	}
 8544	spin_unlock_irqrestore(&h->reset_lock, flags);
 8545
 8546	sh = scsi_host_get(h->scsi_host);
 8547	if (sh != NULL) {
 8548		hpsa_scan_start(sh);
 8549		scsi_host_put(sh);
 8550		h->drv_req_rescan = 0;
 8551	}
 8552}
 8553
 8554/*
 8555 * watch for controller events
 8556 */
 8557static void hpsa_event_monitor_worker(struct work_struct *work)
 8558{
 8559	struct ctlr_info *h = container_of(to_delayed_work(work),
 8560					struct ctlr_info, event_monitor_work);
 8561	unsigned long flags;
 8562
 8563	spin_lock_irqsave(&h->lock, flags);
 8564	if (h->remove_in_progress) {
 8565		spin_unlock_irqrestore(&h->lock, flags);
 8566		return;
 8567	}
 8568	spin_unlock_irqrestore(&h->lock, flags);
 8569
 8570	if (hpsa_ctlr_needs_rescan(h)) {
 8571		hpsa_ack_ctlr_events(h);
 8572		hpsa_perform_rescan(h);
 8573	}
 8574
 8575	spin_lock_irqsave(&h->lock, flags);
 8576	if (!h->remove_in_progress)
 8577		queue_delayed_work(h->monitor_ctlr_wq, &h->event_monitor_work,
 8578				HPSA_EVENT_MONITOR_INTERVAL);
 8579	spin_unlock_irqrestore(&h->lock, flags);
 8580}
 8581
 8582static void hpsa_rescan_ctlr_worker(struct work_struct *work)
 8583{
 8584	unsigned long flags;
 8585	struct ctlr_info *h = container_of(to_delayed_work(work),
 8586					struct ctlr_info, rescan_ctlr_work);
 8587
 8588	spin_lock_irqsave(&h->lock, flags);
 8589	if (h->remove_in_progress) {
 8590		spin_unlock_irqrestore(&h->lock, flags);
 8591		return;
 8592	}
 8593	spin_unlock_irqrestore(&h->lock, flags);
 8594
 8595	if (h->drv_req_rescan || hpsa_offline_devices_ready(h)) {
 8596		hpsa_perform_rescan(h);
 8597	} else if (h->discovery_polling) {
 8598		if (hpsa_luns_changed(h)) {
 8599			dev_info(&h->pdev->dev,
 8600				"driver discovery polling rescan.\n");
 8601			hpsa_perform_rescan(h);
 8602		}
 8603	}
 8604	spin_lock_irqsave(&h->lock, flags);
 8605	if (!h->remove_in_progress)
 8606		queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
 8607				h->heartbeat_sample_interval);
 8608	spin_unlock_irqrestore(&h->lock, flags);
 8609}
 8610
 8611static void hpsa_monitor_ctlr_worker(struct work_struct *work)
 8612{
 8613	unsigned long flags;
 8614	struct ctlr_info *h = container_of(to_delayed_work(work),
 8615					struct ctlr_info, monitor_ctlr_work);
 8616
 8617	detect_controller_lockup(h);
 8618	if (lockup_detected(h))
 8619		return;
 8620
 8621	spin_lock_irqsave(&h->lock, flags);
 8622	if (!h->remove_in_progress)
 8623		queue_delayed_work(h->monitor_ctlr_wq, &h->monitor_ctlr_work,
 8624				h->heartbeat_sample_interval);
 8625	spin_unlock_irqrestore(&h->lock, flags);
 8626}
 8627
 8628static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
 8629						char *name)
 8630{
 8631	struct workqueue_struct *wq = NULL;
 8632
 8633	wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
 8634	if (!wq)
 8635		dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);
 8636
 8637	return wq;
 8638}
 8639
 8640static void hpda_free_ctlr_info(struct ctlr_info *h)
 8641{
 8642	kfree(h->reply_map);
 8643	kfree(h);
 8644}
 8645
 8646static struct ctlr_info *hpda_alloc_ctlr_info(void)
 8647{
 8648	struct ctlr_info *h;
 8649
 8650	h = kzalloc(sizeof(*h), GFP_KERNEL);
 8651	if (!h)
 8652		return NULL;
 8653
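      	/* One entry per possible CPU: reply_map associates a CPU with the
      	 * reply queue used for its completions (it is populated elsewhere
      	 * in the driver once the interrupt/MSI-X layout is known).
      	 */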
 8654	h->reply_map = kcalloc(nr_cpu_ids, sizeof(*h->reply_map), GFP_KERNEL);
 8655	if (!h->reply_map) {
 8656		kfree(h);
 8657		return NULL;
 8658	}
 8659	return h;
 8660}
 8661
 8662static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 8663{
 8664	int rc;
 8665	struct ctlr_info *h;
 8666	int try_soft_reset = 0;
 8667	unsigned long flags;
 8668	u32 board_id;
 8669
 8670	if (number_of_controllers == 0)
 8671		printk(KERN_INFO DRIVER_NAME "\n");
 8672
 8673	rc = hpsa_lookup_board_id(pdev, &board_id, NULL);
 8674	if (rc < 0) {
 8675		dev_warn(&pdev->dev, "Board ID not found\n");
 8676		return rc;
 8677	}
 8678
 8679	rc = hpsa_init_reset_devices(pdev, board_id);
 8680	if (rc) {
 8681		if (rc != -ENOTSUPP)
 8682			return rc;
 8683		/* If the reset fails in a particular way (it has no way to do
 8684		 * a proper hard reset, so returns -ENOTSUPP) we can try to do
 8685		 * a soft reset once we get the controller configured up to the
 8686		 * point that it can accept a command.
 8687		 */
 8688		try_soft_reset = 1;
 8689		rc = 0;
 8690	}
 8691
 8692reinit_after_soft_reset:
 8693
 8694	/* Command structures must be aligned on a 32-byte boundary because
  8695	 * the 5 lower bits of the address are used by the hardware and by
 8696	 * the driver.  See comments in hpsa.h for more info.
  8697	 */
 8698	BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
 8699	h = hpda_alloc_ctlr_info();
 8700	if (!h) {
 8701		dev_err(&pdev->dev, "Failed to allocate controller head\n");
 8702		return -ENOMEM;
 8703	}
 8704
 8705	h->pdev = pdev;
 8706
 8707	h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
  8708	INIT_LIST_HEAD(&h->offline_device_list);
 8709	spin_lock_init(&h->lock);
 8710	spin_lock_init(&h->offline_device_lock);
 8711	spin_lock_init(&h->scan_lock);
 8712	spin_lock_init(&h->reset_lock);
 8713	atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
 8714
 8715	/* Allocate and clear per-cpu variable lockup_detected */
 8716	h->lockup_detected = alloc_percpu(u32);
 8717	if (!h->lockup_detected) {
 8718		dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n");
 8719		rc = -ENOMEM;
 8720		goto clean1;	/* aer/h */
 8721	}
 8722	set_lockup_detected_for_all_cpus(h, 0);
 8723
 8724	rc = hpsa_pci_init(h);
 8725	if (rc)
 8726		goto clean2;	/* lu, aer/h */
 8727
 8728	/* relies on h-> settings made by hpsa_pci_init, including
 8729	 * interrupt_mode h->intr */
 8730	rc = hpsa_scsi_host_alloc(h);
 8731	if (rc)
 8732		goto clean2_5;	/* pci, lu, aer/h */
 8733
 8734	sprintf(h->devname, HPSA "%d", h->scsi_host->host_no);
 8735	h->ctlr = number_of_controllers;
 8736	number_of_controllers++;
 8737
  8738	/* configure the PCI DMA mask */
 8739	rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
 8740	if (rc != 0) {
 8741		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
  8742		if (rc != 0) {
 8743			dev_err(&pdev->dev, "no suitable DMA available\n");
 8744			goto clean3;	/* shost, pci, lu, aer/h */
 8745		}
 8746	}
 8747
 8748	/* make sure the board interrupts are off */
 8749	h->access.set_intr_mask(h, HPSA_INTR_OFF);
 8750
 8751	rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx);
 8752	if (rc)
 8753		goto clean3;	/* shost, pci, lu, aer/h */
 8754	rc = hpsa_alloc_cmd_pool(h);
 8755	if (rc)
 8756		goto clean4;	/* irq, shost, pci, lu, aer/h */
 8757	rc = hpsa_alloc_sg_chain_blocks(h);
 8758	if (rc)
 8759		goto clean5;	/* cmd, irq, shost, pci, lu, aer/h */
 8760	init_waitqueue_head(&h->scan_wait_queue);
 8761	init_waitqueue_head(&h->event_sync_wait_queue);
 8762	mutex_init(&h->reset_mutex);
 8763	h->scan_finished = 1; /* no scan currently in progress */
 8764	h->scan_waiting = 0;
 8765
 8766	pci_set_drvdata(pdev, h);
 8767	h->ndevices = 0;
 8768
 8769	spin_lock_init(&h->devlock);
 8770	rc = hpsa_put_ctlr_into_performant_mode(h);
 8771	if (rc)
 8772		goto clean6; /* sg, cmd, irq, shost, pci, lu, aer/h */
 8773
  8774	/* create the rescan, resubmit and monitor workqueues */
 8775	h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
 8776	if (!h->rescan_ctlr_wq) {
 8777		rc = -ENOMEM;
 8778		goto clean7;
 8779	}
 8780
 8781	h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
 8782	if (!h->resubmit_wq) {
 8783		rc = -ENOMEM;
 8784		goto clean7;	/* aer/h */
 8785	}
 8786
 8787	h->monitor_ctlr_wq = hpsa_create_controller_wq(h, "monitor");
 8788	if (!h->monitor_ctlr_wq) {
 8789		rc = -ENOMEM;
 8790		goto clean7;
 8791	}
 8792
 8793	/*
 8794	 * At this point, the controller is ready to take commands.
 8795	 * Now, if reset_devices and the hard reset didn't work, try
 8796	 * the soft reset and see if that works.
 8797	 */
 8798	if (try_soft_reset) {
 8799
 8800		/* This is kind of gross.  We may or may not get a completion
 8801		 * from the soft reset command, and if we do, then the value
 8802		 * from the fifo may or may not be valid.  So, we wait 10 secs
 8803		 * after the reset throwing away any completions we get during
 8804		 * that time.  Unregister the interrupt handler and register
 8805		 * fake ones to scoop up any residual completions.
 8806		 */
 8807		spin_lock_irqsave(&h->lock, flags);
 8808		h->access.set_intr_mask(h, HPSA_INTR_OFF);
 8809		spin_unlock_irqrestore(&h->lock, flags);
 8810		hpsa_free_irqs(h);
 8811		rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
 8812					hpsa_intx_discard_completions);
 8813		if (rc) {
 8814			dev_warn(&h->pdev->dev,
 8815				"Failed to request_irq after soft reset.\n");
 8816			/*
 8817			 * cannot goto clean7 or free_irqs will be called
 8818			 * again. Instead, do its work
 8819			 */
 8820			hpsa_free_performant_mode(h);	/* clean7 */
 8821			hpsa_free_sg_chain_blocks(h);	/* clean6 */
 8822			hpsa_free_cmd_pool(h);		/* clean5 */
 8823			/*
 8824			 * skip hpsa_free_irqs(h) clean4 since that
 8825			 * was just called before request_irqs failed
 8826			 */
 8827			goto clean3;
 8828		}
 8829
 8830		rc = hpsa_kdump_soft_reset(h);
 8831		if (rc)
 8832			/* Neither hard nor soft reset worked, we're hosed. */
 8833			goto clean7;
 8834
 8835		dev_info(&h->pdev->dev, "Board READY.\n");
 8836		dev_info(&h->pdev->dev,
 8837			"Waiting for stale completions to drain.\n");
 8838		h->access.set_intr_mask(h, HPSA_INTR_ON);
 8839		msleep(10000);
 8840		h->access.set_intr_mask(h, HPSA_INTR_OFF);
 8841
 8842		rc = controller_reset_failed(h->cfgtable);
 8843		if (rc)
 8844			dev_info(&h->pdev->dev,
 8845				"Soft reset appears to have failed.\n");
 8846
  8847		/* Since the controller has been reset, we have to go back and re-init
 8848		 * everything.  Easiest to just forget what we've done and do it
 8849		 * all over again.
 8850		 */
 8851		hpsa_undo_allocations_after_kdump_soft_reset(h);
 8852		try_soft_reset = 0;
 8853		if (rc)
 8854			/* don't goto clean, we already unallocated */
 8855			return -ENODEV;
 8856
 8857		goto reinit_after_soft_reset;
 8858	}
 8859
 8860	/* Enable Accelerated IO path at driver layer */
 8861	h->acciopath_status = 1;
  8862	/* Disable discovery polling. */
 8863	h->discovery_polling = 0;
 8864
 8865
 8866	/* Turn the interrupts on so we can service requests */
 8867	h->access.set_intr_mask(h, HPSA_INTR_ON);
 8868
  8869	hpsa_hba_inquiry(h);
 8870
 8871	h->lastlogicals = kzalloc(sizeof(*(h->lastlogicals)), GFP_KERNEL);
 8872	if (!h->lastlogicals)
 8873		dev_info(&h->pdev->dev,
 8874			"Can't track change to report lun data\n");
 8875
 8876	/* hook into SCSI subsystem */
 8877	rc = hpsa_scsi_add_host(h);
 8878	if (rc)
 8879		goto clean8; /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */
 8880
 8881	/* Monitor the controller for firmware lockups */
 8882	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
 8883	INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
 8884	schedule_delayed_work(&h->monitor_ctlr_work,
 8885				h->heartbeat_sample_interval);
 8886	INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
 8887	queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
 8888				h->heartbeat_sample_interval);
 8889	INIT_DELAYED_WORK(&h->event_monitor_work, hpsa_event_monitor_worker);
 8890	schedule_delayed_work(&h->event_monitor_work,
 8891				HPSA_EVENT_MONITOR_INTERVAL);
 8892	return 0;
 8893
 8894clean8: /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */
 8895	kfree(h->lastlogicals);
 8896clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
 8897	hpsa_free_performant_mode(h);
 8898	h->access.set_intr_mask(h, HPSA_INTR_OFF);
 8899clean6: /* sg, cmd, irq, pci, lockup, wq/aer/h */
 8900	hpsa_free_sg_chain_blocks(h);
 8901clean5: /* cmd, irq, shost, pci, lu, aer/h */
 8902	hpsa_free_cmd_pool(h);
 8903clean4: /* irq, shost, pci, lu, aer/h */
 8904	hpsa_free_irqs(h);
 8905clean3: /* shost, pci, lu, aer/h */
 8906	scsi_host_put(h->scsi_host);
 8907	h->scsi_host = NULL;
 8908clean2_5: /* pci, lu, aer/h */
 8909	hpsa_free_pci_init(h);
 8910clean2: /* lu, aer/h */
 8911	if (h->lockup_detected) {
 8912		free_percpu(h->lockup_detected);
 8913		h->lockup_detected = NULL;
 8914	}
 8915clean1:	/* wq/aer/h */
 8916	if (h->resubmit_wq) {
 8917		destroy_workqueue(h->resubmit_wq);
 8918		h->resubmit_wq = NULL;
 8919	}
 8920	if (h->rescan_ctlr_wq) {
 8921		destroy_workqueue(h->rescan_ctlr_wq);
 8922		h->rescan_ctlr_wq = NULL;
 8923	}
 8924	if (h->monitor_ctlr_wq) {
 8925		destroy_workqueue(h->monitor_ctlr_wq);
 8926		h->monitor_ctlr_wq = NULL;
 8927	}
 8928	hpda_free_ctlr_info(h);
 8929	return rc;
 8930}
 8931
 8932static void hpsa_flush_cache(struct ctlr_info *h)
 8933{
 8934	char *flush_buf;
 8935	struct CommandList *c;
 8936	int rc;
 8937
 8938	if (unlikely(lockup_detected(h)))
 8939		return;
 8940	flush_buf = kzalloc(4, GFP_KERNEL);
 8941	if (!flush_buf)
 8942		return;
 8943
 8944	c = cmd_alloc(h);
 8945
 8946	if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
 8947		RAID_CTLR_LUNID, TYPE_CMD)) {
 8948		goto out;
 8949	}
 8950	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE,
 8951			DEFAULT_TIMEOUT);
 8952	if (rc)
 8953		goto out;
 8954	if (c->err_info->CommandStatus != 0)
 8955out:
 8956		dev_warn(&h->pdev->dev,
 8957			"error flushing cache on controller\n");
  8958	cmd_free(h, c);
 8959	kfree(flush_buf);
 8960}
 8961
 8962/* Make controller gather fresh report lun data each time we
 8963 * send down a report luns request
 8964 */
 8965static void hpsa_disable_rld_caching(struct ctlr_info *h)
 8966{
 8967	u32 *options;
 8968	struct CommandList *c;
 8969	int rc;
 8970
 8971	/* Don't bother trying to set diag options if locked up */
 8972	if (unlikely(h->lockup_detected))
 8973		return;
 8974
 8975	options = kzalloc(sizeof(*options), GFP_KERNEL);
 8976	if (!options)
 8977		return;
 8978
 8979	c = cmd_alloc(h);
 8980
 8981	/* first, get the current diag options settings */
 8982	if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
 8983		RAID_CTLR_LUNID, TYPE_CMD))
 8984		goto errout;
 8985
 8986	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
 8987			NO_TIMEOUT);
 8988	if ((rc != 0) || (c->err_info->CommandStatus != 0))
 8989		goto errout;
 8990
 8991	/* Now, set the bit for disabling the RLD caching */
 8992	*options |= HPSA_DIAG_OPTS_DISABLE_RLD_CACHING;
 8993
 8994	if (fill_cmd(c, BMIC_SET_DIAG_OPTIONS, h, options, 4, 0,
 8995		RAID_CTLR_LUNID, TYPE_CMD))
 8996		goto errout;
 8997
 8998	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE,
 8999			NO_TIMEOUT);
 9000	if ((rc != 0)  || (c->err_info->CommandStatus != 0))
 9001		goto errout;
 9002
 9003	/* Now verify that it got set: */
 9004	if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
 9005		RAID_CTLR_LUNID, TYPE_CMD))
 9006		goto errout;
 9007
 9008	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
 9009			NO_TIMEOUT);
 9010	if ((rc != 0)  || (c->err_info->CommandStatus != 0))
 9011		goto errout;
 9012
 9013	if (*options & HPSA_DIAG_OPTS_DISABLE_RLD_CACHING)
 9014		goto out;
 9015
 9016errout:
 9017	dev_err(&h->pdev->dev,
 9018			"Error: failed to disable report lun data caching.\n");
 9019out:
 9020	cmd_free(h, c);
 9021	kfree(options);
 9022}
 9023
 9024static void __hpsa_shutdown(struct pci_dev *pdev)
 9025{
 9026	struct ctlr_info *h;
 9027
 9028	h = pci_get_drvdata(pdev);
  9029	/* Turn board interrupts off and send the flush cache command
  9030	 * so that all data in the battery-backed cache is written out
  9031	 * to the disks before the controller goes away.
  9032	 */
 9033	hpsa_flush_cache(h);
 9034	h->access.set_intr_mask(h, HPSA_INTR_OFF);
 9035	hpsa_free_irqs(h);			/* init_one 4 */
 9036	hpsa_disable_interrupt_mode(h);		/* pci_init 2 */
 9037}
 9038
 9039static void hpsa_shutdown(struct pci_dev *pdev)
 9040{
 9041	__hpsa_shutdown(pdev);
 9042	pci_disable_device(pdev);
 9043}
 9044
 9045static void hpsa_free_device_info(struct ctlr_info *h)
 9046{
 9047	int i;
 9048
 9049	for (i = 0; i < h->ndevices; i++) {
 9050		kfree(h->dev[i]);
 9051		h->dev[i] = NULL;
 9052	}
 9053}
 9054
 9055static void hpsa_remove_one(struct pci_dev *pdev)
 9056{
 9057	struct ctlr_info *h;
 9058	unsigned long flags;
 9059
 9060	if (pci_get_drvdata(pdev) == NULL) {
 9061		dev_err(&pdev->dev, "unable to remove device\n");
 9062		return;
 9063	}
 9064	h = pci_get_drvdata(pdev);
 9065
 9066	/* Get rid of any controller monitoring work items */
 9067	spin_lock_irqsave(&h->lock, flags);
 9068	h->remove_in_progress = 1;
 9069	spin_unlock_irqrestore(&h->lock, flags);
 9070	cancel_delayed_work_sync(&h->monitor_ctlr_work);
 9071	cancel_delayed_work_sync(&h->rescan_ctlr_work);
 9072	cancel_delayed_work_sync(&h->event_monitor_work);
 9073	destroy_workqueue(h->rescan_ctlr_wq);
 9074	destroy_workqueue(h->resubmit_wq);
 9075	destroy_workqueue(h->monitor_ctlr_wq);
 9076
 9077	hpsa_delete_sas_host(h);
  9078
 9079	/*
 9080	 * Call before disabling interrupts.
 9081	 * scsi_remove_host can trigger I/O operations especially
 9082	 * when multipath is enabled. There can be SYNCHRONIZE CACHE
 9083	 * operations which cannot complete and will hang the system.
 9084	 */
 9085	if (h->scsi_host)
 9086		scsi_remove_host(h->scsi_host);		/* init_one 8 */
 9087	/* includes hpsa_free_irqs - init_one 4 */
 9088	/* includes hpsa_disable_interrupt_mode - pci_init 2 */
 9089	__hpsa_shutdown(pdev);
 9090
 9091	hpsa_free_device_info(h);		/* scan */
 9092
 9093	kfree(h->hba_inquiry_data);			/* init_one 10 */
 9094	h->hba_inquiry_data = NULL;			/* init_one 10 */
 9095	hpsa_free_ioaccel2_sg_chain_blocks(h);
 9096	hpsa_free_performant_mode(h);			/* init_one 7 */
 9097	hpsa_free_sg_chain_blocks(h);			/* init_one 6 */
 9098	hpsa_free_cmd_pool(h);				/* init_one 5 */
 9099	kfree(h->lastlogicals);
 9100
 9101	/* hpsa_free_irqs already called via hpsa_shutdown init_one 4 */
 9102
 9103	scsi_host_put(h->scsi_host);			/* init_one 3 */
 9104	h->scsi_host = NULL;				/* init_one 3 */
 9105
 9106	/* includes hpsa_disable_interrupt_mode - pci_init 2 */
 9107	hpsa_free_pci_init(h);				/* init_one 2.5 */
 9108
 9109	free_percpu(h->lockup_detected);		/* init_one 2 */
 9110	h->lockup_detected = NULL;			/* init_one 2 */
 9111
 9112	hpda_free_ctlr_info(h);				/* init_one 1 */
 9113}
 9114
 9115static int __maybe_unused hpsa_suspend(
 9116	__attribute__((unused)) struct device *dev)
 9117{
 9118	return -ENOSYS;
 9119}
 9120
 9121static int __maybe_unused hpsa_resume
 9122	(__attribute__((unused)) struct device *dev)
 9123{
 9124	return -ENOSYS;
 9125}
 9126
 9127static SIMPLE_DEV_PM_OPS(hpsa_pm_ops, hpsa_suspend, hpsa_resume);
 9128
 9129static struct pci_driver hpsa_pci_driver = {
 9130	.name = HPSA,
 9131	.probe = hpsa_init_one,
 9132	.remove = hpsa_remove_one,
 9133	.id_table = hpsa_pci_device_id,	/* id_table */
 9134	.shutdown = hpsa_shutdown,
  9135	.driver.pm = &hpsa_pm_ops,
 9136};
 9137
 9138/* Fill in bucket_map[], given nsgs (the max number of
 9139 * scatter gather elements supported) and bucket[],
 9140 * which is an array of 8 integers.  The bucket[] array
 9141 * contains 8 different DMA transfer sizes (in 16
 9142 * byte increments) which the controller uses to fetch
 9143 * commands.  This function fills in bucket_map[], which
 9144 * maps a given number of scatter gather elements to one of
 9145 * the 8 DMA transfer sizes.  The point of it is to allow the
 9146 * controller to only do as much DMA as needed to fetch the
 9147 * command, with the DMA transfer size encoded in the lower
 9148 * bits of the command address.
 9149 */
 9150static void  calc_bucket_map(int bucket[], int num_buckets,
 9151	int nsgs, int min_blocks, u32 *bucket_map)
 9152{
 9153	int i, j, b, size;
  9154	int i, j, b, size;
 9155	/* Note, bucket_map must have nsgs+1 entries. */
 9156	for (i = 0; i <= nsgs; i++) {
 9157		/* Compute size of a command with i SG entries */
 9158		size = i + min_blocks;
 9159		b = num_buckets; /* Assume the biggest bucket */
 9160		/* Find the bucket that is just big enough */
 9161		for (j = 0; j < num_buckets; j++) {
 9162			if (bucket[j] >= size) {
 9163				b = j;
 9164				break;
 9165			}
 9166		}
 9167		/* for a command with i SG entries, use bucket b. */
 9168		bucket_map[i] = b;
 9169	}
 9170}
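      /*
       * Worked example (illustrative): with the performant-mode bft[] of
       * {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4} and min_blocks = 4,
       * a command with 3 SG entries needs 3 + 4 = 7 blocks, so
       * bucket_map[3] = 2 (the 8-block bucket is the smallest that fits).
       */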
 9171
 9172/*
 9173 * return -ENODEV on err, 0 on success (or no action)
 9174 * allocates numerous items that must be freed later
 9175 */
 9176static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
 9177{
 9178	int i;
 9179	unsigned long register_value;
 9180	unsigned long transMethod = CFGTBL_Trans_Performant |
 9181			(trans_support & CFGTBL_Trans_use_short_tags) |
 9182				CFGTBL_Trans_enable_directed_msix |
 9183			(trans_support & (CFGTBL_Trans_io_accel1 |
 9184				CFGTBL_Trans_io_accel2));
 9185	struct access_method access = SA5_performant_access;
 9186
 9187	/* This is a bit complicated.  There are 8 registers on
  9188	 * the controller to which we write to tell it the 8 different
  9189	 * sizes of commands that may occur.  It's a way of
 9190	 * reducing the DMA done to fetch each command.  Encoded into
 9191	 * each command's tag are 3 bits which communicate to the controller
 9192	 * which of the eight sizes that command fits within.  The size of
 9193	 * each command depends on how many scatter gather entries there are.
 9194	 * Each SG entry requires 16 bytes.  The eight registers are programmed
 9195	 * with the number of 16-byte blocks a command of that size requires.
  9196	 * The smallest command possible requires 5 such 16-byte blocks;
 9197	 * the largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
 9198	 * blocks.  Note, this only extends to the SG entries contained
 9199	 * within the command block, and does not extend to chained blocks
 9200	 * of SG elements.   bft[] contains the eight values we write to
 9201	 * the registers.  They are not evenly distributed, but have more
 9202	 * sizes for small commands, and fewer sizes for larger commands.
 9203	 */
 9204	int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
 9205#define MIN_IOACCEL2_BFT_ENTRY 5
 9206#define HPSA_IOACCEL2_HEADER_SZ 4
 9207	int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
 9208			13, 14, 15, 16, 17, 18, 19,
 9209			HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
 9210	BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
 9211	BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
 9212	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
 9213				 16 * MIN_IOACCEL2_BFT_ENTRY);
 9214	BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
 9215	BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
 9216	/*  5 = 1 s/g entry or 4k
 9217	 *  6 = 2 s/g entry or 8k
 9218	 *  8 = 4 s/g entry or 16k
 9219	 * 10 = 6 s/g entry or 24k
 9220	 */
 9221
 9222	/* If the controller supports either ioaccel method then
 9223	 * we can also use the RAID stack submit path that does not
 9224	 * perform the superfluous readl() after each command submission.
 9225	 */
 9226	if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
 9227		access = SA5_performant_access_no_read;
 9228
 9229	/* Controller spec: zero out this buffer. */
 9230	for (i = 0; i < h->nreply_queues; i++)
 9231		memset(h->reply_queue[i].head, 0, h->reply_queue_size);
 9232
 9233	bft[7] = SG_ENTRIES_IN_CMD + 4;
 9234	calc_bucket_map(bft, ARRAY_SIZE(bft),
 9235				SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
 9236	for (i = 0; i < 8; i++)
 9237		writel(bft[i], &h->transtable->BlockFetch[i]);
 9238
 9239	/* size of controller ring buffer */
 9240	writel(h->max_commands, &h->transtable->RepQSize);
 9241	writel(h->nreply_queues, &h->transtable->RepQCount);
 9242	writel(0, &h->transtable->RepQCtrAddrLow32);
 9243	writel(0, &h->transtable->RepQCtrAddrHigh32);
 9244
 9245	for (i = 0; i < h->nreply_queues; i++) {
 9246		writel(0, &h->transtable->RepQAddr[i].upper);
 9247		writel(h->reply_queue[i].busaddr,
 9248			&h->transtable->RepQAddr[i].lower);
 9249	}
 9250
 9251	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
 9252	writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
 9253	/*
 9254	 * enable outbound interrupt coalescing in accelerator mode;
 9255	 */
 9256	if (trans_support & CFGTBL_Trans_io_accel1) {
 9257		access = SA5_ioaccel_mode1_access;
 9258		writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
 9259		writel(4, &h->cfgtable->HostWrite.CoalIntCount);
 9260	} else
 9261		if (trans_support & CFGTBL_Trans_io_accel2)
 9262			access = SA5_ioaccel_mode2_access;
 9263	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
 9264	if (hpsa_wait_for_mode_change_ack(h)) {
 9265		dev_err(&h->pdev->dev,
 9266			"performant mode problem - doorbell timeout\n");
 9267		return -ENODEV;
 9268	}
 9269	register_value = readl(&(h->cfgtable->TransportActive));
 9270	if (!(register_value & CFGTBL_Trans_Performant)) {
 9271		dev_err(&h->pdev->dev,
 9272			"performant mode problem - transport not active\n");
 9273		return -ENODEV;
 9274	}
 9275	/* Change the access methods to the performant access methods */
 9276	h->access = access;
 9277	h->transMethod = transMethod;
 9278
 9279	if (!((trans_support & CFGTBL_Trans_io_accel1) ||
 9280		(trans_support & CFGTBL_Trans_io_accel2)))
 9281		return 0;
 9282
 9283	if (trans_support & CFGTBL_Trans_io_accel1) {
 9284		/* Set up I/O accelerator mode */
 9285		for (i = 0; i < h->nreply_queues; i++) {
 9286			writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
 9287			h->reply_queue[i].current_entry =
 9288				readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
 9289		}
 9290		bft[7] = h->ioaccel_maxsg + 8;
 9291		calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
 9292				h->ioaccel1_blockFetchTable);
 9293
 9294		/* initialize all reply queue entries to unused */
 9295		for (i = 0; i < h->nreply_queues; i++)
 9296			memset(h->reply_queue[i].head,
 9297				(u8) IOACCEL_MODE1_REPLY_UNUSED,
 9298				h->reply_queue_size);
 9299
 9300		/* set all the constant fields in the accelerator command
 9301		 * frames once at init time to save CPU cycles later.
 9302		 */
 9303		for (i = 0; i < h->nr_cmds; i++) {
 9304			struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];
 9305
 9306			cp->function = IOACCEL1_FUNCTION_SCSIIO;
 9307			cp->err_info = (u32) (h->errinfo_pool_dhandle +
 9308					(i * sizeof(struct ErrorInfo)));
 9309			cp->err_info_len = sizeof(struct ErrorInfo);
 9310			cp->sgl_offset = IOACCEL1_SGLOFFSET;
 9311			cp->host_context_flags =
 9312				cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
 9313			cp->timeout_sec = 0;
 9314			cp->ReplyQueue = 0;
 9315			cp->tag =
 9316				cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
 9317			cp->host_addr =
 9318				cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
 9319					(i * sizeof(struct io_accel1_cmd)));
 9320		}
 9321	} else if (trans_support & CFGTBL_Trans_io_accel2) {
 9322		u64 cfg_offset, cfg_base_addr_index;
 9323		u32 bft2_offset, cfg_base_addr;
 9324
 9325		hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
 9326				    &cfg_base_addr_index, &cfg_offset);
 9327		BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
 9328		bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
 9329		calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
 9330				4, h->ioaccel2_blockFetchTable);
 9331		bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
 9332		BUILD_BUG_ON(offsetof(struct CfgTable,
 9333				io_accel_request_size_offset) != 0xb8);
 9334		h->ioaccel2_bft2_regs =
 9335			remap_pci_mem(pci_resource_start(h->pdev,
 9336					cfg_base_addr_index) +
 9337					cfg_offset + bft2_offset,
 9338					ARRAY_SIZE(bft2) *
 9339					sizeof(*h->ioaccel2_bft2_regs));
 9340		for (i = 0; i < ARRAY_SIZE(bft2); i++)
 9341			writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
 9342	}
 9343	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
 9344	if (hpsa_wait_for_mode_change_ack(h)) {
 9345		dev_err(&h->pdev->dev,
 9346			"performant mode problem - enabling ioaccel mode\n");
 9347		return -ENODEV;
 9348	}
 9349	return 0;
 9350}
 9351
 9352/* Free ioaccel1 mode command blocks and block fetch table */
 9353static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
 9354{
 9355	if (h->ioaccel_cmd_pool) {
 9356		dma_free_coherent(&h->pdev->dev,
 9357				  h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
 9358				  h->ioaccel_cmd_pool,
 9359				  h->ioaccel_cmd_pool_dhandle);
 9360		h->ioaccel_cmd_pool = NULL;
 9361		h->ioaccel_cmd_pool_dhandle = 0;
 9362	}
 9363	kfree(h->ioaccel1_blockFetchTable);
 9364	h->ioaccel1_blockFetchTable = NULL;
 9365}
 9366
 9367/* Allocate ioaccel1 mode command blocks and block fetch table */
 9368static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
 9369{
 9370	h->ioaccel_maxsg =
 9371		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
 9372	if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
 9373		h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;
 9374
 9375	/* Command structures must be aligned on a 128-byte boundary
 9376	 * because the 7 lower bits of the address are used by the
 9377	 * hardware.
 9378	 */
 9379	BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
 9380			IOACCEL1_COMMANDLIST_ALIGNMENT);
 9381	h->ioaccel_cmd_pool =
 9382		dma_alloc_coherent(&h->pdev->dev,
 9383			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
 9384			&h->ioaccel_cmd_pool_dhandle, GFP_KERNEL);
 9385
 9386	h->ioaccel1_blockFetchTable =
 9387		kmalloc(((h->ioaccel_maxsg + 1) *
 9388				sizeof(u32)), GFP_KERNEL);
 9389
 9390	if ((h->ioaccel_cmd_pool == NULL) ||
 9391		(h->ioaccel1_blockFetchTable == NULL))
 9392		goto clean_up;
 9393
 9394	memset(h->ioaccel_cmd_pool, 0,
 9395		h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
 9396	return 0;
 9397
 9398clean_up:
 9399	hpsa_free_ioaccel1_cmd_and_bft(h);
 9400	return -ENOMEM;
 9401}
 9402
 9403/* Free ioaccel2 mode command blocks and block fetch table */
 9404static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
 9405{
 9406	hpsa_free_ioaccel2_sg_chain_blocks(h);
 9407
 9408	if (h->ioaccel2_cmd_pool) {
 9409		dma_free_coherent(&h->pdev->dev,
 9410				  h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
 9411				  h->ioaccel2_cmd_pool,
 9412				  h->ioaccel2_cmd_pool_dhandle);
 9413		h->ioaccel2_cmd_pool = NULL;
 9414		h->ioaccel2_cmd_pool_dhandle = 0;
 9415	}
 9416	kfree(h->ioaccel2_blockFetchTable);
 9417	h->ioaccel2_blockFetchTable = NULL;
 9418}
 9419
 9420/* Allocate ioaccel2 mode command blocks and block fetch table */
 9421static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
 9422{
 9423	int rc;
 9424
 9425	/* Allocate ioaccel2 mode command blocks and block fetch table */
 9426
 9427	h->ioaccel_maxsg =
 9428		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
 9429	if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
 9430		h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;
 9431
 9432	BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
 9433			IOACCEL2_COMMANDLIST_ALIGNMENT);
 9434	h->ioaccel2_cmd_pool =
 9435		dma_alloc_coherent(&h->pdev->dev,
 9436			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
 9437			&h->ioaccel2_cmd_pool_dhandle, GFP_KERNEL);
 9438
 9439	h->ioaccel2_blockFetchTable =
 9440		kmalloc(((h->ioaccel_maxsg + 1) *
 9441				sizeof(u32)), GFP_KERNEL);
 9442
 9443	if ((h->ioaccel2_cmd_pool == NULL) ||
 9444		(h->ioaccel2_blockFetchTable == NULL)) {
 9445		rc = -ENOMEM;
 9446		goto clean_up;
 9447	}
 9448
 9449	rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h);
 9450	if (rc)
 9451		goto clean_up;
 9452
 9453	memset(h->ioaccel2_cmd_pool, 0,
 9454		h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
 9455	return 0;
 9456
 9457clean_up:
 9458	hpsa_free_ioaccel2_cmd_and_bft(h);
 9459	return rc;
 9460}
 9461
 9462/* Free items allocated by hpsa_put_ctlr_into_performant_mode */
 9463static void hpsa_free_performant_mode(struct ctlr_info *h)
 9464{
 9465	kfree(h->blockFetchTable);
 9466	h->blockFetchTable = NULL;
 9467	hpsa_free_reply_queues(h);
 9468	hpsa_free_ioaccel1_cmd_and_bft(h);
 9469	hpsa_free_ioaccel2_cmd_and_bft(h);
 9470}
 9471
 9472/* return -ENODEV on error, 0 on success (or no action)
 9473 * allocates numerous items that must be freed later
 9474 */
 9475static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
 9476{
 9477	u32 trans_support;
 9478	int i, rc;
 9479
 9480	if (hpsa_simple_mode)
 9481		return 0;
 9482
 9483	trans_support = readl(&(h->cfgtable->TransportSupport));
 9484	if (!(trans_support & PERFORMANT_MODE))
 9485		return 0;
 9486
 9487	/* Check for I/O accelerator mode support */
 9488	if (trans_support & CFGTBL_Trans_io_accel1) {
 9489		rc = hpsa_alloc_ioaccel1_cmd_and_bft(h);
 9490		if (rc)
 9491			return rc;
 9492	} else if (trans_support & CFGTBL_Trans_io_accel2) {
 9493		rc = hpsa_alloc_ioaccel2_cmd_and_bft(h);
 9494		if (rc)
 9495			return rc;
 9496	}
 9497
 9498	h->nreply_queues = h->msix_vectors > 0 ? h->msix_vectors : 1;
  9499	hpsa_get_max_perf_mode_cmds(h);
 9500	/* Performant mode ring buffer and supporting data structures */
 9501	h->reply_queue_size = h->max_commands * sizeof(u64);
 9502
 9503	for (i = 0; i < h->nreply_queues; i++) {
 9504		h->reply_queue[i].head = dma_alloc_coherent(&h->pdev->dev,
 9505						h->reply_queue_size,
 9506						&h->reply_queue[i].busaddr,
 9507						GFP_KERNEL);
 9508		if (!h->reply_queue[i].head) {
 9509			rc = -ENOMEM;
 9510			goto clean1;	/* rq, ioaccel */
 9511		}
 9512		h->reply_queue[i].size = h->max_commands;
 9513		h->reply_queue[i].wraparound = 1;  /* spec: init to 1 */
 9514		h->reply_queue[i].current_entry = 0;
 9515	}
 9516
 9517	/* Need a block fetch table for performant mode */
 9518	h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
 9519				sizeof(u32)), GFP_KERNEL);
 9520	if (!h->blockFetchTable) {
 9521		rc = -ENOMEM;
 9522		goto clean1;	/* rq, ioaccel */
 9523	}
 9524
 9525	rc = hpsa_enter_performant_mode(h, trans_support);
 9526	if (rc)
 9527		goto clean2;	/* bft, rq, ioaccel */
 9528	return 0;
 9529
 9530clean2:	/* bft, rq, ioaccel */
 9531	kfree(h->blockFetchTable);
 9532	h->blockFetchTable = NULL;
 9533clean1:	/* rq, ioaccel */
 9534	hpsa_free_reply_queues(h);
 9535	hpsa_free_ioaccel1_cmd_and_bft(h);
 9536	hpsa_free_ioaccel2_cmd_and_bft(h);
 9537	return rc;
 9538}
 9539
 9540static int is_accelerated_cmd(struct CommandList *c)
 9541{
 9542	return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
 9543}
 9544
 9545static void hpsa_drain_accel_commands(struct ctlr_info *h)
 9546{
 9547	struct CommandList *c = NULL;
 9548	int i, accel_cmds_out;
 9549	int refcount;
 9550
 9551	do { /* wait for all outstanding ioaccel commands to drain out */
 9552		accel_cmds_out = 0;
 9553		for (i = 0; i < h->nr_cmds; i++) {
 9554			c = h->cmd_pool + i;
 9555			refcount = atomic_inc_return(&c->refcount);
 9556			if (refcount > 1) /* Command is allocated */
 9557				accel_cmds_out += is_accelerated_cmd(c);
 9558			cmd_free(h, c);
 9559		}
 9560		if (accel_cmds_out <= 0)
 9561			break;
 9562		msleep(100);
 9563	} while (1);
 9564}
 9565
 9566static struct hpsa_sas_phy *hpsa_alloc_sas_phy(
 9567				struct hpsa_sas_port *hpsa_sas_port)
 9568{
 9569	struct hpsa_sas_phy *hpsa_sas_phy;
 9570	struct sas_phy *phy;
 9571
 9572	hpsa_sas_phy = kzalloc(sizeof(*hpsa_sas_phy), GFP_KERNEL);
 9573	if (!hpsa_sas_phy)
 9574		return NULL;
 9575
 9576	phy = sas_phy_alloc(hpsa_sas_port->parent_node->parent_dev,
 9577		hpsa_sas_port->next_phy_index);
 9578	if (!phy) {
 9579		kfree(hpsa_sas_phy);
 9580		return NULL;
 9581	}
 9582
 9583	hpsa_sas_port->next_phy_index++;
 9584	hpsa_sas_phy->phy = phy;
 9585	hpsa_sas_phy->parent_port = hpsa_sas_port;
 9586
 9587	return hpsa_sas_phy;
 9588}
 9589
 9590static void hpsa_free_sas_phy(struct hpsa_sas_phy *hpsa_sas_phy)
 9591{
 9592	struct sas_phy *phy = hpsa_sas_phy->phy;
 9593
 9594	sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy);
 9595	if (hpsa_sas_phy->added_to_port)
 9596		list_del(&hpsa_sas_phy->phy_list_entry);
 9597	sas_phy_delete(phy);
 9598	kfree(hpsa_sas_phy);
 9599}
 9600
 9601static int hpsa_sas_port_add_phy(struct hpsa_sas_phy *hpsa_sas_phy)
 9602{
 9603	int rc;
 9604	struct hpsa_sas_port *hpsa_sas_port;
 9605	struct sas_phy *phy;
 9606	struct sas_identify *identify;
 9607
 9608	hpsa_sas_port = hpsa_sas_phy->parent_port;
 9609	phy = hpsa_sas_phy->phy;
 9610
 9611	identify = &phy->identify;
 9612	memset(identify, 0, sizeof(*identify));
 9613	identify->sas_address = hpsa_sas_port->sas_address;
 9614	identify->device_type = SAS_END_DEVICE;
 9615	identify->initiator_port_protocols = SAS_PROTOCOL_STP;
 9616	identify->target_port_protocols = SAS_PROTOCOL_STP;
 9617	phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
 9618	phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
 9619	phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN;
 9620	phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN;
 9621	phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
 9622
 9623	rc = sas_phy_add(hpsa_sas_phy->phy);
 9624	if (rc)
 9625		return rc;
 9626
 9627	sas_port_add_phy(hpsa_sas_port->port, hpsa_sas_phy->phy);
 9628	list_add_tail(&hpsa_sas_phy->phy_list_entry,
 9629			&hpsa_sas_port->phy_list_head);
 9630	hpsa_sas_phy->added_to_port = true;
 9631
 9632	return 0;
 9633}
 9634
 9635static int
 9636	hpsa_sas_port_add_rphy(struct hpsa_sas_port *hpsa_sas_port,
 9637				struct sas_rphy *rphy)
 9638{
 9639	struct sas_identify *identify;
 9640
 9641	identify = &rphy->identify;
 9642	identify->sas_address = hpsa_sas_port->sas_address;
 9643	identify->initiator_port_protocols = SAS_PROTOCOL_STP;
 9644	identify->target_port_protocols = SAS_PROTOCOL_STP;
 9645
 9646	return sas_rphy_add(rphy);
 9647}
 9648
 9649static struct hpsa_sas_port
 9650	*hpsa_alloc_sas_port(struct hpsa_sas_node *hpsa_sas_node,
 9651				u64 sas_address)
 9652{
 9653	int rc;
 9654	struct hpsa_sas_port *hpsa_sas_port;
 9655	struct sas_port *port;
 9656
 9657	hpsa_sas_port = kzalloc(sizeof(*hpsa_sas_port), GFP_KERNEL);
 9658	if (!hpsa_sas_port)
 9659		return NULL;
 9660
 9661	INIT_LIST_HEAD(&hpsa_sas_port->phy_list_head);
 9662	hpsa_sas_port->parent_node = hpsa_sas_node;
 9663
 9664	port = sas_port_alloc_num(hpsa_sas_node->parent_dev);
 9665	if (!port)
 9666		goto free_hpsa_port;
 9667
 9668	rc = sas_port_add(port);
 9669	if (rc)
 9670		goto free_sas_port;
 9671
 9672	hpsa_sas_port->port = port;
 9673	hpsa_sas_port->sas_address = sas_address;
 9674	list_add_tail(&hpsa_sas_port->port_list_entry,
 9675			&hpsa_sas_node->port_list_head);
 9676
 9677	return hpsa_sas_port;
 9678
 9679free_sas_port:
 9680	sas_port_free(port);
 9681free_hpsa_port:
 9682	kfree(hpsa_sas_port);
 9683
 9684	return NULL;
 9685}
 9686
 9687static void hpsa_free_sas_port(struct hpsa_sas_port *hpsa_sas_port)
 9688{
 9689	struct hpsa_sas_phy *hpsa_sas_phy;
 9690	struct hpsa_sas_phy *next;
 9691
 9692	list_for_each_entry_safe(hpsa_sas_phy, next,
 9693			&hpsa_sas_port->phy_list_head, phy_list_entry)
 9694		hpsa_free_sas_phy(hpsa_sas_phy);
 9695
 9696	sas_port_delete(hpsa_sas_port->port);
 9697	list_del(&hpsa_sas_port->port_list_entry);
 9698	kfree(hpsa_sas_port);
 9699}
 9700
 9701static struct hpsa_sas_node *hpsa_alloc_sas_node(struct device *parent_dev)
 9702{
 9703	struct hpsa_sas_node *hpsa_sas_node;
 9704
 9705	hpsa_sas_node = kzalloc(sizeof(*hpsa_sas_node), GFP_KERNEL);
 9706	if (hpsa_sas_node) {
 9707		hpsa_sas_node->parent_dev = parent_dev;
 9708		INIT_LIST_HEAD(&hpsa_sas_node->port_list_head);
 9709	}
 9710
 9711	return hpsa_sas_node;
 9712}
 9713
 9714static void hpsa_free_sas_node(struct hpsa_sas_node *hpsa_sas_node)
 9715{
 9716	struct hpsa_sas_port *hpsa_sas_port;
 9717	struct hpsa_sas_port *next;
 9718
 9719	if (!hpsa_sas_node)
 9720		return;
 9721
 9722	list_for_each_entry_safe(hpsa_sas_port, next,
 9723			&hpsa_sas_node->port_list_head, port_list_entry)
 9724		hpsa_free_sas_port(hpsa_sas_port);
 9725
 9726	kfree(hpsa_sas_node);
 9727}
 9728
 9729static struct hpsa_scsi_dev_t
 9730	*hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
 9731					struct sas_rphy *rphy)
 9732{
 9733	int i;
 9734	struct hpsa_scsi_dev_t *device;
 9735
 9736	for (i = 0; i < h->ndevices; i++) {
 9737		device = h->dev[i];
 9738		if (!device->sas_port)
 9739			continue;
 9740		if (device->sas_port->rphy == rphy)
 9741			return device;
 9742	}
 9743
 9744	return NULL;
 9745}
 9746
 9747static int hpsa_add_sas_host(struct ctlr_info *h)
 9748{
 9749	int rc;
 9750	struct device *parent_dev;
 9751	struct hpsa_sas_node *hpsa_sas_node;
 9752	struct hpsa_sas_port *hpsa_sas_port;
 9753	struct hpsa_sas_phy *hpsa_sas_phy;
 9754
 9755	parent_dev = &h->scsi_host->shost_dev;
 9756
 9757	hpsa_sas_node = hpsa_alloc_sas_node(parent_dev);
 9758	if (!hpsa_sas_node)
 9759		return -ENOMEM;
 9760
 9761	hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, h->sas_address);
 9762	if (!hpsa_sas_port) {
 9763		rc = -ENODEV;
 9764		goto free_sas_node;
 9765	}
 9766
 9767	hpsa_sas_phy = hpsa_alloc_sas_phy(hpsa_sas_port);
 9768	if (!hpsa_sas_phy) {
 9769		rc = -ENODEV;
 9770		goto free_sas_port;
 9771	}
 9772
 9773	rc = hpsa_sas_port_add_phy(hpsa_sas_phy);
 9774	if (rc)
 9775		goto free_sas_phy;
 9776
 9777	h->sas_host = hpsa_sas_node;
 9778
 9779	return 0;
 9780
 9781free_sas_phy:
 9782	sas_phy_free(hpsa_sas_phy->phy);
 9783	kfree(hpsa_sas_phy);
 9784free_sas_port:
 9785	hpsa_free_sas_port(hpsa_sas_port);
 9786free_sas_node:
 9787	hpsa_free_sas_node(hpsa_sas_node);
 9788
 9789	return rc;
 9790}
 9791
 9792static void hpsa_delete_sas_host(struct ctlr_info *h)
 9793{
 9794	hpsa_free_sas_node(h->sas_host);
 9795}
 9796
 9797static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
 9798				struct hpsa_scsi_dev_t *device)
 9799{
 9800	int rc;
 9801	struct hpsa_sas_port *hpsa_sas_port;
 9802	struct sas_rphy *rphy;
 9803
 9804	hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, device->sas_address);
 9805	if (!hpsa_sas_port)
 9806		return -ENOMEM;
 9807
 9808	rphy = sas_end_device_alloc(hpsa_sas_port->port);
 9809	if (!rphy) {
 9810		rc = -ENODEV;
 9811		goto free_sas_port;
 9812	}
 9813
 9814	hpsa_sas_port->rphy = rphy;
 9815	device->sas_port = hpsa_sas_port;
 9816
 9817	rc = hpsa_sas_port_add_rphy(hpsa_sas_port, rphy);
 9818	if (rc)
 9819		goto free_sas_rphy;
 9820
 9821	return 0;
 9822
 9823free_sas_rphy:
 9824	sas_rphy_free(rphy);
 9825free_sas_port:
 9826	hpsa_free_sas_port(hpsa_sas_port);
 9827	device->sas_port = NULL;
 9828
 9829	return rc;
 9830}
 9831
 9832static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device)
 9833{
 9834	if (device->sas_port) {
 9835		hpsa_free_sas_port(device->sas_port);
 9836		device->sas_port = NULL;
 9837	}
 9838}
 9839
 9840static int
 9841hpsa_sas_get_linkerrors(struct sas_phy *phy)
 9842{
 9843	return 0;
 9844}
 9845
 9846static int
 9847hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
 9848{
 9849	struct Scsi_Host *shost = phy_to_shost(rphy);
 9850	struct ctlr_info *h;
 9851	struct hpsa_scsi_dev_t *sd;
 9852
 9853	if (!shost)
 9854		return -ENXIO;
 9855
 9856	h = shost_to_hba(shost);
 9857
 9858	if (!h)
 9859		return -ENXIO;
 9860
 9861	sd = hpsa_find_device_by_sas_rphy(h, rphy);
 9862	if (!sd)
 9863		return -ENXIO;
 9864
 9865	*identifier = sd->eli;
 9866
 9867	return 0;
 9868}
 9869
 9870static int
 9871hpsa_sas_get_bay_identifier(struct sas_rphy *rphy)
 9872{
 9873	return -ENXIO;
 9874}
 9875
 9876static int
 9877hpsa_sas_phy_reset(struct sas_phy *phy, int hard_reset)
 9878{
 9879	return 0;
 9880}
 9881
 9882static int
 9883hpsa_sas_phy_enable(struct sas_phy *phy, int enable)
 9884{
 9885	return 0;
 9886}
 9887
 9888static int
 9889hpsa_sas_phy_setup(struct sas_phy *phy)
 9890{
 9891	return 0;
 9892}
 9893
 9894static void
 9895hpsa_sas_phy_release(struct sas_phy *phy)
 9896{
 9897}
 9898
 9899static int
 9900hpsa_sas_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
 9901{
  9902	return -EINVAL;
 9903}
 9904
 9905static struct sas_function_template hpsa_sas_transport_functions = {
 9906	.get_linkerrors = hpsa_sas_get_linkerrors,
 9907	.get_enclosure_identifier = hpsa_sas_get_enclosure_identifier,
 9908	.get_bay_identifier = hpsa_sas_get_bay_identifier,
 9909	.phy_reset = hpsa_sas_phy_reset,
 9910	.phy_enable = hpsa_sas_phy_enable,
 9911	.phy_setup = hpsa_sas_phy_setup,
 9912	.phy_release = hpsa_sas_phy_release,
 9913	.set_phy_speed = hpsa_sas_phy_speed,
 9914};
 9915
 9916/*
  9917 *  This is it.  Register the PCI driver information for the cards we control;
 9918 *  the OS will call our registered routines when it finds one of our cards.
 9919 */
 9920static int __init hpsa_init(void)
 9921{
 9922	int rc;
 9923
 9924	hpsa_sas_transport_template =
 9925		sas_attach_transport(&hpsa_sas_transport_functions);
 9926	if (!hpsa_sas_transport_template)
 9927		return -ENODEV;
 9928
 9929	rc = pci_register_driver(&hpsa_pci_driver);
 9930
 9931	if (rc)
 9932		sas_release_transport(hpsa_sas_transport_template);
 9933
 9934	return rc;
 9935}
 9936
 9937static void __exit hpsa_cleanup(void)
 9938{
 9939	pci_unregister_driver(&hpsa_pci_driver);
 9940	sas_release_transport(hpsa_sas_transport_template);
 9941}
 9942
 9943static void __attribute__((unused)) verify_offsets(void)
 9944{
 9945#define VERIFY_OFFSET(member, offset) \
 9946	BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)
 9947
 9948	VERIFY_OFFSET(structure_size, 0);
 9949	VERIFY_OFFSET(volume_blk_size, 4);
 9950	VERIFY_OFFSET(volume_blk_cnt, 8);
 9951	VERIFY_OFFSET(phys_blk_shift, 16);
 9952	VERIFY_OFFSET(parity_rotation_shift, 17);
 9953	VERIFY_OFFSET(strip_size, 18);
 9954	VERIFY_OFFSET(disk_starting_blk, 20);
 9955	VERIFY_OFFSET(disk_blk_cnt, 28);
 9956	VERIFY_OFFSET(data_disks_per_row, 36);
 9957	VERIFY_OFFSET(metadata_disks_per_row, 38);
 9958	VERIFY_OFFSET(row_cnt, 40);
 9959	VERIFY_OFFSET(layout_map_count, 42);
 9960	VERIFY_OFFSET(flags, 44);
 9961	VERIFY_OFFSET(dekindex, 46);
 9962	/* VERIFY_OFFSET(reserved, 48 */
 9963	VERIFY_OFFSET(data, 64);
 9964
 9965#undef VERIFY_OFFSET
 9966
 9967#define VERIFY_OFFSET(member, offset) \
 9968	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)
 9969
 9970	VERIFY_OFFSET(IU_type, 0);
 9971	VERIFY_OFFSET(direction, 1);
 9972	VERIFY_OFFSET(reply_queue, 2);
 9973	/* VERIFY_OFFSET(reserved1, 3);  */
 9974	VERIFY_OFFSET(scsi_nexus, 4);
 9975	VERIFY_OFFSET(Tag, 8);
 9976	VERIFY_OFFSET(cdb, 16);
 9977	VERIFY_OFFSET(cciss_lun, 32);
 9978	VERIFY_OFFSET(data_len, 40);
 9979	VERIFY_OFFSET(cmd_priority_task_attr, 44);
 9980	VERIFY_OFFSET(sg_count, 45);
 9981	/* VERIFY_OFFSET(reserved3 */
 9982	VERIFY_OFFSET(err_ptr, 48);
 9983	VERIFY_OFFSET(err_len, 56);
 9984	/* VERIFY_OFFSET(reserved4  */
 9985	VERIFY_OFFSET(sg, 64);
 9986
 9987#undef VERIFY_OFFSET
 9988
 9989#define VERIFY_OFFSET(member, offset) \
 9990	BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)
 9991
 9992	VERIFY_OFFSET(dev_handle, 0x00);
 9993	VERIFY_OFFSET(reserved1, 0x02);
 9994	VERIFY_OFFSET(function, 0x03);
 9995	VERIFY_OFFSET(reserved2, 0x04);
 9996	VERIFY_OFFSET(err_info, 0x0C);
 9997	VERIFY_OFFSET(reserved3, 0x10);
 9998	VERIFY_OFFSET(err_info_len, 0x12);
 9999	VERIFY_OFFSET(reserved4, 0x13);
10000	VERIFY_OFFSET(sgl_offset, 0x14);
10001	VERIFY_OFFSET(reserved5, 0x15);
10002	VERIFY_OFFSET(transfer_len, 0x1C);
10003	VERIFY_OFFSET(reserved6, 0x20);
10004	VERIFY_OFFSET(io_flags, 0x24);
10005	VERIFY_OFFSET(reserved7, 0x26);
10006	VERIFY_OFFSET(LUN, 0x34);
10007	VERIFY_OFFSET(control, 0x3C);
10008	VERIFY_OFFSET(CDB, 0x40);
10009	VERIFY_OFFSET(reserved8, 0x50);
10010	VERIFY_OFFSET(host_context_flags, 0x60);
10011	VERIFY_OFFSET(timeout_sec, 0x62);
10012	VERIFY_OFFSET(ReplyQueue, 0x64);
10013	VERIFY_OFFSET(reserved9, 0x65);
10014	VERIFY_OFFSET(tag, 0x68);
10015	VERIFY_OFFSET(host_addr, 0x70);
10016	VERIFY_OFFSET(CISS_LUN, 0x78);
10017	VERIFY_OFFSET(SG, 0x78 + 8);
10018#undef VERIFY_OFFSET
10019}
10020
10021module_init(hpsa_init);
10022module_exit(hpsa_cleanup);
v3.1
   1/*
   2 *    Disk Array driver for HP Smart Array SAS controllers
   3 *    Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
 
 
 
   4 *
   5 *    This program is free software; you can redistribute it and/or modify
   6 *    it under the terms of the GNU General Public License as published by
   7 *    the Free Software Foundation; version 2 of the License.
   8 *
   9 *    This program is distributed in the hope that it will be useful,
  10 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
  12 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
  13 *
  14 *    You should have received a copy of the GNU General Public License
  15 *    along with this program; if not, write to the Free Software
  16 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  17 *
  18 *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
  19 *
  20 */
  21
  22#include <linux/module.h>
  23#include <linux/interrupt.h>
  24#include <linux/types.h>
  25#include <linux/pci.h>
  26#include <linux/kernel.h>
  27#include <linux/slab.h>
  28#include <linux/delay.h>
  29#include <linux/fs.h>
  30#include <linux/timer.h>
  31#include <linux/seq_file.h>
  32#include <linux/init.h>
  33#include <linux/spinlock.h>
  34#include <linux/compat.h>
  35#include <linux/blktrace_api.h>
  36#include <linux/uaccess.h>
  37#include <linux/io.h>
  38#include <linux/dma-mapping.h>
  39#include <linux/completion.h>
  40#include <linux/moduleparam.h>
  41#include <scsi/scsi.h>
  42#include <scsi/scsi_cmnd.h>
  43#include <scsi/scsi_device.h>
  44#include <scsi/scsi_host.h>
  45#include <scsi/scsi_tcq.h>
 
 
 
  46#include <linux/cciss_ioctl.h>
  47#include <linux/string.h>
  48#include <linux/bitmap.h>
  49#include <linux/atomic.h>
  50#include <linux/kthread.h>
 
 
 
 
  51#include "hpsa_cmd.h"
  52#include "hpsa.h"
  53
  54/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
  55#define HPSA_DRIVER_VERSION "2.0.2-1"
 
 
 
  56#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
 
  57
  58/* How long to wait (in milliseconds) for board to go into simple mode */
  59#define MAX_CONFIG_WAIT 30000
 
 
 
  60#define MAX_IOCTL_CONFIG_WAIT 1000
  61
  62/*define how many times we will try a command because of bus resets */
  63#define MAX_CMD_RETRIES 3
 
 
  64
  65/* Embedded module documentation macros - see modules.h */
  66MODULE_AUTHOR("Hewlett-Packard Company");
  67MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
  68	HPSA_DRIVER_VERSION);
  69MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
  70MODULE_VERSION(HPSA_DRIVER_VERSION);
  71MODULE_LICENSE("GPL");
 
  72
  73static int hpsa_allow_any;
  74module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
  75MODULE_PARM_DESC(hpsa_allow_any,
  76		"Allow hpsa driver to access unknown HP Smart Array hardware");
  77static int hpsa_simple_mode;
  78module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
  79MODULE_PARM_DESC(hpsa_simple_mode,
  80	"Use 'simple mode' rather than 'performant mode'");
  81
  82/* define the PCI info for the cards we can control */
  83static const struct pci_device_id hpsa_pci_device_id[] = {
  84	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3241},
  85	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3243},
  86	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3245},
  87	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3247},
  88	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3249},
  89	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324a},
  90	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324b},
  91	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3233},
  92	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3350},
  93	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3351},
  94	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3352},
  95	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3353},
  96	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3354},
  97	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3355},
  98	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3356},
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  99	{PCI_VENDOR_ID_HP,     PCI_ANY_ID,	PCI_ANY_ID, PCI_ANY_ID,
 100		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
 
 
 101	{0,}
 102};
 103
 104MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
 105
 106/*  board_id = Subsystem Device ID & Vendor ID
 107 *  product = Marketing Name for the board
 108 *  access = Address of the struct of function pointers
 109 */
 110static struct board_type products[] = {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 111	{0x3241103C, "Smart Array P212", &SA5_access},
 112	{0x3243103C, "Smart Array P410", &SA5_access},
 113	{0x3245103C, "Smart Array P410i", &SA5_access},
 114	{0x3247103C, "Smart Array P411", &SA5_access},
 115	{0x3249103C, "Smart Array P812", &SA5_access},
 116	{0x324a103C, "Smart Array P712m", &SA5_access},
 117	{0x324b103C, "Smart Array P711m", &SA5_access},
 118	{0x3350103C, "Smart Array", &SA5_access},
 119	{0x3351103C, "Smart Array", &SA5_access},
 120	{0x3352103C, "Smart Array", &SA5_access},
 121	{0x3353103C, "Smart Array", &SA5_access},
 122	{0x3354103C, "Smart Array", &SA5_access},
 123	{0x3355103C, "Smart Array", &SA5_access},
 124	{0x3356103C, "Smart Array", &SA5_access},
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 125	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
 126};
 127
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 128static int number_of_controllers;
 129
 130static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
 131static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
 132static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
 133static void start_io(struct ctlr_info *h);
 
 
 
 
 134
 135#ifdef CONFIG_COMPAT
 136static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
 
 137#endif
 138
 139static void cmd_free(struct ctlr_info *h, struct CommandList *c);
 140static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
 141static struct CommandList *cmd_alloc(struct ctlr_info *h);
 142static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
 143static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
 144	void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
 
 
 145	int cmd_type);
 
 
 
 146
 147static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
 148static void hpsa_scan_start(struct Scsi_Host *);
 149static int hpsa_scan_finished(struct Scsi_Host *sh,
 150	unsigned long elapsed_time);
 151static int hpsa_change_queue_depth(struct scsi_device *sdev,
 152	int qdepth, int reason);
 153
 154static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
 155static int hpsa_slave_alloc(struct scsi_device *sdev);
 
 156static void hpsa_slave_destroy(struct scsi_device *sdev);
 157
 158static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
 159static int check_for_unit_attention(struct ctlr_info *h,
 160	struct CommandList *c);
 161static void check_ioctl_unit_attention(struct ctlr_info *h,
 162	struct CommandList *c);
 163/* performant mode helper functions */
 164static void calc_bucket_map(int *bucket, int num_buckets,
 165	int nsgs, int *bucket_map);
 166static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
 167static inline u32 next_command(struct ctlr_info *h);
 168static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
 169	void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
 170	u64 *cfg_offset);
 171static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
 172	unsigned long *memory_bar);
 173static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
 174static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
 175	void __iomem *vaddr, int wait_for_ready);
 
 
 
 
 
 
 
 176#define BOARD_NOT_READY 0
 177#define BOARD_READY 1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 178
 179static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
 180{
 181	unsigned long *priv = shost_priv(sdev->host);
 182	return (struct ctlr_info *) *priv;
 183}
 184
 185static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
 186{
 187	unsigned long *priv = shost_priv(sh);
 188	return (struct ctlr_info *) *priv;
 189}
 190
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 191static int check_for_unit_attention(struct ctlr_info *h,
 192	struct CommandList *c)
 193{
 194	if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
 
 
 
 
 
 
 
 
 
 
 195		return 0;
 196
 197	switch (c->err_info->SenseInfo[12]) {
 198	case STATE_CHANGED:
 199		dev_warn(&h->pdev->dev, "hpsa%d: a state change "
 200			"detected, command retried\n", h->ctlr);
 
 201		break;
 202	case LUN_FAILED:
 203		dev_warn(&h->pdev->dev, "hpsa%d: LUN failure "
 204			"detected, action required\n", h->ctlr);
 205		break;
 206	case REPORT_LUNS_CHANGED:
 207		dev_warn(&h->pdev->dev, "hpsa%d: report LUN data "
 208			"changed, action required\n", h->ctlr);
 209	/*
 210	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012.
 
 211	 */
 212		break;
 213	case POWER_OR_RESET:
 214		dev_warn(&h->pdev->dev, "hpsa%d: a power on "
 215			"or device reset detected\n", h->ctlr);
 
 216		break;
 217	case UNIT_ATTENTION_CLEARED:
 218		dev_warn(&h->pdev->dev, "hpsa%d: unit attention "
 219		    "cleared by another initiator\n", h->ctlr);
 
 220		break;
 221	default:
 222		dev_warn(&h->pdev->dev, "hpsa%d: unknown "
 223			"unit attention detected\n", h->ctlr);
 
 224		break;
 225	}
 226	return 1;
 227}
 228
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 229static ssize_t host_store_rescan(struct device *dev,
 230				 struct device_attribute *attr,
 231				 const char *buf, size_t count)
 232{
 233	struct ctlr_info *h;
 234	struct Scsi_Host *shost = class_to_shost(dev);
 235	h = shost_to_hba(shost);
 236	hpsa_scan_start(h->scsi_host);
 237	return count;
 238}
 239
 240static ssize_t host_show_firmware_revision(struct device *dev,
 241	     struct device_attribute *attr, char *buf)
 242{
 243	struct ctlr_info *h;
 244	struct Scsi_Host *shost = class_to_shost(dev);
 245	unsigned char *fwrev;
 246
 247	h = shost_to_hba(shost);
 248	if (!h->hba_inquiry_data)
 249		return 0;
 250	fwrev = &h->hba_inquiry_data[32];
 251	return snprintf(buf, 20, "%c%c%c%c\n",
 252		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
 253}
 254
 255static ssize_t host_show_commands_outstanding(struct device *dev,
 256	     struct device_attribute *attr, char *buf)
 257{
 258	struct Scsi_Host *shost = class_to_shost(dev);
 259	struct ctlr_info *h = shost_to_hba(shost);
 260
 261	return snprintf(buf, 20, "%d\n", h->commands_outstanding);
 
 262}
 263
 264static ssize_t host_show_transport_mode(struct device *dev,
 265	struct device_attribute *attr, char *buf)
 266{
 267	struct ctlr_info *h;
 268	struct Scsi_Host *shost = class_to_shost(dev);
 269
 270	h = shost_to_hba(shost);
 271	return snprintf(buf, 20, "%s\n",
 272		h->transMethod & CFGTBL_Trans_Performant ?
 273			"performant" : "simple");
 274}
 275
 276/* List of controllers which cannot be hard reset on kexec with reset_devices */
 277static u32 unresettable_controller[] = {
 278	0x324a103C, /* Smart Array P712m */
 279	0x324b103C, /* SmartArray P711m */
 280	0x3223103C, /* Smart Array P800 */
 281	0x3234103C, /* Smart Array P400 */
 282	0x3235103C, /* Smart Array P400i */
 283	0x3211103C, /* Smart Array E200i */
 284	0x3212103C, /* Smart Array E200 */
 285	0x3213103C, /* Smart Array E200i */
 286	0x3214103C, /* Smart Array E200i */
 287	0x3215103C, /* Smart Array E200i */
 288	0x3237103C, /* Smart Array E500 */
 289	0x323D103C, /* Smart Array P700m */
 
 290	0x409C0E11, /* Smart Array 6400 */
 291	0x409D0E11, /* Smart Array 6400 EM */
 292};
 293
 294/* List of controllers which cannot even be soft reset */
 295static u32 soft_unresettable_controller[] = {
 296	/* Exclude 640x boards.  These are two pci devices in one slot
 297	 * which share a battery backed cache module.  One controls the
 298	 * cache, the other accesses the cache through the one that controls
 299	 * it.  If we reset the one controlling the cache, the other will
 300	 * likely not be happy.  Just forbid resetting this conjoined mess.
 301	 * The 640x isn't really supported by hpsa anyway.
 302	 */
 303	0x409C0E11, /* Smart Array 6400 */
 304	0x409D0E11, /* Smart Array 6400 EM */
 305};
 306
 307static int ctlr_is_hard_resettable(u32 board_id)
 308{
 309	int i;
 310
 311	for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
 312		if (unresettable_controller[i] == board_id)
 313			return 0;
 314	return 1;
 315}
 316
 317static int ctlr_is_soft_resettable(u32 board_id)
 318{
 319	int i;
 320
 321	for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
 322		if (soft_unresettable_controller[i] == board_id)
 323			return 0;
 324	return 1;
 325}
 326
 327static int ctlr_is_resettable(u32 board_id)
 328{
 329	return ctlr_is_hard_resettable(board_id) ||
 330		ctlr_is_soft_resettable(board_id);
 331}
 332
 333static ssize_t host_show_resettable(struct device *dev,
 334	struct device_attribute *attr, char *buf)
 335{
 336	struct ctlr_info *h;
 337	struct Scsi_Host *shost = class_to_shost(dev);
 338
 339	h = shost_to_hba(shost);
 340	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
 341}
 342
 343static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
 344{
 345	return (scsi3addr[3] & 0xC0) == 0x40;
 346}
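/*
 * Reading of the test above (an explanatory assumption based on the
 * CISS LUN addressing convention): bits 7-6 of byte 3 of the 8-byte
 * LUN address encode the address mode, and mode 01b (0x40) is the
 * logical-volume mode, so this distinguishes logical drives from
 * physical and controller addresses.
 */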
 347
 348static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
 349	"UNKNOWN"
 350};
 351#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)
 352
 353static ssize_t raid_level_show(struct device *dev,
 354	     struct device_attribute *attr, char *buf)
 355{
 356	ssize_t l = 0;
 357	unsigned char rlevel;
 358	struct ctlr_info *h;
 359	struct scsi_device *sdev;
 360	struct hpsa_scsi_dev_t *hdev;
 361	unsigned long flags;
 362
 363	sdev = to_scsi_device(dev);
 364	h = sdev_to_hba(sdev);
 365	spin_lock_irqsave(&h->lock, flags);
 366	hdev = sdev->hostdata;
 367	if (!hdev) {
 368		spin_unlock_irqrestore(&h->lock, flags);
 369		return -ENODEV;
 370	}
 371
 372	/* Is this even a logical drive? */
 373	if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
 374		spin_unlock_irqrestore(&h->lock, flags);
 375		l = snprintf(buf, PAGE_SIZE, "N/A\n");
 376		return l;
 377	}
 378
 379	rlevel = hdev->raid_level;
 380	spin_unlock_irqrestore(&h->lock, flags);
 381	if (rlevel > RAID_UNKNOWN)
 382		rlevel = RAID_UNKNOWN;
 383	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
 384	return l;
 385}
 386
 387static ssize_t lunid_show(struct device *dev,
 388	     struct device_attribute *attr, char *buf)
 389{
 390	struct ctlr_info *h;
 391	struct scsi_device *sdev;
 392	struct hpsa_scsi_dev_t *hdev;
 393	unsigned long flags;
 394	unsigned char lunid[8];
 395
 396	sdev = to_scsi_device(dev);
 397	h = sdev_to_hba(sdev);
 398	spin_lock_irqsave(&h->lock, flags);
 399	hdev = sdev->hostdata;
 400	if (!hdev) {
 401		spin_unlock_irqrestore(&h->lock, flags);
 402		return -ENODEV;
 403	}
 404	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
 405	spin_unlock_irqrestore(&h->lock, flags);
 406	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
 407		lunid[0], lunid[1], lunid[2], lunid[3],
 408		lunid[4], lunid[5], lunid[6], lunid[7]);
 409}
 410
 411static ssize_t unique_id_show(struct device *dev,
 412	     struct device_attribute *attr, char *buf)
 413{
 414	struct ctlr_info *h;
 415	struct scsi_device *sdev;
 416	struct hpsa_scsi_dev_t *hdev;
 417	unsigned long flags;
 418	unsigned char sn[16];
 419
 420	sdev = to_scsi_device(dev);
 421	h = sdev_to_hba(sdev);
 422	spin_lock_irqsave(&h->lock, flags);
 423	hdev = sdev->hostdata;
 424	if (!hdev) {
 425		spin_unlock_irqrestore(&h->lock, flags);
 426		return -ENODEV;
 427	}
 428	memcpy(sn, hdev->device_id, sizeof(sn));
 429	spin_unlock_irqrestore(&h->lock, flags);
 430	return snprintf(buf, 16 * 2 + 2,
 431			"%02X%02X%02X%02X%02X%02X%02X%02X"
 432			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
 433			sn[0], sn[1], sn[2], sn[3],
 434			sn[4], sn[5], sn[6], sn[7],
 435			sn[8], sn[9], sn[10], sn[11],
 436			sn[12], sn[13], sn[14], sn[15]);
 437}
 438
 439static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
 440static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
 441static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
 442static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
 443static DEVICE_ATTR(firmware_revision, S_IRUGO,
 444	host_show_firmware_revision, NULL);
 445static DEVICE_ATTR(commands_outstanding, S_IRUGO,
 446	host_show_commands_outstanding, NULL);
 447static DEVICE_ATTR(transport_mode, S_IRUGO,
 448	host_show_transport_mode, NULL);
 449static DEVICE_ATTR(resettable, S_IRUGO,
 450	host_show_resettable, NULL);
 451
 452static struct device_attribute *hpsa_sdev_attrs[] = {
 453	&dev_attr_raid_level,
 454	&dev_attr_lunid,
 455	&dev_attr_unique_id,
 456	NULL,
 457};
 458
 459static struct device_attribute *hpsa_shost_attrs[] = {
 460	&dev_attr_rescan,
 461	&dev_attr_firmware_revision,
 462	&dev_attr_commands_outstanding,
 463	&dev_attr_transport_mode,
 464	&dev_attr_resettable,
 465	NULL,
 466};
 467
 468static struct scsi_host_template hpsa_driver_template = {
 469	.module			= THIS_MODULE,
 470	.name			= "hpsa",
 471	.proc_name		= "hpsa",
 472	.queuecommand		= hpsa_scsi_queue_command,
 473	.scan_start		= hpsa_scan_start,
 474	.scan_finished		= hpsa_scan_finished,
 475	.change_queue_depth	= hpsa_change_queue_depth,
 476	.this_id		= -1,
 477	.use_clustering		= ENABLE_CLUSTERING,
 478	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
 479	.ioctl			= hpsa_ioctl,
 480	.slave_alloc		= hpsa_slave_alloc,
 
 481	.slave_destroy		= hpsa_slave_destroy,
 482#ifdef CONFIG_COMPAT
 483	.compat_ioctl		= hpsa_compat_ioctl,
 484#endif
 485	.sdev_attrs = hpsa_sdev_attrs,
 486	.shost_attrs = hpsa_shost_attrs,
 
 
 487};
 488
 489
 490/* Enqueuing and dequeuing functions for cmdlists. */
 491static inline void addQ(struct list_head *list, struct CommandList *c)
 492{
 493	list_add_tail(&c->list, list);
 494}
 495
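/*
 * next_command() below drains the performant-mode reply ring: the
 * controller posts completed command tags into h->reply_pool, using
 * bit 0 of each entry as a cycle flag that flips on every pass through
 * the ring.  An entry is fresh only while its cycle bit matches
 * h->reply_pool_wraparound; when the head pointer wraps, the expected
 * parity is toggled so stale entries from the previous pass are ignored.
 */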
 496static inline u32 next_command(struct ctlr_info *h)
 497{
 498	u32 a;
 499
 500	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
 501		return h->access.command_completed(h);
 502
 503	if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
 504		a = *(h->reply_pool_head); /* Next cmd in ring buffer */
 505		(h->reply_pool_head)++;
 506		h->commands_outstanding--;
 507	} else {
 508		a = FIFO_EMPTY;
 509	}
 510	/* Check for wraparound */
 511	if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
 512		h->reply_pool_head = h->reply_pool;
 513		h->reply_pool_wraparound ^= 1;
 514	}
 515	return a;
 516}
 517
 518/* set_performant_mode: Modify the tag for cciss performant
 519 * set bit 0 for pull model, bits 3-1 for block fetch
 520 * register number
 521 */
 522static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
 
 
 523{
 524	if (likely(h->transMethod & CFGTBL_Trans_Performant))
 525		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
 526}
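/*
 * Worked example for set_performant_mode() above (the table value is
 * invented for illustration): if a command carries 4 SG entries and
 * h->blockFetchTable[4] happens to be 2, the tag ORed into c->busaddr
 * is 1 | (2 << 1) == 0x5; bit 0 selects the performant completion path
 * and bits 3-1 carry the block fetch register index for that SG count.
 */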
 527
 528static void enqueue_cmd_and_start_io(struct ctlr_info *h,
 529	struct CommandList *c)
 530{
 531	unsigned long flags;
 532
 533	set_performant_mode(h, c);
 534	spin_lock_irqsave(&h->lock, flags);
 535	addQ(&h->reqQ, c);
 536	h->Qdepth++;
 537	start_io(h);
 538	spin_unlock_irqrestore(&h->lock, flags);
 539}
 540
 541static inline void removeQ(struct CommandList *c)
 542{
 543	if (WARN_ON(list_empty(&c->list)))
 544		return;
 545	list_del_init(&c->list);
 546}
 547
 548static inline int is_hba_lunid(unsigned char scsi3addr[])
 549{
 550	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
 551}
 552
 553static inline int is_scsi_rev_5(struct ctlr_info *h)
 554{
 555	if (!h->hba_inquiry_data)
 556		return 0;
 557	if ((h->hba_inquiry_data[2] & 0x07) == 5)
 558		return 1;
 559	return 0;
 560}
 561
 562static int hpsa_find_target_lun(struct ctlr_info *h,
 563	unsigned char scsi3addr[], int bus, int *target, int *lun)
 564{
 565	/* finds an unused bus, target, lun for a new physical device
 566	 * assumes h->devlock is held
 567	 */
 568	int i, found = 0;
 569	DECLARE_BITMAP(lun_taken, HPSA_MAX_SCSI_DEVS_PER_HBA);
 570
 571	bitmap_zero(lun_taken, HPSA_MAX_SCSI_DEVS_PER_HBA);
 572
 573	for (i = 0; i < h->ndevices; i++) {
 574		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
 575			set_bit(h->dev[i]->target, lun_taken);
 576	}
 577
 578	for (i = 0; i < HPSA_MAX_SCSI_DEVS_PER_HBA; i++) {
 579		if (!test_bit(i, lun_taken)) {
 580			/* *bus = 1; */
 581			*target = i;
 582			*lun = 0;
 583			found = 1;
 584			break;
 585		}
 586	}
 587	return !found;
 588}
 589
 590/* Add an entry into h->dev[] array. */
 591static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
 592		struct hpsa_scsi_dev_t *device,
 593		struct hpsa_scsi_dev_t *added[], int *nadded)
 594{
 595	/* assumes h->devlock is held */
 596	int n = h->ndevices;
 597	int i;
 598	unsigned char addr1[8], addr2[8];
 599	struct hpsa_scsi_dev_t *sd;
 600
 601	if (n >= HPSA_MAX_SCSI_DEVS_PER_HBA) {
 602		dev_err(&h->pdev->dev, "too many devices, some will be "
 603			"inaccessible.\n");
 604		return -1;
 605	}
 606
 607	/* physical devices do not have lun or target assigned until now. */
 608	if (device->lun != -1)
 609		/* Logical device, lun is already assigned. */
 610		goto lun_assigned;
 611
 612	/* If this device is a non-zero lun of a multi-lun device,
 613	 * byte 4 of the 8-byte LUN addr will contain the logical
 614	 * unit no, zero otherwise.
 615	 */
 616	if (device->scsi3addr[4] == 0) {
 617		/* This is not a non-zero lun of a multi-lun device */
 618		if (hpsa_find_target_lun(h, device->scsi3addr,
 619			device->bus, &device->target, &device->lun) != 0)
 620			return -1;
 621		goto lun_assigned;
 622	}
 623
 624	/* This is a non-zero lun of a multi-lun device.
 625	 * Search through our list and find the device which
 626	 * has the same 8 byte LUN address, excepting byte 4.
 627	 * Assign the same bus and target for this new LUN.
 628	 * Use the logical unit number from the firmware.
 629	 */
 630	memcpy(addr1, device->scsi3addr, 8);
 631	addr1[4] = 0;
 
 632	for (i = 0; i < n; i++) {
 633		sd = h->dev[i];
 634		memcpy(addr2, sd->scsi3addr, 8);
 635		addr2[4] = 0;
 636		/* differ only in byte 4? */
 
 637		if (memcmp(addr1, addr2, 8) == 0) {
 638			device->bus = sd->bus;
 639			device->target = sd->target;
 640			device->lun = device->scsi3addr[4];
 641			break;
 642		}
 643	}
 644	if (device->lun == -1) {
 645		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
 646			" suspect firmware bug or unsupported hardware "
 647			"configuration.\n");
 648		return -1;
 649	}
 650
 651lun_assigned:
 652
 653	h->dev[n] = device;
 654	h->ndevices++;
 655	added[*nadded] = device;
 656	(*nadded)++;
 657
 658	/* initially, (before registering with scsi layer) we don't
 659	 * know our hostno and we don't want to print anything first
 660	 * time anyway (the scsi layer's inquiries will show that info)
 661	 */
 662	/* if (hostno != -1) */
 663		dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
 664			scsi_device_type(device->devtype), hostno,
 665			device->bus, device->target, device->lun);
 666	return 0;
 667}
 668
 669/* Replace an entry from h->dev[] array. */
 670static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
 671	int entry, struct hpsa_scsi_dev_t *new_entry,
 672	struct hpsa_scsi_dev_t *added[], int *nadded,
 673	struct hpsa_scsi_dev_t *removed[], int *nremoved)
 674{
 675	/* assumes h->devlock is held */
 676	BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
 677	removed[*nremoved] = h->dev[entry];
 678	(*nremoved)++;
 679
 680	/*
 681	 * New physical devices won't have target/lun assigned yet
 682	 * so we need to preserve the values in the slot we are replacing.
 683	 */
 684	if (new_entry->target == -1) {
 685		new_entry->target = h->dev[entry]->target;
 686		new_entry->lun = h->dev[entry]->lun;
 687	}
 688
 689	h->dev[entry] = new_entry;
 690	added[*nadded] = new_entry;
 691	(*nadded)++;
 692	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
 693		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
 694			new_entry->target, new_entry->lun);
 695}
 696
 697/* Remove an entry from h->dev[] array. */
 698static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
 699	struct hpsa_scsi_dev_t *removed[], int *nremoved)
 700{
 701	/* assumes h->devlock is held */
 702	int i;
 703	struct hpsa_scsi_dev_t *sd;
 704
 705	BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
 706
 707	sd = h->dev[entry];
 708	removed[*nremoved] = h->dev[entry];
 709	(*nremoved)++;
 710
 711	for (i = entry; i < h->ndevices-1; i++)
 712		h->dev[i] = h->dev[i+1];
 713	h->ndevices--;
 714	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
 715		scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
 716		sd->lun);
 717}
 718
 719#define SCSI3ADDR_EQ(a, b) ( \
 720	(a)[7] == (b)[7] && \
 721	(a)[6] == (b)[6] && \
 722	(a)[5] == (b)[5] && \
 723	(a)[4] == (b)[4] && \
 724	(a)[3] == (b)[3] && \
 725	(a)[2] == (b)[2] && \
 726	(a)[1] == (b)[1] && \
 727	(a)[0] == (b)[0])
 728
 729static void fixup_botched_add(struct ctlr_info *h,
 730	struct hpsa_scsi_dev_t *added)
 731{
 732	/* called when scsi_add_device fails in order to re-adjust
 733	 * h->dev[] to match the mid layer's view.
 734	 */
 735	unsigned long flags;
 736	int i, j;
 737
 738	spin_lock_irqsave(&h->lock, flags);
 739	for (i = 0; i < h->ndevices; i++) {
 740		if (h->dev[i] == added) {
 741			for (j = i; j < h->ndevices-1; j++)
 742				h->dev[j] = h->dev[j+1];
 743			h->ndevices--;
 744			break;
 745		}
 746	}
 747	spin_unlock_irqrestore(&h->lock, flags);
 748	kfree(added);
 749}
 750
 751static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
 752	struct hpsa_scsi_dev_t *dev2)
 753{
 754	/* we compare everything except lun and target as these
 755	 * are not yet assigned.  Compare parts likely
 756	 * to differ first
 757	 */
 758	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
 759		sizeof(dev1->scsi3addr)) != 0)
 760		return 0;
 761	if (memcmp(dev1->device_id, dev2->device_id,
 762		sizeof(dev1->device_id)) != 0)
 763		return 0;
 764	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
 765		return 0;
 766	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
 767		return 0;
 768	if (dev1->devtype != dev2->devtype)
 769		return 0;
 770	if (dev1->bus != dev2->bus)
 771		return 0;
 772	return 1;
 773}
 774
 775/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 776 * and return needle location in *index.  If scsi3addr matches, but not
 777 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 778 * location in *index.  If needle not found, return DEVICE_NOT_FOUND.
 779 */
 780static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
 781	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
 782	int *index)
 783{
 784	int i;
 785#define DEVICE_NOT_FOUND 0
 786#define DEVICE_CHANGED 1
 787#define DEVICE_SAME 2
 788	for (i = 0; i < haystack_size; i++) {
 789		if (haystack[i] == NULL) /* previously removed. */
 790			continue;
 791		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
 792			*index = i;
 793			if (device_is_the_same(needle, haystack[i]))
 
 
 794				return DEVICE_SAME;
 795			else
 796				return DEVICE_CHANGED;
 
 797		}
 798	}
 799	*index = -1;
 800	return DEVICE_NOT_FOUND;
 801}
 802
 803static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
 804	struct hpsa_scsi_dev_t *sd[], int nsds)
 805{
 806	/* sd contains scsi3 addresses and devtypes, and inquiry
 807	 * data.  This function takes what's in sd to be the current
 808	 * reality and updates h->dev[] to reflect that reality.
 809	 */
 810	int i, entry, device_change, changes = 0;
 811	struct hpsa_scsi_dev_t *csd;
 812	unsigned long flags;
 813	struct hpsa_scsi_dev_t **added, **removed;
 814	int nadded, nremoved;
 815	struct Scsi_Host *sh = NULL;
 816
 817	added = kzalloc(sizeof(*added) * HPSA_MAX_SCSI_DEVS_PER_HBA,
 818		GFP_KERNEL);
 819	removed = kzalloc(sizeof(*removed) * HPSA_MAX_SCSI_DEVS_PER_HBA,
 820		GFP_KERNEL);
 821
 822	if (!added || !removed) {
 823		dev_warn(&h->pdev->dev, "out of memory in "
 824			"adjust_hpsa_scsi_table\n");
 825		goto free_and_out;
 826	}
 827
 828	spin_lock_irqsave(&h->devlock, flags);
 829
 830	/* find any devices in h->dev[] that are not in
 831	 * sd[] and remove them from h->dev[], and for any
 832	 * devices which have changed, remove the old device
 833	 * info and add the new device info.
 
 
 834	 */
 835	i = 0;
 836	nremoved = 0;
 837	nadded = 0;
 838	while (i < h->ndevices) {
 839		csd = h->dev[i];
 840		device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
 841		if (device_change == DEVICE_NOT_FOUND) {
 842			changes++;
 843			hpsa_scsi_remove_entry(h, hostno, i,
 844				removed, &nremoved);
 845			continue; /* remove ^^^, hence i not incremented */
 846		} else if (device_change == DEVICE_CHANGED) {
 847			changes++;
 848			hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
 849				added, &nadded, removed, &nremoved);
 850			/* Set it to NULL to prevent it from being freed
 851			 * at the bottom of hpsa_update_scsi_devices()
 852			 */
 853			sd[entry] = NULL;
 
 
 854		}
 855		i++;
 856	}
 857
 858	/* Now, make sure every device listed in sd[] is also
 859	 * listed in h->dev[], adding them if they aren't found
 860	 */
 861
 862	for (i = 0; i < nsds; i++) {
 863		if (!sd[i]) /* if already added above. */
 864			continue;
 865		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
 866					h->ndevices, &entry);
 867		if (device_change == DEVICE_NOT_FOUND) {
 868			changes++;
 869			if (hpsa_scsi_add_entry(h, hostno, sd[i],
 870				added, &nadded) != 0)
 871				break;
 872			sd[i] = NULL; /* prevent from being freed later. */
 873		} else if (device_change == DEVICE_CHANGED) {
 874			/* should never happen... */
 875			changes++;
 876			dev_warn(&h->pdev->dev,
 877				"device unexpectedly changed.\n");
 878			/* but if it does happen, we just ignore that device */
 879		}
 880	}
 881	spin_unlock_irqrestore(&h->devlock, flags);
 882
 883	/* Don't notify scsi mid layer of any changes the first time through
 884	 * (or if there are no changes); scsi_scan_host will do it later the
 885	 * first time through.
 886	 */
 887	if (hostno == -1 || !changes)
 888		goto free_and_out;
 889
 890	sh = h->scsi_host;
 891	/* Notify scsi mid layer of any removed devices */
 892	for (i = 0; i < nremoved; i++) {
 893		struct scsi_device *sdev =
 894			scsi_device_lookup(sh, removed[i]->bus,
 895				removed[i]->target, removed[i]->lun);
 896		if (sdev != NULL) {
 897			scsi_remove_device(sdev);
 898			scsi_device_put(sdev);
 899		} else {
 900			/* We don't expect to get here.
 901			 * future cmds to this device will get selection
 902			 * timeout as if the device was gone.
 903			 */
 904			dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
 905				"for removal.", hostno, removed[i]->bus,
 906				removed[i]->target, removed[i]->lun);
 907		}
 908		kfree(removed[i]);
 909		removed[i] = NULL;
 910	}
 911
 912	/* Notify scsi mid layer of any added devices */
 913	for (i = 0; i < nadded; i++) {
 914		if (scsi_add_device(sh, added[i]->bus,
 915			added[i]->target, added[i]->lun) == 0)
 916			continue;
 917		dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
 918			"device not added.\n", hostno, added[i]->bus,
 919			added[i]->target, added[i]->lun);
 
 
 920		/* now we have to remove it from h->dev,
 921		 * since it didn't get added to scsi mid layer
 922		 */
 923		fixup_botched_add(h, added[i]);
 
 924	}
 925
 926free_and_out:
 927	kfree(added);
 928	kfree(removed);
 929}
 930
 931/*
 932 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
 933 * Assumes h->devlock is held.
 934 */
 935static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
 936	int bus, int target, int lun)
 937{
 938	int i;
 939	struct hpsa_scsi_dev_t *sd;
 940
 941	for (i = 0; i < h->ndevices; i++) {
 942		sd = h->dev[i];
 943		if (sd->bus == bus && sd->target == target && sd->lun == lun)
 944			return sd;
 945	}
 946	return NULL;
 947}
 948
 949/* link sdev->hostdata to our per-device structure. */
 950static int hpsa_slave_alloc(struct scsi_device *sdev)
 951{
 952	struct hpsa_scsi_dev_t *sd;
 953	unsigned long flags;
 954	struct ctlr_info *h;
 955
 956	h = sdev_to_hba(sdev);
 957	spin_lock_irqsave(&h->devlock, flags);
 958	sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
 959		sdev_id(sdev), sdev->lun);
 960	if (sd != NULL)
 961		sdev->hostdata = sd;
 
 
 962	spin_unlock_irqrestore(&h->devlock, flags);
 963	return 0;
 964}
 965
 966static void hpsa_slave_destroy(struct scsi_device *sdev)
 967{
 968	/* nothing to do. */
 969}
 970
 971static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
 972{
 973	int i;
 974
 975	if (!h->cmd_sg_list)
 976		return;
 977	for (i = 0; i < h->nr_cmds; i++) {
 978		kfree(h->cmd_sg_list[i]);
 979		h->cmd_sg_list[i] = NULL;
 980	}
 981	kfree(h->cmd_sg_list);
 982	h->cmd_sg_list = NULL;
 983}
 984
 985static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
 986{
 987	int i;
 988
 989	if (h->chainsize <= 0)
 990		return 0;
 991
 992	h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
 993				GFP_KERNEL);
 994	if (!h->cmd_sg_list)
 995		return -ENOMEM;
 
 996	for (i = 0; i < h->nr_cmds; i++) {
 997		h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
 998						h->chainsize, GFP_KERNEL);
 
 999		if (!h->cmd_sg_list[i])
1000			goto clean;
 
1001	}
1002	return 0;
1003
1004clean:
1005	hpsa_free_sg_chain_blocks(h);
1006	return -ENOMEM;
1007}
1008
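/*
 * SG chaining: when a request needs more scatter-gather entries than fit
 * in the CommandList itself (h->max_cmd_sg_entries), the last embedded SG
 * slot is converted into a chain descriptor (Ext set to HPSA_SG_CHAIN)
 * whose address points at the per-command chain block allocated in
 * hpsa_allocate_sg_chain_blocks(); the remaining entries live there and
 * the block is DMA-mapped for the controller in the function below.
 */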
1009static void hpsa_map_sg_chain_block(struct ctlr_info *h,
1010	struct CommandList *c)
1011{
1012	struct SGDescriptor *chain_sg, *chain_block;
1013	u64 temp64;
 
1014
1015	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
1016	chain_block = h->cmd_sg_list[c->cmdindex];
1017	chain_sg->Ext = HPSA_SG_CHAIN;
1018	chain_sg->Len = sizeof(*chain_sg) *
1019		(c->Header.SGTotal - h->max_cmd_sg_entries);
1020	temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
1021				PCI_DMA_TODEVICE);
1022	chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
1023	chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
1024}
1025
1026static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
1027	struct CommandList *c)
1028{
1029	struct SGDescriptor *chain_sg;
1030	union u64bit temp64;
1031
1032	if (c->Header.SGTotal <= h->max_cmd_sg_entries)
1033		return;
1034
1035	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
1036	temp64.val32.lower = chain_sg->Addr.lower;
1037	temp64.val32.upper = chain_sg->Addr.upper;
1038	pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
1039}
1040
1041static void complete_scsi_command(struct CommandList *cp)
1042{
1043	struct scsi_cmnd *cmd;
1044	struct ctlr_info *h;
1045	struct ErrorInfo *ei;
 
 
1046
1047	unsigned char sense_key;
1048	unsigned char asc;      /* additional sense code */
1049	unsigned char ascq;     /* additional sense code qualifier */
1050	unsigned long sense_data_size;
1051
1052	ei = cp->err_info;
1053	cmd = (struct scsi_cmnd *) cp->scsi_cmd;
1054	h = cp->h;
1055
1056	scsi_dma_unmap(cmd); /* undo the DMA mappings */
1057	if (cp->Header.SGTotal > h->max_cmd_sg_entries)
 
1058		hpsa_unmap_sg_chain_block(h, cp);
1059
1060	cmd->result = (DID_OK << 16); 		/* host byte */
1061	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
1062	cmd->result |= ei->ScsiStatus;
1063
1064	/* copy the sense data whether we need to or not. */
1065	if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
1066		sense_data_size = SCSI_SENSE_BUFFERSIZE;
1067	else
1068		sense_data_size = sizeof(ei->SenseInfo);
1069	if (ei->SenseLen < sense_data_size)
1070		sense_data_size = ei->SenseLen;
1071
1072	memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
1073	scsi_set_resid(cmd, ei->ResidualCnt);
 
 
1074
1075	if (ei->CommandStatus == 0) {
1076		cmd->scsi_done(cmd);
1077		cmd_free(h, cp);
1078		return;
1079	}
1080
1081	/* an error has occurred */
1082	switch (ei->CommandStatus) {
1083
1084	case CMD_TARGET_STATUS:
1085		if (ei->ScsiStatus) {
1086			/* Get sense key */
1087			sense_key = 0xf & ei->SenseInfo[2];
1088			/* Get additional sense code */
1089			asc = ei->SenseInfo[12];
1090			/* Get additional sense code qualifier */
1091			ascq = ei->SenseInfo[13];
1092		}
1093
1094		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
1095			if (check_for_unit_attention(h, cp)) {
1096				cmd->result = DID_SOFT_ERROR << 16;
1097				break;
1098			}
1099			if (sense_key == ILLEGAL_REQUEST) {
1100				/*
1101				 * SCSI REPORT_LUNS is commonly unsupported on
1102				 * Smart Array.  Suppress noisy complaint.
1103				 */
1104				if (cp->Request.CDB[0] == REPORT_LUNS)
1105					break;
1106
1107				/* If ASC/ASCQ indicate Logical Unit
1108				 * Not Supported condition,
1109				 */
1110				if ((asc == 0x25) && (ascq == 0x0)) {
1111					dev_warn(&h->pdev->dev, "cp %p "
1112						"has check condition\n", cp);
1113					break;
1114				}
1115			}
1116
1117			if (sense_key == NOT_READY) {
1118				/* If Sense is Not Ready, Logical Unit
1119				 * Not ready, Manual Intervention
1120				 * required
1121				 */
1122				if ((asc == 0x04) && (ascq == 0x03)) {
1123					dev_warn(&h->pdev->dev, "cp %p "
1124						"has check condition: unit "
1125						"not ready, manual "
1126						"intervention required\n", cp);
1127					break;
1128				}
1129			}
1130			if (sense_key == ABORTED_COMMAND) {
1131				/* Aborted command is retryable */
1132				dev_warn(&h->pdev->dev, "cp %p "
1133					"has check condition: aborted command: "
1134					"ASC: 0x%x, ASCQ: 0x%x\n",
1135					cp, asc, ascq);
1136				cmd->result = DID_SOFT_ERROR << 16;
1137				break;
1138			}
1139			/* Must be some other type of check condition */
1140			dev_warn(&h->pdev->dev, "cp %p has check condition: "
1141					"unknown type: "
1142					"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1143					"Returning result: 0x%x, "
1144					"cmd=[%02x %02x %02x %02x %02x "
1145					"%02x %02x %02x %02x %02x %02x "
1146					"%02x %02x %02x %02x %02x]\n",
1147					cp, sense_key, asc, ascq,
1148					cmd->result,
1149					cmd->cmnd[0], cmd->cmnd[1],
1150					cmd->cmnd[2], cmd->cmnd[3],
1151					cmd->cmnd[4], cmd->cmnd[5],
1152					cmd->cmnd[6], cmd->cmnd[7],
1153					cmd->cmnd[8], cmd->cmnd[9],
1154					cmd->cmnd[10], cmd->cmnd[11],
1155					cmd->cmnd[12], cmd->cmnd[13],
1156					cmd->cmnd[14], cmd->cmnd[15]);
1157			break;
1158		}
1159
1160
1161		/* Problem was not a check condition
1162		 * Pass it up to the upper layers...
1163		 */
1164		if (ei->ScsiStatus) {
1165			dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
1166				"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1167				"Returning result: 0x%x\n",
1168				cp, ei->ScsiStatus,
1169				sense_key, asc, ascq,
1170				cmd->result);
1171		} else {  /* scsi status is zero??? How??? */
1172			dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
1173				"Returning no connection.\n", cp);
1174
1175			/* Ordinarily, this case should never happen,
1176			 * but there is a bug in some released firmware
1177			 * revisions that allows it to happen if, for
1178			 * example, a 4100 backplane loses power and
1179			 * the tape drive is in it.  We assume that
1180			 * it's a fatal error of some kind because we
1181			 * can't show that it wasn't. We will make it
1182			 * look like selection timeout since that is
1183			 * the most common reason for this to occur,
1184			 * and it's severe enough.
1185			 */
1186
1187			cmd->result = DID_NO_CONNECT << 16;
1188		}
1189		break;
1190
1191	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
1192		break;
1193	case CMD_DATA_OVERRUN:
1194		dev_warn(&h->pdev->dev, "cp %p has"
1195			" completed with data overrun "
1196			"reported\n", cp);
1197		break;
1198	case CMD_INVALID: {
1199		/* print_bytes(cp, sizeof(*cp), 1, 0);
1200		print_cmd(cp); */
1201		/* We get CMD_INVALID if you address a non-existent device
1202		 * instead of a selection timeout (no response).  You will
1203		 * see this if you yank out a drive, then try to access it.
1204		 * This is kind of a shame because it means that any other
1205		 * CMD_INVALID (e.g. driver bug) will get interpreted as a
1206		 * missing target. */
1207		cmd->result = DID_NO_CONNECT << 16;
1208	}
1209		break;
1210	case CMD_PROTOCOL_ERR:
1211		dev_warn(&h->pdev->dev, "cp %p has "
1212			"protocol error \n", cp);
 
1213		break;
1214	case CMD_HARDWARE_ERR:
1215		cmd->result = DID_ERROR << 16;
1216		dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
 
1217		break;
1218	case CMD_CONNECTION_LOST:
1219		cmd->result = DID_ERROR << 16;
1220		dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
 
1221		break;
1222	case CMD_ABORTED:
1223		cmd->result = DID_ABORT << 16;
1224		dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
1225				cp, ei->ScsiStatus);
1226		break;
1227	case CMD_ABORT_FAILED:
1228		cmd->result = DID_ERROR << 16;
1229		dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
 
1230		break;
1231	case CMD_UNSOLICITED_ABORT:
1232		cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
1233		dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
1234			"abort\n", cp);
1235		break;
1236	case CMD_TIMEOUT:
1237		cmd->result = DID_TIME_OUT << 16;
1238		dev_warn(&h->pdev->dev, "cp %p timed out\n", cp);
 
1239		break;
1240	case CMD_UNABORTABLE:
1241		cmd->result = DID_ERROR << 16;
1242		dev_warn(&h->pdev->dev, "Command unabortable\n");
1243		break;
1244	default:
1245		cmd->result = DID_ERROR << 16;
1246		dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
1247				cp, ei->CommandStatus);
1248	}
1249	cmd->scsi_done(cmd);
1250	cmd_free(h, cp);
1251}
1252
1253static int hpsa_scsi_detect(struct ctlr_info *h)
1254{
1255	struct Scsi_Host *sh;
1256	int error;
1257
1258	sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
1259	if (sh == NULL)
1260		goto fail;
1261
1262	sh->io_port = 0;
1263	sh->n_io_port = 0;
1264	sh->this_id = -1;
1265	sh->max_channel = 3;
1266	sh->max_cmd_len = MAX_COMMAND_SIZE;
1267	sh->max_lun = HPSA_MAX_LUN;
1268	sh->max_id = HPSA_MAX_LUN;
1269	sh->can_queue = h->nr_cmds;
1270	sh->cmd_per_lun = h->nr_cmds;
1271	sh->sg_tablesize = h->maxsgentries;
1272	h->scsi_host = sh;
1273	sh->hostdata[0] = (unsigned long) h;
1274	sh->irq = h->intr[h->intr_mode];
1275	sh->unique_id = sh->irq;
1276	error = scsi_add_host(sh, &h->pdev->dev);
1277	if (error)
1278		goto fail_host_put;
1279	scsi_scan_host(sh);
1280	return 0;
1281
1282 fail_host_put:
1283	dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_add_host"
1284		" failed for controller %d\n", h->ctlr);
1285	scsi_host_put(sh);
1286	return error;
1287 fail:
1288	dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_host_alloc"
1289		" failed for controller %d\n", h->ctlr);
1290	return -ENOMEM;
1291}
1292
1293static void hpsa_pci_unmap(struct pci_dev *pdev,
1294	struct CommandList *c, int sg_used, int data_direction)
1295{
1296	int i;
1297	union u64bit addr64;
1298
1299	for (i = 0; i < sg_used; i++) {
1300		addr64.val32.lower = c->SG[i].Addr.lower;
1301		addr64.val32.upper = c->SG[i].Addr.upper;
1302		pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len,
1303			data_direction);
1304	}
1305}
1306
1307static void hpsa_map_one(struct pci_dev *pdev,
1308		struct CommandList *cp,
1309		unsigned char *buf,
1310		size_t buflen,
1311		int data_direction)
1312{
1313	u64 addr64;
1314
1315	if (buflen == 0 || data_direction == PCI_DMA_NONE) {
1316		cp->Header.SGList = 0;
1317		cp->Header.SGTotal = 0;
1318		return;
1319	}
1320
1321	addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
1322	cp->SG[0].Addr.lower =
1323	  (u32) (addr64 & (u64) 0x00000000FFFFFFFF);
1324	cp->SG[0].Addr.upper =
1325	  (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
1326	cp->SG[0].Len = buflen;
1327	cp->Header.SGList = (u8) 1;   /* no. SGs contig in this cmd */
1328	cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
1329}
1330
1331static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
1332	struct CommandList *c)
 
 
1333{
1334	DECLARE_COMPLETION_ONSTACK(wait);
1335
1336	c->waiting = &wait;
1337	enqueue_cmd_and_start_io(h, c);
1338	wait_for_completion(&wait);
1339}
1340
1341static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
1342	struct CommandList *c, int data_direction)
 
 
1343{
1344	int retry_count = 0;
 
1345
1346	do {
1347		memset(c->err_info, 0, sizeof(*c->err_info));
1348		hpsa_scsi_do_simple_cmd_core(h, c);
1349		retry_count++;
1350	} while (check_for_unit_attention(h, c) && retry_count <= 3);
1351	hpsa_pci_unmap(h->pdev, c, 1, data_direction);
1352}
1353
1354static void hpsa_scsi_interpret_error(struct CommandList *cp)
 
1355{
1356	struct ErrorInfo *ei;
1357	struct device *d = &cp->h->pdev->dev;
 
 
1358
1359	ei = cp->err_info;
1360	switch (ei->CommandStatus) {
1361	case CMD_TARGET_STATUS:
1362		dev_warn(d, "cmd %p has completed with errors\n", cp);
1363		dev_warn(d, "cmd %p has SCSI Status = %x\n", cp,
1364				ei->ScsiStatus);
1365		if (ei->ScsiStatus == 0)
1366			dev_warn(d, "SCSI status is abnormally zero.  "
1367			"(probably indicates selection timeout "
1368			"reported incorrectly due to a known "
1369			"firmware bug, circa July, 2001.)\n");
1370		break;
1371	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
1372			dev_info(d, "UNDERRUN\n");
1373		break;
1374	case CMD_DATA_OVERRUN:
1375		dev_warn(d, "cp %p has completed with data overrun\n", cp);
1376		break;
1377	case CMD_INVALID: {
1378		/* controller unfortunately reports SCSI passthru's
1379		 * to non-existent targets as invalid commands.
1380		 */
1381		dev_warn(d, "cp %p is reported invalid (probably means "
1382			"target device no longer present)\n", cp);
1383		/* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
1384		print_cmd(cp);  */
1385		}
1386		break;
1387	case CMD_PROTOCOL_ERR:
1388		dev_warn(d, "cp %p has protocol error \n", cp);
1389		break;
1390	case CMD_HARDWARE_ERR:
1391		/* cmd->result = DID_ERROR << 16; */
1392		dev_warn(d, "cp %p had hardware error\n", cp);
1393		break;
1394	case CMD_CONNECTION_LOST:
1395		dev_warn(d, "cp %p had connection lost\n", cp);
1396		break;
1397	case CMD_ABORTED:
1398		dev_warn(d, "cp %p was aborted\n", cp);
1399		break;
1400	case CMD_ABORT_FAILED:
1401		dev_warn(d, "cp %p reports abort failed\n", cp);
1402		break;
1403	case CMD_UNSOLICITED_ABORT:
1404		dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp);
1405		break;
1406	case CMD_TIMEOUT:
1407		dev_warn(d, "cp %p timed out\n", cp);
1408		break;
1409	case CMD_UNABORTABLE:
1410		dev_warn(d, "Command unabortable\n");
1411		break;
1412	default:
1413		dev_warn(d, "cp %p returned unknown status %x\n", cp,
 
1414				ei->CommandStatus);
1415	}
1416}
1417
1418static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
1419			unsigned char page, unsigned char *buf,
1420			unsigned char bufsize)
1421{
1422	int rc = IO_OK;
1423	struct CommandList *c;
1424	struct ErrorInfo *ei;
1425
1426	c = cmd_special_alloc(h);
1427
1428	if (c == NULL) {			/* trouble... */
1429		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
1430		return -ENOMEM;
 
1431	}
1432
1433	fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD);
1434	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
 
1435	ei = c->err_info;
1436	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
1437		hpsa_scsi_interpret_error(c);
1438		rc = -1;
1439	}
1440	cmd_special_free(h, c);
 
1441	return rc;
1442}
1443
1444static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
 
1445{
1446	int rc = IO_OK;
1447	struct CommandList *c;
1448	struct ErrorInfo *ei;
1449
1450	c = cmd_special_alloc(h);
 
1451
1452	if (c == NULL) {			/* trouble... */
1453		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
1454		return -ENOMEM;
1455	}
1456
1457	fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG);
1458	hpsa_scsi_do_simple_cmd_core(h, c);
1459	/* no unmap needed here because no data xfer. */
1460
1461	ei = c->err_info;
1462	if (ei->CommandStatus != 0) {
1463		hpsa_scsi_interpret_error(c);
1464		rc = -1;
1465	}
1466	cmd_special_free(h, c);
1467	return rc;
1468}
1469
1470static void hpsa_get_raid_level(struct ctlr_info *h,
1471	unsigned char *scsi3addr, unsigned char *raid_level)
1472{
1473	int rc;
1474	unsigned char *buf;
1475
1476	*raid_level = RAID_UNKNOWN;
1477	buf = kzalloc(64, GFP_KERNEL);
1478	if (!buf)
1479		return;
1480	rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64);
1481	if (rc == 0)
1482		*raid_level = buf[8];
1483	if (*raid_level > RAID_UNKNOWN)
1484		*raid_level = RAID_UNKNOWN;
1485	kfree(buf);
1486	return;
1487}
1488
1489/* Get the device id from inquiry page 0x83 */
1490static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
1491	unsigned char *device_id, int buflen)
1492{
1493	int rc;
1494	unsigned char *buf;
1495
1496	if (buflen > 16)
1497		buflen = 16;
 
 
1498	buf = kzalloc(64, GFP_KERNEL);
1499	if (!buf)
1500		return -1;
1501	rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
1502	if (rc == 0)
1503		memcpy(device_id, &buf[8], buflen);
 
 
1504	kfree(buf);
1505	return rc != 0;
 
1506}
1507
1508static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
1509		struct ReportLUNdata *buf, int bufsize,
1510		int extended_response)
1511{
1512	int rc = IO_OK;
1513	struct CommandList *c;
1514	unsigned char scsi3addr[8];
1515	struct ErrorInfo *ei;
1516
1517	c = cmd_special_alloc(h);
1518	if (c == NULL) {			/* trouble... */
1519		dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
1520		return -1;
1521	}
1522	/* address the controller */
1523	memset(scsi3addr, 0, sizeof(scsi3addr));
1524	fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
1525		buf, bufsize, 0, scsi3addr, TYPE_CMD);
1526	if (extended_response)
1527		c->Request.CDB[1] = extended_response;
1528	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
1529	ei = c->err_info;
1530	if (ei->CommandStatus != 0 &&
1531	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
1532		hpsa_scsi_interpret_error(c);
1533		rc = -1;
1534	}
1535	cmd_special_free(h, c);
 
1536	return rc;
1537}
1538
1539static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
1540		struct ReportLUNdata *buf,
1541		int bufsize, int extended_response)
1542{
1543	return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
1544}
1545
1546static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
1547		struct ReportLUNdata *buf, int bufsize)
1548{
1549	return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
1550}
1551
1552static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
1553	int bus, int target, int lun)
1554{
1555	device->bus = bus;
1556	device->target = target;
1557	device->lun = lun;
1558}
1559
1560static int hpsa_update_device_info(struct ctlr_info *h,
1561	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
1562	unsigned char *is_OBDR_device)
1563{
1564
1565#define OBDR_SIG_OFFSET 43
1566#define OBDR_TAPE_SIG "$DR-10"
1567#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
1568#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
1569
1570	unsigned char *inq_buff;
1571	unsigned char *obdr_sig;
 
1572
1573	inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
1574	if (!inq_buff)
 
1575		goto bail_out;
 
1576
1577	/* Do an inquiry to the device to see what it is. */
1578	if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
1579		(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
1580		/* Inquiry failed (msg printed already) */
1581		dev_err(&h->pdev->dev,
1582			"hpsa_update_device_info: inquiry failed\n");
 
 
1583		goto bail_out;
1584	}
1585
1586	this_device->devtype = (inq_buff[0] & 0x1f);
1587	memcpy(this_device->scsi3addr, scsi3addr, 8);
1588	memcpy(this_device->vendor, &inq_buff[8],
1589		sizeof(this_device->vendor));
1590	memcpy(this_device->model, &inq_buff[16],
1591		sizeof(this_device->model));
 
1592	memset(this_device->device_id, 0,
1593		sizeof(this_device->device_id));
1594	hpsa_get_device_id(h, scsi3addr, this_device->device_id,
1595		sizeof(this_device->device_id));
1596
1597	if (this_device->devtype == TYPE_DISK &&
1598		is_logical_dev_addr_mode(scsi3addr))
1599		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
1600	else
1601		this_device->raid_level = RAID_UNKNOWN;
1602
1603	if (is_OBDR_device) {
1604		/* See if this is a One-Button-Disaster-Recovery device
1605		 * by looking for "$DR-10" at offset 43 in inquiry data.
1606		 */
1607		obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
1608		*is_OBDR_device = (this_device->devtype == TYPE_ROM &&
1609					strncmp(obdr_sig, OBDR_TAPE_SIG,
1610						OBDR_SIG_LEN) == 0);
1611	}
1612
1613	kfree(inq_buff);
1614	return 0;
1615
1616bail_out:
1617	kfree(inq_buff);
1618	return 1;
1619}
1620
1621static unsigned char *msa2xxx_model[] = {
1622	"MSA2012",
1623	"MSA2024",
1624	"MSA2312",
1625	"MSA2324",
1626	"P2000 G3 SAS",
1627	NULL,
1628};
1629
1630static int is_msa2xxx(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
1631{
1632	int i;
1633
1634	for (i = 0; msa2xxx_model[i]; i++)
1635		if (strncmp(device->model, msa2xxx_model[i],
1636			strlen(msa2xxx_model[i])) == 0)
1637			return 1;
1638	return 0;
1639}
1640
1641/* Helper function to assign bus, target, lun mapping of devices.
1642 * Puts non-msa2xxx logical volumes on bus 0, msa2xxx logical
1643 * volumes on bus 1, physical devices on bus 2, and the hba on bus 3.
1644 * Logical drive target and lun are assigned at this time, but
1645 * physical device lun and target assignment are deferred (assigned
1646 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
1647 */
1648static void figure_bus_target_lun(struct ctlr_info *h,
1649	u8 *lunaddrbytes, int *bus, int *target, int *lun,
1650	struct hpsa_scsi_dev_t *device)
1651{
1652	u32 lunid;
1653
1654	if (is_logical_dev_addr_mode(lunaddrbytes)) {
1655		/* logical device */
1656		if (unlikely(is_scsi_rev_5(h))) {
1657			/* p1210m, logical drives lun assignments
1658			 * match SCSI REPORT LUNS data.
1659			 */
1660			lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
1661			*bus = 0;
1662			*target = 0;
1663			*lun = (lunid & 0x3fff) + 1;
1664		} else {
1665			/* not p1210m... */
1666			lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
1667			if (is_msa2xxx(h, device)) {
1668				/* msa2xxx way, put logicals on bus 1
1669				 * and match target/lun numbers box
1670				 * reports.
1671				 */
1672				*bus = 1;
1673				*target = (lunid >> 16) & 0x3fff;
1674				*lun = lunid & 0x00ff;
1675			} else {
1676				/* Traditional smart array way. */
1677				*bus = 0;
1678				*lun = 0;
1679				*target = lunid & 0x3fff;
1680			}
1681		}
1682	} else {
1683		/* physical device */
1684		if (is_hba_lunid(lunaddrbytes))
1685			if (unlikely(is_scsi_rev_5(h))) {
1686				*bus = 0; /* put p1210m ctlr at 0,0,0 */
1687				*target = 0;
1688				*lun = 0;
1689				return;
1690			} else
1691				*bus = 3; /* traditional smartarray */
1692		else
1693			*bus = 2; /* physical disk */
1694		*target = -1;
1695		*lun = -1; /* we will fill these in later. */
1696	}
 
 
1697}
1698
1699/*
1700 * If there is no lun 0 on a target, linux won't find any devices.
1701 * For the MSA2xxx boxes, we have to manually detect the enclosure
1702 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
1703 * it for some reason.  *tmpdevice is the target we're adding,
1704 * this_device is a pointer into the current element of currentsd[]
1705 * that we're building up in update_scsi_devices(), below.
1706 * lunzerobits is a bitmap that tracks which targets already have a
1707 * lun 0 assigned.
1708 * Returns 1 if an enclosure was added, 0 if not.
1709 */
1710static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
1711	struct hpsa_scsi_dev_t *tmpdevice,
1712	struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
1713	int bus, int target, int lun, unsigned long lunzerobits[],
1714	int *nmsa2xxx_enclosures)
1715{
1716	unsigned char scsi3addr[8];
1717
1718	if (test_bit(target, lunzerobits))
1719		return 0; /* There is already a lun 0 on this target. */
1720
1721	if (!is_logical_dev_addr_mode(lunaddrbytes))
1722		return 0; /* It's the logical targets that may lack lun 0. */
1723
1724	if (!is_msa2xxx(h, tmpdevice))
1725		return 0; /* It's only the MSA2xxx that have this problem. */
1726
1727	if (lun == 0) /* if lun is 0, then obviously we have a lun 0. */
1728		return 0;
1729
1730	memset(scsi3addr, 0, 8);
1731	scsi3addr[3] = target;
1732	if (is_hba_lunid(scsi3addr))
1733		return 0; /* Don't add the RAID controller here. */
1734
1735	if (is_scsi_rev_5(h))
1736		return 0; /* p1210m doesn't need to do this. */
1737
1738#define MAX_MSA2XXX_ENCLOSURES 32
1739	if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) {
1740		dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX "
1741			"enclosures exceeded.  Check your hardware "
1742			"configuration.");
1743		return 0;
1744	}
1745
1746	if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
1747		return 0;
1748	(*nmsa2xxx_enclosures)++;
1749	hpsa_set_bus_target_lun(this_device, bus, target, 0);
1750	set_bit(target, lunzerobits);
1751	return 1;
1752}
1753
1754/*
1755 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev,
1756 * logdev.  The number of luns in physdev and logdev are returned in
1757 * *nphysicals and *nlogicals, respectively.
1758 * Returns 0 on success, -1 otherwise.
1759 */
1760static int hpsa_gather_lun_info(struct ctlr_info *h,
1761	int reportlunsize,
1762	struct ReportLUNdata *physdev, u32 *nphysicals,
1763	struct ReportLUNdata *logdev, u32 *nlogicals)
1764{
1765	if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) {
1766		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
1767		return -1;
1768	}
1769	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 8;
1770	if (*nphysicals > HPSA_MAX_PHYS_LUN) {
1771		dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
1772			"  %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
1773			*nphysicals - HPSA_MAX_PHYS_LUN);
1774		*nphysicals = HPSA_MAX_PHYS_LUN;
1775	}
1776	if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) {
1777		dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
1778		return -1;
1779	}
1780	*nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
1781	/* Reject Logicals in excess of our max capability. */
1782	if (*nlogicals > HPSA_MAX_LUN) {
1783		dev_warn(&h->pdev->dev,
1784			"maximum logical LUNs (%d) exceeded.  "
1785			"%d LUNs ignored.\n", HPSA_MAX_LUN,
1786			*nlogicals - HPSA_MAX_LUN);
1787			*nlogicals = HPSA_MAX_LUN;
1788	}
1789	if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
1790		dev_warn(&h->pdev->dev,
1791			"maximum logical + physical LUNs (%d) exceeded. "
1792			"%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
1793			*nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
1794		*nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
1795	}
1796	return 0;
1797}
1798
1799u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
1800	int nphysicals, int nlogicals, struct ReportLUNdata *physdev_list,
 
1801	struct ReportLUNdata *logdev_list)
1802{
1803	/* Helper function, figure out where the LUN ID info is coming from
1804	 * given index i, lists of physical and logical devices, where in
1805	 * the list the raid controller is supposed to appear (first or last)
1806	 */
1807
1808	int logicals_start = nphysicals + (raid_ctlr_position == 0);
1809	int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
1810
1811	if (i == raid_ctlr_position)
1812		return RAID_CTLR_LUNID;
1813
1814	if (i < logicals_start)
1815		return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0];
 
1816
1817	if (i < last_device)
1818		return &logdev_list->LUN[i - nphysicals -
1819			(raid_ctlr_position == 0)][0];
1820	BUG();
1821	return NULL;
1822}
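/*
 * Example of the index layout handled above (counts invented for
 * illustration): with raid_ctlr_position == 0, nphysicals == 2 and
 * nlogicals == 1, index 0 returns RAID_CTLR_LUNID, indexes 1 and 2
 * return physdev_list->LUN[0] and LUN[1], and index 3 returns
 * logdev_list->LUN[0].
 */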
1823
1824static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
1825{
1826	/* the idea here is we could get notified
1827	 * that some devices have changed, so we do a report
1828	 * physical luns and report logical luns cmd, and adjust
1829	 * our list of devices accordingly.
1830	 *
1831	 * The scsi3addr's of devices won't change so long as the
1832	 * adapter is not reset.  That means we can rescan and
1833	 * tell which devices we already know about, vs. new
1834	 * devices, vs.  disappearing devices.
1835	 */
1836	struct ReportLUNdata *physdev_list = NULL;
1837	struct ReportLUNdata *logdev_list = NULL;
 
 
1838	u32 nphysicals = 0;
1839	u32 nlogicals = 0;
 
1840	u32 ndev_allocated = 0;
1841	struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
1842	int ncurrent = 0;
1843	int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8;
1844	int i, nmsa2xxx_enclosures, ndevs_to_allocate;
1845	int bus, target, lun;
1846	int raid_ctlr_position;
1847	DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR);
1848
1849	currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_SCSI_DEVS_PER_HBA,
1850		GFP_KERNEL);
1851	physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
1852	logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
1853	tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
 
 
1854
1855	if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) {
 
1856		dev_err(&h->pdev->dev, "out of memory\n");
1857		goto out;
1858	}
1859	memset(lunzerobits, 0, sizeof(lunzerobits));
1860
1861	if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals,
1862			logdev_list, &nlogicals))
1863		goto out;
 
1864
1865	/* We might see up to 32 MSA2xxx enclosures; actually there are 8
1866	 * of them, but each shows up 4 times through different paths.  The plus 1
1867	 * is for the RAID controller.
 
 
 
 
 
 
 
1868	 */
1869	ndevs_to_allocate = nphysicals + nlogicals + MAX_MSA2XXX_ENCLOSURES + 1;
1870
1871	/* Allocate the per device structures */
1872	for (i = 0; i < ndevs_to_allocate; i++) {
1873		currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
1874		if (!currentsd[i]) {
1875			dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
1876				__FILE__, __LINE__);
1877			goto out;
1878		}
1879		ndev_allocated++;
1880	}
1881
1882	if (unlikely(is_scsi_rev_5(h)))
1883		raid_ctlr_position = 0;
1884	else
1885		raid_ctlr_position = nphysicals + nlogicals;
1886
1887	/* adjust our table of devices */
1888	nmsa2xxx_enclosures = 0;
1889	for (i = 0; i < nphysicals + nlogicals + 1; i++) {
1890		u8 *lunaddrbytes, is_OBDR = 0;
1891
1892		/* Figure out where the LUN ID info is coming from */
1893		lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
1894			i, nphysicals, nlogicals, physdev_list, logdev_list);
1895		/* skip masked physical devices. */
1896		if (lunaddrbytes[3] & 0xC0 &&
1897			i < nphysicals + (raid_ctlr_position == 0))
1898			continue;
1899
1900		/* Get device type, vendor, model, device id */
1901		if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
1902							&is_OBDR))
1903			continue; /* skip it if we can't talk to it. */
1904		figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun,
1905			tmpdevice);
1906		this_device = currentsd[ncurrent];
1907
1908		/*
1909		 * For the msa2xxx boxes, we have to insert a LUN 0 which
1910		 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
1911		 * is nonetheless an enclosure device there.  We have to
1912		 * present that otherwise linux won't find anything if
1913		 * there is no lun 0.
1914		 */
1915		if (add_msa2xxx_enclosure_device(h, tmpdevice, this_device,
1916				lunaddrbytes, bus, target, lun, lunzerobits,
1917				&nmsa2xxx_enclosures)) {
1918			ncurrent++;
1919			this_device = currentsd[ncurrent];
1920		}
1921
1922		*this_device = *tmpdevice;
1923		hpsa_set_bus_target_lun(this_device, bus, target, lun);
1924
1925		switch (this_device->devtype) {
1926		case TYPE_ROM:
1927			/* We don't *really* support actual CD-ROM devices,
1928			 * just "One Button Disaster Recovery" tape drive
1929			 * which temporarily pretends to be a CD-ROM drive.
1930			 * So we check that the device is really an OBDR tape
1931			 * device by checking for "$DR-10" in bytes 43-48 of
1932			 * the inquiry data.
1933			 */
1934			if (is_OBDR)
1935				ncurrent++;
1936			break;
1937		case TYPE_DISK:
1938			if (i < nphysicals)
1939				break;
1940			ncurrent++;
1941			break;
1942		case TYPE_TAPE:
1943		case TYPE_MEDIUM_CHANGER:
1944			ncurrent++;
1945			break;
1946		case TYPE_RAID:
1947			/* Only present the Smartarray HBA as a RAID controller.
1948			 * If it's a RAID controller other than the HBA itself
1949			 * (an external RAID controller, MSA500 or similar)
1950			 * don't present it.
1951			 */
1952			if (!is_hba_lunid(lunaddrbytes))
1953				break;
1954			ncurrent++;
1955			break;
1956		default:
1957			break;
1958		}
1959		if (ncurrent >= HPSA_MAX_SCSI_DEVS_PER_HBA)
1960			break;
1961	}
1962	adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
1963out:
1964	kfree(tmpdevice);
1965	for (i = 0; i < ndev_allocated; i++)
1966		kfree(currentsd[i]);
1967	kfree(currentsd);
1968	kfree(physdev_list);
1969	kfree(logdev_list);
1970}
1971
1972/* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
1973 * dma mapping  and fills in the scatter gather entries of the
1974 * hpsa command, cp.
1975 */
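/* A note on SG chaining, as implemented below (a summary, not a spec):
 * the first h->max_cmd_sg_entries - 1 descriptors are written into the
 * command block itself (cp->SG); any remaining descriptors go into the
 * per-command chain block h->cmd_sg_list[cp->cmdindex].  In the chained
 * case SGList is set to h->max_cmd_sg_entries, SGTotal to use_sg + 1,
 * and hpsa_map_sg_chain_block() maps the chain block so the controller
 * can follow it.
 */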
1976static int hpsa_scatter_gather(struct ctlr_info *h,
1977		struct CommandList *cp,
1978		struct scsi_cmnd *cmd)
1979{
1980	unsigned int len;
1981	struct scatterlist *sg;
1982	u64 addr64;
1983	int use_sg, i, sg_index, chained;
1984	struct SGDescriptor *curr_sg;
1985
1986	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
1987
1988	use_sg = scsi_dma_map(cmd);
1989	if (use_sg < 0)
1990		return use_sg;
1991
1992	if (!use_sg)
1993		goto sglist_finished;
1994
1995	curr_sg = cp->SG;
1996	chained = 0;
1997	sg_index = 0;
1998	scsi_for_each_sg(cmd, sg, use_sg, i) {
1999		if (i == h->max_cmd_sg_entries - 1 &&
2000			use_sg > h->max_cmd_sg_entries) {
2001			chained = 1;
2002			curr_sg = h->cmd_sg_list[cp->cmdindex];
2003			sg_index = 0;
2004		}
2005		addr64 = (u64) sg_dma_address(sg);
2006		len  = sg_dma_len(sg);
2007		curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
2008		curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
2009		curr_sg->Len = len;
2010		curr_sg->Ext = 0;  /* we are not chaining */
2011		curr_sg++;
2012	}
2013
2014	if (use_sg + chained > h->maxSG)
2015		h->maxSG = use_sg + chained;
2016
2017	if (chained) {
2018		cp->Header.SGList = h->max_cmd_sg_entries;
2019		cp->Header.SGTotal = (u16) (use_sg + 1);
2020		hpsa_map_sg_chain_block(h, cp);
2021		return 0;
2022	}
2023
2024sglist_finished:
2025
2026	cp->Header.SGList = (u8) use_sg;   /* no. SGs contig in this cmd */
2027	cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */
2028	return 0;
2029}
2030
2031
2032static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
2033	void (*done)(struct scsi_cmnd *))
2034{
2035	struct ctlr_info *h;
2036	struct hpsa_scsi_dev_t *dev;
2037	unsigned char scsi3addr[8];
2038	struct CommandList *c;
2039	unsigned long flags;
2040
2041	/* Get the ptr to our adapter structure out of cmd->host. */
2042	h = sdev_to_hba(cmd->device);
2043	dev = cmd->device->hostdata;
2044	if (!dev) {
2045		cmd->result = DID_NO_CONNECT << 16;
2046		done(cmd);
2047		return 0;
2048	}
2049	memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
2050
2051	/* Need a lock as this is being allocated from the pool */
2052	spin_lock_irqsave(&h->lock, flags);
2053	c = cmd_alloc(h);
2054	spin_unlock_irqrestore(&h->lock, flags);
2055	if (c == NULL) {			/* trouble... */
2056		dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
2057		return SCSI_MLQUEUE_HOST_BUSY;
2058	}
2059
2060	/* Fill in the command list header */
2061
2062	cmd->scsi_done = done;    /* save this for use by completion code */
2063
2064	/* save c in case we have to abort it  */
2065	cmd->host_scribble = (unsigned char *) c;
2066
2067	c->cmd_type = CMD_SCSI;
2068	c->scsi_cmd = cmd;
2069	c->Header.ReplyQueue = 0;  /* unused in simple mode */
2070	memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
2071	c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT);
2072	c->Header.Tag.lower |= DIRECT_LOOKUP_BIT;
2073
2074	/* Fill in the request block... */
2075
2076	c->Request.Timeout = 0;
2077	memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
2078	BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
2079	c->Request.CDBLen = cmd->cmd_len;
2080	memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
2081	c->Request.Type.Type = TYPE_CMD;
2082	c->Request.Type.Attribute = ATTR_SIMPLE;
2083	switch (cmd->sc_data_direction) {
2084	case DMA_TO_DEVICE:
2085		c->Request.Type.Direction = XFER_WRITE;
2086		break;
2087	case DMA_FROM_DEVICE:
2088		c->Request.Type.Direction = XFER_READ;
2089		break;
2090	case DMA_NONE:
2091		c->Request.Type.Direction = XFER_NONE;
2092		break;
2093	case DMA_BIDIRECTIONAL:
2094		/* This can happen if a buggy application does a scsi passthru
2095		 * and sets both inlen and outlen to non-zero. ( see
2096		 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
2097		 */
2098
2099		c->Request.Type.Direction = XFER_RSVD;
2100		/* This is technically wrong, and hpsa controllers should
2101		 * reject it with CMD_INVALID, which is the most correct
2102		 * response, but non-fibre backends appear to let it
2103		 * slide by, and give the same results as if this field
2104		 * were set correctly.  Either way is acceptable for
2105		 * our purposes here.
2106		 */
2107
2108		break;
2109
2110	default:
2111		dev_err(&h->pdev->dev, "unknown data direction: %d\n",
2112			cmd->sc_data_direction);
2113		BUG();
2114		break;
2115	}
2116
2117	if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
2118		cmd_free(h, c);
2119		return SCSI_MLQUEUE_HOST_BUSY;
2120	}
2121	enqueue_cmd_and_start_io(h, c);
2122	/* the cmd'll come back via intr handler in complete_scsi_command()  */
2123	return 0;
2124}
2125
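/* DEF_SCSI_QCMD (from <scsi/scsi_host.h>) expands to a
 * hpsa_scsi_queue_command() wrapper that takes the host lock and calls
 * hpsa_scsi_queue_command_lck() above with cmd->scsi_done, giving the
 * midlayer the queuecommand prototype it expects.
 */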
2126static DEF_SCSI_QCMD(hpsa_scsi_queue_command)
2127
2128static void hpsa_scan_start(struct Scsi_Host *sh)
2129{
2130	struct ctlr_info *h = shost_to_hba(sh);
2131	unsigned long flags;
2132
2133	/* wait until any scan already in progress is finished. */
2134	while (1) {
2135		spin_lock_irqsave(&h->scan_lock, flags);
2136		if (h->scan_finished)
2137			break;
2138		spin_unlock_irqrestore(&h->scan_lock, flags);
2139		wait_event(h->scan_wait_queue, h->scan_finished);
2140		/* Note: We don't need to worry about a race between this
2141		 * thread and driver unload because the midlayer will
2142		 * have incremented the reference count, so unload won't
2143		 * happen if we're in here.
2144		 */
2145	}
2146	h->scan_finished = 0; /* mark scan as in progress */
2147	spin_unlock_irqrestore(&h->scan_lock, flags);
2148
2149	hpsa_update_scsi_devices(h, h->scsi_host->host_no);
2150
2151	spin_lock_irqsave(&h->scan_lock, flags);
2152	h->scan_finished = 1; /* mark scan as finished. */
2153	wake_up_all(&h->scan_wait_queue);
2154	spin_unlock_irqrestore(&h->scan_lock, flags);
2155}
2156
2157static int hpsa_scan_finished(struct Scsi_Host *sh,
2158	unsigned long elapsed_time)
2159{
2160	struct ctlr_info *h = shost_to_hba(sh);
2161	unsigned long flags;
2162	int finished;
2163
2164	spin_lock_irqsave(&h->scan_lock, flags);
2165	finished = h->scan_finished;
2166	spin_unlock_irqrestore(&h->scan_lock, flags);
2167	return finished;
2168}
2169
2170static int hpsa_change_queue_depth(struct scsi_device *sdev,
2171	int qdepth, int reason)
2172{
2173	struct ctlr_info *h = sdev_to_hba(sdev);
2174
2175	if (reason != SCSI_QDEPTH_DEFAULT)
2176		return -ENOTSUPP;
2177
2178	if (qdepth < 1)
2179		qdepth = 1;
2180	else
2181		if (qdepth > h->nr_cmds)
2182			qdepth = h->nr_cmds;
2183	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
2184	return sdev->queue_depth;
2185}
2186
2187static void hpsa_unregister_scsi(struct ctlr_info *h)
2188{
2189	/* we are being forcibly unloaded, and may not refuse. */
2190	scsi_remove_host(h->scsi_host);
2191	scsi_host_put(h->scsi_host);
2192	h->scsi_host = NULL;
2193}
2194
2195static int hpsa_register_scsi(struct ctlr_info *h)
2196{
2197	int rc;
2198
2199	rc = hpsa_scsi_detect(h);
2200	if (rc != 0)
2201		dev_err(&h->pdev->dev, "hpsa_register_scsi: failed"
2202			" hpsa_scsi_detect(), rc is %d\n", rc);
2203	return rc;
2204}
2205
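/* Note on the retry loop below: the delay before each Test Unit Ready
 * starts at one second and doubles on every attempt until it reaches
 * HPSA_MAX_WAIT_INTERVAL_SECS, and at most HPSA_TUR_RETRY_LIMIT
 * attempts are made, so the total wait is bounded by those two
 * constants.
 */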
2206static int wait_for_device_to_become_ready(struct ctlr_info *h,
2207	unsigned char lunaddr[])
2208{
2209	int rc = 0;
2210	int count = 0;
2211	int waittime = 1; /* seconds */
2212	struct CommandList *c;
2213
2214	c = cmd_special_alloc(h);
2215	if (!c) {
2216		dev_warn(&h->pdev->dev, "out of memory in "
2217			"wait_for_device_to_become_ready.\n");
2218		return IO_ERROR;
2219	}
2220
2221	/* Send test unit ready until device ready, or give up. */
2222	while (count < HPSA_TUR_RETRY_LIMIT) {
2223
2224		/* Wait for a bit.  do this first, because if we send
2225		 * the TUR right away, the reset will just abort it.
2226		 */
2227		msleep(1000 * waittime);
2228		count++;
2229
2230		/* Increase wait time with each try, up to a point. */
2231		if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
2232			waittime = waittime * 2;
2233
2234		/* Send the Test Unit Ready */
2235		fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD);
2236		hpsa_scsi_do_simple_cmd_core(h, c);
2237		/* no unmap needed here because no data xfer. */
2238
2239		if (c->err_info->CommandStatus == CMD_SUCCESS)
2240			break;
2241
2242		if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
2243			c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
2244			(c->err_info->SenseInfo[2] == NO_SENSE ||
2245			c->err_info->SenseInfo[2] == UNIT_ATTENTION))
2246			break;
2247
2248		dev_warn(&h->pdev->dev, "waiting %d secs "
2249			"for device to become ready.\n", waittime);
2250		rc = 1; /* device not ready. */
2251	}
2252
2253	if (rc)
2254		dev_warn(&h->pdev->dev, "giving up on device.\n");
2255	else
2256		dev_warn(&h->pdev->dev, "device is ready.\n");
2257
2258	cmd_special_free(h, c);
2259	return rc;
2260}
2261
2262/* Need at least one of these error handlers to keep ../scsi/hosts.c from
2263 * complaining.  Doing a host- or bus-reset can't do anything good here.
2264 */
2265static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
2266{
2267	int rc;
2268	struct ctlr_info *h;
2269	struct hpsa_scsi_dev_t *dev;
2270
2271	/* find the controller to which the command to be aborted was sent */
2272	h = sdev_to_hba(scsicmd->device);
2273	if (h == NULL) /* paranoia */
2274		return FAILED;
2275	dev = scsicmd->device->hostdata;
2276	if (!dev) {
2277		dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
2278			"device lookup failed.\n");
2279		return FAILED;
2280	}
2281	dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",
2282		h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
2283	/* send a reset to the SCSI LUN which the command was sent to */
2284	rc = hpsa_send_reset(h, dev->scsi3addr);
2285	if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
2286		return SUCCESS;
2287
2288	dev_warn(&h->pdev->dev, "resetting device failed.\n");
2289	return FAILED;
2290}
2291
2292/*
2293 * For operations that cannot sleep, a command block is allocated at init,
2294 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
2295 * which ones are free or in use.  Lock must be held when calling this.
2296 * cmd_free() is the complement.
2297 */
2298static struct CommandList *cmd_alloc(struct ctlr_info *h)
2299{
2300	struct CommandList *c;
2301	int i;
2302	union u64bit temp64;
2303	dma_addr_t cmd_dma_handle, err_dma_handle;
2304
2305	do {
2306		i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
2307		if (i == h->nr_cmds)
2308			return NULL;
2309	} while (test_and_set_bit
2310		 (i & (BITS_PER_LONG - 1),
2311		  h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
2312	c = h->cmd_pool + i;
2313	memset(c, 0, sizeof(*c));
2314	cmd_dma_handle = h->cmd_pool_dhandle
2315	    + i * sizeof(*c);
2316	c->err_info = h->errinfo_pool + i;
2317	memset(c->err_info, 0, sizeof(*c->err_info));
2318	err_dma_handle = h->errinfo_pool_dhandle
2319	    + i * sizeof(*c->err_info);
2320	h->nr_allocs++;
2321
2322	c->cmdindex = i;
2323
2324	INIT_LIST_HEAD(&c->list);
2325	c->busaddr = (u32) cmd_dma_handle;
2326	temp64.val = (u64) err_dma_handle;
2327	c->ErrDesc.Addr.lower = temp64.val32.lower;
2328	c->ErrDesc.Addr.upper = temp64.val32.upper;
2329	c->ErrDesc.Len = sizeof(*c->err_info);
2330
2331	c->h = h;
2332	return c;
2333}
2334
2335/* For operations that can wait for kmalloc to possibly sleep,
2336 * this routine can be called. Lock need not be held to call
2337 * cmd_special_alloc. cmd_special_free() is the complement.
2338 */
2339static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
2340{
2341	struct CommandList *c;
2342	union u64bit temp64;
2343	dma_addr_t cmd_dma_handle, err_dma_handle;
2344
2345	c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle);
2346	if (c == NULL)
2347		return NULL;
2348	memset(c, 0, sizeof(*c));
2349
2350	c->cmdindex = -1;
2351
2352	c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info),
2353		    &err_dma_handle);
2354
2355	if (c->err_info == NULL) {
2356		pci_free_consistent(h->pdev,
2357			sizeof(*c), c, cmd_dma_handle);
2358		return NULL;
2359	}
2360	memset(c->err_info, 0, sizeof(*c->err_info));
2361
2362	INIT_LIST_HEAD(&c->list);
2363	c->busaddr = (u32) cmd_dma_handle;
2364	temp64.val = (u64) err_dma_handle;
2365	c->ErrDesc.Addr.lower = temp64.val32.lower;
2366	c->ErrDesc.Addr.upper = temp64.val32.upper;
2367	c->ErrDesc.Len = sizeof(*c->err_info);
2368
2369	c->h = h;
2370	return c;
2371}
2372
2373static void cmd_free(struct ctlr_info *h, struct CommandList *c)
2374{
2375	int i;
2376
2377	i = c - h->cmd_pool;
2378	clear_bit(i & (BITS_PER_LONG - 1),
2379		  h->cmd_pool_bits + (i / BITS_PER_LONG));
2380	h->nr_frees++;
2381}
2382
2383static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
2384{
2385	union u64bit temp64;
2386
2387	temp64.val32.lower = c->ErrDesc.Addr.lower;
2388	temp64.val32.upper = c->ErrDesc.Addr.upper;
2389	pci_free_consistent(h->pdev, sizeof(*c->err_info),
2390			    c->err_info, (dma_addr_t) temp64.val);
2391	pci_free_consistent(h->pdev, sizeof(*c),
2392			    c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK));
2393}
2394
2395#ifdef CONFIG_COMPAT
2396
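/* 32-bit compat shims: rather than teach the ioctl handlers about the
 * 32-bit structure layouts, each routine below copies the user's 32-bit
 * arguments field by field into a native command structure allocated in
 * user space with compat_alloc_user_space(), re-enters hpsa_ioctl()
 * with the corresponding 64-bit ioctl, and copies error_info back out.
 */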
2397static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
2398{
2399	IOCTL32_Command_struct __user *arg32 =
2400	    (IOCTL32_Command_struct __user *) arg;
2401	IOCTL_Command_struct arg64;
2402	IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
2403	int err;
2404	u32 cp;
2405
2406	memset(&arg64, 0, sizeof(arg64));
2407	err = 0;
2408	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
2409			   sizeof(arg64.LUN_info));
2410	err |= copy_from_user(&arg64.Request, &arg32->Request,
2411			   sizeof(arg64.Request));
2412	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
2413			   sizeof(arg64.error_info));
2414	err |= get_user(arg64.buf_size, &arg32->buf_size);
2415	err |= get_user(cp, &arg32->buf);
2416	arg64.buf = compat_ptr(cp);
2417	err |= copy_to_user(p, &arg64, sizeof(arg64));
2418
2419	if (err)
2420		return -EFAULT;
2421
2422	err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p);
2423	if (err)
2424		return err;
2425	err |= copy_in_user(&arg32->error_info, &p->error_info,
2426			 sizeof(arg32->error_info));
2427	if (err)
2428		return -EFAULT;
2429	return err;
2430}
2431
2432static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
2433	int cmd, void *arg)
2434{
2435	BIG_IOCTL32_Command_struct __user *arg32 =
2436	    (BIG_IOCTL32_Command_struct __user *) arg;
2437	BIG_IOCTL_Command_struct arg64;
2438	BIG_IOCTL_Command_struct __user *p =
2439	    compat_alloc_user_space(sizeof(arg64));
2440	int err;
2441	u32 cp;
2442
2443	memset(&arg64, 0, sizeof(arg64));
2444	err = 0;
2445	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
2446			   sizeof(arg64.LUN_info));
2447	err |= copy_from_user(&arg64.Request, &arg32->Request,
2448			   sizeof(arg64.Request));
2449	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
2450			   sizeof(arg64.error_info));
2451	err |= get_user(arg64.buf_size, &arg32->buf_size);
2452	err |= get_user(arg64.malloc_size, &arg32->malloc_size);
2453	err |= get_user(cp, &arg32->buf);
2454	arg64.buf = compat_ptr(cp);
2455	err |= copy_to_user(p, &arg64, sizeof(arg64));
2456
2457	if (err)
2458		return -EFAULT;
2459
2460	err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p);
2461	if (err)
2462		return err;
2463	err |= copy_in_user(&arg32->error_info, &p->error_info,
2464			 sizeof(arg32->error_info));
2465	if (err)
2466		return -EFAULT;
2467	return err;
2468}
2469
2470static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg)
2471{
2472	switch (cmd) {
2473	case CCISS_GETPCIINFO:
2474	case CCISS_GETINTINFO:
2475	case CCISS_SETINTINFO:
2476	case CCISS_GETNODENAME:
2477	case CCISS_SETNODENAME:
2478	case CCISS_GETHEARTBEAT:
2479	case CCISS_GETBUSTYPES:
2480	case CCISS_GETFIRMVER:
2481	case CCISS_GETDRIVVER:
2482	case CCISS_REVALIDVOLS:
2483	case CCISS_DEREGDISK:
2484	case CCISS_REGNEWDISK:
2485	case CCISS_REGNEWD:
2486	case CCISS_RESCANDISK:
2487	case CCISS_GETLUNINFO:
2488		return hpsa_ioctl(dev, cmd, arg);
2489
2490	case CCISS_PASSTHRU32:
2491		return hpsa_ioctl32_passthru(dev, cmd, arg);
2492	case CCISS_BIG_PASSTHRU32:
2493		return hpsa_ioctl32_big_passthru(dev, cmd, arg);
2494
2495	default:
2496		return -ENOIOCTLCMD;
2497	}
2498}
2499#endif
2500
2501static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
2502{
2503	struct hpsa_pci_info pciinfo;
2504
2505	if (!argp)
2506		return -EINVAL;
2507	pciinfo.domain = pci_domain_nr(h->pdev->bus);
2508	pciinfo.bus = h->pdev->bus->number;
2509	pciinfo.dev_fn = h->pdev->devfn;
2510	pciinfo.board_id = h->board_id;
2511	if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
2512		return -EFAULT;
2513	return 0;
2514}
2515
2516static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
2517{
2518	DriverVer_type DriverVer;
2519	unsigned char vmaj, vmin, vsubmin;
2520	int rc;
2521
2522	rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
2523		&vmaj, &vmin, &vsubmin);
2524	if (rc != 3) {
2525		dev_info(&h->pdev->dev, "driver version string '%s' "
2526			"unrecognized.", HPSA_DRIVER_VERSION);
2527		vmaj = 0;
2528		vmin = 0;
2529		vsubmin = 0;
2530	}
2531	DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
2532	if (!argp)
2533		return -EINVAL;
2534	if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
2535		return -EFAULT;
2536	return 0;
2537}
2538
2539static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
2540{
2541	IOCTL_Command_struct iocommand;
2542	struct CommandList *c;
2543	char *buff = NULL;
2544	union u64bit temp64;
2545
2546	if (!argp)
2547		return -EINVAL;
2548	if (!capable(CAP_SYS_RAWIO))
2549		return -EPERM;
2550	if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
2551		return -EFAULT;
2552	if ((iocommand.buf_size < 1) &&
2553	    (iocommand.Request.Type.Direction != XFER_NONE)) {
2554		return -EINVAL;
2555	}
2556	if (iocommand.buf_size > 0) {
2557		buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
2558		if (buff == NULL)
2559			return -EFAULT;
2560		if (iocommand.Request.Type.Direction == XFER_WRITE) {
2561			/* Copy the data into the buffer we created */
2562			if (copy_from_user(buff, iocommand.buf,
2563				iocommand.buf_size)) {
2564				kfree(buff);
2565				return -EFAULT;
2566			}
2567		} else {
2568			memset(buff, 0, iocommand.buf_size);
2569		}
2570	}
2571	c = cmd_special_alloc(h);
2572	if (c == NULL) {
2573		kfree(buff);
2574		return -ENOMEM;
2575	}
2576	/* Fill in the command type */
2577	c->cmd_type = CMD_IOCTL_PEND;
2578	/* Fill in Command Header */
2579	c->Header.ReplyQueue = 0; /* unused in simple mode */
2580	if (iocommand.buf_size > 0) {	/* buffer to fill */
2581		c->Header.SGList = 1;
2582		c->Header.SGTotal = 1;
2583	} else	{ /* no buffers to fill */
2584		c->Header.SGList = 0;
2585		c->Header.SGTotal = 0;
2586	}
2587	memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
2588	/* use the kernel address of the cmd block for the tag */
2589	c->Header.Tag.lower = c->busaddr;
2590
2591	/* Fill in Request block */
2592	memcpy(&c->Request, &iocommand.Request,
2593		sizeof(c->Request));
2594
2595	/* Fill in the scatter gather information */
2596	if (iocommand.buf_size > 0) {
2597		temp64.val = pci_map_single(h->pdev, buff,
2598			iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
2599		c->SG[0].Addr.lower = temp64.val32.lower;
2600		c->SG[0].Addr.upper = temp64.val32.upper;
2601		c->SG[0].Len = iocommand.buf_size;
2602		c->SG[0].Ext = 0; /* we are not chaining*/
2603	}
2604	hpsa_scsi_do_simple_cmd_core(h, c);
2605	if (iocommand.buf_size > 0)
2606		hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
2607	check_ioctl_unit_attention(h, c);
2608
2609	/* Copy the error information out */
2610	memcpy(&iocommand.error_info, c->err_info,
2611		sizeof(iocommand.error_info));
2612	if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
2613		kfree(buff);
2614		cmd_special_free(h, c);
2615		return -EFAULT;
2616	}
2617	if (iocommand.Request.Type.Direction == XFER_READ &&
2618		iocommand.buf_size > 0) {
2619		/* Copy the data out of the buffer we created */
2620		if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
2621			kfree(buff);
2622			cmd_special_free(h, c);
2623			return -EFAULT;
2624		}
2625	}
2626	kfree(buff);
2627	cmd_special_free(h, c);
2628	return 0;
2629}
2630
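/* The "big" passthru below differs from hpsa_passthru_ioctl() mainly in
 * buffer handling: the user buffer (capped at ioc->malloc_size *
 * MAXSGENTRIES bytes) is split into kmalloc'd chunks of at most
 * ioc->malloc_size bytes, with one SG descriptor built per chunk, so a
 * large transfer never needs one large contiguous kernel allocation.
 */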
2631static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
2632{
2633	BIG_IOCTL_Command_struct *ioc;
2634	struct CommandList *c;
2635	unsigned char **buff = NULL;
2636	int *buff_size = NULL;
2637	union u64bit temp64;
2638	BYTE sg_used = 0;
2639	int status = 0;
2640	int i;
2641	u32 left;
2642	u32 sz;
2643	BYTE __user *data_ptr;
2644
2645	if (!argp)
2646		return -EINVAL;
2647	if (!capable(CAP_SYS_RAWIO))
2648		return -EPERM;
2649	ioc = (BIG_IOCTL_Command_struct *)
2650	    kmalloc(sizeof(*ioc), GFP_KERNEL);
2651	if (!ioc) {
2652		status = -ENOMEM;
2653		goto cleanup1;
2654	}
2655	if (copy_from_user(ioc, argp, sizeof(*ioc))) {
2656		status = -EFAULT;
2657		goto cleanup1;
2658	}
2659	if ((ioc->buf_size < 1) &&
2660	    (ioc->Request.Type.Direction != XFER_NONE)) {
2661		status = -EINVAL;
2662		goto cleanup1;
2663	}
2664	/* Check kmalloc limits  using all SGs */
2665	if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
2666		status = -EINVAL;
2667		goto cleanup1;
2668	}
2669	if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
2670		status = -EINVAL;
2671		goto cleanup1;
2672	}
2673	buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
2674	if (!buff) {
2675		status = -ENOMEM;
2676		goto cleanup1;
2677	}
2678	buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL);
2679	if (!buff_size) {
2680		status = -ENOMEM;
2681		goto cleanup1;
2682	}
2683	left = ioc->buf_size;
2684	data_ptr = ioc->buf;
2685	while (left) {
2686		sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
2687		buff_size[sg_used] = sz;
2688		buff[sg_used] = kmalloc(sz, GFP_KERNEL);
2689		if (buff[sg_used] == NULL) {
2690			status = -ENOMEM;
2691			goto cleanup1;
2692		}
2693		if (ioc->Request.Type.Direction == XFER_WRITE) {
2694			if (copy_from_user(buff[sg_used], data_ptr, sz)) {
2695				status = -ENOMEM;
2696				goto cleanup1;
2697			}
2698		} else
2699			memset(buff[sg_used], 0, sz);
2700		left -= sz;
2701		data_ptr += sz;
2702		sg_used++;
2703	}
2704	c = cmd_special_alloc(h);
2705	if (c == NULL) {
2706		status = -ENOMEM;
2707		goto cleanup1;
2708	}
2709	c->cmd_type = CMD_IOCTL_PEND;
2710	c->Header.ReplyQueue = 0;
2711	c->Header.SGList = c->Header.SGTotal = sg_used;
2712	memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
2713	c->Header.Tag.lower = c->busaddr;
2714	memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
2715	if (ioc->buf_size > 0) {
2716		int i;
2717		for (i = 0; i < sg_used; i++) {
2718			temp64.val = pci_map_single(h->pdev, buff[i],
2719				    buff_size[i], PCI_DMA_BIDIRECTIONAL);
2720			c->SG[i].Addr.lower = temp64.val32.lower;
2721			c->SG[i].Addr.upper = temp64.val32.upper;
2722			c->SG[i].Len = buff_size[i];
2723			/* we are not chaining */
2724			c->SG[i].Ext = 0;
2725		}
2726	}
2727	hpsa_scsi_do_simple_cmd_core(h, c);
2728	if (sg_used)
2729		hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
2730	check_ioctl_unit_attention(h, c);
2731	/* Copy the error information out */
2732	memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
2733	if (copy_to_user(argp, ioc, sizeof(*ioc))) {
2734		cmd_special_free(h, c);
2735		status = -EFAULT;
2736		goto cleanup1;
2737	}
2738	if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) {
2739		/* Copy the data out of the buffer we created */
2740		BYTE __user *ptr = ioc->buf;
2741		for (i = 0; i < sg_used; i++) {
2742			if (copy_to_user(ptr, buff[i], buff_size[i])) {
2743				cmd_special_free(h, c);
2744				status = -EFAULT;
2745				goto cleanup1;
2746			}
2747			ptr += buff_size[i];
2748		}
2749	}
2750	cmd_special_free(h, c);
2751	status = 0;
2752cleanup1:
2753	if (buff) {
2754		for (i = 0; i < sg_used; i++)
2755			kfree(buff[i]);
2756		kfree(buff);
2757	}
2758	kfree(buff_size);
2759	kfree(ioc);
2760	return status;
2761}
2762
2763static void check_ioctl_unit_attention(struct ctlr_info *h,
2764	struct CommandList *c)
2765{
2766	if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
2767			c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
2768		(void) check_for_unit_attention(h, c);
2769}
2770/*
2771 * ioctl
2772 */
2773static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
2774{
2775	struct ctlr_info *h;
2776	void __user *argp = (void __user *)arg;
2777
2778	h = sdev_to_hba(dev);
2779
2780	switch (cmd) {
2781	case CCISS_DEREGDISK:
2782	case CCISS_REGNEWDISK:
2783	case CCISS_REGNEWD:
2784		hpsa_scan_start(h->scsi_host);
2785		return 0;
2786	case CCISS_GETPCIINFO:
2787		return hpsa_getpciinfo_ioctl(h, argp);
2788	case CCISS_GETDRIVVER:
2789		return hpsa_getdrivver_ioctl(h, argp);
2790	case CCISS_PASSTHRU:
2791		return hpsa_passthru_ioctl(h, argp);
2792	case CCISS_BIG_PASSTHRU:
2793		return hpsa_big_passthru_ioctl(h, argp);
2794	default:
2795		return -ENOTTY;
2796	}
2797}
2798
2799static int __devinit hpsa_send_host_reset(struct ctlr_info *h,
2800	unsigned char *scsi3addr, u8 reset_type)
2801{
2802	struct CommandList *c;
2803
2804	c = cmd_alloc(h);
2805	if (!c)
2806		return -ENOMEM;
2807	fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
2808		RAID_CTLR_LUNID, TYPE_MSG);
2809	c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
2810	c->waiting = NULL;
2811	enqueue_cmd_and_start_io(h, c);
2812	/* Don't wait for completion, the reset won't complete.  Don't free
2813	 * the command either.  This is the last command we will send before
2814	 * re-initializing everything, so it doesn't matter and won't leak.
2815	 */
2816	return 0;
2817}
2818
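/* fill_cmd() builds the CISS request block for the driver's internally
 * generated commands.  A typical use, as in
 * wait_for_device_to_become_ready() above (a usage sketch, nothing new):
 *
 *	fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD);
 *	hpsa_scsi_do_simple_cmd_core(h, c);
 *
 * buff/size describe the optional data buffer, page_code is only
 * consulted for HPSA_INQUIRY, and cmd_type selects between ordinary
 * commands (TYPE_CMD) and controller messages (TYPE_MSG).
 */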
2819static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
2820	void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
2821	int cmd_type)
2822{
2823	int pci_dir = XFER_NONE;
2824
2825	c->cmd_type = CMD_IOCTL_PEND;
2826	c->Header.ReplyQueue = 0;
2827	if (buff != NULL && size > 0) {
2828		c->Header.SGList = 1;
2829		c->Header.SGTotal = 1;
2830	} else {
2831		c->Header.SGList = 0;
2832		c->Header.SGTotal = 0;
2833	}
2834	c->Header.Tag.lower = c->busaddr;
2835	memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
2836
2837	c->Request.Type.Type = cmd_type;
2838	if (cmd_type == TYPE_CMD) {
2839		switch (cmd) {
2840		case HPSA_INQUIRY:
2841			/* are we trying to read a vital product page */
2842			if (page_code != 0) {
2843				c->Request.CDB[1] = 0x01;
2844				c->Request.CDB[2] = page_code;
2845			}
2846			c->Request.CDBLen = 6;
2847			c->Request.Type.Attribute = ATTR_SIMPLE;
2848			c->Request.Type.Direction = XFER_READ;
2849			c->Request.Timeout = 0;
2850			c->Request.CDB[0] = HPSA_INQUIRY;
2851			c->Request.CDB[4] = size & 0xFF;
2852			break;
2853		case HPSA_REPORT_LOG:
2854		case HPSA_REPORT_PHYS:
2855			/* Talking to the controller, so it's a physical command:
2856			   mode = 00, target = 0.  Nothing to write.
2857			 */
2858			c->Request.CDBLen = 12;
2859			c->Request.Type.Attribute = ATTR_SIMPLE;
2860			c->Request.Type.Direction = XFER_READ;
2861			c->Request.Timeout = 0;
2862			c->Request.CDB[0] = cmd;
2863			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
2864			c->Request.CDB[7] = (size >> 16) & 0xFF;
2865			c->Request.CDB[8] = (size >> 8) & 0xFF;
2866			c->Request.CDB[9] = size & 0xFF;
2867			break;
2868		case HPSA_CACHE_FLUSH:
2869			c->Request.CDBLen = 12;
2870			c->Request.Type.Attribute = ATTR_SIMPLE;
2871			c->Request.Type.Direction = XFER_WRITE;
2872			c->Request.Timeout = 0;
2873			c->Request.CDB[0] = BMIC_WRITE;
2874			c->Request.CDB[6] = BMIC_CACHE_FLUSH;
2875			break;
2876		case TEST_UNIT_READY:
2877			c->Request.CDBLen = 6;
2878			c->Request.Type.Attribute = ATTR_SIMPLE;
2879			c->Request.Type.Direction = XFER_NONE;
2880			c->Request.Timeout = 0;
2881			break;
2882		default:
2883			dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
2884			BUG();
2885			return;
2886		}
2887	} else if (cmd_type == TYPE_MSG) {
2888		switch (cmd) {
2889
2890		case  HPSA_DEVICE_RESET_MSG:
2891			c->Request.CDBLen = 16;
2892			c->Request.Type.Type =  1; /* It is a MSG not a CMD */
2893			c->Request.Type.Attribute = ATTR_SIMPLE;
2894			c->Request.Type.Direction = XFER_NONE;
2895			c->Request.Timeout = 0; /* Don't time out */
2896			memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
2897			c->Request.CDB[0] =  cmd;
2898			c->Request.CDB[1] = 0x03;  /* Reset target above */
2899			/* If bytes 4-7 are zero, it means reset the */
2900			/* LunID device */
2901			c->Request.CDB[4] = 0x00;
2902			c->Request.CDB[5] = 0x00;
2903			c->Request.CDB[6] = 0x00;
2904			c->Request.CDB[7] = 0x00;
2905		break;
2906
2907		default:
2908			dev_warn(&h->pdev->dev, "unknown message type %d\n",
2909				cmd);
2910			BUG();
2911		}
2912	} else {
2913		dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
2914		BUG();
2915	}
2916
2917	switch (c->Request.Type.Direction) {
2918	case XFER_READ:
2919		pci_dir = PCI_DMA_FROMDEVICE;
2920		break;
2921	case XFER_WRITE:
2922		pci_dir = PCI_DMA_TODEVICE;
2923		break;
2924	case XFER_NONE:
2925		pci_dir = PCI_DMA_NONE;
2926		break;
2927	default:
2928		pci_dir = PCI_DMA_BIDIRECTIONAL;
2929	}
2930
2931	hpsa_map_one(h->pdev, c, buff, size, pci_dir);
2932
2933	return;
2934}
2935
2936/*
2937 * Map (physical) PCI mem into (virtual) kernel space
2938 */
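/* For example (made-up numbers, only to illustrate the arithmetic): on
 * a 4K-page system, base = 0xfd5f0230 and size = 0x250 give
 * page_base = 0xfd5f0000 and page_offs = 0x230, so 0x480 bytes are
 * ioremap()ed and the caller gets the mapping plus the offset back.
 */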
2939static void __iomem *remap_pci_mem(ulong base, ulong size)
2940{
2941	ulong page_base = ((ulong) base) & PAGE_MASK;
2942	ulong page_offs = ((ulong) base) - page_base;
2943	void __iomem *page_remapped = ioremap(page_base, page_offs + size);
2944
2945	return page_remapped ? (page_remapped + page_offs) : NULL;
2946}
2947
2948/* Takes cmds off the submission queue and sends them to the hardware,
2949 * then puts them on the queue of cmds waiting for completion.
2950 */
2951static void start_io(struct ctlr_info *h)
2952{
2953	struct CommandList *c;
2954
2955	while (!list_empty(&h->reqQ)) {
2956		c = list_entry(h->reqQ.next, struct CommandList, list);
2957		/* can't do anything if fifo is full */
2958		if ((h->access.fifo_full(h))) {
2959			dev_warn(&h->pdev->dev, "fifo full\n");
2960			break;
2961		}
2962
2963		/* Get the first entry from the Request Q */
2964		removeQ(c);
2965		h->Qdepth--;
2966
2967		/* Tell the controller execute command */
2968		h->access.submit_command(h, c);
2969
2970		/* Put job onto the completed Q */
2971		addQ(&h->cmpQ, c);
2972	}
2973}
2974
2975static inline unsigned long get_next_completion(struct ctlr_info *h)
2976{
2977	return h->access.command_completed(h);
2978}
2979
2980static inline bool interrupt_pending(struct ctlr_info *h)
2981{
2982	return h->access.intr_pending(h);
2983}
2984
2985static inline long interrupt_not_for_us(struct ctlr_info *h)
2986{
2987	return (h->access.intr_pending(h) == 0) ||
2988		(h->interrupts_enabled == 0);
2989}
2990
2991static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
2992	u32 raw_tag)
2993{
2994	if (unlikely(tag_index >= h->nr_cmds)) {
2995		dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
2996		return 1;
2997	}
2998	return 0;
2999}
3000
3001static inline void finish_cmd(struct CommandList *c, u32 raw_tag)
3002{
3003	removeQ(c);
3004	if (likely(c->cmd_type == CMD_SCSI))
3005		complete_scsi_command(c);
3006	else if (c->cmd_type == CMD_IOCTL_PEND)
3007		complete(c->waiting);
3008}
3009
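/* A summary of the tag scheme used below (relying on the DIRECT_LOOKUP_*
 * definitions in hpsa_cmd.h): tags built by the driver carry the
 * command's pool index in the bits above DIRECT_LOOKUP_SHIFT and have
 * DIRECT_LOOKUP_BIT set, so a completion can index straight into
 * h->cmd_pool; the low-order bits are left for error status reported by
 * the controller and are stripped by hpsa_tag_discard_error_bits().
 */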
3010static inline u32 hpsa_tag_contains_index(u32 tag)
3011{
3012	return tag & DIRECT_LOOKUP_BIT;
3013}
3014
3015static inline u32 hpsa_tag_to_index(u32 tag)
3016{
3017	return tag >> DIRECT_LOOKUP_SHIFT;
3018}
3019
3020
3021static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
3022{
3023#define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
3024#define HPSA_SIMPLE_ERROR_BITS 0x03
3025	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
3026		return tag & ~HPSA_SIMPLE_ERROR_BITS;
3027	return tag & ~HPSA_PERF_ERROR_BITS;
3028}
3029
3030/* process completion of an indexed ("direct lookup") command */
3031static inline u32 process_indexed_cmd(struct ctlr_info *h,
3032	u32 raw_tag)
3033{
3034	u32 tag_index;
3035	struct CommandList *c;
3036
3037	tag_index = hpsa_tag_to_index(raw_tag);
3038	if (bad_tag(h, tag_index, raw_tag))
3039		return next_command(h);
3040	c = h->cmd_pool + tag_index;
3041	finish_cmd(c, raw_tag);
3042	return next_command(h);
3043}
3044
3045/* process completion of a non-indexed command */
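/* These commands (allocated by cmd_special_alloc()) use the command's
 * bus address as the tag, so completion walks the cmpQ comparing
 * addresses; both sides are masked with 0xFFFFFFE0, discarding
 * low-order bits that may carry controller status.
 */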
3046static inline u32 process_nonindexed_cmd(struct ctlr_info *h,
3047	u32 raw_tag)
3048{
3049	u32 tag;
3050	struct CommandList *c = NULL;
3051
3052	tag = hpsa_tag_discard_error_bits(h, raw_tag);
3053	list_for_each_entry(c, &h->cmpQ, list) {
3054		if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
3055			finish_cmd(c, raw_tag);
3056			return next_command(h);
3057		}
3058	}
3059	bad_tag(h, h->nr_cmds + 1, raw_tag);
3060	return next_command(h);
3061}
3062
3063/* Some controllers, like p400, will give us one interrupt
3064 * after a soft reset, even if we turned interrupts off.
3065 * Only need to check for this in the hpsa_xxx_discard_completions
3066 * functions.
3067 */
3068static int ignore_bogus_interrupt(struct ctlr_info *h)
3069{
3070	if (likely(!reset_devices))
3071		return 0;
3072
3073	if (likely(h->interrupts_enabled))
3074		return 0;
3075
3076	dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
3077		"(known firmware bug).  Ignoring.\n");
3078
3079	return 1;
3080}
3081
3082static irqreturn_t hpsa_intx_discard_completions(int irq, void *dev_id)
3083{
3084	struct ctlr_info *h = dev_id;
3085	unsigned long flags;
3086	u32 raw_tag;
3087
3088	if (ignore_bogus_interrupt(h))
3089		return IRQ_NONE;
3090
3091	if (interrupt_not_for_us(h))
3092		return IRQ_NONE;
3093	spin_lock_irqsave(&h->lock, flags);
3094	while (interrupt_pending(h)) {
3095		raw_tag = get_next_completion(h);
3096		while (raw_tag != FIFO_EMPTY)
3097			raw_tag = next_command(h);
3098	}
3099	spin_unlock_irqrestore(&h->lock, flags);
3100	return IRQ_HANDLED;
3101}
3102
3103static irqreturn_t hpsa_msix_discard_completions(int irq, void *dev_id)
3104{
3105	struct ctlr_info *h = dev_id;
3106	unsigned long flags;
3107	u32 raw_tag;
3108
3109	if (ignore_bogus_interrupt(h))
3110		return IRQ_NONE;
3111
3112	spin_lock_irqsave(&h->lock, flags);
3113	raw_tag = get_next_completion(h);
3114	while (raw_tag != FIFO_EMPTY)
3115		raw_tag = next_command(h);
3116	spin_unlock_irqrestore(&h->lock, flags);
3117	return IRQ_HANDLED;
3118}
3119
3120static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id)
3121{
3122	struct ctlr_info *h = dev_id;
3123	unsigned long flags;
3124	u32 raw_tag;
3125
3126	if (interrupt_not_for_us(h))
3127		return IRQ_NONE;
3128	spin_lock_irqsave(&h->lock, flags);
3129	while (interrupt_pending(h)) {
3130		raw_tag = get_next_completion(h);
3131		while (raw_tag != FIFO_EMPTY) {
3132			if (hpsa_tag_contains_index(raw_tag))
3133				raw_tag = process_indexed_cmd(h, raw_tag);
3134			else
3135				raw_tag = process_nonindexed_cmd(h, raw_tag);
3136		}
3137	}
3138	spin_unlock_irqrestore(&h->lock, flags);
3139	return IRQ_HANDLED;
3140}
3141
3142static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id)
3143{
3144	struct ctlr_info *h = dev_id;
3145	unsigned long flags;
3146	u32 raw_tag;
3147
3148	spin_lock_irqsave(&h->lock, flags);
3149	raw_tag = get_next_completion(h);
3150	while (raw_tag != FIFO_EMPTY) {
3151		if (hpsa_tag_contains_index(raw_tag))
3152			raw_tag = process_indexed_cmd(h, raw_tag);
3153		else
3154			raw_tag = process_nonindexed_cmd(h, raw_tag);
3155	}
3156	spin_unlock_irqrestore(&h->lock, flags);
3157	return IRQ_HANDLED;
3158}
3159
3160/* Send a message CDB to the firmware. Careful, this only works
3161 * in simple mode, not performant mode due to the tag lookup.
3162 * We only ever use this immediately after a controller reset.
3163 */
3164static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
3165						unsigned char type)
3166{
3167	struct Command {
3168		struct CommandListHeader CommandHeader;
3169		struct RequestBlock Request;
3170		struct ErrDescriptor ErrorDescriptor;
3171	};
3172	struct Command *cmd;
3173	static const size_t cmd_sz = sizeof(*cmd) +
3174					sizeof(cmd->ErrorDescriptor);
3175	dma_addr_t paddr64;
3176	uint32_t paddr32, tag;
3177	void __iomem *vaddr;
3178	int i, err;
3179
3180	vaddr = pci_ioremap_bar(pdev, 0);
3181	if (vaddr == NULL)
3182		return -ENOMEM;
3183
3184	/* The Inbound Post Queue only accepts 32-bit physical addresses for the
3185	 * CCISS commands, so they must be allocated from the lower 4GiB of
3186	 * memory.
3187	 */
3188	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
3189	if (err) {
3190		iounmap(vaddr);
3191		return -ENOMEM;
3192	}
3193
3194	cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
3195	if (cmd == NULL) {
3196		iounmap(vaddr);
3197		return -ENOMEM;
3198	}
3199
3200	/* This must fit, because of the 32-bit consistent DMA mask.  Also,
3201	 * although there's no guarantee, we assume that the address is at
3202	 * least 4-byte aligned (most likely, it's page-aligned).
3203	 */
3204	paddr32 = paddr64;
3205
3206	cmd->CommandHeader.ReplyQueue = 0;
3207	cmd->CommandHeader.SGList = 0;
3208	cmd->CommandHeader.SGTotal = 0;
3209	cmd->CommandHeader.Tag.lower = paddr32;
3210	cmd->CommandHeader.Tag.upper = 0;
3211	memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
3212
3213	cmd->Request.CDBLen = 16;
3214	cmd->Request.Type.Type = TYPE_MSG;
3215	cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE;
3216	cmd->Request.Type.Direction = XFER_NONE;
3217	cmd->Request.Timeout = 0; /* Don't time out */
3218	cmd->Request.CDB[0] = opcode;
3219	cmd->Request.CDB[1] = type;
3220	memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
3221	cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd);
3222	cmd->ErrorDescriptor.Addr.upper = 0;
3223	cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo);
3224
3225	writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);
3226
3227	for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
3228		tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
3229		if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32)
3230			break;
3231		msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
3232	}
3233
3234	iounmap(vaddr);
3235
3236	/* we leak the DMA buffer here ... no choice since the controller could
3237	 *  still complete the command.
3238	 */
3239	if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
3240		dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
3241			opcode, type);
3242		return -ETIMEDOUT;
3243	}
3244
3245	pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
3246
3247	if (tag & HPSA_ERROR_BIT) {
3248		dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
3249			opcode, type);
3250		return -EIO;
3251	}
3252
3253	dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
3254		opcode, type);
3255	return 0;
3256}
3257
3258#define hpsa_noop(p) hpsa_message(p, 3, 0)
3259
3260static int hpsa_controller_hard_reset(struct pci_dev *pdev,
3261	void * __iomem vaddr, u32 use_doorbell)
3262{
3263	u16 pmcsr;
3264	int pos;
3265
3266	if (use_doorbell) {
3267		/* For everything after the P600, the PCI power state method
3268		 * of resetting the controller doesn't work, so we have this
3269		 * other way using the doorbell register.
3270		 */
3271		dev_info(&pdev->dev, "using doorbell to reset controller\n");
3272		writel(use_doorbell, vaddr + SA5_DOORBELL);
3273	} else { /* Try to do it the PCI power state way */
3274
3275		/* Quoting from the Open CISS Specification: "The Power
3276		 * Management Control/Status Register (CSR) controls the power
3277		 * state of the device.  The normal operating state is D0,
3278		 * CSR=00h.  The software off state is D3, CSR=03h.  To reset
3279		 * the controller, place the interface device in D3 then to D0,
3280		 * this causes a secondary PCI reset which will reset the
3281		 * controller." */
3282
3283		pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
3284		if (pos == 0) {
3285			dev_err(&pdev->dev,
3286				"hpsa_reset_controller: "
3287				"PCI PM not supported\n");
3288			return -ENODEV;
3289		}
3290		dev_info(&pdev->dev, "using PCI PM to reset controller\n");
3291		/* enter the D3hot power management state */
3292		pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
3293		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3294		pmcsr |= PCI_D3hot;
3295		pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
3296
3297		msleep(500);
3298
3299		/* enter the D0 power management state */
3300		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3301		pmcsr |= PCI_D0;
3302		pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
3303	}
3304	return 0;
3305}
3306
3307static __devinit void init_driver_version(char *driver_version, int len)
3308{
3309	memset(driver_version, 0, len);
3310	strncpy(driver_version, "hpsa " HPSA_DRIVER_VERSION, len - 1);
3311}
3312
3313static __devinit int write_driver_ver_to_cfgtable(
3314	struct CfgTable __iomem *cfgtable)
3315{
3316	char *driver_version;
3317	int i, size = sizeof(cfgtable->driver_version);
3318
3319	driver_version = kmalloc(size, GFP_KERNEL);
3320	if (!driver_version)
3321		return -ENOMEM;
3322
3323	init_driver_version(driver_version, size);
3324	for (i = 0; i < size; i++)
3325		writeb(driver_version[i], &cfgtable->driver_version[i]);
3326	kfree(driver_version);
3327	return 0;
3328}
3329
3330static __devinit void read_driver_ver_from_cfgtable(
3331	struct CfgTable __iomem *cfgtable, unsigned char *driver_ver)
3332{
3333	int i;
3334
3335	for (i = 0; i < sizeof(cfgtable->driver_version); i++)
3336		driver_ver[i] = readb(&cfgtable->driver_version[i]);
3337}
3338
3339static __devinit int controller_reset_failed(
3340	struct CfgTable __iomem *cfgtable)
3341{
3342
3343	char *driver_ver, *old_driver_ver;
3344	int rc, size = sizeof(cfgtable->driver_version);
3345
3346	old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
3347	if (!old_driver_ver)
3348		return -ENOMEM;
3349	driver_ver = old_driver_ver + size;
3350
3351	/* After a reset, the 32 bytes of "driver version" in the cfgtable
3352	 * should have been changed, otherwise we know the reset failed.
3353	 */
3354	init_driver_version(old_driver_ver, size);
3355	read_driver_ver_from_cfgtable(cfgtable, driver_ver);
3356	rc = !memcmp(driver_ver, old_driver_ver, size);
3357	kfree(old_driver_ver);
3358	return rc;
3359}
3360/* This does a hard reset of the controller using PCI power management
3361 * states or the doorbell register.
3362 */
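/* Rough sequence, as implemented below: save the PCI command register
 * and config space, map the first memory BAR and the config table,
 * record our driver version in the config table, reset via the doorbell
 * (preferred) or via a D3hot/D0 transition, restore config space, wait
 * for the board to go not-ready and then ready again, and finally check
 * whether the driver-version bytes in the config table changed to
 * decide whether the reset really happened.
 */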
3363static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
3364{
3365	u64 cfg_offset;
3366	u32 cfg_base_addr;
3367	u64 cfg_base_addr_index;
3368	void __iomem *vaddr;
3369	unsigned long paddr;
3370	u32 misc_fw_support;
3371	int rc;
3372	struct CfgTable __iomem *cfgtable;
3373	u32 use_doorbell;
3374	u32 board_id;
3375	u16 command_register;
3376
3377	/* For controllers as old as the P600, this is very nearly
3378	 * the same thing as
3379	 *
3380	 * pci_save_state(pci_dev);
3381	 * pci_set_power_state(pci_dev, PCI_D3hot);
3382	 * pci_set_power_state(pci_dev, PCI_D0);
3383	 * pci_restore_state(pci_dev);
3384	 *
3385	 * For controllers newer than the P600, the pci power state
3386	 * method of resetting doesn't work so we have another way
3387	 * using the doorbell register.
3388	 */
3389
3390	rc = hpsa_lookup_board_id(pdev, &board_id);
3391	if (rc < 0 || !ctlr_is_resettable(board_id)) {
3392		dev_warn(&pdev->dev, "Not resetting device.\n");
3393		return -ENODEV;
3394	}
3395
3396	/* if controller is soft- but not hard resettable... */
3397	if (!ctlr_is_hard_resettable(board_id))
3398		return -ENOTSUPP; /* try soft reset later. */
3399
3400	/* Save the PCI command register */
3401	pci_read_config_word(pdev, 4, &command_register);
3402	/* Turn the board off.  This is so that later pci_restore_state()
3403	 * won't turn the board on before the rest of config space is ready.
3404	 */
3405	pci_disable_device(pdev);
3406	pci_save_state(pdev);
3407
3408	/* find the first memory BAR, so we can find the cfg table */
3409	rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
3410	if (rc)
3411		return rc;
3412	vaddr = remap_pci_mem(paddr, 0x250);
3413	if (!vaddr)
3414		return -ENOMEM;
3415
3416	/* find cfgtable in order to check if reset via doorbell is supported */
3417	rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
3418					&cfg_base_addr_index, &cfg_offset);
3419	if (rc)
3420		goto unmap_vaddr;
3421	cfgtable = remap_pci_mem(pci_resource_start(pdev,
3422		       cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
3423	if (!cfgtable) {
3424		rc = -ENOMEM;
3425		goto unmap_vaddr;
3426	}
3427	rc = write_driver_ver_to_cfgtable(cfgtable);
3428	if (rc)
3429		goto unmap_vaddr;
3430
3431	/* If reset via doorbell register is supported, use that.
3432	 * There are two such methods.  Favor the newest method.
3433	 */
3434	misc_fw_support = readl(&cfgtable->misc_fw_support);
3435	use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
3436	if (use_doorbell) {
3437		use_doorbell = DOORBELL_CTLR_RESET2;
3438	} else {
3439		use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
3440		if (use_doorbell) {
3441			dev_warn(&pdev->dev, "Controller claims that "
3442				"'Bit 2 doorbell reset' is "
3443				"supported, but not 'bit 5 doorbell reset'.  "
3444				"Firmware update is recommended.\n");
3445			rc = -ENOTSUPP; /* try soft reset */
3446			goto unmap_cfgtable;
3447		}
3448	}
3449
3450	rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
3451	if (rc)
3452		goto unmap_cfgtable;
3453
3454	pci_restore_state(pdev);
3455	rc = pci_enable_device(pdev);
3456	if (rc) {
3457		dev_warn(&pdev->dev, "failed to enable device.\n");
3458		goto unmap_cfgtable;
3459	}
3460	pci_write_config_word(pdev, 4, command_register);
3461
3462	/* Some devices (notably the HP Smart Array 5i Controller)
3463	   need a little pause here */
3464	msleep(HPSA_POST_RESET_PAUSE_MSECS);
3465
3466	/* Wait for board to become not ready, then ready. */
3467	dev_info(&pdev->dev, "Waiting for board to reset.\n");
3468	rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
3469	if (rc) {
3470		dev_warn(&pdev->dev,
3471			"failed waiting for board to reset."
3472			" Will try soft reset.\n");
3473		rc = -ENOTSUPP; /* Not expected, but try soft reset later */
3474		goto unmap_cfgtable;
3475	}
3476	rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
3477	if (rc) {
3478		dev_warn(&pdev->dev,
3479			"failed waiting for board to become ready "
3480			"after hard reset\n");
3481		goto unmap_cfgtable;
3482	}
3483
3484	rc = controller_reset_failed(cfgtable);
3485	if (rc < 0)
3486		goto unmap_cfgtable;
3487	if (rc) {
3488		dev_warn(&pdev->dev, "Unable to successfully reset "
3489			"controller. Will try soft reset.\n");
3490		rc = -ENOTSUPP;
3491	} else {
3492		dev_info(&pdev->dev, "board ready after hard reset.\n");
3493	}
3494
3495unmap_cfgtable:
3496	iounmap(cfgtable);
3497
3498unmap_vaddr:
3499	iounmap(vaddr);
3500	return rc;
3501}
3502
3503/*
3504 *  We cannot read the structure directly, for portability we must use
3505 *   the io functions.
3506 *   This is for debug only.
3507 */
3508static void print_cfg_table(struct device *dev, struct CfgTable *tb)
3509{
3510#ifdef HPSA_DEBUG
3511	int i;
3512	char temp_name[17];
3513
3514	dev_info(dev, "Controller Configuration information\n");
3515	dev_info(dev, "------------------------------------\n");
3516	for (i = 0; i < 4; i++)
3517		temp_name[i] = readb(&(tb->Signature[i]));
3518	temp_name[4] = '\0';
3519	dev_info(dev, "   Signature = %s\n", temp_name);
3520	dev_info(dev, "   Spec Number = %d\n", readl(&(tb->SpecValence)));
3521	dev_info(dev, "   Transport methods supported = 0x%x\n",
3522	       readl(&(tb->TransportSupport)));
3523	dev_info(dev, "   Transport methods active = 0x%x\n",
3524	       readl(&(tb->TransportActive)));
3525	dev_info(dev, "   Requested transport Method = 0x%x\n",
3526	       readl(&(tb->HostWrite.TransportRequest)));
3527	dev_info(dev, "   Coalesce Interrupt Delay = 0x%x\n",
3528	       readl(&(tb->HostWrite.CoalIntDelay)));
3529	dev_info(dev, "   Coalesce Interrupt Count = 0x%x\n",
3530	       readl(&(tb->HostWrite.CoalIntCount)));
3531	dev_info(dev, "   Max outstanding commands = %d\n",
3532	       readl(&(tb->CmdsOutMax)));
3533	dev_info(dev, "   Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
3534	for (i = 0; i < 16; i++)
3535		temp_name[i] = readb(&(tb->ServerName[i]));
3536	temp_name[16] = '\0';
3537	dev_info(dev, "   Server Name = %s\n", temp_name);
3538	dev_info(dev, "   Heartbeat Counter = 0x%x\n\n\n",
3539		readl(&(tb->HeartBeat)));
3540#endif				/* HPSA_DEBUG */
3541}
3542
3543static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
3544{
3545	int i, offset, mem_type, bar_type;
3546
3547	if (pci_bar_addr == PCI_BASE_ADDRESS_0)	/* looking for BAR zero? */
3548		return 0;
3549	offset = 0;
3550	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
3551		bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
3552		if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
3553			offset += 4;
3554		else {
3555			mem_type = pci_resource_flags(pdev, i) &
3556			    PCI_BASE_ADDRESS_MEM_TYPE_MASK;
3557			switch (mem_type) {
3558			case PCI_BASE_ADDRESS_MEM_TYPE_32:
3559			case PCI_BASE_ADDRESS_MEM_TYPE_1M:
3560				offset += 4;	/* 32 bit */
3561				break;
3562			case PCI_BASE_ADDRESS_MEM_TYPE_64:
3563				offset += 8;
3564				break;
3565			default:	/* reserved in PCI 2.2 */
3566				dev_warn(&pdev->dev,
3567				       "base address is invalid\n");
3568				return -1;
3569				break;
3570			}
3571		}
3572		if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
3573			return i + 1;
3574	}
3575	return -1;
3576}
3577
3578/* If MSI/MSI-X is supported by the kernel we will try to enable it on
3579 * controllers that are capable. If not, we use IO-APIC mode.
3580 */
3581
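/* Preference order below: MSI-X (4 vectors) when the capability is
 * present and can be enabled, otherwise MSI when that capability is
 * present, otherwise the legacy INTx line in h->pdev->irq.  Boards
 * known to misadvertise MSI go straight to the legacy path, and a
 * failed MSI-X enable also falls back to INTx rather than retrying
 * with MSI.
 */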
3582static void __devinit hpsa_interrupt_mode(struct ctlr_info *h)
3583{
3584#ifdef CONFIG_PCI_MSI
3585	int err;
3586	struct msix_entry hpsa_msix_entries[4] = { {0, 0}, {0, 1},
3587	{0, 2}, {0, 3}
3588	};
3589
3590	/* Some boards advertise MSI but don't really support it */
3591	if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
3592	    (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
3593		goto default_int_mode;
3594	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
3595		dev_info(&h->pdev->dev, "MSIX\n");
3596		err = pci_enable_msix(h->pdev, hpsa_msix_entries, 4);
3597		if (!err) {
3598			h->intr[0] = hpsa_msix_entries[0].vector;
3599			h->intr[1] = hpsa_msix_entries[1].vector;
3600			h->intr[2] = hpsa_msix_entries[2].vector;
3601			h->intr[3] = hpsa_msix_entries[3].vector;
3602			h->msix_vector = 1;
3603			return;
3604		}
3605		if (err > 0) {
3606			dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
3607			       "available\n", err);
3608			goto default_int_mode;
3609		} else {
3610			dev_warn(&h->pdev->dev, "MSI-X init failed %d\n",
3611			       err);
3612			goto default_int_mode;
3613		}
3614	}
3615	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
3616		dev_info(&h->pdev->dev, "MSI\n");
3617		if (!pci_enable_msi(h->pdev))
3618			h->msi_vector = 1;
3619		else
3620			dev_warn(&h->pdev->dev, "MSI init failed\n");
3621	}
3622default_int_mode:
3623#endif				/* CONFIG_PCI_MSI */
3624	/* if we get here we're going to use the default interrupt mode */
3625	h->intr[h->intr_mode] = h->pdev->irq;
3626}
3627
3628static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
3629{
3630	int i;
3631	u32 subsystem_vendor_id, subsystem_device_id;
3632
3633	subsystem_vendor_id = pdev->subsystem_vendor;
3634	subsystem_device_id = pdev->subsystem_device;
3635	*board_id = ((subsystem_device_id << 16) & 0xffff0000) |
3636		    subsystem_vendor_id;
3637
3638	for (i = 0; i < ARRAY_SIZE(products); i++)
3639		if (*board_id == products[i].board_id)
3640			return i;
3641
3642	if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
3643		subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
3644		!hpsa_allow_any) {
3645		dev_warn(&pdev->dev, "unrecognized board ID: "
3646			"0x%08x, ignoring.\n", *board_id);
3647			return -ENODEV;
3648	}
3649	return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
3650}
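
/*
 * Worked example (illustrative): the P600 quirk later in this file checks
 * for board_id 0x3225103C.  That is the value the composition above
 * produces for a subsystem device ID of 0x3225 and a subsystem vendor ID
 * of 0x103C (PCI_VENDOR_ID_HP):
 *
 *	((0x3225 << 16) & 0xffff0000) | 0x103C == 0x3225103C
 */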
3651
3652static inline bool hpsa_board_disabled(struct pci_dev *pdev)
3653{
3654	u16 command;
3655
3656	(void) pci_read_config_word(pdev, PCI_COMMAND, &command);
3657	return ((command & PCI_COMMAND_MEMORY) == 0);
3658}
3659
3660static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
3661	unsigned long *memory_bar)
3662{
3663	int i;
3664
3665	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
3666		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
3667			/* addressing mode bits already removed */
3668			*memory_bar = pci_resource_start(pdev, i);
3669			dev_dbg(&pdev->dev, "memory BAR = %lx\n",
3670				*memory_bar);
3671			return 0;
3672		}
3673	dev_warn(&pdev->dev, "no memory BAR found\n");
3674	return -ENODEV;
3675}
3676
3677static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
3678	void __iomem *vaddr, int wait_for_ready)
3679{
3680	int i, iterations;
3681	u32 scratchpad;
3682	if (wait_for_ready)
3683		iterations = HPSA_BOARD_READY_ITERATIONS;
3684	else
3685		iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
3686
3687	for (i = 0; i < iterations; i++) {
3688		scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
3689		if (wait_for_ready) {
3690			if (scratchpad == HPSA_FIRMWARE_READY)
3691				return 0;
3692		} else {
3693			if (scratchpad != HPSA_FIRMWARE_READY)
3694				return 0;
3695		}
3696		msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
3697	}
3698	dev_warn(&pdev->dev, "board not ready, timed out.\n");
3699	return -ENODEV;
3700}
3701
3702static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
3703	void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
3704	u64 *cfg_offset)
3705{
3706	*cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
3707	*cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
3708	*cfg_base_addr &= (u32) 0x0000ffff;
3709	*cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
3710	if (*cfg_base_addr_index == -1) {
3711		dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
3712		return -ENODEV;
3713	}
3714	return 0;
3715}
3716
3717static int __devinit hpsa_find_cfgtables(struct ctlr_info *h)
3718{
3719	u64 cfg_offset;
3720	u32 cfg_base_addr;
3721	u64 cfg_base_addr_index;
3722	u32 trans_offset;
3723	int rc;
3724
3725	rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
3726		&cfg_base_addr_index, &cfg_offset);
3727	if (rc)
3728		return rc;
3729	h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
3730		       cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
3731	if (!h->cfgtable)
3732		return -ENOMEM;
3733	rc = write_driver_ver_to_cfgtable(h->cfgtable);
3734	if (rc)
3735		return rc;
3736	/* Find performant mode table. */
3737	trans_offset = readl(&h->cfgtable->TransMethodOffset);
3738	h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
3739				cfg_base_addr_index)+cfg_offset+trans_offset,
3740				sizeof(*h->transtable));
3741	if (!h->transtable)
3742		return -ENOMEM;
3743	return 0;
3744}
3745
3746static void __devinit hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
3747{
3748	h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
3749
3750	/* Limit commands in memory limited kdump scenario. */
3751	if (reset_devices && h->max_commands > 32)
3752		h->max_commands = 32;
3753
3754	if (h->max_commands < 16) {
3755		dev_warn(&h->pdev->dev, "Controller reports "
3756			"max supported commands of %d, an obvious lie. "
3757			"Using 16.  Ensure that firmware is up to date.\n",
3758			h->max_commands);
3759		h->max_commands = 16;
3760	}
3761}
3762
3763/* Interrogate the hardware for some limits:
3764 * max commands, max SG elements without chaining, and with chaining,
3765 * SG chain block size, etc.
3766 */
3767static void __devinit hpsa_find_board_params(struct ctlr_info *h)
3768{
3769	hpsa_get_max_perf_mode_cmds(h);
3770	h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */
3771	h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
3772	/*
3773	 * Limit in-command s/g elements to 32 to save DMA-able memory.
3774	 * However, the spec says if 0, use 31.
3775	 */
3776	h->max_cmd_sg_entries = 31;
3777	if (h->maxsgentries > 512) {
3778		h->max_cmd_sg_entries = 32;
3779		h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1;
3780		h->maxsgentries--; /* save one for chain pointer */
3781	} else {
3782		h->maxsgentries = 31; /* default to traditional values */
3783		h->chainsize = 0;
3784	}
3785}
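
/*
 * Worked example (hypothetical value): if the config table reported
 * MaxScatterGatherElements = 1024 (> 512), the code above would allow
 * 32 SG entries per command, a chain block of 1024 - 32 + 1 = 993
 * entries, and h->maxsgentries = 1023 after reserving one entry for
 * the chain pointer.
 */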
3786
3787static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
3788{
3789	if ((readb(&h->cfgtable->Signature[0]) != 'C') ||
3790	    (readb(&h->cfgtable->Signature[1]) != 'I') ||
3791	    (readb(&h->cfgtable->Signature[2]) != 'S') ||
3792	    (readb(&h->cfgtable->Signature[3]) != 'S')) {
3793		dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
3794		return false;
3795	}
3796	return true;
3797}
3798
3799/* Need to enable prefetch in the SCSI core for 6400 in x86 */
3800static inline void hpsa_enable_scsi_prefetch(struct ctlr_info *h)
3801{
3802#ifdef CONFIG_X86
3803	u32 prefetch;
3804
3805	prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
3806	prefetch |= 0x100;
3807	writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
3808#endif
3809}
3810
3811/* Disable DMA prefetch for the P600.  Otherwise an ASIC bug may result
3812 * in a prefetch beyond physical memory.
3813 */
3814static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
3815{
3816	u32 dma_prefetch;
3817
3818	if (h->board_id != 0x3225103C)
3819		return;
3820	dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
3821	dma_prefetch |= 0x8000;
3822	writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
3823}
3824
3825static void __devinit hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
3826{
3827	int i;
3828	u32 doorbell_value;
3829	unsigned long flags;
3830
3831	/* under certain very rare conditions, this can take a while.
3832	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
3833	 * as we enter this code.)
3834	 */
3835	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
3836		spin_lock_irqsave(&h->lock, flags);
3837		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
3838		spin_unlock_irqrestore(&h->lock, flags);
3839		if (!(doorbell_value & CFGTBL_ChangeReq))
3840			break;
3841		/* delay and try again */
3842		usleep_range(10000, 20000);
3843	}
3844}
3845
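/*
 * Transport mode changes use the doorbell handshake above: write the
 * requested mode to cfgtable->HostWrite.TransportRequest, ring the
 * SA5_DOORBELL with CFGTBL_ChangeReq, wait for the controller to clear
 * the change-request bit, then verify that cfgtable->TransportActive
 * reports the new mode (see hpsa_enter_simple_mode below and
 * hpsa_enter_performant_mode later in this file).
 */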
3846static int __devinit hpsa_enter_simple_mode(struct ctlr_info *h)
3847{
3848	u32 trans_support;
3849
3850	trans_support = readl(&(h->cfgtable->TransportSupport));
3851	if (!(trans_support & SIMPLE_MODE))
3852		return -ENOTSUPP;
3853
3854	h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
3855	/* Update the field, and then ring the doorbell */
3856	writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
3857	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
3858	hpsa_wait_for_mode_change_ack(h);
3859	print_cfg_table(&h->pdev->dev, h->cfgtable);
3860	if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
3861		dev_warn(&h->pdev->dev,
3862			"unable to get board into simple mode\n");
3863		return -ENODEV;
3864	}
3865	h->transMethod = CFGTBL_Trans_Simple;
3866	return 0;
3867}
3868
3869static int __devinit hpsa_pci_init(struct ctlr_info *h)
3870{
3871	int prod_index, err;
3872
3873	prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
3874	if (prod_index < 0)
3875		return -ENODEV;
3876	h->product_name = products[prod_index].product_name;
3877	h->access = *(products[prod_index].access);
3878
3879	if (hpsa_board_disabled(h->pdev)) {
3880		dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
3881		return -ENODEV;
3882	}
3883	err = pci_enable_device(h->pdev);
3884	if (err) {
3885		dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
3886		return err;
3887	}
3888
3889	err = pci_request_regions(h->pdev, "hpsa");
3890	if (err) {
3891		dev_err(&h->pdev->dev,
3892			"cannot obtain PCI resources, aborting\n");
3893		return err;
3894	}
3895	hpsa_interrupt_mode(h);
3896	err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
3897	if (err)
3898		goto err_out_free_res;
3899	h->vaddr = remap_pci_mem(h->paddr, 0x250);
3900	if (!h->vaddr) {
3901		err = -ENOMEM;
3902		goto err_out_free_res;
3903	}
3904	err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
3905	if (err)
3906		goto err_out_free_res;
3907	err = hpsa_find_cfgtables(h);
3908	if (err)
3909		goto err_out_free_res;
3910	hpsa_find_board_params(h);
3911
3912	if (!hpsa_CISS_signature_present(h)) {
3913		err = -ENODEV;
3914		goto err_out_free_res;
3915	}
3916	hpsa_enable_scsi_prefetch(h);
3917	hpsa_p600_dma_prefetch_quirk(h);
3918	err = hpsa_enter_simple_mode(h);
3919	if (err)
3920		goto err_out_free_res;
3921	return 0;
3922
3923err_out_free_res:
3924	if (h->transtable)
3925		iounmap(h->transtable);
3926	if (h->cfgtable)
3927		iounmap(h->cfgtable);
3928	if (h->vaddr)
3929		iounmap(h->vaddr);
3930	/*
3931	 * Deliberately omit pci_disable_device(): it does something nasty to
3932	 * Smart Array controllers that pci_enable_device does not undo
3933	 */
3934	pci_release_regions(h->pdev);
3935	return err;
3936}
3937
3938static void __devinit hpsa_hba_inquiry(struct ctlr_info *h)
3939{
3940	int rc;
3941
3942#define HBA_INQUIRY_BYTE_COUNT 64
3943	h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
3944	if (!h->hba_inquiry_data)
3945		return;
3946	rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
3947		h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
3948	if (rc != 0) {
3949		kfree(h->hba_inquiry_data);
3950		h->hba_inquiry_data = NULL;
3951	}
3952}
3953
3954static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev)
3955{
3956	int rc, i;
3957
3958	if (!reset_devices)
3959		return 0;
3960
3961	/* Reset the controller with a PCI power-cycle or via doorbell */
3962	rc = hpsa_kdump_hard_reset_controller(pdev);
3963
3964	/* -ENOTSUPP here means we cannot reset the controller
3965	 * but it's already (and still) up and running in
3966	 * "performant mode".  Or, it might be 640x, which can't reset
3967	 * due to concerns about shared bbwc between 6402/6404 pair.
3968	 */
3969	if (rc == -ENOTSUPP)
3970		return rc; /* just try to do the kdump anyhow. */
3971	if (rc)
3972		return -ENODEV;
3973
3974	/* Now try to get the controller to respond to a no-op */
3975	dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
3976	for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
3977		if (hpsa_noop(pdev) == 0)
3978			break;
3979		else
3980			dev_warn(&pdev->dev, "no-op failed%s\n",
3981				(i < HPSA_POST_RESET_NOOP_RETRIES - 1 ? "; re-trying" : ""));
3982	}
3983	return 0;
3984}
3985
3986static __devinit int hpsa_allocate_cmd_pool(struct ctlr_info *h)
3987{
3988	h->cmd_pool_bits = kzalloc(
3989		DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
3990		sizeof(unsigned long), GFP_KERNEL);
3991	h->cmd_pool = pci_alloc_consistent(h->pdev,
3992		    h->nr_cmds * sizeof(*h->cmd_pool),
3993		    &(h->cmd_pool_dhandle));
3994	h->errinfo_pool = pci_alloc_consistent(h->pdev,
3995		    h->nr_cmds * sizeof(*h->errinfo_pool),
3996		    &(h->errinfo_pool_dhandle));
3997	if ((h->cmd_pool_bits == NULL)
3998	    || (h->cmd_pool == NULL)
3999	    || (h->errinfo_pool == NULL)) {
4000		dev_err(&h->pdev->dev, "out of memory in %s", __func__);
4001		return -ENOMEM;
4002	}
4003	return 0;
4004}
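
/*
 * Sizing note (illustrative, hypothetical numbers): cmd_pool_bits is a
 * bitmap with one bit per command.  With h->nr_cmds = 252 and
 * BITS_PER_LONG = 64, the allocation above is
 * DIV_ROUND_UP(252, 64) * sizeof(unsigned long) = 4 * 8 = 32 bytes.
 */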
4005
4006static void hpsa_free_cmd_pool(struct ctlr_info *h)
4007{
4008	kfree(h->cmd_pool_bits);
4009	if (h->cmd_pool)
4010		pci_free_consistent(h->pdev,
4011			    h->nr_cmds * sizeof(struct CommandList),
4012			    h->cmd_pool, h->cmd_pool_dhandle);
4013	if (h->errinfo_pool)
4014		pci_free_consistent(h->pdev,
4015			    h->nr_cmds * sizeof(struct ErrorInfo),
4016			    h->errinfo_pool,
4017			    h->errinfo_pool_dhandle);
4018}
4019
4020static int hpsa_request_irq(struct ctlr_info *h,
4021	irqreturn_t (*msixhandler)(int, void *),
4022	irqreturn_t (*intxhandler)(int, void *))
4023{
4024	int rc;
4025
4026	if (h->msix_vector || h->msi_vector)
4027		rc = request_irq(h->intr[h->intr_mode], msixhandler,
4028				IRQF_DISABLED, h->devname, h);
4029	else
4030		rc = request_irq(h->intr[h->intr_mode], intxhandler,
4031				IRQF_DISABLED, h->devname, h);
4032	if (rc) {
4033		dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
4034		       h->intr[h->intr_mode], h->devname);
4035		return -ENODEV;
4036	}
4037	return 0;
4038}
4039
4040static int __devinit hpsa_kdump_soft_reset(struct ctlr_info *h)
4041{
4042	if (hpsa_send_host_reset(h, RAID_CTLR_LUNID,
4043		HPSA_RESET_TYPE_CONTROLLER)) {
4044		dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
4045		return -EIO;
4046	}
4047
4048	dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
4049	if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
4050		dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
4051		return -1;
4052	}
4053
4054	dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
4055	if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
4056		dev_warn(&h->pdev->dev, "Board failed to become ready "
4057			"after soft reset.\n");
4058		return -1;
4059	}
4060
4061	return 0;
4062}
4063
4064static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
4065{
4066	free_irq(h->intr[h->intr_mode], h);
4067#ifdef CONFIG_PCI_MSI
4068	if (h->msix_vector)
4069		pci_disable_msix(h->pdev);
4070	else if (h->msi_vector)
4071		pci_disable_msi(h->pdev);
4072#endif /* CONFIG_PCI_MSI */
4073	hpsa_free_sg_chain_blocks(h);
4074	hpsa_free_cmd_pool(h);
4075	kfree(h->blockFetchTable);
4076	pci_free_consistent(h->pdev, h->reply_pool_size,
4077		h->reply_pool, h->reply_pool_dhandle);
4078	if (h->vaddr)
4079		iounmap(h->vaddr);
4080	if (h->transtable)
4081		iounmap(h->transtable);
4082	if (h->cfgtable)
4083		iounmap(h->cfgtable);
4084	pci_release_regions(h->pdev);
4085	kfree(h);
4086}
4087
4088static int __devinit hpsa_init_one(struct pci_dev *pdev,
4089				    const struct pci_device_id *ent)
4090{
4091	int dac, rc;
4092	struct ctlr_info *h;
4093	int try_soft_reset = 0;
4094	unsigned long flags;
4095
4096	if (number_of_controllers == 0)
4097		printk(KERN_INFO DRIVER_NAME "\n");
4098
4099	rc = hpsa_init_reset_devices(pdev);
4100	if (rc) {
4101		if (rc != -ENOTSUPP)
4102			return rc;
4103		/* If the reset fails in a particular way (it has no way to do
4104		 * a proper hard reset, so returns -ENOTSUPP) we can try to do
4105		 * a soft reset once we get the controller configured up to the
4106		 * point that it can accept a command.
4107		 */
4108		try_soft_reset = 1;
4109		rc = 0;
4110	}
4111
4112reinit_after_soft_reset:
4113
4114	/* Command structures must be aligned on a 32-byte boundary because
4115	 * the 5 lower bits of the address are used by the hardware and by
4116	 * the driver.  See comments in hpsa.h for more info.
4117	 */
4118#define COMMANDLIST_ALIGNMENT 32
4119	BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
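	/*
	 * Illustrative note: with 32-byte alignment the low 5 bits of every
	 * command's bus address are zero, which is what allows the driver
	 * and controller to reuse those bits (e.g. the 3-bit block-fetch
	 * size encoded into the tag for performant mode) without corrupting
	 * the address.
	 */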
4120	h = kzalloc(sizeof(*h), GFP_KERNEL);
4121	if (!h)
4122		return -ENOMEM;
4123
4124	h->pdev = pdev;
4125	h->busy_initializing = 1;
4126	h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
4127	INIT_LIST_HEAD(&h->cmpQ);
4128	INIT_LIST_HEAD(&h->reqQ);
4129	spin_lock_init(&h->lock);
4130	spin_lock_init(&h->scan_lock);
4131	rc = hpsa_pci_init(h);
4132	if (rc != 0)
4133		goto clean1;
4134
4135	sprintf(h->devname, "hpsa%d", number_of_controllers);
4136	h->ctlr = number_of_controllers;
4137	number_of_controllers++;
4138
4139	/* configure PCI DMA stuff */
4140	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
4141	if (rc == 0) {
4142		dac = 1;
4143	} else {
4144		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4145		if (rc == 0) {
4146			dac = 0;
4147		} else {
4148			dev_err(&pdev->dev, "no suitable DMA available\n");
4149			goto clean1;
4150		}
4151	}
4152
4153	/* make sure the board interrupts are off */
4154	h->access.set_intr_mask(h, HPSA_INTR_OFF);
4155
4156	if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
4157		goto clean2;
4158	dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
4159	       h->devname, pdev->device,
4160	       h->intr[h->intr_mode], dac ? "" : " not");
4161	if (hpsa_allocate_cmd_pool(h))
4162		goto clean4;
4163	if (hpsa_allocate_sg_chain_blocks(h))
4164		goto clean4;
4165	init_waitqueue_head(&h->scan_wait_queue);
4166	h->scan_finished = 1; /* no scan currently in progress */
4167
4168	pci_set_drvdata(pdev, h);
4169	h->ndevices = 0;
4170	h->scsi_host = NULL;
4171	spin_lock_init(&h->devlock);
4172	hpsa_put_ctlr_into_performant_mode(h);
4173
4174	/* At this point, the controller is ready to take commands.
4175	 * Now, if reset_devices and the hard reset didn't work, try
4176	 * the soft reset and see if that works.
4177	 */
4178	if (try_soft_reset) {
4179
4180		/* This is kind of gross.  We may or may not get a completion
4181		 * from the soft reset command, and if we do, then the value
4182		 * from the fifo may or may not be valid.  So, we wait 10 secs
4183		 * after the reset throwing away any completions we get during
4184		 * that time.  Unregister the interrupt handler and register
4185		 * fake ones to scoop up any residual completions.
4186		 */
4187		spin_lock_irqsave(&h->lock, flags);
4188		h->access.set_intr_mask(h, HPSA_INTR_OFF);
4189		spin_unlock_irqrestore(&h->lock, flags);
4190		free_irq(h->intr[h->intr_mode], h);
4191		rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
4192					hpsa_intx_discard_completions);
4193		if (rc) {
4194			dev_warn(&h->pdev->dev, "Failed to request_irq after "
4195				"soft reset.\n");
4196			goto clean4;
4197		}
4198
4199		rc = hpsa_kdump_soft_reset(h);
4200		if (rc)
4201			/* Neither hard nor soft reset worked, we're hosed. */
4202			goto clean4;
4203
4204		dev_info(&h->pdev->dev, "Board READY.\n");
4205		dev_info(&h->pdev->dev,
4206			"Waiting for stale completions to drain.\n");
4207		h->access.set_intr_mask(h, HPSA_INTR_ON);
4208		msleep(10000);
4209		h->access.set_intr_mask(h, HPSA_INTR_OFF);
4210
4211		rc = controller_reset_failed(h->cfgtable);
4212		if (rc)
4213			dev_info(&h->pdev->dev,
4214				"Soft reset appears to have failed.\n");
4215
4216		/* since the controller's reset, we have to go back and re-init
4217		 * everything.  Easiest to just forget what we've done and do it
4218		 * all over again.
4219		 */
4220		hpsa_undo_allocations_after_kdump_soft_reset(h);
4221		try_soft_reset = 0;
4222		if (rc)
4223			/* don't go to clean4, we already unallocated */
4224			return -ENODEV;
4225
4226		goto reinit_after_soft_reset;
4227	}
4228
4229	/* Turn the interrupts on so we can service requests */
4230	h->access.set_intr_mask(h, HPSA_INTR_ON);
4231
4232	hpsa_hba_inquiry(h);
4233	hpsa_register_scsi(h);	/* hook ourselves into SCSI subsystem */
4234	h->busy_initializing = 0;
4235	return 1;
4236
4237clean4:
4238	hpsa_free_sg_chain_blocks(h);
4239	hpsa_free_cmd_pool(h);
4240	free_irq(h->intr[h->intr_mode], h);
4241clean2:
4242clean1:
4243	h->busy_initializing = 0;
4244	kfree(h);
4245	return rc;
4246}
4247
4248static void hpsa_flush_cache(struct ctlr_info *h)
4249{
4250	char *flush_buf;
4251	struct CommandList *c;
4252
4253	flush_buf = kzalloc(4, GFP_KERNEL);
4254	if (!flush_buf)
4255		return;
4256
4257	c = cmd_special_alloc(h);
4258	if (!c) {
4259		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
4260		goto out_of_memory;
4261	}
4262	fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
4263		RAID_CTLR_LUNID, TYPE_CMD);
4264	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
4265	if (c->err_info->CommandStatus != 0)
4266		dev_warn(&h->pdev->dev,
4267			"error flushing cache on controller\n");
4268	cmd_special_free(h, c);
4269out_of_memory:
4270	kfree(flush_buf);
4271}
4272
4273static void hpsa_shutdown(struct pci_dev *pdev)
4274{
4275	struct ctlr_info *h;
4276
4277	h = pci_get_drvdata(pdev);
4278	/* Turn board interrupts off and send the flush cache command.
4279	 * sendcmd will turn off interrupts and send the flush,
4280	 * writing all data in the battery-backed cache to the disks.
4281	 */
4282	hpsa_flush_cache(h);
4283	h->access.set_intr_mask(h, HPSA_INTR_OFF);
4284	free_irq(h->intr[h->intr_mode], h);
4285#ifdef CONFIG_PCI_MSI
4286	if (h->msix_vector)
4287		pci_disable_msix(h->pdev);
4288	else if (h->msi_vector)
4289		pci_disable_msi(h->pdev);
4290#endif				/* CONFIG_PCI_MSI */
4291}
4292
4293static void __devexit hpsa_remove_one(struct pci_dev *pdev)
4294{
4295	struct ctlr_info *h;
4296
4297	if (pci_get_drvdata(pdev) == NULL) {
4298		dev_err(&pdev->dev, "unable to remove device\n");
4299		return;
4300	}
4301	h = pci_get_drvdata(pdev);
4302	hpsa_unregister_scsi(h);	/* unhook from SCSI subsystem */
4303	hpsa_shutdown(pdev);
4304	iounmap(h->vaddr);
4305	iounmap(h->transtable);
4306	iounmap(h->cfgtable);
4307	hpsa_free_sg_chain_blocks(h);
4308	pci_free_consistent(h->pdev,
4309		h->nr_cmds * sizeof(struct CommandList),
4310		h->cmd_pool, h->cmd_pool_dhandle);
4311	pci_free_consistent(h->pdev,
4312		h->nr_cmds * sizeof(struct ErrorInfo),
4313		h->errinfo_pool, h->errinfo_pool_dhandle);
4314	pci_free_consistent(h->pdev, h->reply_pool_size,
4315		h->reply_pool, h->reply_pool_dhandle);
4316	kfree(h->cmd_pool_bits);
4317	kfree(h->blockFetchTable);
4318	kfree(h->hba_inquiry_data);
4319	/*
4320	 * Deliberately omit pci_disable_device(): it does something nasty to
4321	 * Smart Array controllers that pci_enable_device does not undo
4322	 */
4323	pci_release_regions(pdev);
4324	pci_set_drvdata(pdev, NULL);
4325	kfree(h);
4326}
4327
4328static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
4329	__attribute__((unused)) pm_message_t state)
4330{
4331	return -ENOSYS;
4332}
4333
4334static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
4335{
4336	return -ENOSYS;
4337}
4338
4339static struct pci_driver hpsa_pci_driver = {
4340	.name = "hpsa",
4341	.probe = hpsa_init_one,
4342	.remove = __devexit_p(hpsa_remove_one),
4343	.id_table = hpsa_pci_device_id,	/* id_table */
4344	.shutdown = hpsa_shutdown,
4345	.suspend = hpsa_suspend,
4346	.resume = hpsa_resume,
4347};
4348
4349/* Fill in bucket_map[], given nsgs (the max number of
4350 * scatter gather elements supported) and bucket[],
4351 * which is an array of 8 integers.  The bucket[] array
4352 * contains 8 different DMA transfer sizes (in 16
4353 * byte increments) which the controller uses to fetch
4354 * commands.  This function fills in bucket_map[], which
4355 * maps a given number of scatter gather elements to one of
4356 * the 8 DMA transfer sizes.  The point of it is to allow the
4357 * controller to only do as much DMA as needed to fetch the
4358 * command, with the DMA transfer size encoded in the lower
4359 * bits of the command address.
4360 */
4361static void  calc_bucket_map(int bucket[], int num_buckets,
4362	int nsgs, int *bucket_map)
4363{
4364	int i, j, b, size;
4365
4366	/* even a command with 0 SGs requires 4 blocks */
4367#define MINIMUM_TRANSFER_BLOCKS 4
4368#define NUM_BUCKETS 8
4369	/* Note, bucket_map must have nsgs+1 entries. */
4370	for (i = 0; i <= nsgs; i++) {
4371		/* Compute size of a command with i SG entries */
4372		size = i + MINIMUM_TRANSFER_BLOCKS;
4373		b = num_buckets; /* Assume the biggest bucket */
4374		/* Find the bucket that is just big enough */
4375		for (j = 0; j < num_buckets; j++) {
4376			if (bucket[j] >= size) {
4377				b = j;
4378				break;
4379			}
4380		}
4381		/* for a command with i SG entries, use bucket b. */
4382		bucket_map[i] = b;
4383	}
4384}
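
/*
 * Worked example (using the bft[] table defined in
 * hpsa_enter_performant_mode below): with MINIMUM_TRANSFER_BLOCKS = 4 and
 * bucket[] = {5, 6, 8, 10, 12, 20, 28, ...}, a command with 3 SG entries
 * has size 3 + 4 = 7 blocks; the first bucket >= 7 is bucket[2] = 8, so
 * bucket_map[3] = 2 and the controller fetches 8 * 16 = 128 bytes for
 * that command.
 */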
4385
4386static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
4387	u32 use_short_tags)
4388{
4389	int i;
4390	unsigned long register_value;
4391
4392	/* This is a bit complicated.  There are 8 registers on
4393	 * the controller which we write to tell it 8 different
4394	 * sizes of commands which there may be.  It's a way of
4395	 * reducing the DMA done to fetch each command.  Encoded into
4396	 * each command's tag are 3 bits which communicate to the controller
4397	 * which of the eight sizes that command fits within.  The size of
4398	 * each command depends on how many scatter gather entries there are.
4399	 * Each SG entry requires 16 bytes.  The eight registers are programmed
4400	 * with the number of 16-byte blocks a command of that size requires.
4401	 * The smallest command possible requires 5 such 16-byte blocks.
4402	 * The largest command possible requires MAXSGENTRIES + 4 16-byte
4403	 * blocks.  Note, this only extends to the SG entries contained
4404	 * within the command block, and does not extend to chained blocks
4405	 * of SG elements.   bft[] contains the eight values we write to
4406	 * the registers.  They are not evenly distributed, but have more
4407	 * sizes for small commands, and fewer sizes for larger commands.
4408	 */
4409	int bft[8] = {5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4};
4410	BUILD_BUG_ON(28 > MAXSGENTRIES + 4);
4411	/*  5 = 1 s/g entry or 4k
4412	 *  6 = 2 s/g entry or 8k
4413	 *  8 = 4 s/g entry or 16k
4414	 * 10 = 6 s/g entry or 24k
4415	 */
4416
4417	h->reply_pool_wraparound = 1; /* spec: init to 1 */
4418
4419	/* Controller spec: zero out this buffer. */
4420	memset(h->reply_pool, 0, h->reply_pool_size);
4421	h->reply_pool_head = h->reply_pool;
4422
4423	bft[7] = h->max_sg_entries + 4;
4424	calc_bucket_map(bft, ARRAY_SIZE(bft), 32, h->blockFetchTable);
4425	for (i = 0; i < 8; i++)
4426		writel(bft[i], &h->transtable->BlockFetch[i]);
4427
4428	/* size of controller ring buffer */
4429	writel(h->max_commands, &h->transtable->RepQSize);
4430	writel(1, &h->transtable->RepQCount);
4431	writel(0, &h->transtable->RepQCtrAddrLow32);
4432	writel(0, &h->transtable->RepQCtrAddrHigh32);
4433	writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
4434	writel(0, &h->transtable->RepQAddr0High32);
4435	writel(CFGTBL_Trans_Performant | use_short_tags,
4436		&(h->cfgtable->HostWrite.TransportRequest));
4437	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
4438	hpsa_wait_for_mode_change_ack(h);
4439	register_value = readl(&(h->cfgtable->TransportActive));
4440	if (!(register_value & CFGTBL_Trans_Performant)) {
4441		dev_warn(&h->pdev->dev, "unable to get board into"
4442					" performant mode\n");
4443		return;
4444	}
4445	/* Change the access methods to the performant access methods */
4446	h->access = SA5_performant_access;
4447	h->transMethod = CFGTBL_Trans_Performant;
4448}
4449
4450static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
4451{
4452	u32 trans_support;
4453
4454	if (hpsa_simple_mode)
4455		return;
4456
4457	trans_support = readl(&(h->cfgtable->TransportSupport));
4458	if (!(trans_support & PERFORMANT_MODE))
4459		return;
4460
4461	hpsa_get_max_perf_mode_cmds(h);
4462	h->max_sg_entries = 32;
4463	/* Performant mode ring buffer and supporting data structures */
4464	h->reply_pool_size = h->max_commands * sizeof(u64);
4465	h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
4466				&(h->reply_pool_dhandle));
4467
4468	/* Need a block fetch table for performant mode */
4469	h->blockFetchTable = kmalloc(((h->max_sg_entries+1) *
4470				sizeof(u32)), GFP_KERNEL);
4471
4472	if ((h->reply_pool == NULL)
4473		|| (h->blockFetchTable == NULL))
4474		goto clean_up;
4475
4476	hpsa_enter_performant_mode(h,
4477		trans_support & CFGTBL_Trans_use_short_tags);
4478
4479	return;
4480
4481clean_up:
4482	if (h->reply_pool)
4483		pci_free_consistent(h->pdev, h->reply_pool_size,
4484			h->reply_pool, h->reply_pool_dhandle);
4485	kfree(h->blockFetchTable);
4486}
4487
4488/*
4489 *  This is it.  Register the PCI driver information for the cards we control;
4490 *  the OS will call our registered routines when it finds one of our cards.
4491 */
4492static int __init hpsa_init(void)
4493{
4494	return pci_register_driver(&hpsa_pci_driver);
4495}
4496
4497static void __exit hpsa_cleanup(void)
4498{
4499	pci_unregister_driver(&hpsa_pci_driver);
4500}
4501
4502module_init(hpsa_init);
4503module_exit(hpsa_cleanup);