    1// SPDX-License-Identifier: GPL-2.0
    2/*
    3 *    driver for Microchip PQI-based storage controllers
    4 *    Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
    5 *    Copyright (c) 2016-2018 Microsemi Corporation
    6 *    Copyright (c) 2016 PMC-Sierra, Inc.
    7 *
    8 *    Questions/Comments/Bugfixes to storagedev@microchip.com
    9 *
   10 */
   11
   12#include <linux/module.h>
   13#include <linux/kernel.h>
   14#include <linux/pci.h>
   15#include <linux/delay.h>
   16#include <linux/interrupt.h>
   17#include <linux/sched.h>
   18#include <linux/rtc.h>
   19#include <linux/bcd.h>
   20#include <linux/reboot.h>
   21#include <linux/cciss_ioctl.h>
   22#include <linux/blk-mq-pci.h>
   23#include <scsi/scsi_host.h>
   24#include <scsi/scsi_cmnd.h>
   25#include <scsi/scsi_device.h>
   26#include <scsi/scsi_eh.h>
   27#include <scsi/scsi_transport_sas.h>
   28#include <asm/unaligned.h>
   29#include "smartpqi.h"
   30#include "smartpqi_sis.h"
   31
   32#if !defined(BUILD_TIMESTAMP)
   33#define BUILD_TIMESTAMP
   34#endif
   35
   36#define DRIVER_VERSION		"2.1.20-035"
   37#define DRIVER_MAJOR		2
   38#define DRIVER_MINOR		1
   39#define DRIVER_RELEASE		20
   40#define DRIVER_REVISION		35
   41
   42#define DRIVER_NAME		"Microchip SmartPQI Driver (v" \
   43				DRIVER_VERSION BUILD_TIMESTAMP ")"
   44#define DRIVER_NAME_SHORT	"smartpqi"
   45
   46#define PQI_EXTRA_SGL_MEMORY	(12 * sizeof(struct pqi_sg_descriptor))
   47
   48#define PQI_POST_RESET_DELAY_SECS			5
   49#define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS	10
   50
   51MODULE_AUTHOR("Microchip");
   52MODULE_DESCRIPTION("Driver for Microchip Smart Family Controller version "
   53	DRIVER_VERSION);
   54MODULE_VERSION(DRIVER_VERSION);
   55MODULE_LICENSE("GPL");
   56
   57struct pqi_cmd_priv {
   58	int this_residual;
   59};
   60
   61static struct pqi_cmd_priv *pqi_cmd_priv(struct scsi_cmnd *cmd)
   62{
   63	return scsi_cmd_priv(cmd);
   64}
   65
   66static void pqi_verify_structures(void);
   67static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
   68	enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason);
   69static void pqi_ctrl_offline_worker(struct work_struct *work);
   70static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
   71static void pqi_scan_start(struct Scsi_Host *shost);
   72static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
   73	struct pqi_queue_group *queue_group, enum pqi_io_path path,
   74	struct pqi_io_request *io_request);
   75static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
   76	struct pqi_iu_header *request, unsigned int flags,
   77	struct pqi_raid_error_info *error_info);
   78static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
   79	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
   80	unsigned int cdb_length, struct pqi_queue_group *queue_group,
   81	struct pqi_encryption_info *encryption_info, bool raid_bypass, bool io_high_prio);
    82static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
   83	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
   84	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
   85	struct pqi_scsi_dev_raid_map_data *rmd);
   86static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
   87	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
   88	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
   89	struct pqi_scsi_dev_raid_map_data *rmd);
   90static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
   91static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
   92static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs);
   93static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info);
   94static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
   95static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
   96static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
   97	struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs);
   98static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info);
   99
  100/* for flags argument to pqi_submit_raid_request_synchronous() */
  101#define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1
  102
  103static struct scsi_transport_template *pqi_sas_transport_template;
  104
  105static atomic_t pqi_controller_count = ATOMIC_INIT(0);
  106
  107enum pqi_lockup_action {
  108	NONE,
  109	REBOOT,
  110	PANIC
  111};
  112
  113static enum pqi_lockup_action pqi_lockup_action = NONE;
  114
  115static struct {
  116	enum pqi_lockup_action	action;
  117	char			*name;
  118} pqi_lockup_actions[] = {
  119	{
  120		.action = NONE,
  121		.name = "none",
  122	},
  123	{
  124		.action = REBOOT,
  125		.name = "reboot",
  126	},
  127	{
  128		.action = PANIC,
  129		.name = "panic",
  130	},
  131};
  132
  133static unsigned int pqi_supported_event_types[] = {
  134	PQI_EVENT_TYPE_HOTPLUG,
  135	PQI_EVENT_TYPE_HARDWARE,
  136	PQI_EVENT_TYPE_PHYSICAL_DEVICE,
  137	PQI_EVENT_TYPE_LOGICAL_DEVICE,
  138	PQI_EVENT_TYPE_OFA,
  139	PQI_EVENT_TYPE_AIO_STATE_CHANGE,
  140	PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
  141};
  142
  143static int pqi_disable_device_id_wildcards;
  144module_param_named(disable_device_id_wildcards,
  145	pqi_disable_device_id_wildcards, int, 0644);
  146MODULE_PARM_DESC(disable_device_id_wildcards,
  147	"Disable device ID wildcards.");
  148
  149static int pqi_disable_heartbeat;
  150module_param_named(disable_heartbeat,
  151	pqi_disable_heartbeat, int, 0644);
  152MODULE_PARM_DESC(disable_heartbeat,
  153	"Disable heartbeat.");
  154
  155static int pqi_disable_ctrl_shutdown;
  156module_param_named(disable_ctrl_shutdown,
  157	pqi_disable_ctrl_shutdown, int, 0644);
  158MODULE_PARM_DESC(disable_ctrl_shutdown,
  159	"Disable controller shutdown when controller locked up.");
  160
  161static char *pqi_lockup_action_param;
  162module_param_named(lockup_action,
  163	pqi_lockup_action_param, charp, 0644);
  164MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
  165	"\t\tSupported: none, reboot, panic\n"
  166	"\t\tDefault: none");
  167
  168static int pqi_expose_ld_first;
  169module_param_named(expose_ld_first,
  170	pqi_expose_ld_first, int, 0644);
  171MODULE_PARM_DESC(expose_ld_first, "Expose logical drives before physical drives.");
  172
  173static int pqi_hide_vsep;
  174module_param_named(hide_vsep,
  175	pqi_hide_vsep, int, 0644);
  176MODULE_PARM_DESC(hide_vsep, "Hide the virtual SEP for direct attached drives.");
  177
  178static int pqi_disable_managed_interrupts;
  179module_param_named(disable_managed_interrupts,
  180	pqi_disable_managed_interrupts, int, 0644);
  181MODULE_PARM_DESC(disable_managed_interrupts,
  182	"Disable the kernel automatically assigning SMP affinity to IRQs.");
  183
  184static unsigned int pqi_ctrl_ready_timeout_secs;
  185module_param_named(ctrl_ready_timeout,
  186	pqi_ctrl_ready_timeout_secs, uint, 0644);
  187MODULE_PARM_DESC(ctrl_ready_timeout,
  188	"Timeout in seconds for driver to wait for controller ready.");
  189
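/*
 * Example (hypothetical values): the module parameters above can be set at
 * load time, e.g. "modprobe smartpqi lockup_action=reboot ctrl_ready_timeout=180".
 */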
  190static char *raid_levels[] = {
  191	"RAID-0",
  192	"RAID-4",
  193	"RAID-1(1+0)",
  194	"RAID-5",
  195	"RAID-5+1",
  196	"RAID-6",
  197	"RAID-1(Triple)",
  198};
  199
  200static char *pqi_raid_level_to_string(u8 raid_level)
  201{
  202	if (raid_level < ARRAY_SIZE(raid_levels))
  203		return raid_levels[raid_level];
  204
  205	return "RAID UNKNOWN";
  206}
  207
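/* The SA_RAID_* values below index the raid_levels[] table above. */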
  208#define SA_RAID_0		0
  209#define SA_RAID_4		1
  210#define SA_RAID_1		2	/* also used for RAID 10 */
  211#define SA_RAID_5		3	/* also used for RAID 50 */
  212#define SA_RAID_51		4
  213#define SA_RAID_6		5	/* also used for RAID 60 */
  214#define SA_RAID_TRIPLE		6	/* also used for RAID 1+0 Triple */
  215#define SA_RAID_MAX		SA_RAID_TRIPLE
  216#define SA_RAID_UNKNOWN		0xff
  217
  218static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
  219{
  220	pqi_prep_for_scsi_done(scmd);
  221	scsi_done(scmd);
  222}
  223
  224static inline void pqi_disable_write_same(struct scsi_device *sdev)
  225{
  226	sdev->no_write_same = 1;
  227}
  228
  229static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
  230{
  231	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
  232}
  233
  234static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
  235{
  236	return !device->is_physical_device;
  237}
  238
  239static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
  240{
  241	return scsi3addr[2] != 0;
  242}
  243
  244static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
  245{
  246	return !ctrl_info->controller_online;
  247}
  248
  249static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
  250{
  251	if (ctrl_info->controller_online)
  252		if (!sis_is_firmware_running(ctrl_info))
  253			pqi_take_ctrl_offline(ctrl_info, PQI_FIRMWARE_KERNEL_NOT_UP);
  254}
  255
  256static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
  257{
  258	return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
  259}
  260
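/*
 * Driver-private state kept in the SIS driver-scratch register: the current
 * controller mode (PQI vs. SIS) and whether firmware triage is supported.
 */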
  261#define PQI_DRIVER_SCRATCH_PQI_MODE			0x1
  262#define PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED		0x2
  263
  264static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(struct pqi_ctrl_info *ctrl_info)
  265{
  266	return sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_PQI_MODE ? PQI_MODE : SIS_MODE;
  267}
  268
  269static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
  270	enum pqi_ctrl_mode mode)
  271{
  272	u32 driver_scratch;
  273
  274	driver_scratch = sis_read_driver_scratch(ctrl_info);
  275
  276	if (mode == PQI_MODE)
  277		driver_scratch |= PQI_DRIVER_SCRATCH_PQI_MODE;
  278	else
  279		driver_scratch &= ~PQI_DRIVER_SCRATCH_PQI_MODE;
  280
  281	sis_write_driver_scratch(ctrl_info, driver_scratch);
  282}
  283
  284static inline bool pqi_is_fw_triage_supported(struct pqi_ctrl_info *ctrl_info)
  285{
  286	return (sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED) != 0;
  287}
  288
  289static inline void pqi_save_fw_triage_setting(struct pqi_ctrl_info *ctrl_info, bool is_supported)
  290{
  291	u32 driver_scratch;
  292
  293	driver_scratch = sis_read_driver_scratch(ctrl_info);
  294
  295	if (is_supported)
  296		driver_scratch |= PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;
  297	else
  298		driver_scratch &= ~PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;
  299
  300	sis_write_driver_scratch(ctrl_info, driver_scratch);
  301}
  302
  303static inline void pqi_ctrl_block_scan(struct pqi_ctrl_info *ctrl_info)
  304{
  305	ctrl_info->scan_blocked = true;
  306	mutex_lock(&ctrl_info->scan_mutex);
  307}
  308
  309static inline void pqi_ctrl_unblock_scan(struct pqi_ctrl_info *ctrl_info)
  310{
  311	ctrl_info->scan_blocked = false;
  312	mutex_unlock(&ctrl_info->scan_mutex);
  313}
  314
  315static inline bool pqi_ctrl_scan_blocked(struct pqi_ctrl_info *ctrl_info)
  316{
  317	return ctrl_info->scan_blocked;
  318}
  319
  320static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
  321{
  322	mutex_lock(&ctrl_info->lun_reset_mutex);
  323}
  324
  325static inline void pqi_ctrl_unblock_device_reset(struct pqi_ctrl_info *ctrl_info)
  326{
  327	mutex_unlock(&ctrl_info->lun_reset_mutex);
  328}
  329
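/*
 * Block new requests from the SCSI midlayer, then poll until the host has no
 * outstanding commands (sleeping 20 ms per loop, backing off to 500 ms after
 * the first 10 loops).
 */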
  330static inline void pqi_scsi_block_requests(struct pqi_ctrl_info *ctrl_info)
  331{
  332	struct Scsi_Host *shost;
  333	unsigned int num_loops;
  334	int msecs_sleep;
  335
  336	shost = ctrl_info->scsi_host;
  337
  338	scsi_block_requests(shost);
  339
  340	num_loops = 0;
  341	msecs_sleep = 20;
  342	while (scsi_host_busy(shost)) {
  343		num_loops++;
  344		if (num_loops == 10)
  345			msecs_sleep = 500;
  346		msleep(msecs_sleep);
  347	}
  348}
  349
  350static inline void pqi_scsi_unblock_requests(struct pqi_ctrl_info *ctrl_info)
  351{
  352	scsi_unblock_requests(ctrl_info->scsi_host);
  353}
  354
  355static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
  356{
  357	atomic_inc(&ctrl_info->num_busy_threads);
  358}
  359
  360static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
  361{
  362	atomic_dec(&ctrl_info->num_busy_threads);
  363}
  364
  365static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
  366{
  367	return ctrl_info->block_requests;
  368}
  369
  370static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
  371{
  372	ctrl_info->block_requests = true;
  373}
  374
  375static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
  376{
  377	ctrl_info->block_requests = false;
  378	wake_up_all(&ctrl_info->block_requests_wait);
  379}
  380
  381static void pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
  382{
  383	if (!pqi_ctrl_blocked(ctrl_info))
  384		return;
  385
  386	atomic_inc(&ctrl_info->num_blocked_threads);
  387	wait_event(ctrl_info->block_requests_wait,
  388		!pqi_ctrl_blocked(ctrl_info));
  389	atomic_dec(&ctrl_info->num_blocked_threads);
  390}
  391
  392#define PQI_QUIESCE_WARNING_TIMEOUT_SECS		10
  393
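/*
 * Quiesce is reached once every thread counted as busy (pqi_ctrl_busy()) is
 * also counted as blocked (pqi_wait_if_ctrl_blocked()), i.e. no driver thread
 * is still actively issuing requests; a warning is repeated every 10 seconds
 * while waiting.
 */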
  394static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
  395{
  396	unsigned long start_jiffies;
  397	unsigned long warning_timeout;
  398	bool displayed_warning;
  399
  400	displayed_warning = false;
  401	start_jiffies = jiffies;
  402	warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
  403
  404	while (atomic_read(&ctrl_info->num_busy_threads) >
  405		atomic_read(&ctrl_info->num_blocked_threads)) {
  406		if (time_after(jiffies, warning_timeout)) {
  407			dev_warn(&ctrl_info->pci_dev->dev,
  408				"waiting %u seconds for driver activity to quiesce\n",
  409				jiffies_to_msecs(jiffies - start_jiffies) / 1000);
  410			displayed_warning = true;
  411			warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + jiffies;
  412		}
  413		usleep_range(1000, 2000);
  414	}
  415
  416	if (displayed_warning)
  417		dev_warn(&ctrl_info->pci_dev->dev,
  418			"driver activity quiesced after waiting for %u seconds\n",
  419			jiffies_to_msecs(jiffies - start_jiffies) / 1000);
  420}
  421
  422static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
  423{
  424	return device->device_offline;
  425}
  426
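/*
 * Online Firmware Activation (OFA) is serialized entirely through ofa_mutex:
 * the mutex is held for the duration of an OFA, so waiting for it to finish
 * is a lock/unlock pair and "in progress" is simply mutex_is_locked().
 */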
  427static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
  428{
  429	mutex_lock(&ctrl_info->ofa_mutex);
  430}
  431
  432static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
  433{
  434	mutex_unlock(&ctrl_info->ofa_mutex);
  435}
  436
  437static inline void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
  438{
  439	mutex_lock(&ctrl_info->ofa_mutex);
  440	mutex_unlock(&ctrl_info->ofa_mutex);
  441}
  442
  443static inline bool pqi_ofa_in_progress(struct pqi_ctrl_info *ctrl_info)
  444{
  445	return mutex_is_locked(&ctrl_info->ofa_mutex);
  446}
  447
  448static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
  449{
  450	device->in_remove = true;
  451}
  452
  453static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device)
  454{
  455	return device->in_remove;
  456}
  457
  458static inline int pqi_event_type_to_event_index(unsigned int event_type)
  459{
  460	int index;
  461
  462	for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
  463		if (event_type == pqi_supported_event_types[index])
  464			return index;
  465
  466	return -1;
  467}
  468
  469static inline bool pqi_is_supported_event(unsigned int event_type)
  470{
  471	return pqi_event_type_to_event_index(event_type) != -1;
  472}
  473
  474static inline void pqi_schedule_rescan_worker_with_delay(struct pqi_ctrl_info *ctrl_info,
  475	unsigned long delay)
  476{
  477	if (pqi_ctrl_offline(ctrl_info))
  478		return;
  479
  480	schedule_delayed_work(&ctrl_info->rescan_work, delay);
  481}
  482
  483static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
  484{
  485	pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
  486}
  487
  488#define PQI_RESCAN_WORK_DELAY	(10 * HZ)
  489
  490static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info)
  491{
  492	pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
  493}
  494
  495static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
  496{
  497	cancel_delayed_work_sync(&ctrl_info->rescan_work);
  498}
  499
  500static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
  501{
  502	if (!ctrl_info->heartbeat_counter)
  503		return 0;
  504
  505	return readl(ctrl_info->heartbeat_counter);
  506}
  507
  508static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
  509{
  510	return readb(ctrl_info->soft_reset_status);
  511}
  512
  513static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
  514{
  515	u8 status;
  516
  517	status = pqi_read_soft_reset_status(ctrl_info);
  518	status &= ~PQI_SOFT_RESET_ABORT;
  519	writeb(status, ctrl_info->soft_reset_status);
  520}
  521
  522static int pqi_map_single(struct pci_dev *pci_dev,
  523	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
  524	size_t buffer_length, enum dma_data_direction data_direction)
  525{
  526	dma_addr_t bus_address;
  527
  528	if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
  529		return 0;
  530
  531	bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
  532		data_direction);
  533	if (dma_mapping_error(&pci_dev->dev, bus_address))
  534		return -ENOMEM;
  535
  536	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
  537	put_unaligned_le32(buffer_length, &sg_descriptor->length);
  538	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
  539
  540	return 0;
  541}
  542
  543static void pqi_pci_unmap(struct pci_dev *pci_dev,
  544	struct pqi_sg_descriptor *descriptors, int num_descriptors,
  545	enum dma_data_direction data_direction)
  546{
  547	int i;
  548
  549	if (data_direction == DMA_NONE)
  550		return;
  551
  552	for (i = 0; i < num_descriptors; i++)
  553		dma_unmap_single(&pci_dev->dev,
  554			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
  555			get_unaligned_le32(&descriptors[i].length),
  556			data_direction);
  557}
  558
  559static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
  560	struct pqi_raid_path_request *request, u8 cmd,
  561	u8 *scsi3addr, void *buffer, size_t buffer_length,
  562	u16 vpd_page, enum dma_data_direction *dir)
  563{
  564	u8 *cdb;
  565	size_t cdb_length = buffer_length;
  566
  567	memset(request, 0, sizeof(*request));
  568
  569	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
  570	put_unaligned_le16(offsetof(struct pqi_raid_path_request,
  571		sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
  572		&request->header.iu_length);
  573	put_unaligned_le32(buffer_length, &request->buffer_length);
  574	memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
  575	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
  576	request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
  577
  578	cdb = request->cdb;
  579
  580	switch (cmd) {
  581	case TEST_UNIT_READY:
  582		request->data_direction = SOP_READ_FLAG;
  583		cdb[0] = TEST_UNIT_READY;
  584		break;
  585	case INQUIRY:
  586		request->data_direction = SOP_READ_FLAG;
  587		cdb[0] = INQUIRY;
  588		if (vpd_page & VPD_PAGE) {
  589			cdb[1] = 0x1;
  590			cdb[2] = (u8)vpd_page;
  591		}
  592		cdb[4] = (u8)cdb_length;
  593		break;
  594	case CISS_REPORT_LOG:
  595	case CISS_REPORT_PHYS:
  596		request->data_direction = SOP_READ_FLAG;
  597		cdb[0] = cmd;
  598		if (cmd == CISS_REPORT_PHYS) {
  599			if (ctrl_info->rpl_extended_format_4_5_supported)
  600				cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4;
  601			else
  602				cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2;
  603		} else {
  604			cdb[1] = ctrl_info->ciss_report_log_flags;
  605		}
  606		put_unaligned_be32(cdb_length, &cdb[6]);
  607		break;
  608	case CISS_GET_RAID_MAP:
  609		request->data_direction = SOP_READ_FLAG;
  610		cdb[0] = CISS_READ;
  611		cdb[1] = CISS_GET_RAID_MAP;
  612		put_unaligned_be32(cdb_length, &cdb[6]);
  613		break;
  614	case SA_FLUSH_CACHE:
  615		request->header.driver_flags = PQI_DRIVER_NONBLOCKABLE_REQUEST;
  616		request->data_direction = SOP_WRITE_FLAG;
  617		cdb[0] = BMIC_WRITE;
  618		cdb[6] = BMIC_FLUSH_CACHE;
  619		put_unaligned_be16(cdb_length, &cdb[7]);
  620		break;
  621	case BMIC_SENSE_DIAG_OPTIONS:
  622		cdb_length = 0;
  623		fallthrough;
  624	case BMIC_IDENTIFY_CONTROLLER:
  625	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
  626	case BMIC_SENSE_SUBSYSTEM_INFORMATION:
  627	case BMIC_SENSE_FEATURE:
  628		request->data_direction = SOP_READ_FLAG;
  629		cdb[0] = BMIC_READ;
  630		cdb[6] = cmd;
  631		put_unaligned_be16(cdb_length, &cdb[7]);
  632		break;
  633	case BMIC_SET_DIAG_OPTIONS:
  634		cdb_length = 0;
  635		fallthrough;
  636	case BMIC_WRITE_HOST_WELLNESS:
  637		request->data_direction = SOP_WRITE_FLAG;
  638		cdb[0] = BMIC_WRITE;
  639		cdb[6] = cmd;
  640		put_unaligned_be16(cdb_length, &cdb[7]);
  641		break;
  642	case BMIC_CSMI_PASSTHRU:
  643		request->data_direction = SOP_BIDIRECTIONAL;
  644		cdb[0] = BMIC_WRITE;
  645		cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
  646		cdb[6] = cmd;
  647		put_unaligned_be16(cdb_length, &cdb[7]);
  648		break;
  649	default:
   650		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n", cmd);
  651		break;
  652	}
  653
  654	switch (request->data_direction) {
  655	case SOP_READ_FLAG:
  656		*dir = DMA_FROM_DEVICE;
  657		break;
  658	case SOP_WRITE_FLAG:
  659		*dir = DMA_TO_DEVICE;
  660		break;
  661	case SOP_NO_DIRECTION_FLAG:
  662		*dir = DMA_NONE;
  663		break;
  664	default:
  665		*dir = DMA_BIDIRECTIONAL;
  666		break;
  667	}
  668
  669	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
  670		buffer, buffer_length, *dir);
  671}
  672
  673static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
  674{
  675	io_request->scmd = NULL;
  676	io_request->status = 0;
  677	io_request->error_info = NULL;
  678	io_request->raid_bypass = false;
  679}
  680
  681static inline struct pqi_io_request *pqi_alloc_io_request(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
  682{
  683	struct pqi_io_request *io_request;
  684	u16 i;
  685
  686	if (scmd) { /* SML I/O request */
  687		u32 blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
  688
  689		i = blk_mq_unique_tag_to_tag(blk_tag);
  690		io_request = &ctrl_info->io_request_pool[i];
  691		if (atomic_inc_return(&io_request->refcount) > 1) {
  692			atomic_dec(&io_request->refcount);
  693			return NULL;
  694		}
  695	} else { /* IOCTL or driver internal request */
  696		/*
  697		 * benignly racy - may have to wait for an open slot.
  698		 * command slot range is scsi_ml_can_queue -
  699		 *         [scsi_ml_can_queue + (PQI_RESERVED_IO_SLOTS - 1)]
  700		 */
  701		i = 0;
  702		while (1) {
  703			io_request = &ctrl_info->io_request_pool[ctrl_info->scsi_ml_can_queue + i];
  704			if (atomic_inc_return(&io_request->refcount) == 1)
  705				break;
  706			atomic_dec(&io_request->refcount);
  707			i = (i + 1) % PQI_RESERVED_IO_SLOTS;
  708		}
  709	}
  710
  711	pqi_reinit_io_request(io_request);
  712
  713	return io_request;
  714}
  715
  716static void pqi_free_io_request(struct pqi_io_request *io_request)
  717{
  718	atomic_dec(&io_request->refcount);
  719}
  720
  721static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
  722	u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
  723	struct pqi_raid_error_info *error_info)
  724{
  725	int rc;
  726	struct pqi_raid_path_request request;
  727	enum dma_data_direction dir;
  728
  729	rc = pqi_build_raid_path_request(ctrl_info, &request, cmd, scsi3addr,
  730		buffer, buffer_length, vpd_page, &dir);
  731	if (rc)
  732		return rc;
  733
  734	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, error_info);
  735
  736	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
  737
  738	return rc;
  739}
  740
  741/* helper functions for pqi_send_scsi_raid_request */
  742
  743static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
  744	u8 cmd, void *buffer, size_t buffer_length)
  745{
  746	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
  747		buffer, buffer_length, 0, NULL);
  748}
  749
  750static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
  751	u8 cmd, void *buffer, size_t buffer_length,
  752	struct pqi_raid_error_info *error_info)
  753{
  754	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
  755		buffer, buffer_length, 0, error_info);
  756}
  757
  758static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
  759	struct bmic_identify_controller *buffer)
  760{
  761	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
  762		buffer, sizeof(*buffer));
  763}
  764
   765static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info,
  766	struct bmic_sense_subsystem_info *sense_info)
  767{
  768	return pqi_send_ctrl_raid_request(ctrl_info,
  769		BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info,
  770		sizeof(*sense_info));
  771}
  772
  773static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
  774	u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
  775{
  776	return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
  777		buffer, buffer_length, vpd_page, NULL);
  778}
  779
  780static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
  781	struct pqi_scsi_dev *device,
  782	struct bmic_identify_physical_device *buffer, size_t buffer_length)
  783{
  784	int rc;
  785	enum dma_data_direction dir;
  786	u16 bmic_device_index;
  787	struct pqi_raid_path_request request;
  788
  789	rc = pqi_build_raid_path_request(ctrl_info, &request,
  790		BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
  791		buffer_length, 0, &dir);
  792	if (rc)
  793		return rc;
  794
  795	bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
  796	request.cdb[2] = (u8)bmic_device_index;
  797	request.cdb[9] = (u8)(bmic_device_index >> 8);
  798
  799	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
  800
  801	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
  802
  803	return rc;
  804}
  805
  806static inline u32 pqi_aio_limit_to_bytes(__le16 *limit)
  807{
  808	u32 bytes;
  809
  810	bytes = get_unaligned_le16(limit);
  811	if (bytes == 0)
  812		bytes = ~0;
  813	else
  814		bytes *= 1024;
  815
  816	return bytes;
  817}
  818
  819#pragma pack(1)
  820
  821struct bmic_sense_feature_buffer {
  822	struct bmic_sense_feature_buffer_header header;
  823	struct bmic_sense_feature_io_page_aio_subpage aio_subpage;
  824};
  825
  826#pragma pack()
  827
  828#define MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH	\
  829	offsetofend(struct bmic_sense_feature_buffer, \
  830		aio_subpage.max_write_raid_1_10_3drive)
  831
  832#define MINIMUM_AIO_SUBPAGE_LENGTH	\
  833	(offsetofend(struct bmic_sense_feature_io_page_aio_subpage, \
  834		max_write_raid_1_10_3drive) - \
  835		sizeof_field(struct bmic_sense_feature_io_page_aio_subpage, header))
  836
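/*
 * Both minimum-length checks above extend through max_write_raid_1_10_3drive,
 * the last field of the AIO subpage that this driver consumes.
 */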
  837static int pqi_get_advanced_raid_bypass_config(struct pqi_ctrl_info *ctrl_info)
  838{
  839	int rc;
  840	enum dma_data_direction dir;
  841	struct pqi_raid_path_request request;
  842	struct bmic_sense_feature_buffer *buffer;
  843
  844	buffer = kmalloc(sizeof(*buffer), GFP_KERNEL);
  845	if (!buffer)
  846		return -ENOMEM;
  847
  848	rc = pqi_build_raid_path_request(ctrl_info, &request, BMIC_SENSE_FEATURE, RAID_CTLR_LUNID,
  849		buffer, sizeof(*buffer), 0, &dir);
  850	if (rc)
  851		goto error;
  852
  853	request.cdb[2] = BMIC_SENSE_FEATURE_IO_PAGE;
  854	request.cdb[3] = BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE;
  855
  856	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
  857
  858	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
  859
  860	if (rc)
  861		goto error;
  862
  863	if (buffer->header.page_code != BMIC_SENSE_FEATURE_IO_PAGE ||
  864		buffer->header.subpage_code !=
  865			BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
  866		get_unaligned_le16(&buffer->header.buffer_length) <
  867			MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH ||
  868		buffer->aio_subpage.header.page_code !=
  869			BMIC_SENSE_FEATURE_IO_PAGE ||
  870		buffer->aio_subpage.header.subpage_code !=
  871			BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
  872		get_unaligned_le16(&buffer->aio_subpage.header.page_length) <
  873			MINIMUM_AIO_SUBPAGE_LENGTH) {
  874		goto error;
  875	}
  876
  877	ctrl_info->max_transfer_encrypted_sas_sata =
  878		pqi_aio_limit_to_bytes(
  879			&buffer->aio_subpage.max_transfer_encrypted_sas_sata);
  880
  881	ctrl_info->max_transfer_encrypted_nvme =
  882		pqi_aio_limit_to_bytes(
  883			&buffer->aio_subpage.max_transfer_encrypted_nvme);
  884
  885	ctrl_info->max_write_raid_5_6 =
  886		pqi_aio_limit_to_bytes(
  887			&buffer->aio_subpage.max_write_raid_5_6);
  888
  889	ctrl_info->max_write_raid_1_10_2drive =
  890		pqi_aio_limit_to_bytes(
  891			&buffer->aio_subpage.max_write_raid_1_10_2drive);
  892
  893	ctrl_info->max_write_raid_1_10_3drive =
  894		pqi_aio_limit_to_bytes(
  895			&buffer->aio_subpage.max_write_raid_1_10_3drive);
  896
  897error:
  898	kfree(buffer);
  899
  900	return rc;
  901}
  902
  903static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
  904	enum bmic_flush_cache_shutdown_event shutdown_event)
  905{
  906	int rc;
  907	struct bmic_flush_cache *flush_cache;
  908
  909	flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
  910	if (!flush_cache)
  911		return -ENOMEM;
  912
  913	flush_cache->shutdown_event = shutdown_event;
  914
  915	rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
  916		sizeof(*flush_cache));
  917
  918	kfree(flush_cache);
  919
  920	return rc;
  921}
  922
  923int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
  924	struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
  925	struct pqi_raid_error_info *error_info)
  926{
  927	return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
  928		buffer, buffer_length, error_info);
  929}
  930
  931#define PQI_FETCH_PTRAID_DATA		(1 << 31)
  932
  933static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
  934{
  935	int rc;
  936	struct bmic_diag_options *diag;
  937
  938	diag = kzalloc(sizeof(*diag), GFP_KERNEL);
  939	if (!diag)
  940		return -ENOMEM;
  941
  942	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
  943		diag, sizeof(*diag));
  944	if (rc)
  945		goto out;
  946
  947	diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);
  948
  949	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag,
  950		sizeof(*diag));
  951
  952out:
  953	kfree(diag);
  954
  955	return rc;
  956}
  957
  958static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
  959	void *buffer, size_t buffer_length)
  960{
  961	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
  962		buffer, buffer_length);
  963}
  964
  965#pragma pack(1)
  966
  967struct bmic_host_wellness_driver_version {
  968	u8	start_tag[4];
  969	u8	driver_version_tag[2];
  970	__le16	driver_version_length;
  971	char	driver_version[32];
  972	u8	dont_write_tag[2];
  973	u8	end_tag[2];
  974};
  975
  976#pragma pack()
  977
  978static int pqi_write_driver_version_to_host_wellness(
  979	struct pqi_ctrl_info *ctrl_info)
  980{
  981	int rc;
  982	struct bmic_host_wellness_driver_version *buffer;
  983	size_t buffer_length;
  984
  985	buffer_length = sizeof(*buffer);
  986
  987	buffer = kmalloc(buffer_length, GFP_KERNEL);
  988	if (!buffer)
  989		return -ENOMEM;
  990
  991	buffer->start_tag[0] = '<';
  992	buffer->start_tag[1] = 'H';
  993	buffer->start_tag[2] = 'W';
  994	buffer->start_tag[3] = '>';
  995	buffer->driver_version_tag[0] = 'D';
  996	buffer->driver_version_tag[1] = 'V';
  997	put_unaligned_le16(sizeof(buffer->driver_version),
  998		&buffer->driver_version_length);
  999	strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
 1000		sizeof(buffer->driver_version) - 1);
 1001	buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
 1002	buffer->dont_write_tag[0] = 'D';
 1003	buffer->dont_write_tag[1] = 'W';
 1004	buffer->end_tag[0] = 'Z';
 1005	buffer->end_tag[1] = 'Z';
 1006
 1007	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
 1008
 1009	kfree(buffer);
 1010
 1011	return rc;
 1012}
 1013
 1014#pragma pack(1)
 1015
 1016struct bmic_host_wellness_time {
 1017	u8	start_tag[4];
 1018	u8	time_tag[2];
 1019	__le16	time_length;
 1020	u8	time[8];
 1021	u8	dont_write_tag[2];
 1022	u8	end_tag[2];
 1023};
 1024
 1025#pragma pack()
 1026
 1027static int pqi_write_current_time_to_host_wellness(
 1028	struct pqi_ctrl_info *ctrl_info)
 1029{
 1030	int rc;
 1031	struct bmic_host_wellness_time *buffer;
 1032	size_t buffer_length;
 1033	time64_t local_time;
 1034	unsigned int year;
 1035	struct tm tm;
 1036
 1037	buffer_length = sizeof(*buffer);
 1038
 1039	buffer = kmalloc(buffer_length, GFP_KERNEL);
 1040	if (!buffer)
 1041		return -ENOMEM;
 1042
 1043	buffer->start_tag[0] = '<';
 1044	buffer->start_tag[1] = 'H';
 1045	buffer->start_tag[2] = 'W';
 1046	buffer->start_tag[3] = '>';
 1047	buffer->time_tag[0] = 'T';
 1048	buffer->time_tag[1] = 'D';
 1049	put_unaligned_le16(sizeof(buffer->time),
 1050		&buffer->time_length);
 1051
 1052	local_time = ktime_get_real_seconds();
 1053	time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
 1054	year = tm.tm_year + 1900;
 1055
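	/* BCD-encoded time: hh, mm, ss, <reserved>, month, day, century, year */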
 1056	buffer->time[0] = bin2bcd(tm.tm_hour);
 1057	buffer->time[1] = bin2bcd(tm.tm_min);
 1058	buffer->time[2] = bin2bcd(tm.tm_sec);
 1059	buffer->time[3] = 0;
 1060	buffer->time[4] = bin2bcd(tm.tm_mon + 1);
 1061	buffer->time[5] = bin2bcd(tm.tm_mday);
 1062	buffer->time[6] = bin2bcd(year / 100);
 1063	buffer->time[7] = bin2bcd(year % 100);
 1064
 1065	buffer->dont_write_tag[0] = 'D';
 1066	buffer->dont_write_tag[1] = 'W';
 1067	buffer->end_tag[0] = 'Z';
 1068	buffer->end_tag[1] = 'Z';
 1069
 1070	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
 1071
 1072	kfree(buffer);
 1073
 1074	return rc;
 1075}
 1076
 1077#define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * HZ)
 1078
 1079static void pqi_update_time_worker(struct work_struct *work)
 1080{
 1081	int rc;
 1082	struct pqi_ctrl_info *ctrl_info;
 1083
 1084	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
 1085		update_time_work);
 1086
 1087	rc = pqi_write_current_time_to_host_wellness(ctrl_info);
 1088	if (rc)
 1089		dev_warn(&ctrl_info->pci_dev->dev,
 1090			"error updating time on controller\n");
 1091
 1092	schedule_delayed_work(&ctrl_info->update_time_work,
 1093		PQI_UPDATE_TIME_WORK_INTERVAL);
 1094}
 1095
 1096static inline void pqi_schedule_update_time_worker(struct pqi_ctrl_info *ctrl_info)
 1097{
 1098	schedule_delayed_work(&ctrl_info->update_time_work, 0);
 1099}
 1100
 1101static inline void pqi_cancel_update_time_worker(struct pqi_ctrl_info *ctrl_info)
 1102{
 1103	cancel_delayed_work_sync(&ctrl_info->update_time_work);
 1104}
 1105
 1106static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void *buffer,
 1107	size_t buffer_length)
 1108{
 1109	return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer, buffer_length);
 1110}
 1111
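/*
 * Issue the REPORT LUNS command twice: first with a header-sized buffer to
 * learn the list length, then with a buffer sized for the full list, retrying
 * if the reported length grew in between.
 */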
 1112static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void **buffer)
 1113{
 1114	int rc;
 1115	size_t lun_list_length;
 1116	size_t lun_data_length;
 1117	size_t new_lun_list_length;
 1118	void *lun_data = NULL;
 1119	struct report_lun_header *report_lun_header;
 1120
 1121	report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
 1122	if (!report_lun_header) {
 1123		rc = -ENOMEM;
 1124		goto out;
 1125	}
 1126
 1127	rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, sizeof(*report_lun_header));
 1128	if (rc)
 1129		goto out;
 1130
 1131	lun_list_length = get_unaligned_be32(&report_lun_header->list_length);
 1132
 1133again:
 1134	lun_data_length = sizeof(struct report_lun_header) + lun_list_length;
 1135
 1136	lun_data = kmalloc(lun_data_length, GFP_KERNEL);
 1137	if (!lun_data) {
 1138		rc = -ENOMEM;
 1139		goto out;
 1140	}
 1141
 1142	if (lun_list_length == 0) {
 1143		memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
 1144		goto out;
 1145	}
 1146
 1147	rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
 1148	if (rc)
 1149		goto out;
 1150
 1151	new_lun_list_length =
 1152		get_unaligned_be32(&((struct report_lun_header *)lun_data)->list_length);
 1153
 1154	if (new_lun_list_length > lun_list_length) {
 1155		lun_list_length = new_lun_list_length;
 1156		kfree(lun_data);
 1157		goto again;
 1158	}
 1159
 1160out:
 1161	kfree(report_lun_header);
 1162
 1163	if (rc) {
 1164		kfree(lun_data);
 1165		lun_data = NULL;
 1166	}
 1167
 1168	*buffer = lun_data;
 1169
 1170	return rc;
 1171}
 1172
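/*
 * Fetch the physical LUN list. If the controller returns the older extended
 * format 2 (8-byte WWIDs), convert it to the 16-byte WWID format 4 layout
 * that the rest of the driver expects.
 */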
 1173static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
 1174{
 1175	int rc;
 1176	unsigned int i;
 1177	u8 rpl_response_format;
 1178	u32 num_physicals;
 1179	size_t rpl_16byte_wwid_list_length;
 1180	void *rpl_list;
 1181	struct report_lun_header *rpl_header;
 1182	struct report_phys_lun_8byte_wwid_list *rpl_8byte_wwid_list;
 1183	struct report_phys_lun_16byte_wwid_list *rpl_16byte_wwid_list;
 1184
 1185	rc = pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, &rpl_list);
 1186	if (rc)
 1187		return rc;
 1188
 1189	if (ctrl_info->rpl_extended_format_4_5_supported) {
 1190		rpl_header = rpl_list;
 1191		rpl_response_format = rpl_header->flags & CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_MASK;
 1192		if (rpl_response_format == CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4) {
 1193			*buffer = rpl_list;
 1194			return 0;
 1195		} else if (rpl_response_format != CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2) {
 1196			dev_err(&ctrl_info->pci_dev->dev,
 1197				"RPL returned unsupported data format %u\n",
 1198				rpl_response_format);
 1199			return -EINVAL;
 1200		} else {
 1201			dev_warn(&ctrl_info->pci_dev->dev,
 1202				"RPL returned extended format 2 instead of 4\n");
 1203		}
 1204	}
 1205
 1206	rpl_8byte_wwid_list = rpl_list;
 1207	num_physicals = get_unaligned_be32(&rpl_8byte_wwid_list->header.list_length) / sizeof(rpl_8byte_wwid_list->lun_entries[0]);
 1208	rpl_16byte_wwid_list_length = sizeof(struct report_lun_header) + (num_physicals * sizeof(struct report_phys_lun_16byte_wwid));
 1209
 1210	rpl_16byte_wwid_list = kmalloc(rpl_16byte_wwid_list_length, GFP_KERNEL);
 1211	if (!rpl_16byte_wwid_list)
 1212		return -ENOMEM;
 1213
 1214	put_unaligned_be32(num_physicals * sizeof(struct report_phys_lun_16byte_wwid),
 1215		&rpl_16byte_wwid_list->header.list_length);
 1216	rpl_16byte_wwid_list->header.flags = rpl_8byte_wwid_list->header.flags;
 1217
 1218	for (i = 0; i < num_physicals; i++) {
 1219		memcpy(&rpl_16byte_wwid_list->lun_entries[i].lunid, &rpl_8byte_wwid_list->lun_entries[i].lunid, sizeof(rpl_8byte_wwid_list->lun_entries[i].lunid));
 1220		memcpy(&rpl_16byte_wwid_list->lun_entries[i].wwid[0], &rpl_8byte_wwid_list->lun_entries[i].wwid, sizeof(rpl_8byte_wwid_list->lun_entries[i].wwid));
 1221		memset(&rpl_16byte_wwid_list->lun_entries[i].wwid[8], 0, 8);
 1222		rpl_16byte_wwid_list->lun_entries[i].device_type = rpl_8byte_wwid_list->lun_entries[i].device_type;
 1223		rpl_16byte_wwid_list->lun_entries[i].device_flags = rpl_8byte_wwid_list->lun_entries[i].device_flags;
 1224		rpl_16byte_wwid_list->lun_entries[i].lun_count = rpl_8byte_wwid_list->lun_entries[i].lun_count;
 1225		rpl_16byte_wwid_list->lun_entries[i].redundant_paths = rpl_8byte_wwid_list->lun_entries[i].redundant_paths;
 1226		rpl_16byte_wwid_list->lun_entries[i].aio_handle = rpl_8byte_wwid_list->lun_entries[i].aio_handle;
 1227	}
 1228
 1229	kfree(rpl_8byte_wwid_list);
 1230	*buffer = rpl_16byte_wwid_list;
 1231
 1232	return 0;
 1233}
 1234
 1235static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
 1236{
 1237	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
 1238}
 1239
 1240static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
 1241	struct report_phys_lun_16byte_wwid_list **physdev_list,
 1242	struct report_log_lun_list **logdev_list)
 1243{
 1244	int rc;
 1245	size_t logdev_list_length;
 1246	size_t logdev_data_length;
 1247	struct report_log_lun_list *internal_logdev_list;
 1248	struct report_log_lun_list *logdev_data;
 1249	struct report_lun_header report_lun_header;
 1250
 1251	rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
 1252	if (rc)
 1253		dev_err(&ctrl_info->pci_dev->dev,
 1254			"report physical LUNs failed\n");
 1255
 1256	rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
 1257	if (rc)
 1258		dev_err(&ctrl_info->pci_dev->dev,
 1259			"report logical LUNs failed\n");
 1260
 1261	/*
 1262	 * Tack the controller itself onto the end of the logical device list.
 1263	 */
 1264
 1265	logdev_data = *logdev_list;
 1266
 1267	if (logdev_data) {
 1268		logdev_list_length =
 1269			get_unaligned_be32(&logdev_data->header.list_length);
 1270	} else {
 1271		memset(&report_lun_header, 0, sizeof(report_lun_header));
 1272		logdev_data =
 1273			(struct report_log_lun_list *)&report_lun_header;
 1274		logdev_list_length = 0;
 1275	}
 1276
 1277	logdev_data_length = sizeof(struct report_lun_header) +
 1278		logdev_list_length;
 1279
 1280	internal_logdev_list = kmalloc(logdev_data_length +
 1281		sizeof(struct report_log_lun), GFP_KERNEL);
 1282	if (!internal_logdev_list) {
 1283		kfree(*logdev_list);
 1284		*logdev_list = NULL;
 1285		return -ENOMEM;
 1286	}
 1287
 1288	memcpy(internal_logdev_list, logdev_data, logdev_data_length);
 1289	memset((u8 *)internal_logdev_list + logdev_data_length, 0,
 1290		sizeof(struct report_log_lun));
 1291	put_unaligned_be32(logdev_list_length +
 1292		sizeof(struct report_log_lun),
 1293		&internal_logdev_list->header.list_length);
 1294
 1295	kfree(*logdev_list);
 1296	*logdev_list = internal_logdev_list;
 1297
 1298	return 0;
 1299}
 1300
 1301static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
 1302	int bus, int target, int lun)
 1303{
 1304	device->bus = bus;
 1305	device->target = target;
 1306	device->lun = lun;
 1307}
 1308
 1309static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
 1310{
 1311	u8 *scsi3addr;
 1312	u32 lunid;
 1313	int bus;
 1314	int target;
 1315	int lun;
 1316
 1317	scsi3addr = device->scsi3addr;
 1318	lunid = get_unaligned_le32(scsi3addr);
 1319
 1320	if (pqi_is_hba_lunid(scsi3addr)) {
 1321		/* The specified device is the controller. */
 1322		pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
 1323		device->target_lun_valid = true;
 1324		return;
 1325	}
 1326
 1327	if (pqi_is_logical_device(device)) {
 1328		if (device->is_external_raid_device) {
 1329			bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
 1330			target = (lunid >> 16) & 0x3fff;
 1331			lun = lunid & 0xff;
 1332		} else {
 1333			bus = PQI_RAID_VOLUME_BUS;
 1334			target = 0;
 1335			lun = lunid & 0x3fff;
 1336		}
 1337		pqi_set_bus_target_lun(device, bus, target, lun);
 1338		device->target_lun_valid = true;
 1339		return;
 1340	}
 1341
 1342	/*
 1343	 * Defer target and LUN assignment for non-controller physical devices
 1344	 * because the SAS transport layer will make these assignments later.
 1345	 */
 1346	pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
 1347}
 1348
 1349static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
 1350	struct pqi_scsi_dev *device)
 1351{
 1352	int rc;
 1353	u8 raid_level;
 1354	u8 *buffer;
 1355
 1356	raid_level = SA_RAID_UNKNOWN;
 1357
 1358	buffer = kmalloc(64, GFP_KERNEL);
 1359	if (buffer) {
 1360		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
 1361			VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
 1362		if (rc == 0) {
 1363			raid_level = buffer[8];
 1364			if (raid_level > SA_RAID_MAX)
 1365				raid_level = SA_RAID_UNKNOWN;
 1366		}
 1367		kfree(buffer);
 1368	}
 1369
 1370	device->raid_level = raid_level;
 1371}
 1372
 1373static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
 1374	struct pqi_scsi_dev *device, struct raid_map *raid_map)
 1375{
 1376	char *err_msg;
 1377	u32 raid_map_size;
 1378	u32 r5or6_blocks_per_row;
 1379
 1380	raid_map_size = get_unaligned_le32(&raid_map->structure_size);
 1381
 1382	if (raid_map_size < offsetof(struct raid_map, disk_data)) {
 1383		err_msg = "RAID map too small";
 1384		goto bad_raid_map;
 1385	}
 1386
 1387	if (device->raid_level == SA_RAID_1) {
 1388		if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
 1389			err_msg = "invalid RAID-1 map";
 1390			goto bad_raid_map;
 1391		}
 1392	} else if (device->raid_level == SA_RAID_TRIPLE) {
 1393		if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
 1394			err_msg = "invalid RAID-1(Triple) map";
 1395			goto bad_raid_map;
 1396		}
 1397	} else if ((device->raid_level == SA_RAID_5 ||
 1398		device->raid_level == SA_RAID_6) &&
 1399		get_unaligned_le16(&raid_map->layout_map_count) > 1) {
 1400		/* RAID 50/60 */
 1401		r5or6_blocks_per_row =
 1402			get_unaligned_le16(&raid_map->strip_size) *
 1403			get_unaligned_le16(&raid_map->data_disks_per_row);
 1404		if (r5or6_blocks_per_row == 0) {
 1405			err_msg = "invalid RAID-5 or RAID-6 map";
 1406			goto bad_raid_map;
 1407		}
 1408	}
 1409
 1410	return 0;
 1411
 1412bad_raid_map:
 1413	dev_warn(&ctrl_info->pci_dev->dev,
 1414		"logical device %08x%08x %s\n",
 1415		*((u32 *)&device->scsi3addr),
 1416		*((u32 *)&device->scsi3addr[4]), err_msg);
 1417
 1418	return -EINVAL;
 1419}
 1420
 1421static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
 1422	struct pqi_scsi_dev *device)
 1423{
 1424	int rc;
 1425	u32 raid_map_size;
 1426	struct raid_map *raid_map;
 1427
 1428	raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
 1429	if (!raid_map)
 1430		return -ENOMEM;
 1431
 1432	rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
 1433		device->scsi3addr, raid_map, sizeof(*raid_map), 0, NULL);
 1434	if (rc)
 1435		goto error;
 1436
 1437	raid_map_size = get_unaligned_le32(&raid_map->structure_size);
 1438
 1439	if (raid_map_size > sizeof(*raid_map)) {
 1440
 1441		kfree(raid_map);
 1442
 1443		raid_map = kmalloc(raid_map_size, GFP_KERNEL);
 1444		if (!raid_map)
 1445			return -ENOMEM;
 1446
 1447		rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
 1448			device->scsi3addr, raid_map, raid_map_size, 0, NULL);
 1449		if (rc)
 1450			goto error;
 1451
 1452		if (get_unaligned_le32(&raid_map->structure_size)
 1453			!= raid_map_size) {
 1454			dev_warn(&ctrl_info->pci_dev->dev,
 1455				"requested %u bytes, received %u bytes\n",
 1456				raid_map_size,
 1457				get_unaligned_le32(&raid_map->structure_size));
 1458			rc = -EINVAL;
 1459			goto error;
 1460		}
 1461	}
 1462
 1463	rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
 1464	if (rc)
 1465		goto error;
 1466
 1467	device->raid_map = raid_map;
 1468
 1469	return 0;
 1470
 1471error:
 1472	kfree(raid_map);
 1473
 1474	return rc;
 1475}
 1476
 1477static void pqi_set_max_transfer_encrypted(struct pqi_ctrl_info *ctrl_info,
 1478	struct pqi_scsi_dev *device)
 1479{
 1480	if (!ctrl_info->lv_drive_type_mix_valid) {
 1481		device->max_transfer_encrypted = ~0;
 1482		return;
 1483	}
 1484
 1485	switch (LV_GET_DRIVE_TYPE_MIX(device->scsi3addr)) {
 1486	case LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY:
 1487	case LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY:
 1488	case LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY:
 1489	case LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY:
 1490	case LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY:
 1491	case LV_DRIVE_TYPE_MIX_SAS_ONLY:
 1492	case LV_DRIVE_TYPE_MIX_SATA_ONLY:
 1493		device->max_transfer_encrypted =
 1494			ctrl_info->max_transfer_encrypted_sas_sata;
 1495		break;
 1496	case LV_DRIVE_TYPE_MIX_NVME_ONLY:
 1497		device->max_transfer_encrypted =
 1498			ctrl_info->max_transfer_encrypted_nvme;
 1499		break;
 1500	case LV_DRIVE_TYPE_MIX_UNKNOWN:
 1501	case LV_DRIVE_TYPE_MIX_NO_RESTRICTION:
 1502	default:
 1503		device->max_transfer_encrypted =
 1504			min(ctrl_info->max_transfer_encrypted_sas_sata,
 1505				ctrl_info->max_transfer_encrypted_nvme);
 1506		break;
 1507	}
 1508}
 1509
 1510static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
 1511	struct pqi_scsi_dev *device)
 1512{
 1513	int rc;
 1514	u8 *buffer;
 1515	u8 bypass_status;
 1516
 1517	buffer = kmalloc(64, GFP_KERNEL);
 1518	if (!buffer)
 1519		return;
 1520
 1521	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
 1522		VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
 1523	if (rc)
 1524		goto out;
 1525
 1526#define RAID_BYPASS_STATUS		4
 1527#define RAID_BYPASS_CONFIGURED		0x1
 1528#define RAID_BYPASS_ENABLED		0x2
 1529
 1530	bypass_status = buffer[RAID_BYPASS_STATUS];
 1531	device->raid_bypass_configured =
 1532		(bypass_status & RAID_BYPASS_CONFIGURED) != 0;
 1533	if (device->raid_bypass_configured &&
 1534		(bypass_status & RAID_BYPASS_ENABLED) &&
 1535		pqi_get_raid_map(ctrl_info, device) == 0) {
 1536		device->raid_bypass_enabled = true;
 1537		if (get_unaligned_le16(&device->raid_map->flags) &
 1538			RAID_MAP_ENCRYPTION_ENABLED)
 1539			pqi_set_max_transfer_encrypted(ctrl_info, device);
 1540	}
 1541
 1542out:
 1543	kfree(buffer);
 1544}
 1545
 1546/*
 1547 * Use vendor-specific VPD to determine online/offline status of a volume.
 1548 */
 1549
 1550static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
 1551	struct pqi_scsi_dev *device)
 1552{
 1553	int rc;
 1554	size_t page_length;
 1555	u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
 1556	bool volume_offline = true;
 1557	u32 volume_flags;
 1558	struct ciss_vpd_logical_volume_status *vpd;
 1559
 1560	vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
 1561	if (!vpd)
 1562		goto no_buffer;
 1563
 1564	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
 1565		VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
 1566	if (rc)
 1567		goto out;
 1568
 1569	if (vpd->page_code != CISS_VPD_LV_STATUS)
 1570		goto out;
 1571
 1572	page_length = offsetof(struct ciss_vpd_logical_volume_status,
 1573		volume_status) + vpd->page_length;
 1574	if (page_length < sizeof(*vpd))
 1575		goto out;
 1576
 1577	volume_status = vpd->volume_status;
 1578	volume_flags = get_unaligned_be32(&vpd->flags);
 1579	volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;
 1580
 1581out:
 1582	kfree(vpd);
 1583no_buffer:
 1584	device->volume_status = volume_status;
 1585	device->volume_offline = volume_offline;
 1586}
 1587
 1588#define PQI_DEVICE_NCQ_PRIO_SUPPORTED	0x01
 1589#define PQI_DEVICE_PHY_MAP_SUPPORTED	0x10
 1590
 1591static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
 1592	struct pqi_scsi_dev *device,
 1593	struct bmic_identify_physical_device *id_phys)
 1594{
 1595	int rc;
 1596
 1597	memset(id_phys, 0, sizeof(*id_phys));
 1598
 1599	rc = pqi_identify_physical_device(ctrl_info, device,
 1600		id_phys, sizeof(*id_phys));
 1601	if (rc) {
 1602		device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
 1603		return rc;
 1604	}
 1605
 1606	scsi_sanitize_inquiry_string(&id_phys->model[0], 8);
 1607	scsi_sanitize_inquiry_string(&id_phys->model[8], 16);
 1608
 1609	memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor));
 1610	memcpy(device->model, &id_phys->model[8], sizeof(device->model));
 1611
 1612	device->box_index = id_phys->box_index;
 1613	device->phys_box_on_bus = id_phys->phys_box_on_bus;
 1614	device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
 1615	device->queue_depth =
 1616		get_unaligned_le16(&id_phys->current_queue_depth_limit);
 1617	device->active_path_index = id_phys->active_path_number;
 1618	device->path_map = id_phys->redundant_path_present_map;
 1619	memcpy(&device->box,
 1620		&id_phys->alternate_paths_phys_box_on_port,
 1621		sizeof(device->box));
 1622	memcpy(&device->phys_connector,
 1623		&id_phys->alternate_paths_phys_connector,
 1624		sizeof(device->phys_connector));
 1625	device->bay = id_phys->phys_bay_in_box;
 1626	device->lun_count = id_phys->multi_lun_device_lun_count;
 1627	if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) &&
 1628		id_phys->phy_count)
 1629		device->phy_id =
 1630			id_phys->phy_to_phy_map[device->active_path_index];
 1631	else
 1632		device->phy_id = 0xFF;
 1633
 1634	device->ncq_prio_support =
 1635		((get_unaligned_le32(&id_phys->misc_drive_flags) >> 16) &
 1636		PQI_DEVICE_NCQ_PRIO_SUPPORTED);
 1637
 1638	return 0;
 1639}
 1640
 1641static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info,
 1642	struct pqi_scsi_dev *device)
 1643{
 1644	int rc;
 1645	u8 *buffer;
 1646
 1647	buffer = kmalloc(64, GFP_KERNEL);
 1648	if (!buffer)
 1649		return -ENOMEM;
 1650
 1651	/* Send an inquiry to the device to see what it is. */
 1652	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
 1653	if (rc)
 1654		goto out;
 1655
 1656	scsi_sanitize_inquiry_string(&buffer[8], 8);
 1657	scsi_sanitize_inquiry_string(&buffer[16], 16);
 1658
 1659	device->devtype = buffer[0] & 0x1f;
 1660	memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
 1661	memcpy(device->model, &buffer[16], sizeof(device->model));
 1662
 1663	if (device->devtype == TYPE_DISK) {
 1664		if (device->is_external_raid_device) {
 1665			device->raid_level = SA_RAID_UNKNOWN;
 1666			device->volume_status = CISS_LV_OK;
 1667			device->volume_offline = false;
 1668		} else {
 1669			pqi_get_raid_level(ctrl_info, device);
 1670			pqi_get_raid_bypass_status(ctrl_info, device);
 1671			pqi_get_volume_status(ctrl_info, device);
 1672		}
 1673	}
 1674
 1675out:
 1676	kfree(buffer);
 1677
 1678	return rc;
 1679}
 1680
 1681/*
 1682 * Prevent adding drive to OS for some corner cases such as a drive
 1683 * undergoing a sanitize operation. Some OSes will continue to poll
 1684 * the drive until the sanitize completes, which can take hours,
  1685 * resulting in long bootup delays. Commands such as TUR and READ_CAP
  1686 * are allowed, but READ/WRITE return a check condition, so the OS
  1687 * cannot check/read the partition table.
 1688 * Note: devices that have completed sanitize must be re-enabled
 1689 *       using the management utility.
 1690 */
 1691static bool pqi_keep_device_offline(struct pqi_ctrl_info *ctrl_info,
 1692	struct pqi_scsi_dev *device)
 1693{
 1694	u8 scsi_status;
 1695	int rc;
 1696	enum dma_data_direction dir;
 1697	char *buffer;
 1698	int buffer_length = 64;
 1699	size_t sense_data_length;
 1700	struct scsi_sense_hdr sshdr;
 1701	struct pqi_raid_path_request request;
 1702	struct pqi_raid_error_info error_info;
 1703	bool offline = false; /* Assume keep online */
 1704
 1705	/* Do not check controllers. */
 1706	if (pqi_is_hba_lunid(device->scsi3addr))
 1707		return false;
 1708
 1709	/* Do not check LVs. */
 1710	if (pqi_is_logical_device(device))
 1711		return false;
 1712
 1713	buffer = kmalloc(buffer_length, GFP_KERNEL);
 1714	if (!buffer)
 1715		return false; /* Assume not offline */
 1716
 1717	/* Check for SANITIZE in progress using TUR */
 1718	rc = pqi_build_raid_path_request(ctrl_info, &request,
 1719		TEST_UNIT_READY, RAID_CTLR_LUNID, buffer,
 1720		buffer_length, 0, &dir);
 1721	if (rc)
 1722		goto out; /* Assume not offline */
 1723
 1724	memcpy(request.lun_number, device->scsi3addr, sizeof(request.lun_number));
 1725
 1726	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, &error_info);
 1727
 1728	if (rc)
 1729		goto out; /* Assume not offline */
 1730
 1731	scsi_status = error_info.status;
 1732	sense_data_length = get_unaligned_le16(&error_info.sense_data_length);
 1733	if (sense_data_length == 0)
 1734		sense_data_length =
 1735			get_unaligned_le16(&error_info.response_data_length);
 1736	if (sense_data_length) {
 1737		if (sense_data_length > sizeof(error_info.data))
 1738			sense_data_length = sizeof(error_info.data);
 1739
 1740		/*
 1741		 * Check for sanitize in progress: asc:0x04, ascq: 0x1b
 1742		 */
 1743		if (scsi_status == SAM_STAT_CHECK_CONDITION &&
 1744			scsi_normalize_sense(error_info.data,
 1745				sense_data_length, &sshdr) &&
 1746				sshdr.sense_key == NOT_READY &&
 1747				sshdr.asc == 0x04 &&
 1748				sshdr.ascq == 0x1b) {
 1749			device->device_offline = true;
 1750			offline = true;
 1751			goto out; /* Keep device offline */
 1752		}
 1753	}
 1754
 1755out:
 1756	kfree(buffer);
 1757	return offline;
 1758}
 1759
 1760static int pqi_get_device_info_phys_logical(struct pqi_ctrl_info *ctrl_info,
 1761	struct pqi_scsi_dev *device,
 1762	struct bmic_identify_physical_device *id_phys)
 1763{
 1764	int rc;
 1765
 1766	if (device->is_expander_smp_device)
 1767		return 0;
 1768
 1769	if (pqi_is_logical_device(device))
 1770		rc = pqi_get_logical_device_info(ctrl_info, device);
 1771	else
 1772		rc = pqi_get_physical_device_info(ctrl_info, device, id_phys);
 1773
 1774	return rc;
 1775}
 1776
 1777static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
 1778	struct pqi_scsi_dev *device,
 1779	struct bmic_identify_physical_device *id_phys)
 1780{
 1781	int rc;
 1782
 1783	rc = pqi_get_device_info_phys_logical(ctrl_info, device, id_phys);
 1784
 1785	if (rc == 0 && device->lun_count == 0)
 1786		device->lun_count = 1;
 1787
 1788	return rc;
 1789}
 1790
 1791static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
 1792	struct pqi_scsi_dev *device)
 1793{
 1794	char *status;
 1795	static const char unknown_state_str[] =
 1796		"Volume is in an unknown state (%u)";
 1797	char unknown_state_buffer[sizeof(unknown_state_str) + 10];
 1798
 1799	switch (device->volume_status) {
 1800	case CISS_LV_OK:
 1801		status = "Volume online";
 1802		break;
 1803	case CISS_LV_FAILED:
 1804		status = "Volume failed";
 1805		break;
 1806	case CISS_LV_NOT_CONFIGURED:
 1807		status = "Volume not configured";
 1808		break;
 1809	case CISS_LV_DEGRADED:
 1810		status = "Volume degraded";
 1811		break;
 1812	case CISS_LV_READY_FOR_RECOVERY:
 1813		status = "Volume ready for recovery operation";
 1814		break;
 1815	case CISS_LV_UNDERGOING_RECOVERY:
 1816		status = "Volume undergoing recovery";
 1817		break;
 1818	case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
 1819		status = "Wrong physical drive was replaced";
 1820		break;
 1821	case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
 1822		status = "A physical drive not properly connected";
 1823		break;
 1824	case CISS_LV_HARDWARE_OVERHEATING:
 1825		status = "Hardware is overheating";
 1826		break;
 1827	case CISS_LV_HARDWARE_HAS_OVERHEATED:
 1828		status = "Hardware has overheated";
 1829		break;
 1830	case CISS_LV_UNDERGOING_EXPANSION:
 1831		status = "Volume undergoing expansion";
 1832		break;
 1833	case CISS_LV_NOT_AVAILABLE:
 1834		status = "Volume waiting for transforming volume";
 1835		break;
 1836	case CISS_LV_QUEUED_FOR_EXPANSION:
 1837		status = "Volume queued for expansion";
 1838		break;
 1839	case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
 1840		status = "Volume disabled due to SCSI ID conflict";
 1841		break;
 1842	case CISS_LV_EJECTED:
 1843		status = "Volume has been ejected";
 1844		break;
 1845	case CISS_LV_UNDERGOING_ERASE:
 1846		status = "Volume undergoing background erase";
 1847		break;
 1848	case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
 1849		status = "Volume ready for predictive spare rebuild";
 1850		break;
 1851	case CISS_LV_UNDERGOING_RPI:
 1852		status = "Volume undergoing rapid parity initialization";
 1853		break;
 1854	case CISS_LV_PENDING_RPI:
 1855		status = "Volume queued for rapid parity initialization";
 1856		break;
 1857	case CISS_LV_ENCRYPTED_NO_KEY:
 1858		status = "Encrypted volume inaccessible - key not present";
 1859		break;
 1860	case CISS_LV_UNDERGOING_ENCRYPTION:
 1861		status = "Volume undergoing encryption process";
 1862		break;
 1863	case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
 1864		status = "Volume undergoing encryption re-keying process";
 1865		break;
 1866	case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
 1867		status = "Volume encrypted but encryption is disabled";
 1868		break;
 1869	case CISS_LV_PENDING_ENCRYPTION:
 1870		status = "Volume pending migration to encrypted state";
 1871		break;
 1872	case CISS_LV_PENDING_ENCRYPTION_REKEYING:
 1873		status = "Volume pending encryption rekeying";
 1874		break;
 1875	case CISS_LV_NOT_SUPPORTED:
 1876		status = "Volume not supported on this controller";
 1877		break;
 1878	case CISS_LV_STATUS_UNAVAILABLE:
 1879		status = "Volume status not available";
 1880		break;
 1881	default:
 1882		snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
 1883			unknown_state_str, device->volume_status);
 1884		status = unknown_state_buffer;
 1885		break;
 1886	}
 1887
 1888	dev_info(&ctrl_info->pci_dev->dev,
 1889		"scsi %d:%d:%d:%d %s\n",
 1890		ctrl_info->scsi_host->host_no,
 1891		device->bus, device->target, device->lun, status);
 1892}
 1893
 1894static void pqi_rescan_worker(struct work_struct *work)
 1895{
 1896	struct pqi_ctrl_info *ctrl_info;
 1897
 1898	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
 1899		rescan_work);
 1900
 1901	pqi_scan_scsi_devices(ctrl_info);
 1902}
 1903
 1904static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
 1905	struct pqi_scsi_dev *device)
 1906{
 1907	int rc;
 1908
 1909	if (pqi_is_logical_device(device))
 1910		rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
 1911			device->target, device->lun);
 1912	else
 1913		rc = pqi_add_sas_device(ctrl_info->sas_host, device);
 1914
 1915	return rc;
 1916}
 1917
 1918#define PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS	(20 * 1000)
 1919
 1920static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
 1921{
 1922	int rc;
 1923	int lun;
 1924
 1925	for (lun = 0; lun < device->lun_count; lun++) {
 1926		rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun,
 1927			PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS);
 1928		if (rc)
 1929			dev_err(&ctrl_info->pci_dev->dev,
 1930				"scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
 1931				ctrl_info->scsi_host->host_no, device->bus,
 1932				device->target, lun,
 1933				atomic_read(&device->scsi_cmds_outstanding[lun]));
 1934	}
 1935
 1936	if (pqi_is_logical_device(device))
 1937		scsi_remove_device(device->sdev);
 1938	else
 1939		pqi_remove_sas_device(device);
 1940
 1941	pqi_device_remove_start(device);
 1942}
 1943
 1944/* Assumes the SCSI device list lock is held. */
 1945
 1946static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
 1947	int bus, int target, int lun)
 1948{
 1949	struct pqi_scsi_dev *device;
 1950
 1951	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
 1952		if (device->bus == bus && device->target == target && device->lun == lun)
 1953			return device;
 1954
 1955	return NULL;
 1956}
 1957
 1958static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, struct pqi_scsi_dev *dev2)
 1959{
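	/* Physical devices are matched by WWID; logical volumes by volume ID. */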
 1960	if (dev1->is_physical_device != dev2->is_physical_device)
 1961		return false;
 1962
 1963	if (dev1->is_physical_device)
 1964		return memcmp(dev1->wwid, dev2->wwid, sizeof(dev1->wwid)) == 0;
 1965
 1966	return memcmp(dev1->volume_id, dev2->volume_id, sizeof(dev1->volume_id)) == 0;
 1967}
 1968
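/*
 * Result of matching a newly reported device against the existing internal
 * device list (see pqi_scsi_find_entry() below).
 */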
 1969enum pqi_find_result {
 1970	DEVICE_NOT_FOUND,
 1971	DEVICE_CHANGED,
 1972	DEVICE_SAME,
 1973};
 1974
 1975static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
 1976	struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device)
 1977{
 1978	struct pqi_scsi_dev *device;
 1979
 1980	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
 1981		if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) {
 1982			*matching_device = device;
 1983			if (pqi_device_equal(device_to_find, device)) {
 1984				if (device_to_find->volume_offline)
 1985					return DEVICE_CHANGED;
 1986				return DEVICE_SAME;
 1987			}
 1988			return DEVICE_CHANGED;
 1989		}
 1990	}
 1991
 1992	return DEVICE_NOT_FOUND;
 1993}
 1994
 1995static inline const char *pqi_device_type(struct pqi_scsi_dev *device)
 1996{
 1997	if (device->is_expander_smp_device)
 1998		return "Enclosure SMP    ";
 1999
 2000	return scsi_device_type(device->devtype);
 2001}
 2002
 2003#define PQI_DEV_INFO_BUFFER_LENGTH	128
 2004
 2005static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
 2006	char *action, struct pqi_scsi_dev *device)
 2007{
 2008	ssize_t count;
 2009	char buffer[PQI_DEV_INFO_BUFFER_LENGTH];
 2010
 2011	count = scnprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
 2012		"%d:%d:", ctrl_info->scsi_host->host_no, device->bus);
 2013
 2014	if (device->target_lun_valid)
 2015		count += scnprintf(buffer + count,
 2016			PQI_DEV_INFO_BUFFER_LENGTH - count,
 2017			"%d:%d",
 2018			device->target,
 2019			device->lun);
 2020	else
 2021		count += scnprintf(buffer + count,
 2022			PQI_DEV_INFO_BUFFER_LENGTH - count,
 2023			"-:-");
 2024
 2025	if (pqi_is_logical_device(device))
 2026		count += scnprintf(buffer + count,
 2027			PQI_DEV_INFO_BUFFER_LENGTH - count,
 2028			" %08x%08x",
 2029			*((u32 *)&device->scsi3addr),
 2030			*((u32 *)&device->scsi3addr[4]));
 2031	else
 2032		count += scnprintf(buffer + count,
 2033			PQI_DEV_INFO_BUFFER_LENGTH - count,
 2034			" %016llx%016llx",
 2035			get_unaligned_be64(&device->wwid[0]),
 2036			get_unaligned_be64(&device->wwid[8]));
 2037
 2038	count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
 2039		" %s %.8s %.16s ",
 2040		pqi_device_type(device),
 2041		device->vendor,
 2042		device->model);
 2043
 2044	if (pqi_is_logical_device(device)) {
 2045		if (device->devtype == TYPE_DISK)
 2046			count += scnprintf(buffer + count,
 2047				PQI_DEV_INFO_BUFFER_LENGTH - count,
 2048				"SSDSmartPathCap%c En%c %-12s",
 2049				device->raid_bypass_configured ? '+' : '-',
 2050				device->raid_bypass_enabled ? '+' : '-',
 2051				pqi_raid_level_to_string(device->raid_level));
 2052	} else {
 2053		count += scnprintf(buffer + count,
 2054			PQI_DEV_INFO_BUFFER_LENGTH - count,
 2055			"AIO%c", device->aio_enabled ? '+' : '-');
 2056		if (device->devtype == TYPE_DISK ||
 2057			device->devtype == TYPE_ZBC)
 2058			count += scnprintf(buffer + count,
 2059				PQI_DEV_INFO_BUFFER_LENGTH - count,
 2060				" qd=%-6d", device->queue_depth);
 2061	}
 2062
 2063	dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
 2064}
 2065
 2066static bool pqi_raid_maps_equal(struct raid_map *raid_map1, struct raid_map *raid_map2)
 2067{
 2068	u32 raid_map1_size;
 2069	u32 raid_map2_size;
 2070
 2071	if (raid_map1 == NULL || raid_map2 == NULL)
 2072		return raid_map1 == raid_map2;
 2073
 2074	raid_map1_size = get_unaligned_le32(&raid_map1->structure_size);
 2075	raid_map2_size = get_unaligned_le32(&raid_map2->structure_size);
 2076
 2077	if (raid_map1_size != raid_map2_size)
 2078		return false;
 2079
 2080	return memcmp(raid_map1, raid_map2, raid_map1_size) == 0;
 2081}
 2082
 2083/* Assumes the SCSI device list lock is held. */
 2084
 2085static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
 2086	struct pqi_scsi_dev *existing_device, struct pqi_scsi_dev *new_device)
 2087{
 2088	existing_device->device_type = new_device->device_type;
 2089	existing_device->bus = new_device->bus;
 2090	if (new_device->target_lun_valid) {
 2091		existing_device->target = new_device->target;
 2092		existing_device->lun = new_device->lun;
 2093		existing_device->target_lun_valid = true;
 2094	}
 2095
 2096	/* By definition, the scsi3addr and wwid fields are already the same. */
 2097
 2098	existing_device->is_physical_device = new_device->is_physical_device;
 2099	memcpy(existing_device->vendor, new_device->vendor, sizeof(existing_device->vendor));
 2100	memcpy(existing_device->model, new_device->model, sizeof(existing_device->model));
 2101	existing_device->sas_address = new_device->sas_address;
 2102	existing_device->queue_depth = new_device->queue_depth;
 2103	existing_device->device_offline = false;
 2104	existing_device->lun_count = new_device->lun_count;
 2105
 2106	if (pqi_is_logical_device(existing_device)) {
 2107		existing_device->is_external_raid_device = new_device->is_external_raid_device;
 2108
 2109		if (existing_device->devtype == TYPE_DISK) {
 2110			existing_device->raid_level = new_device->raid_level;
 2111			existing_device->volume_status = new_device->volume_status;
 2112			if (ctrl_info->logical_volume_rescan_needed)
 2113				existing_device->rescan = true;
 2114			memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group));
 2115			if (!pqi_raid_maps_equal(existing_device->raid_map, new_device->raid_map)) {
 2116				kfree(existing_device->raid_map);
 2117				existing_device->raid_map = new_device->raid_map;
 2118				/* Prevent the raid_map from being freed along with new_device. */
 2119				new_device->raid_map = NULL;
 2120			}
 2121			existing_device->raid_bypass_configured = new_device->raid_bypass_configured;
 2122			existing_device->raid_bypass_enabled = new_device->raid_bypass_enabled;
 2123		}
 2124	} else {
 2125		existing_device->aio_enabled = new_device->aio_enabled;
 2126		existing_device->aio_handle = new_device->aio_handle;
 2127		existing_device->is_expander_smp_device = new_device->is_expander_smp_device;
 2128		existing_device->active_path_index = new_device->active_path_index;
 2129		existing_device->phy_id = new_device->phy_id;
 2130		existing_device->path_map = new_device->path_map;
 2131		existing_device->bay = new_device->bay;
 2132		existing_device->box_index = new_device->box_index;
 2133		existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
 2134		existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type;
 2135		memcpy(existing_device->box, new_device->box, sizeof(existing_device->box));
 2136		memcpy(existing_device->phys_connector, new_device->phys_connector, sizeof(existing_device->phys_connector));
 2137	}
 2138}
 2139
 2140static inline void pqi_free_device(struct pqi_scsi_dev *device)
 2141{
 2142	if (device) {
 2143		kfree(device->raid_map);
 2144		kfree(device);
 2145	}
 2146}
 2147
 2148/*
 2149 * Called when exposing a new device to the OS fails, in order to re-adjust
 2150 * our internal SCSI device list to match the SCSI ML's view.
 2151 */
 2152
 2153static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
 2154	struct pqi_scsi_dev *device)
 2155{
 2156	unsigned long flags;
 2157
 2158	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
 2159	list_del(&device->scsi_device_list_entry);
 2160	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
 2161
 2162	/* Allow the device structure to be freed later. */
 2163	device->keep_device = false;
 2164}
 2165
 2166static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
 2167{
 2168	if (device->is_expander_smp_device)
 2169		return device->sas_port != NULL;
 2170
 2171	return device->sdev != NULL;
 2172}
 2173
 2174static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
 2175	struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
 2176{
 2177	int rc;
 2178	unsigned int i;
 2179	unsigned long flags;
 2180	enum pqi_find_result find_result;
 2181	struct pqi_scsi_dev *device;
 2182	struct pqi_scsi_dev *next;
 2183	struct pqi_scsi_dev *matching_device;
 2184	LIST_HEAD(add_list);
 2185	LIST_HEAD(delete_list);
 2186
 2187	/*
 2188	 * The idea here is to do as little work as possible while holding the
 2189	 * spinlock.  That's why we go to great pains to defer anything other
 2190	 * than updating the internal device list until after we release the
 2191	 * spinlock.
 2192	 */
 2193
 2194	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
 2195
 2196	/* Assume that all devices in the existing list have gone away. */
 2197	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
 2198		device->device_gone = true;
 2199
 2200	for (i = 0; i < num_new_devices; i++) {
 2201		device = new_device_list[i];
 2202
 2203		find_result = pqi_scsi_find_entry(ctrl_info, device,
 2204			&matching_device);
 2205
 2206		switch (find_result) {
 2207		case DEVICE_SAME:
 2208			/*
 2209			 * The newly found device is already in the existing
 2210			 * device list.
 2211			 */
 2212			device->new_device = false;
 2213			matching_device->device_gone = false;
 2214			pqi_scsi_update_device(ctrl_info, matching_device, device);
 2215			break;
 2216		case DEVICE_NOT_FOUND:
 2217			/*
 2218			 * The newly found device is NOT in the existing device
 2219			 * list.
 2220			 */
 2221			device->new_device = true;
 2222			break;
 2223		case DEVICE_CHANGED:
 2224			/*
 2225			 * The original device has gone away and we need to add
 2226			 * the new device.
 2227			 */
 2228			device->new_device = true;
 2229			break;
 2230		}
 2231	}
 2232
 2233	/* Process all devices that have gone away. */
 2234	list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
 2235		scsi_device_list_entry) {
 2236		if (device->device_gone) {
 2237			list_del(&device->scsi_device_list_entry);
 2238			list_add_tail(&device->delete_list_entry, &delete_list);
 2239		}
 2240	}
 2241
 2242	/* Process all new devices. */
 2243	for (i = 0; i < num_new_devices; i++) {
 2244		device = new_device_list[i];
 2245		if (!device->new_device)
 2246			continue;
 2247		if (device->volume_offline)
 2248			continue;
 2249		list_add_tail(&device->scsi_device_list_entry,
 2250			&ctrl_info->scsi_device_list);
 2251		list_add_tail(&device->add_list_entry, &add_list);
 2252		/* To prevent this device structure from being freed later. */
 2253		device->keep_device = true;
 2254	}
 2255
 2256	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
 2257
 2258	/*
 2259	 * If OFA is in progress and there are devices that need to be deleted,
 2260	 * allow any pending reset operations to continue and unblock any SCSI
 2261	 * requests before removal.
 2262	 */
 2263	if (pqi_ofa_in_progress(ctrl_info)) {
 2264		list_for_each_entry_safe(device, next, &delete_list, delete_list_entry)
 2265			if (pqi_is_device_added(device))
 2266				pqi_device_remove_start(device);
 2267		pqi_ctrl_unblock_device_reset(ctrl_info);
 2268		pqi_scsi_unblock_requests(ctrl_info);
 2269	}
 2270
 2271	/* Remove all devices that have gone away. */
 2272	list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) {
 2273		if (device->volume_offline) {
 2274			pqi_dev_info(ctrl_info, "offline", device);
 2275			pqi_show_volume_status(ctrl_info, device);
 2276		} else {
 2277			pqi_dev_info(ctrl_info, "removed", device);
 2278		}
 2279		if (pqi_is_device_added(device))
 2280			pqi_remove_device(ctrl_info, device);
 2281		list_del(&device->delete_list_entry);
 2282		pqi_free_device(device);
 2283	}
 2284
 2285	/*
 2286	 * Notify the SML of any existing device changes, such as
 2287	 * queue depth or device size.
 2288	 */
 2289	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
 2290		if (device->sdev && device->queue_depth != device->advertised_queue_depth) {
 2291			device->advertised_queue_depth = device->queue_depth;
 2292			scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
 2293			if (device->rescan) {
 2294				scsi_rescan_device(&device->sdev->sdev_gendev);
 2295				device->rescan = false;
 2296			}
 2297		}
 2298	}
 2299
 2300	/* Expose any new devices. */
 2301	list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
 2302		if (!pqi_is_device_added(device)) {
 2303			rc = pqi_add_device(ctrl_info, device);
 2304			if (rc == 0) {
 2305				pqi_dev_info(ctrl_info, "added", device);
 2306			} else {
 2307				dev_warn(&ctrl_info->pci_dev->dev,
 2308					"scsi %d:%d:%d:%d addition failed, device not added\n",
 2309					ctrl_info->scsi_host->host_no,
 2310					device->bus, device->target,
 2311					device->lun);
 2312				pqi_fixup_botched_add(ctrl_info, device);
 2313			}
 2314		}
 2315	}
 2316
 2317	ctrl_info->logical_volume_rescan_needed = false;
 2318
 2319}
 2320
 2321static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device)
 2322{
 2323	/*
 2324	 * Only support the HBA controller itself as a RAID
 2325	 * controller.  If it's a RAID controller other than
 2326	 * the HBA itself (an external RAID controller, for
 2327	 * example), we don't support it.
 2328	 */
 2329	if (device->device_type == SA_DEVICE_TYPE_CONTROLLER &&
 2330		!pqi_is_hba_lunid(device->scsi3addr))
 2331			return false;
 2332
 2333	return true;
 2334}
 2335
 2336static inline bool pqi_skip_device(u8 *scsi3addr)
 2337{
 2338	/* Ignore all masked devices. */
 2339	if (MASKED_DEVICE(scsi3addr))
 2340		return true;
 2341
 2342	return false;
 2343}
 2344
 2345static inline void pqi_mask_device(u8 *scsi3addr)
 2346{
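	/*
	 * Set the masked-device bits in byte 3 of the LUN address so that
	 * pqi_skip_device() will subsequently hide this device.
	 */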
 2347	scsi3addr[3] |= 0xc0;
 2348}
 2349
 2350static inline bool pqi_is_multipath_device(struct pqi_scsi_dev *device)
 2351{
 2352	if (pqi_is_logical_device(device))
 2353		return false;
 2354
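	/*
	 * path_map is a bitmask of active paths; clearing the lowest set bit
	 * leaves a non-zero value only when more than one path is present.
	 */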
 2355	return (device->path_map & (device->path_map - 1)) != 0;
 2356}
 2357
 2358static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
 2359{
 2360	return !device->is_physical_device || !pqi_skip_device(device->scsi3addr);
 2361}
 2362
 2363static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
 2364{
 2365	int i;
 2366	int rc;
 2367	LIST_HEAD(new_device_list_head);
 2368	struct report_phys_lun_16byte_wwid_list *physdev_list = NULL;
 2369	struct report_log_lun_list *logdev_list = NULL;
 2370	struct report_phys_lun_16byte_wwid *phys_lun;
 2371	struct report_log_lun *log_lun;
 2372	struct bmic_identify_physical_device *id_phys = NULL;
 2373	u32 num_physicals;
 2374	u32 num_logicals;
 2375	struct pqi_scsi_dev **new_device_list = NULL;
 2376	struct pqi_scsi_dev *device;
 2377	struct pqi_scsi_dev *next;
 2378	unsigned int num_new_devices;
 2379	unsigned int num_valid_devices;
 2380	bool is_physical_device;
 2381	u8 *scsi3addr;
 2382	unsigned int physical_index;
 2383	unsigned int logical_index;
 2384	static char *out_of_memory_msg =
 2385		"failed to allocate memory, device discovery stopped";
 2386
 2387	rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
 2388	if (rc)
 2389		goto out;
 2390
 2391	if (physdev_list)
 2392		num_physicals =
 2393			get_unaligned_be32(&physdev_list->header.list_length)
 2394				/ sizeof(physdev_list->lun_entries[0]);
 2395	else
 2396		num_physicals = 0;
 2397
 2398	if (logdev_list)
 2399		num_logicals =
 2400			get_unaligned_be32(&logdev_list->header.list_length)
 2401				/ sizeof(logdev_list->lun_entries[0]);
 2402	else
 2403		num_logicals = 0;
 2404
 2405	if (num_physicals) {
 2406		/*
 2407		 * We need this buffer for the calls to pqi_get_device_info()
 2408		 * below.  We allocate it here instead of inside
 2409		 * pqi_get_device_info() because it's a fairly large
 2410		 * buffer.
 2411		 */
 2412		id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
 2413		if (!id_phys) {
 2414			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
 2415				out_of_memory_msg);
 2416			rc = -ENOMEM;
 2417			goto out;
 2418		}
 2419
 2420		if (pqi_hide_vsep) {
 2421			for (i = num_physicals - 1; i >= 0; i--) {
 2422				phys_lun = &physdev_list->lun_entries[i];
 2423				if (CISS_GET_DRIVE_NUMBER(phys_lun->lunid) == PQI_VSEP_CISS_BTL) {
 2424					pqi_mask_device(phys_lun->lunid);
 2425					break;
 2426				}
 2427			}
 2428		}
 2429	}
 2430
 2431	if (num_logicals &&
 2432		(logdev_list->header.flags & CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX))
 2433		ctrl_info->lv_drive_type_mix_valid = true;
 2434
 2435	num_new_devices = num_physicals + num_logicals;
 2436
 2437	new_device_list = kmalloc_array(num_new_devices,
 2438					sizeof(*new_device_list),
 2439					GFP_KERNEL);
 2440	if (!new_device_list) {
 2441		dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
 2442		rc = -ENOMEM;
 2443		goto out;
 2444	}
 2445
 2446	for (i = 0; i < num_new_devices; i++) {
 2447		device = kzalloc(sizeof(*device), GFP_KERNEL);
 2448		if (!device) {
 2449			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
 2450				out_of_memory_msg);
 2451			rc = -ENOMEM;
 2452			goto out;
 2453		}
 2454		list_add_tail(&device->new_device_list_entry,
 2455			&new_device_list_head);
 2456	}
 2457
 2458	device = NULL;
 2459	num_valid_devices = 0;
 2460	physical_index = 0;
 2461	logical_index = 0;
 2462
 2463	for (i = 0; i < num_new_devices; i++) {
 2464
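		/*
		 * Physical LUNs are normally processed before logical LUNs;
		 * when pqi_expose_ld_first is set, the order is reversed so
		 * logical volumes are exposed to the OS first.
		 */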
 2465		if ((!pqi_expose_ld_first && i < num_physicals) ||
 2466			(pqi_expose_ld_first && i >= num_logicals)) {
 2467			is_physical_device = true;
 2468			phys_lun = &physdev_list->lun_entries[physical_index++];
 2469			log_lun = NULL;
 2470			scsi3addr = phys_lun->lunid;
 2471		} else {
 2472			is_physical_device = false;
 2473			phys_lun = NULL;
 2474			log_lun = &logdev_list->lun_entries[logical_index++];
 2475			scsi3addr = log_lun->lunid;
 2476		}
 2477
 2478		if (is_physical_device && pqi_skip_device(scsi3addr))
 2479			continue;
 2480
 2481		if (device)
 2482			device = list_next_entry(device, new_device_list_entry);
 2483		else
 2484			device = list_first_entry(&new_device_list_head,
 2485				struct pqi_scsi_dev, new_device_list_entry);
 2486
 2487		memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
 2488		device->is_physical_device = is_physical_device;
 2489		if (is_physical_device) {
 2490			device->device_type = phys_lun->device_type;
 2491			if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP)
 2492				device->is_expander_smp_device = true;
 2493		} else {
 2494			device->is_external_raid_device =
 2495				pqi_is_external_raid_addr(scsi3addr);
 2496		}
 2497
 2498		if (!pqi_is_supported_device(device))
 2499			continue;
 2500
 2501		/* Do not present disks that the OS cannot fully probe */
 2502		if (pqi_keep_device_offline(ctrl_info, device))
 2503			continue;
 2504
 2505		/* Gather information about the device. */
 2506		rc = pqi_get_device_info(ctrl_info, device, id_phys);
 2507		if (rc == -ENOMEM) {
 2508			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
 2509				out_of_memory_msg);
 2510			goto out;
 2511		}
 2512		if (rc) {
 2513			if (device->is_physical_device)
 2514				dev_warn(&ctrl_info->pci_dev->dev,
 2515					"obtaining device info failed, skipping physical device %016llx%016llx\n",
 2516					get_unaligned_be64(&phys_lun->wwid[0]),
 2517					get_unaligned_be64(&phys_lun->wwid[8]));
 2518			else
 2519				dev_warn(&ctrl_info->pci_dev->dev,
 2520					"obtaining device info failed, skipping logical device %08x%08x\n",
 2521					*((u32 *)&device->scsi3addr),
 2522					*((u32 *)&device->scsi3addr[4]));
 2523			rc = 0;
 2524			continue;
 2525		}
 2526
 2527		pqi_assign_bus_target_lun(device);
 2528
 2529		if (device->is_physical_device) {
 2530			memcpy(device->wwid, phys_lun->wwid, sizeof(device->wwid));
 2531			if ((phys_lun->device_flags &
 2532				CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) &&
 2533				phys_lun->aio_handle) {
 2534					device->aio_enabled = true;
 2535					device->aio_handle =
 2536						phys_lun->aio_handle;
 2537			}
 2538		} else {
 2539			memcpy(device->volume_id, log_lun->volume_id,
 2540				sizeof(device->volume_id));
 2541		}
 2542
 2543		device->sas_address = get_unaligned_be64(&device->wwid[0]);
 2544
 2545		new_device_list[num_valid_devices++] = device;
 2546	}
 2547
 2548	pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
 2549
 2550out:
 2551	list_for_each_entry_safe(device, next, &new_device_list_head,
 2552		new_device_list_entry) {
 2553		if (device->keep_device)
 2554			continue;
 2555		list_del(&device->new_device_list_entry);
 2556		pqi_free_device(device);
 2557	}
 2558
 2559	kfree(new_device_list);
 2560	kfree(physdev_list);
 2561	kfree(logdev_list);
 2562	kfree(id_phys);
 2563
 2564	return rc;
 2565}
 2566
 2567static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
 2568{
 2569	int rc;
 2570	int mutex_acquired;
 2571
 2572	if (pqi_ctrl_offline(ctrl_info))
 2573		return -ENXIO;
 2574
 2575	mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex);
 2576
 2577	if (!mutex_acquired) {
 2578		if (pqi_ctrl_scan_blocked(ctrl_info))
 2579			return -EBUSY;
 2580		pqi_schedule_rescan_worker_delayed(ctrl_info);
 2581		return -EINPROGRESS;
 2582	}
 2583
 2584	rc = pqi_update_scsi_devices(ctrl_info);
 2585	if (rc && !pqi_ctrl_scan_blocked(ctrl_info))
 2586		pqi_schedule_rescan_worker_delayed(ctrl_info);
 2587
 2588	mutex_unlock(&ctrl_info->scan_mutex);
 2589
 2590	return rc;
 2591}
 2592
 2593static void pqi_scan_start(struct Scsi_Host *shost)
 2594{
 2595	struct pqi_ctrl_info *ctrl_info;
 2596
 2597	ctrl_info = shost_to_hba(shost);
 2598
 2599	pqi_scan_scsi_devices(ctrl_info);
 2600}
 2601
 2602/* Returns TRUE if scan is finished. */
 2603
 2604static int pqi_scan_finished(struct Scsi_Host *shost,
 2605	unsigned long elapsed_time)
 2606{
 2607	struct pqi_ctrl_info *ctrl_info;
 2608
 2609	ctrl_info = shost_priv(shost);
 2610
 2611	return !mutex_is_locked(&ctrl_info->scan_mutex);
 2612}
 2613
 2614static inline void pqi_set_encryption_info(struct pqi_encryption_info *encryption_info,
 2615	struct raid_map *raid_map, u64 first_block)
 2616{
 2617	u32 volume_blk_size;
 2618
 2619	/*
 2620	 * Set the encryption tweak values based on logical block address.
 2621	 * If the block size is 512, the tweak value is equal to the LBA.
 2622	 * For other block sizes, tweak value is (LBA * block size) / 512.
 2623	 */
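	/*
	 * For example: with a 4096-byte block size, an I/O starting at
	 * LBA 10 uses a tweak of (10 * 4096) / 512 = 80.
	 */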
 2624	volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
 2625	if (volume_blk_size != 512)
 2626		first_block = (first_block * volume_blk_size) / 512;
 2627
 2628	encryption_info->data_encryption_key_index =
 2629		get_unaligned_le16(&raid_map->data_encryption_key_index);
 2630	encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
 2631	encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
 2632}
 2633
 2634/*
 2635 * Attempt to perform RAID bypass mapping for a logical volume I/O.
 2636 */
 2637
 2638static bool pqi_aio_raid_level_supported(struct pqi_ctrl_info *ctrl_info,
 2639	struct pqi_scsi_dev_raid_map_data *rmd)
 2640{
 2641	bool is_supported = true;
 2642
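	/*
	 * RAID 0 requests always qualify for bypass. For RAID 1/Triple/5/6,
	 * writes qualify only when the corresponding enable_r*_writes flag is
	 * set and the transfer length is within the per-level limit; reads
	 * always qualify for the supported levels.
	 */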
 2643	switch (rmd->raid_level) {
 2644	case SA_RAID_0:
 2645		break;
 2646	case SA_RAID_1:
 2647		if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
 2648			rmd->data_length > ctrl_info->max_write_raid_1_10_2drive))
 2649			is_supported = false;
 2650		break;
 2651	case SA_RAID_TRIPLE:
 2652		if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
 2653			rmd->data_length > ctrl_info->max_write_raid_1_10_3drive))
 2654			is_supported = false;
 2655		break;
 2656	case SA_RAID_5:
 2657		if (rmd->is_write && (!ctrl_info->enable_r5_writes ||
 2658			rmd->data_length > ctrl_info->max_write_raid_5_6))
 2659			is_supported = false;
 2660		break;
 2661	case SA_RAID_6:
 2662		if (rmd->is_write && (!ctrl_info->enable_r6_writes ||
 2663			rmd->data_length > ctrl_info->max_write_raid_5_6))
 2664			is_supported = false;
 2665		break;
 2666	default:
 2667		is_supported = false;
 2668		break;
 2669	}
 2670
 2671	return is_supported;
 2672}
 2673
 2674#define PQI_RAID_BYPASS_INELIGIBLE	1
 2675
 2676static int pqi_get_aio_lba_and_block_count(struct scsi_cmnd *scmd,
 2677	struct pqi_scsi_dev_raid_map_data *rmd)
 2678{
 2679	/* Check for valid opcode, get LBA and block count. */
 2680	switch (scmd->cmnd[0]) {
 2681	case WRITE_6:
 2682		rmd->is_write = true;
 2683		fallthrough;
 2684	case READ_6:
 2685		rmd->first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
 2686			(scmd->cmnd[2] << 8) | scmd->cmnd[3]);
 2687		rmd->block_cnt = (u32)scmd->cmnd[4];
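		/* In a 6-byte CDB, a transfer length of 0 means 256 blocks. */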
 2688		if (rmd->block_cnt == 0)
 2689			rmd->block_cnt = 256;
 2690		break;
 2691	case WRITE_10:
 2692		rmd->is_write = true;
 2693		fallthrough;
 2694	case READ_10:
 2695		rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
 2696		rmd->block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
 2697		break;
 2698	case WRITE_12:
 2699		rmd->is_write = true;
 2700		fallthrough;
 2701	case READ_12:
 2702		rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
 2703		rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
 2704		break;
 2705	case WRITE_16:
 2706		rmd->is_write = true;
 2707		fallthrough;
 2708	case READ_16:
 2709		rmd->first_block = get_unaligned_be64(&scmd->cmnd[2]);
 2710		rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
 2711		break;
 2712	default:
 2713		/* Process via normal I/O path. */
 2714		return PQI_RAID_BYPASS_INELIGIBLE;
 2715	}
 2716
 2717	put_unaligned_le32(scsi_bufflen(scmd), &rmd->data_length);
 2718
 2719	return 0;
 2720}
 2721
 2722static int pci_get_aio_common_raid_map_values(struct pqi_ctrl_info *ctrl_info,
 2723	struct pqi_scsi_dev_raid_map_data *rmd, struct raid_map *raid_map)
 2724{
 2725#if BITS_PER_LONG == 32
 2726	u64 tmpdiv;
 2727#endif
 2728
 2729	rmd->last_block = rmd->first_block + rmd->block_cnt - 1;
 2730
 2731	/* Check for invalid block or wraparound. */
 2732	if (rmd->last_block >=
 2733		get_unaligned_le64(&raid_map->volume_blk_cnt) ||
 2734		rmd->last_block < rmd->first_block)
 2735		return PQI_RAID_BYPASS_INELIGIBLE;
 2736
 2737	rmd->data_disks_per_row =
 2738		get_unaligned_le16(&raid_map->data_disks_per_row);
 2739	rmd->strip_size = get_unaligned_le16(&raid_map->strip_size);
 2740	rmd->layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
 2741
 2742	/* Calculate stripe information for the request. */
 2743	rmd->blocks_per_row = rmd->data_disks_per_row * rmd->strip_size;
 2744	if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
 2745		return PQI_RAID_BYPASS_INELIGIBLE;
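	/*
	 * On 32-bit builds the 64-bit divisions below must go through
	 * do_div(), since the compiler's 64-bit division helpers are generally
	 * unavailable in the kernel; 64-bit builds can divide directly.
	 */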
 2746#if BITS_PER_LONG == 32
 2747	tmpdiv = rmd->first_block;
 2748	do_div(tmpdiv, rmd->blocks_per_row);
 2749	rmd->first_row = tmpdiv;
 2750	tmpdiv = rmd->last_block;
 2751	do_div(tmpdiv, rmd->blocks_per_row);
 2752	rmd->last_row = tmpdiv;
 2753	rmd->first_row_offset = (u32)(rmd->first_block - (rmd->first_row * rmd->blocks_per_row));
 2754	rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * rmd->blocks_per_row));
 2755	tmpdiv = rmd->first_row_offset;
 2756	do_div(tmpdiv, rmd->strip_size);
 2757	rmd->first_column = tmpdiv;
 2758	tmpdiv = rmd->last_row_offset;
 2759	do_div(tmpdiv, rmd->strip_size);
 2760	rmd->last_column = tmpdiv;
 2761#else
 2762	rmd->first_row = rmd->first_block / rmd->blocks_per_row;
 2763	rmd->last_row = rmd->last_block / rmd->blocks_per_row;
 2764	rmd->first_row_offset = (u32)(rmd->first_block -
 2765		(rmd->first_row * rmd->blocks_per_row));
 2766	rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row *
 2767		rmd->blocks_per_row));
 2768	rmd->first_column = rmd->first_row_offset / rmd->strip_size;
 2769	rmd->last_column = rmd->last_row_offset / rmd->strip_size;
 2770#endif
 2771
 2772	/* Hand anything spanning more than one row/column back to the controller. */
 2773	if (rmd->first_row != rmd->last_row ||
 2774		rmd->first_column != rmd->last_column)
 2775		return PQI_RAID_BYPASS_INELIGIBLE;
 2776
 2777	/* Proceeding with driver mapping. */
 2778	rmd->total_disks_per_row = rmd->data_disks_per_row +
 2779		get_unaligned_le16(&raid_map->metadata_disks_per_row);
 2780	rmd->map_row = ((u32)(rmd->first_row >>
 2781		raid_map->parity_rotation_shift)) %
 2782		get_unaligned_le16(&raid_map->row_cnt);
 2783	rmd->map_index = (rmd->map_row * rmd->total_disks_per_row) +
 2784		rmd->first_column;
 2785
 2786	return 0;
 2787}
 2788
 2789static int pqi_calc_aio_r5_or_r6(struct pqi_scsi_dev_raid_map_data *rmd,
 2790	struct raid_map *raid_map)
 2791{
 2792#if BITS_PER_LONG == 32
 2793	u64 tmpdiv;
 2794#endif
 2795
 2796	if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
 2797		return PQI_RAID_BYPASS_INELIGIBLE;
 2798
 2799	/* RAID 50/60 */
 2800	/* Verify the first and last blocks are in the same RAID group. */
 2801	rmd->stripesize = rmd->blocks_per_row * rmd->layout_map_count;
 2802#if BITS_PER_LONG == 32
 2803	tmpdiv = rmd->first_block;
 2804	rmd->first_group = do_div(tmpdiv, rmd->stripesize);
 2805	tmpdiv = rmd->first_group;
 2806	do_div(tmpdiv, rmd->blocks_per_row);
 2807	rmd->first_group = tmpdiv;
 2808	tmpdiv = rmd->last_block;
 2809	rmd->last_group = do_div(tmpdiv, rmd->stripesize);
 2810	tmpdiv = rmd->last_group;
 2811	do_div(tmpdiv, rmd->blocks_per_row);
 2812	rmd->last_group = tmpdiv;
 2813#else
 2814	rmd->first_group = (rmd->first_block % rmd->stripesize) / rmd->blocks_per_row;
 2815	rmd->last_group = (rmd->last_block % rmd->stripesize) / rmd->blocks_per_row;
 2816#endif
 2817	if (rmd->first_group != rmd->last_group)
 2818		return PQI_RAID_BYPASS_INELIGIBLE;
 2819
 2820	/* Verify request is in a single row of RAID 5/6. */
 2821#if BITS_PER_LONG == 32
 2822	tmpdiv = rmd->first_block;
 2823	do_div(tmpdiv, rmd->stripesize);
 2824	rmd->first_row = tmpdiv;
 2825	rmd->r5or6_first_row = tmpdiv;
 2826	tmpdiv = rmd->last_block;
 2827	do_div(tmpdiv, rmd->stripesize);
 2828	rmd->r5or6_last_row = tmpdiv;
 2829#else
 2830	rmd->first_row = rmd->r5or6_first_row =
 2831		rmd->first_block / rmd->stripesize;
 2832	rmd->r5or6_last_row = rmd->last_block / rmd->stripesize;
 2833#endif
 2834	if (rmd->r5or6_first_row != rmd->r5or6_last_row)
 2835		return PQI_RAID_BYPASS_INELIGIBLE;
 2836
 2837	/* Verify request is in a single column. */
 2838#if BITS_PER_LONG == 32
 2839	tmpdiv = rmd->first_block;
 2840	rmd->first_row_offset = do_div(tmpdiv, rmd->stripesize);
 2841	tmpdiv = rmd->first_row_offset;
 2842	rmd->first_row_offset = (u32)do_div(tmpdiv, rmd->blocks_per_row);
 2843	rmd->r5or6_first_row_offset = rmd->first_row_offset;
 2844	tmpdiv = rmd->last_block;
 2845	rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->stripesize);
 2846	tmpdiv = rmd->r5or6_last_row_offset;
 2847	rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->blocks_per_row);
 2848	tmpdiv = rmd->r5or6_first_row_offset;
 2849	do_div(tmpdiv, rmd->strip_size);
 2850	rmd->first_column = rmd->r5or6_first_column = tmpdiv;
 2851	tmpdiv = rmd->r5or6_last_row_offset;
 2852	do_div(tmpdiv, rmd->strip_size);
 2853	rmd->r5or6_last_column = tmpdiv;
 2854#else
 2855	rmd->first_row_offset = rmd->r5or6_first_row_offset =
 2856		(u32)((rmd->first_block % rmd->stripesize) %
 2857		rmd->blocks_per_row);
 2858
 2859	rmd->r5or6_last_row_offset =
 2860		(u32)((rmd->last_block % rmd->stripesize) %
 2861		rmd->blocks_per_row);
 2862
 2863	rmd->first_column =
 2864		rmd->r5or6_first_row_offset / rmd->strip_size;
 2865	rmd->r5or6_first_column = rmd->first_column;
 2866	rmd->r5or6_last_column = rmd->r5or6_last_row_offset / rmd->strip_size;
 2867#endif
 2868	if (rmd->r5or6_first_column != rmd->r5or6_last_column)
 2869		return PQI_RAID_BYPASS_INELIGIBLE;
 2870
 2871	/* Request is eligible. */
 2872	rmd->map_row =
 2873		((u32)(rmd->first_row >> raid_map->parity_rotation_shift)) %
 2874		get_unaligned_le16(&raid_map->row_cnt);
 2875
 2876	rmd->map_index = (rmd->first_group *
 2877		(get_unaligned_le16(&raid_map->row_cnt) *
 2878		rmd->total_disks_per_row)) +
 2879		(rmd->map_row * rmd->total_disks_per_row) + rmd->first_column;
 2880
 2881	if (rmd->is_write) {
 2882		u32 index;
 2883
 2884		/*
 2885		 * p_parity_it_nexus and q_parity_it_nexus are the IT nexus handles
 2886		 * of the parity entries inside the device's raid_map.
 2887		 *
 2888		 * A device's RAID map is bounded by the number of RAID disks squared.
 2889		 *
 2890		 * The device's RAID map size is checked during device
 2891		 * initialization.
 2892		 */
 2893		index = DIV_ROUND_UP(rmd->map_index + 1, rmd->total_disks_per_row);
 2894		index *= rmd->total_disks_per_row;
 2895		index -= get_unaligned_le16(&raid_map->metadata_disks_per_row);
 2896
 2897		rmd->p_parity_it_nexus = raid_map->disk_data[index].aio_handle;
 2898		if (rmd->raid_level == SA_RAID_6) {
 2899			rmd->q_parity_it_nexus = raid_map->disk_data[index + 1].aio_handle;
 2900			rmd->xor_mult = raid_map->disk_data[rmd->map_index].xor_mult[1];
 2901		}
 2902#if BITS_PER_LONG == 32
 2903		tmpdiv = rmd->first_block;
 2904		do_div(tmpdiv, rmd->blocks_per_row);
 2905		rmd->row = tmpdiv;
 2906#else
 2907		rmd->row = rmd->first_block / rmd->blocks_per_row;
 2908#endif
 2909	}
 2910
 2911	return 0;
 2912}
 2913
 2914static void pqi_set_aio_cdb(struct pqi_scsi_dev_raid_map_data *rmd)
 2915{
 2916	/* Build the new CDB for the physical disk I/O. */
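	/*
	 * A 10-byte CDB carries only a 32-bit LBA and a 16-bit transfer
	 * length, so starting blocks above 0xffffffff need the 16-byte form.
	 */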
 2917	if (rmd->disk_block > 0xffffffff) {
 2918		rmd->cdb[0] = rmd->is_write ? WRITE_16 : READ_16;
 2919		rmd->cdb[1] = 0;
 2920		put_unaligned_be64(rmd->disk_block, &rmd->cdb[2]);
 2921		put_unaligned_be32(rmd->disk_block_cnt, &rmd->cdb[10]);
 2922		rmd->cdb[14] = 0;
 2923		rmd->cdb[15] = 0;
 2924		rmd->cdb_length = 16;
 2925	} else {
 2926		rmd->cdb[0] = rmd->is_write ? WRITE_10 : READ_10;
 2927		rmd->cdb[1] = 0;
 2928		put_unaligned_be32((u32)rmd->disk_block, &rmd->cdb[2]);
 2929		rmd->cdb[6] = 0;
 2930		put_unaligned_be16((u16)rmd->disk_block_cnt, &rmd->cdb[7]);
 2931		rmd->cdb[9] = 0;
 2932		rmd->cdb_length = 10;
 2933	}
 2934}
 2935
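/*
 * For RAID 1/Triple writes, the RAID map stores one group of
 * data_disks_per_row entries per mirror leg, so stepping the index by
 * data_disks_per_row selects the corresponding drive in the next leg;
 * one IT nexus (AIO handle) is collected per leg (layout_map_count legs).
 */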
 2936static void pqi_calc_aio_r1_nexus(struct raid_map *raid_map,
 2937	struct pqi_scsi_dev_raid_map_data *rmd)
 2938{
 2939	u32 index;
 2940	u32 group;
 2941
 2942	group = rmd->map_index / rmd->data_disks_per_row;
 2943
 2944	index = rmd->map_index - (group * rmd->data_disks_per_row);
 2945	rmd->it_nexus[0] = raid_map->disk_data[index].aio_handle;
 2946	index += rmd->data_disks_per_row;
 2947	rmd->it_nexus[1] = raid_map->disk_data[index].aio_handle;
 2948	if (rmd->layout_map_count > 2) {
 2949		index += rmd->data_disks_per_row;
 2950		rmd->it_nexus[2] = raid_map->disk_data[index].aio_handle;
 2951	}
 2952
 2953	rmd->num_it_nexus_entries = rmd->layout_map_count;
 2954}
 2955
 2956static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
 2957	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
 2958	struct pqi_queue_group *queue_group)
 2959{
 2960	int rc;
 2961	struct raid_map *raid_map;
 2962	u32 group;
 2963	u32 next_bypass_group;
 2964	struct pqi_encryption_info *encryption_info_ptr;
 2965	struct pqi_encryption_info encryption_info;
 2966	struct pqi_scsi_dev_raid_map_data rmd = { 0 };
 2967
 2968	rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
 2969	if (rc)
 2970		return PQI_RAID_BYPASS_INELIGIBLE;
 2971
 2972	rmd.raid_level = device->raid_level;
 2973
 2974	if (!pqi_aio_raid_level_supported(ctrl_info, &rmd))
 2975		return PQI_RAID_BYPASS_INELIGIBLE;
 2976
 2977	if (unlikely(rmd.block_cnt == 0))
 2978		return PQI_RAID_BYPASS_INELIGIBLE;
 2979
 2980	raid_map = device->raid_map;
 2981
 2982	rc = pci_get_aio_common_raid_map_values(ctrl_info, &rmd, raid_map);
 2983	if (rc)
 2984		return PQI_RAID_BYPASS_INELIGIBLE;
 2985
 2986	if (device->raid_level == SA_RAID_1 ||
 2987		device->raid_level == SA_RAID_TRIPLE) {
 2988		if (rmd.is_write) {
 2989			pqi_calc_aio_r1_nexus(raid_map, &rmd);
 2990		} else {
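			/*
			 * Reads round-robin across the mirror legs: the per-index
			 * bypass group counter selects which copy services this
			 * request, then advances for the next one.
			 */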
 2991			group = device->next_bypass_group[rmd.map_index];
 2992			next_bypass_group = group + 1;
 2993			if (next_bypass_group >= rmd.layout_map_count)
 2994				next_bypass_group = 0;
 2995			device->next_bypass_group[rmd.map_index] = next_bypass_group;
 2996			rmd.map_index += group * rmd.data_disks_per_row;
 2997		}
 2998	} else if ((device->raid_level == SA_RAID_5 ||
 2999		device->raid_level == SA_RAID_6) &&
 3000		(rmd.layout_map_count > 1 || rmd.is_write)) {
 3001		rc = pqi_calc_aio_r5_or_r6(&rmd, raid_map);
 3002		if (rc)
 3003			return PQI_RAID_BYPASS_INELIGIBLE;
 3004	}
 3005
 3006	if (unlikely(rmd.map_index >= RAID_MAP_MAX_ENTRIES))
 3007		return PQI_RAID_BYPASS_INELIGIBLE;
 3008
 3009	rmd.aio_handle = raid_map->disk_data[rmd.map_index].aio_handle;
 3010	rmd.disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
 3011		rmd.first_row * rmd.strip_size +
 3012		(rmd.first_row_offset - rmd.first_column * rmd.strip_size);
 3013	rmd.disk_block_cnt = rmd.block_cnt;
 3014
 3015	/* Handle differing logical/physical block sizes. */
 3016	if (raid_map->phys_blk_shift) {
 3017		rmd.disk_block <<= raid_map->phys_blk_shift;
 3018		rmd.disk_block_cnt <<= raid_map->phys_blk_shift;
 3019	}
 3020
 3021	if (unlikely(rmd.disk_block_cnt > 0xffff))
 3022		return PQI_RAID_BYPASS_INELIGIBLE;
 3023
 3024	pqi_set_aio_cdb(&rmd);
 3025
 3026	if (get_unaligned_le16(&raid_map->flags) & RAID_MAP_ENCRYPTION_ENABLED) {
 3027		if (rmd.data_length > device->max_transfer_encrypted)
 3028			return PQI_RAID_BYPASS_INELIGIBLE;
 3029		pqi_set_encryption_info(&encryption_info, raid_map, rmd.first_block);
 3030		encryption_info_ptr = &encryption_info;
 3031	} else {
 3032		encryption_info_ptr = NULL;
 3033	}
 3034
 3035	if (rmd.is_write) {
 3036		switch (device->raid_level) {
 3037		case SA_RAID_1:
 3038		case SA_RAID_TRIPLE:
 3039			return pqi_aio_submit_r1_write_io(ctrl_info, scmd, queue_group,
 3040				encryption_info_ptr, device, &rmd);
 3041		case SA_RAID_5:
 3042		case SA_RAID_6:
 3043			return pqi_aio_submit_r56_write_io(ctrl_info, scmd, queue_group,
 3044				encryption_info_ptr, device, &rmd);
 3045		}
 3046	}
 3047
 3048	return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle,
 3049		rmd.cdb, rmd.cdb_length, queue_group,
 3050		encryption_info_ptr, true, false);
 3051}
 3052
 3053#define PQI_STATUS_IDLE		0x0
 3054
 3055#define PQI_CREATE_ADMIN_QUEUE_PAIR	1
 3056#define PQI_DELETE_ADMIN_QUEUE_PAIR	2
 3057
 3058#define PQI_DEVICE_STATE_POWER_ON_AND_RESET		0x0
 3059#define PQI_DEVICE_STATE_STATUS_AVAILABLE		0x1
 3060#define PQI_DEVICE_STATE_ALL_REGISTERS_READY		0x2
 3061#define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY		0x3
 3062#define PQI_DEVICE_STATE_ERROR				0x4
 3063
 3064#define PQI_MODE_READY_TIMEOUT_SECS		30
 3065#define PQI_MODE_READY_POLL_INTERVAL_MSECS	1
 3066
 3067static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
 3068{
 3069	struct pqi_device_registers __iomem *pqi_registers;
 3070	unsigned long timeout;
 3071	u64 signature;
 3072	u8 status;
 3073
 3074	pqi_registers = ctrl_info->pqi_registers;
 3075	timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
 3076
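	/*
	 * PQI mode is ready once the PQI signature is present, the function
	 * and status code reports IDLE, and the device status reaches
	 * "all registers ready"; poll for each in turn under one timeout.
	 */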
 3077	while (1) {
 3078		signature = readq(&pqi_registers->signature);
 3079		if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
 3080			sizeof(signature)) == 0)
 3081			break;
 3082		if (time_after(jiffies, timeout)) {
 3083			dev_err(&ctrl_info->pci_dev->dev,
 3084				"timed out waiting for PQI signature\n");
 3085			return -ETIMEDOUT;
 3086		}
 3087		msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
 3088	}
 3089
 3090	while (1) {
 3091		status = readb(&pqi_registers->function_and_status_code);
 3092		if (status == PQI_STATUS_IDLE)
 3093			break;
 3094		if (time_after(jiffies, timeout)) {
 3095			dev_err(&ctrl_info->pci_dev->dev,
 3096				"timed out waiting for PQI IDLE\n");
 3097			return -ETIMEDOUT;
 3098		}
 3099		msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
 3100	}
 3101
 3102	while (1) {
 3103		if (readl(&pqi_registers->device_status) ==
 3104			PQI_DEVICE_STATE_ALL_REGISTERS_READY)
 3105			break;
 3106		if (time_after(jiffies, timeout)) {
 3107			dev_err(&ctrl_info->pci_dev->dev,
 3108				"timed out waiting for PQI all registers ready\n");
 3109			return -ETIMEDOUT;
 3110		}
 3111		msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
 3112	}
 3113
 3114	return 0;
 3115}
 3116
 3117static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
 3118{
 3119	struct pqi_scsi_dev *device;
 3120
 3121	device = io_request->scmd->device->hostdata;
 3122	device->raid_bypass_enabled = false;
 3123	device->aio_enabled = false;
 3124}
 3125
 3126static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
 3127{
 3128	struct pqi_ctrl_info *ctrl_info;
 3129	struct pqi_scsi_dev *device;
 3130
 3131	device = sdev->hostdata;
 3132	if (device->device_offline)
 3133		return;
 3134
 3135	device->device_offline = true;
 3136	ctrl_info = shost_to_hba(sdev->host);
 3137	pqi_schedule_rescan_worker(ctrl_info);
 3138	dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n",
 3139		path, ctrl_info->scsi_host->host_no, device->bus,
 3140		device->target, device->lun);
 3141}
 3142
 3143static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
 3144{
 3145	u8 scsi_status;
 3146	u8 host_byte;
 3147	struct scsi_cmnd *scmd;
 3148	struct pqi_raid_error_info *error_info;
 3149	size_t sense_data_length;
 3150	int residual_count;
 3151	int xfer_count;
 3152	struct scsi_sense_hdr sshdr;
 3153
 3154	scmd = io_request->scmd;
 3155	if (!scmd)
 3156		return;
 3157
 3158	error_info = io_request->error_info;
 3159	scsi_status = error_info->status;
 3160	host_byte = DID_OK;
 3161
 3162	switch (error_info->data_out_result) {
 3163	case PQI_DATA_IN_OUT_GOOD:
 3164		break;
 3165	case PQI_DATA_IN_OUT_UNDERFLOW:
 3166		xfer_count =
 3167			get_unaligned_le32(&error_info->data_out_transferred);
 3168		residual_count = scsi_bufflen(scmd) - xfer_count;
 3169		scsi_set_resid(scmd, residual_count);
 3170		if (xfer_count < scmd->underflow)
 3171			host_byte = DID_SOFT_ERROR;
 3172		break;
 3173	case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
 3174	case PQI_DATA_IN_OUT_ABORTED:
 3175		host_byte = DID_ABORT;
 3176		break;
 3177	case PQI_DATA_IN_OUT_TIMEOUT:
 3178		host_byte = DID_TIME_OUT;
 3179		break;
 3180	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
 3181	case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
 3182	case PQI_DATA_IN_OUT_BUFFER_ERROR:
 3183	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
 3184	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
 3185	case PQI_DATA_IN_OUT_ERROR:
 3186	case PQI_DATA_IN_OUT_HARDWARE_ERROR:
 3187	case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
 3188	case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
 3189	case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
 3190	case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
 3191	case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
 3192	case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
 3193	case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
 3194	case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
 3195	case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
 3196	default:
 3197		host_byte = DID_ERROR;
 3198		break;
 3199	}
 3200
 3201	sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
 3202	if (sense_data_length == 0)
 3203		sense_data_length =
 3204			get_unaligned_le16(&error_info->response_data_length);
 3205	if (sense_data_length) {
 3206		if (sense_data_length > sizeof(error_info->data))
 3207			sense_data_length = sizeof(error_info->data);
 3208
 3209		if (scsi_status == SAM_STAT_CHECK_CONDITION &&
 3210			scsi_normalize_sense(error_info->data,
 3211				sense_data_length, &sshdr) &&
 3212				sshdr.sense_key == HARDWARE_ERROR &&
 3213				sshdr.asc == 0x3e) {
 3214			struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host);
 3215			struct pqi_scsi_dev *device = scmd->device->hostdata;
 3216
 3217			switch (sshdr.ascq) {
 3218			case 0x1: /* LOGICAL UNIT FAILURE */
 3219				if (printk_ratelimit())
 3220					scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n",
 3221						ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
 3222				pqi_take_device_offline(scmd->device, "RAID");
 3223				host_byte = DID_NO_CONNECT;
 3224				break;
 3225
 3226			default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */
 3227				if (printk_ratelimit())
 3228					scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n",
 3229						sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
 3230				break;
 3231			}
 3232		}
 3233
 3234		if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
 3235			sense_data_length = SCSI_SENSE_BUFFERSIZE;
 3236		memcpy(scmd->sense_buffer, error_info->data,
 3237			sense_data_length);
 3238	}
 3239
 3240	scmd->result = scsi_status;
 3241	set_host_byte(scmd, host_byte);
 3242}
 3243
 3244static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
 3245{
 3246	u8 scsi_status;
 3247	u8 host_byte;
 3248	struct scsi_cmnd *scmd;
 3249	struct pqi_aio_error_info *error_info;
 3250	size_t sense_data_length;
 3251	int residual_count;
 3252	int xfer_count;
 3253	bool device_offline;
 3254	struct pqi_scsi_dev *device;
 3255
 3256	scmd = io_request->scmd;
 3257	error_info = io_request->error_info;
 3258	host_byte = DID_OK;
 3259	sense_data_length = 0;
 3260	device_offline = false;
 3261	device = scmd->device->hostdata;
 3262
 3263	switch (error_info->service_response) {
 3264	case PQI_AIO_SERV_RESPONSE_COMPLETE:
 3265		scsi_status = error_info->status;
 3266		break;
 3267	case PQI_AIO_SERV_RESPONSE_FAILURE:
 3268		switch (error_info->status) {
 3269		case PQI_AIO_STATUS_IO_ABORTED:
 3270			scsi_status = SAM_STAT_TASK_ABORTED;
 3271			break;
 3272		case PQI_AIO_STATUS_UNDERRUN:
 3273			scsi_status = SAM_STAT_GOOD;
 3274			residual_count = get_unaligned_le32(
 3275						&error_info->residual_count);
 3276			scsi_set_resid(scmd, residual_count);
 3277			xfer_count = scsi_bufflen(scmd) - residual_count;
 3278			if (xfer_count < scmd->underflow)
 3279				host_byte = DID_SOFT_ERROR;
 3280			break;
 3281		case PQI_AIO_STATUS_OVERRUN:
 3282			scsi_status = SAM_STAT_GOOD;
 3283			break;
 3284		case PQI_AIO_STATUS_AIO_PATH_DISABLED:
 3285			pqi_aio_path_disabled(io_request);
 3286			if (pqi_is_multipath_device(device)) {
 3287				pqi_device_remove_start(device);
 3288				host_byte = DID_NO_CONNECT;
 3289				scsi_status = SAM_STAT_CHECK_CONDITION;
 3290			} else {
 3291				scsi_status = SAM_STAT_GOOD;
 3292				io_request->status = -EAGAIN;
 3293			}
 3294			break;
 3295		case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
 3296		case PQI_AIO_STATUS_INVALID_DEVICE:
 3297			if (!io_request->raid_bypass) {
 3298				device_offline = true;
 3299				pqi_take_device_offline(scmd->device, "AIO");
 3300				host_byte = DID_NO_CONNECT;
 3301			}
 3302			scsi_status = SAM_STAT_CHECK_CONDITION;
 3303			break;
 3304		case PQI_AIO_STATUS_IO_ERROR:
 3305		default:
 3306			scsi_status = SAM_STAT_CHECK_CONDITION;
 3307			break;
 3308		}
 3309		break;
 3310	case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
 3311	case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
 3312		scsi_status = SAM_STAT_GOOD;
 3313		break;
 3314	case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
 3315	case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
 3316	default:
 3317		scsi_status = SAM_STAT_CHECK_CONDITION;
 3318		break;
 3319	}
 3320
 3321	if (error_info->data_present) {
 3322		sense_data_length =
 3323			get_unaligned_le16(&error_info->data_length);
 3324		if (sense_data_length) {
 3325			if (sense_data_length > sizeof(error_info->data))
 3326				sense_data_length = sizeof(error_info->data);
 3327			if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
 3328				sense_data_length = SCSI_SENSE_BUFFERSIZE;
 3329			memcpy(scmd->sense_buffer, error_info->data,
 3330				sense_data_length);
 3331		}
 3332	}
 3333
 3334	if (device_offline && sense_data_length == 0)
 3335		scsi_build_sense(scmd, 0, HARDWARE_ERROR, 0x3e, 0x1);
 3336
 3337	scmd->result = scsi_status;
 3338	set_host_byte(scmd, host_byte);
 3339}
 3340
 3341static void pqi_process_io_error(unsigned int iu_type,
 3342	struct pqi_io_request *io_request)
 3343{
 3344	switch (iu_type) {
 3345	case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
 3346		pqi_process_raid_io_error(io_request);
 3347		break;
 3348	case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
 3349		pqi_process_aio_io_error(io_request);
 3350		break;
 3351	}
 3352}
 3353
 3354static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_info,
 3355	struct pqi_task_management_response *response)
 3356{
 3357	int rc;
 3358
 3359	switch (response->response_code) {
 3360	case SOP_TMF_COMPLETE:
 3361	case SOP_TMF_FUNCTION_SUCCEEDED:
 3362		rc = 0;
 3363		break;
 3364	case SOP_TMF_REJECTED:
 3365		rc = -EAGAIN;
 3366		break;
 3367	case SOP_RC_INCORRECT_LOGICAL_UNIT:
 3368		rc = -ENODEV;
 3369		break;
 3370	default:
 3371		rc = -EIO;
 3372		break;
 3373	}
 3374
 3375	if (rc)
 3376		dev_err(&ctrl_info->pci_dev->dev,
 3377			"Task Management Function error: %d (response code: %u)\n", rc, response->response_code);
 3378
 3379	return rc;
 3380}
 3381
 3382static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info,
 3383	enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
 3384{
 3385	pqi_take_ctrl_offline(ctrl_info, ctrl_shutdown_reason);
 3386}
 3387
 3388static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group)
 3389{
 3390	int num_responses;
 3391	pqi_index_t oq_pi;
 3392	pqi_index_t oq_ci;
 3393	struct pqi_io_request *io_request;
 3394	struct pqi_io_response *response;
 3395	u16 request_id;
 3396
 3397	num_responses = 0;
 3398	oq_ci = queue_group->oq_ci_copy;
 3399
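	/*
	 * Walk the outbound queue from the cached consumer index up to the
	 * producer index written by the controller, validating each response
	 * before dispatching its completion callback.
	 */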
 3400	while (1) {
 3401		oq_pi = readl(queue_group->oq_pi);
 3402		if (oq_pi >= ctrl_info->num_elements_per_oq) {
 3403			pqi_invalid_response(ctrl_info, PQI_IO_PI_OUT_OF_RANGE);
 3404			dev_err(&ctrl_info->pci_dev->dev,
 3405				"I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
 3406				oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci);
 3407			return -1;
 3408		}
 3409		if (oq_pi == oq_ci)
 3410			break;
 3411
 3412		num_responses++;
 3413		response = queue_group->oq_element_array +
 3414			(oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
 3415
 3416		request_id = get_unaligned_le16(&response->request_id);
 3417		if (request_id >= ctrl_info->max_io_slots) {
 3418			pqi_invalid_response(ctrl_info, PQI_INVALID_REQ_ID);
 3419			dev_err(&ctrl_info->pci_dev->dev,
 3420				"request ID in response (%u) out of range (0-%u): producer index: %u  consumer index: %u\n",
 3421				request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci);
 3422			return -1;
 3423		}
 3424
 3425		io_request = &ctrl_info->io_request_pool[request_id];
 3426		if (atomic_read(&io_request->refcount) == 0) {
 3427			pqi_invalid_response(ctrl_info, PQI_UNMATCHED_REQ_ID);
 3428			dev_err(&ctrl_info->pci_dev->dev,
 3429				"request ID in response (%u) does not match an outstanding I/O request: producer index: %u  consumer index: %u\n",
 3430				request_id, oq_pi, oq_ci);
 3431			return -1;
 3432		}
 3433
 3434		switch (response->header.iu_type) {
 3435		case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
 3436		case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
 3437			if (io_request->scmd)
 3438				io_request->scmd->result = 0;
 3439			fallthrough;
 3440		case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
 3441			break;
 3442		case PQI_RESPONSE_IU_VENDOR_GENERAL:
 3443			io_request->status =
 3444				get_unaligned_le16(
 3445				&((struct pqi_vendor_general_response *)response)->status);
 3446			break;
 3447		case PQI_RESPONSE_IU_TASK_MANAGEMENT:
 3448			io_request->status = pqi_interpret_task_management_response(ctrl_info,
 3449				(void *)response);
 3450			break;
 3451		case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
 3452			pqi_aio_path_disabled(io_request);
 3453			io_request->status = -EAGAIN;
 3454			break;
 3455		case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
 3456		case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
 3457			io_request->error_info = ctrl_info->error_buffer +
 3458				(get_unaligned_le16(&response->error_index) *
 3459				PQI_ERROR_BUFFER_ELEMENT_LENGTH);
 3460			pqi_process_io_error(response->header.iu_type, io_request);
 3461			break;
 3462		default:
 3463			pqi_invalid_response(ctrl_info, PQI_UNEXPECTED_IU_TYPE);
 3464			dev_err(&ctrl_info->pci_dev->dev,
 3465				"unexpected IU type: 0x%x: producer index: %u  consumer index: %u\n",
 3466				response->header.iu_type, oq_pi, oq_ci);
 3467			return -1;
 3468		}
 3469
 3470		io_request->io_complete_callback(io_request, io_request->context);
 3471
 3472		/*
 3473		 * Note that the I/O request structure CANNOT BE TOUCHED after
 3474		 * returning from the I/O completion callback!
 3475		 */
 3476		oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
 3477	}
 3478
 3479	if (num_responses) {
 3480		queue_group->oq_ci_copy = oq_ci;
 3481		writel(oq_ci, queue_group->oq_ci);
 3482	}
 3483
 3484	return num_responses;
 3485}
 3486
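/*
 * Free elements in a circular queue.  One element is always left
 * unused so that pi == ci unambiguously means "queue empty".  For
 * example, with 16 elements, pi = 3 and ci = 10, 9 elements are in use
 * (16 - 10 + 3), leaving 16 - 9 - 1 = 6 free.
 */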
 3487static inline unsigned int pqi_num_elements_free(unsigned int pi,
 3488	unsigned int ci, unsigned int elements_in_queue)
 3489{
 3490	unsigned int num_elements_used;
 3491
 3492	if (pi >= ci)
 3493		num_elements_used = pi - ci;
 3494	else
 3495		num_elements_used = elements_in_queue - ci + pi;
 3496
 3497	return elements_in_queue - num_elements_used - 1;
 3498}
 3499
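/*
 * Post an event acknowledgement IU on the RAID path of the default
 * queue group, retrying (with the submit lock dropped) until an
 * element is free or the controller goes offline.
 */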
 3500static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
 3501	struct pqi_event_acknowledge_request *iu, size_t iu_length)
 3502{
 3503	pqi_index_t iq_pi;
 3504	pqi_index_t iq_ci;
 3505	unsigned long flags;
 3506	void *next_element;
 3507	struct pqi_queue_group *queue_group;
 3508
 3509	queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
 3510	put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
 3511
 3512	while (1) {
 3513		spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
 3514
 3515		iq_pi = queue_group->iq_pi_copy[RAID_PATH];
 3516		iq_ci = readl(queue_group->iq_ci[RAID_PATH]);
 3517
 3518		if (pqi_num_elements_free(iq_pi, iq_ci,
 3519			ctrl_info->num_elements_per_iq))
 3520			break;
 3521
 3522		spin_unlock_irqrestore(
 3523			&queue_group->submit_lock[RAID_PATH], flags);
 3524
 3525		if (pqi_ctrl_offline(ctrl_info))
 3526			return;
 3527	}
 3528
 3529	next_element = queue_group->iq_element_array[RAID_PATH] +
 3530		(iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
 3531
 3532	memcpy(next_element, iu, iu_length);
 3533
 3534	iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
 3535	queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
 3536
 3537	/*
 3538	 * This write notifies the controller that an IU is available to be
 3539	 * processed.
 3540	 */
 3541	writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
 3542
 3543	spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
 3544}
 3545
 3546static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
 3547	struct pqi_event *event)
 3548{
 3549	struct pqi_event_acknowledge_request request;
 3550
 3551	memset(&request, 0, sizeof(request));
 3552
 3553	request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
 3554	put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
 3555		&request.header.iu_length);
 3556	request.event_type = event->event_type;
 3557	put_unaligned_le16(event->event_id, &request.event_id);
 3558	put_unaligned_le32(event->additional_event_id, &request.additional_event_id);
 3559
 3560	pqi_send_event_ack(ctrl_info, &request, sizeof(request));
 3561}
 3562
 3563#define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS		30
 3564#define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS	1
 3565
 3566static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
 3567	struct pqi_ctrl_info *ctrl_info)
 3568{
 3569	u8 status;
 3570	unsigned long timeout;
 3571
 3572	timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * HZ) + jiffies;
 3573
 3574	while (1) {
 3575		status = pqi_read_soft_reset_status(ctrl_info);
 3576		if (status & PQI_SOFT_RESET_INITIATE)
 3577			return RESET_INITIATE_DRIVER;
 3578
 3579		if (status & PQI_SOFT_RESET_ABORT)
 3580			return RESET_ABORT;
 3581
 3582		if (!sis_is_firmware_running(ctrl_info))
 3583			return RESET_NORESPONSE;
 3584
 3585		if (time_after(jiffies, timeout)) {
 3586			dev_warn(&ctrl_info->pci_dev->dev,
 3587				"timed out waiting for soft reset status\n");
 3588			return RESET_TIMEDOUT;
 3589		}
 3590
 3591		ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS);
 3592	}
 3593}
 3594
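/*
 * Finish an Online Firmware Activation: depending on the soft reset
 * status, reset the controller and bring it back up in PQI mode, tear
 * down the OFA host buffer if the activation was aborted, or take the
 * controller offline if it stopped responding.
 */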
 3595static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
 3596{
 3597	int rc;
 3598	unsigned int delay_secs;
 3599	enum pqi_soft_reset_status reset_status;
 3600
 3601	if (ctrl_info->soft_reset_handshake_supported)
 3602		reset_status = pqi_poll_for_soft_reset_status(ctrl_info);
 3603	else
 3604		reset_status = RESET_INITIATE_FIRMWARE;
 3605
 3606	delay_secs = PQI_POST_RESET_DELAY_SECS;
 3607
 3608	switch (reset_status) {
 3609	case RESET_TIMEDOUT:
 3610		delay_secs = PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS;
 3611		fallthrough;
 3612	case RESET_INITIATE_DRIVER:
 3613		dev_info(&ctrl_info->pci_dev->dev,
 3614				"Online Firmware Activation: resetting controller\n");
 3615		sis_soft_reset(ctrl_info);
 3616		fallthrough;
 3617	case RESET_INITIATE_FIRMWARE:
 3618		ctrl_info->pqi_mode_enabled = false;
 3619		pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
 3620		rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs);
 3621		pqi_ofa_free_host_buffer(ctrl_info);
 3622		pqi_ctrl_ofa_done(ctrl_info);
 3623		dev_info(&ctrl_info->pci_dev->dev,
 3624				"Online Firmware Activation: %s\n",
 3625				rc == 0 ? "SUCCESS" : "FAILED");
 3626		break;
 3627	case RESET_ABORT:
 3628		dev_info(&ctrl_info->pci_dev->dev,
 3629				"Online Firmware Activation ABORTED\n");
 3630		if (ctrl_info->soft_reset_handshake_supported)
 3631			pqi_clear_soft_reset_status(ctrl_info);
 3632		pqi_ofa_free_host_buffer(ctrl_info);
 3633		pqi_ctrl_ofa_done(ctrl_info);
 3634		pqi_ofa_ctrl_unquiesce(ctrl_info);
 3635		break;
 3636	case RESET_NORESPONSE:
 3637		fallthrough;
 3638	default:
 3639		dev_err(&ctrl_info->pci_dev->dev,
 3640			"unexpected Online Firmware Activation reset status: 0x%x\n",
 3641			reset_status);
 3642		pqi_ofa_free_host_buffer(ctrl_info);
 3643		pqi_ctrl_ofa_done(ctrl_info);
 3644		pqi_ofa_ctrl_unquiesce(ctrl_info);
 3645		pqi_take_ctrl_offline(ctrl_info, PQI_OFA_RESPONSE_TIMEOUT);
 3646		break;
 3647	}
 3648}
 3649
 3650static void pqi_ofa_memory_alloc_worker(struct work_struct *work)
 3651{
 3652	struct pqi_ctrl_info *ctrl_info;
 3653
 3654	ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work);
 3655
 3656	pqi_ctrl_ofa_start(ctrl_info);
 3657	pqi_ofa_setup_host_buffer(ctrl_info);
 3658	pqi_ofa_host_memory_update(ctrl_info);
 3659}
 3660
 3661static void pqi_ofa_quiesce_worker(struct work_struct *work)
 3662{
 3663	struct pqi_ctrl_info *ctrl_info;
 3664	struct pqi_event *event;
 3665
 3666	ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_quiesce_work);
 3667
 3668	event = &ctrl_info->events[pqi_event_type_to_event_index(PQI_EVENT_TYPE_OFA)];
 3669
 3670	pqi_ofa_ctrl_quiesce(ctrl_info);
 3671	pqi_acknowledge_event(ctrl_info, event);
 3672	pqi_process_soft_reset(ctrl_info);
 3673}
 3674
 3675static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
 3676	struct pqi_event *event)
 3677{
 3678	bool ack_event;
 3679
 3680	ack_event = true;
 3681
 3682	switch (event->event_id) {
 3683	case PQI_EVENT_OFA_MEMORY_ALLOCATION:
 3684		dev_info(&ctrl_info->pci_dev->dev,
 3685			"received Online Firmware Activation memory allocation request\n");
 3686		schedule_work(&ctrl_info->ofa_memory_alloc_work);
 3687		break;
 3688	case PQI_EVENT_OFA_QUIESCE:
 3689		dev_info(&ctrl_info->pci_dev->dev,
 3690			"received Online Firmware Activation quiesce request\n");
 3691		schedule_work(&ctrl_info->ofa_quiesce_work);
 3692		ack_event = false;
 3693		break;
 3694	case PQI_EVENT_OFA_CANCELED:
 3695		dev_info(&ctrl_info->pci_dev->dev,
 3696			"received Online Firmware Activation cancel request: reason: %u\n",
 3697			ctrl_info->ofa_cancel_reason);
 3698		pqi_ofa_free_host_buffer(ctrl_info);
 3699		pqi_ctrl_ofa_done(ctrl_info);
 3700		break;
 3701	default:
 3702		dev_err(&ctrl_info->pci_dev->dev,
 3703			"received unknown Online Firmware Activation request: event ID: %u\n",
 3704			event->event_id);
 3705		break;
 3706	}
 3707
 3708	return ack_event;
 3709}
 3710
 3711static void pqi_disable_raid_bypass(struct pqi_ctrl_info *ctrl_info)
 3712{
 3713	unsigned long flags;
 3714	struct pqi_scsi_dev *device;
 3715
 3716	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
 3717
 3718	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
 3719		if (device->raid_bypass_enabled)
 3720			device->raid_bypass_enabled = false;
 3721
 3722	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
 3723}
 3724
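/*
 * Deferred handler for controller events.  OFA events are handed off
 * to their own workers; any other pending event triggers a delayed
 * rescan, with logical device events flagging a logical volume rescan
 * and AIO state changes disabling RAID bypass.  Every event except an
 * OFA quiesce (acknowledged later by the quiesce worker) is
 * acknowledged here.
 */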
 3725static void pqi_event_worker(struct work_struct *work)
 3726{
 3727	unsigned int i;
 3728	bool rescan_needed;
 3729	struct pqi_ctrl_info *ctrl_info;
 3730	struct pqi_event *event;
 3731	bool ack_event;
 3732
 3733	ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
 3734
 3735	pqi_ctrl_busy(ctrl_info);
 3736	pqi_wait_if_ctrl_blocked(ctrl_info);
 3737	if (pqi_ctrl_offline(ctrl_info))
 3738		goto out;
 3739
 3740	rescan_needed = false;
 3741	event = ctrl_info->events;
 3742	for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
 3743		if (event->pending) {
 3744			event->pending = false;
 3745			if (event->event_type == PQI_EVENT_TYPE_OFA) {
 3746				ack_event = pqi_ofa_process_event(ctrl_info, event);
 3747			} else {
 3748				ack_event = true;
 3749				rescan_needed = true;
 3750				if (event->event_type == PQI_EVENT_TYPE_LOGICAL_DEVICE)
 3751					ctrl_info->logical_volume_rescan_needed = true;
 3752				else if (event->event_type == PQI_EVENT_TYPE_AIO_STATE_CHANGE)
 3753					pqi_disable_raid_bypass(ctrl_info);
 3754			}
 3755			if (ack_event)
 3756				pqi_acknowledge_event(ctrl_info, event);
 3757		}
 3758		event++;
 3759	}
 3760
 3761#define PQI_RESCAN_WORK_FOR_EVENT_DELAY		(5 * HZ)
 3762
 3763	if (rescan_needed)
 3764		pqi_schedule_rescan_worker_with_delay(ctrl_info,
 3765			PQI_RESCAN_WORK_FOR_EVENT_DELAY);
 3766
 3767out:
 3768	pqi_ctrl_unbusy(ctrl_info);
 3769}
 3770
 3771#define PQI_HEARTBEAT_TIMER_INTERVAL	(10 * HZ)
 3772
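/*
 * Runs every PQI_HEARTBEAT_TIMER_INTERVAL.  The controller is taken
 * offline only when both the driver's interrupt count and the firmware
 * heartbeat counter have stopped advancing since the previous run.
 */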
 3773static void pqi_heartbeat_timer_handler(struct timer_list *t)
 3774{
 3775	int num_interrupts;
 3776	u32 heartbeat_count;
 3777	struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, heartbeat_timer);
 3778
 3779	pqi_check_ctrl_health(ctrl_info);
 3780	if (pqi_ctrl_offline(ctrl_info))
 3781		return;
 3782
 3783	num_interrupts = atomic_read(&ctrl_info->num_interrupts);
 3784	heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);
 3785
 3786	if (num_interrupts == ctrl_info->previous_num_interrupts) {
 3787		if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
 3788			dev_err(&ctrl_info->pci_dev->dev,
 3789				"no heartbeat detected - last heartbeat count: %u\n",
 3790				heartbeat_count);
 3791			pqi_take_ctrl_offline(ctrl_info, PQI_NO_HEARTBEAT);
 3792			return;
 3793		}
 3794	} else {
 3795		ctrl_info->previous_num_interrupts = num_interrupts;
 3796	}
 3797
 3798	ctrl_info->previous_heartbeat_count = heartbeat_count;
 3799	mod_timer(&ctrl_info->heartbeat_timer,
 3800		jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
 3801}
 3802
 3803static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
 3804{
 3805	if (!ctrl_info->heartbeat_counter)
 3806		return;
 3807
 3808	ctrl_info->previous_num_interrupts =
 3809		atomic_read(&ctrl_info->num_interrupts);
 3810	ctrl_info->previous_heartbeat_count =
 3811		pqi_read_heartbeat_counter(ctrl_info);
 3812
 3813	ctrl_info->heartbeat_timer.expires =
 3814		jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
 3815	add_timer(&ctrl_info->heartbeat_timer);
 3816}
 3817
 3818static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
 3819{
 3820	del_timer_sync(&ctrl_info->heartbeat_timer);
 3821}
 3822
 3823static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info,
 3824	struct pqi_event *event, struct pqi_event_response *response)
 3825{
 3826	switch (event->event_id) {
 3827	case PQI_EVENT_OFA_MEMORY_ALLOCATION:
 3828		ctrl_info->ofa_bytes_requested =
 3829			get_unaligned_le32(&response->data.ofa_memory_allocation.bytes_requested);
 3830		break;
 3831	case PQI_EVENT_OFA_CANCELED:
 3832		ctrl_info->ofa_cancel_reason =
 3833			get_unaligned_le16(&response->data.ofa_cancelled.reason);
 3834		break;
 3835	}
 3836}
 3837
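/*
 * Drain the event queue: latch each supported event that requests an
 * acknowledgement into ctrl_info->events[], capture any OFA payload,
 * and schedule the event worker if anything was consumed.
 */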
 3838static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
 3839{
 3840	int num_events;
 3841	pqi_index_t oq_pi;
 3842	pqi_index_t oq_ci;
 3843	struct pqi_event_queue *event_queue;
 3844	struct pqi_event_response *response;
 3845	struct pqi_event *event;
 3846	int event_index;
 3847
 3848	event_queue = &ctrl_info->event_queue;
 3849	num_events = 0;
 3850	oq_ci = event_queue->oq_ci_copy;
 3851
 3852	while (1) {
 3853		oq_pi = readl(event_queue->oq_pi);
 3854		if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) {
 3855			pqi_invalid_response(ctrl_info, PQI_EVENT_PI_OUT_OF_RANGE);
 3856			dev_err(&ctrl_info->pci_dev->dev,
 3857				"event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
 3858				oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci);
 3859			return -1;
 3860		}
 3861
 3862		if (oq_pi == oq_ci)
 3863			break;
 3864
 3865		num_events++;
 3866		response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
 3867
 3868		event_index = pqi_event_type_to_event_index(response->event_type);
 3869
 3870		if (event_index >= 0 && response->request_acknowledge) {
 3871			event = &ctrl_info->events[event_index];
 3872			event->pending = true;
 3873			event->event_type = response->event_type;
 3874			event->event_id = get_unaligned_le16(&response->event_id);
 3875			event->additional_event_id =
 3876				get_unaligned_le32(&response->additional_event_id);
 3877			if (event->event_type == PQI_EVENT_TYPE_OFA)
 3878				pqi_ofa_capture_event_payload(ctrl_info, event, response);
 3879		}
 3880
 3881		oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
 3882	}
 3883
 3884	if (num_events) {
 3885		event_queue->oq_ci_copy = oq_ci;
 3886		writel(oq_ci, event_queue->oq_ci);
 3887		schedule_work(&ctrl_info->event_work);
 3888	}
 3889
 3890	return num_events;
 3891}
 3892
 3893#define PQI_LEGACY_INTX_MASK	0x1
 3894
 3895static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, bool enable_intx)
 3896{
 3897	u32 intx_mask;
 3898	struct pqi_device_registers __iomem *pqi_registers;
 3899	volatile void __iomem *register_addr;
 3900
 3901	pqi_registers = ctrl_info->pqi_registers;
 3902
 3903	if (enable_intx)
 3904		register_addr = &pqi_registers->legacy_intx_mask_clear;
 3905	else
 3906		register_addr = &pqi_registers->legacy_intx_mask_set;
 3907
 3908	intx_mask = readl(register_addr);
 3909	intx_mask |= PQI_LEGACY_INTX_MASK;
 3910	writel(intx_mask, register_addr);
 3911}
 3912
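/*
 * Transition the controller between MSI-X, legacy INTx, and
 * no-interrupt operation, updating the legacy INTx mask registers and
 * the SIS interrupt mode as each transition requires.
 */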
 3913static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
 3914	enum pqi_irq_mode new_mode)
 3915{
 3916	switch (ctrl_info->irq_mode) {
 3917	case IRQ_MODE_MSIX:
 3918		switch (new_mode) {
 3919		case IRQ_MODE_MSIX:
 3920			break;
 3921		case IRQ_MODE_INTX:
 3922			pqi_configure_legacy_intx(ctrl_info, true);
 3923			sis_enable_intx(ctrl_info);
 3924			break;
 3925		case IRQ_MODE_NONE:
 3926			break;
 3927		}
 3928		break;
 3929	case IRQ_MODE_INTX:
 3930		switch (new_mode) {
 3931		case IRQ_MODE_MSIX:
 3932			pqi_configure_legacy_intx(ctrl_info, false);
 3933			sis_enable_msix(ctrl_info);
 3934			break;
 3935		case IRQ_MODE_INTX:
 3936			break;
 3937		case IRQ_MODE_NONE:
 3938			pqi_configure_legacy_intx(ctrl_info, false);
 3939			break;
 3940		}
 3941		break;
 3942	case IRQ_MODE_NONE:
 3943		switch (new_mode) {
 3944		case IRQ_MODE_MSIX:
 3945			sis_enable_msix(ctrl_info);
 3946			break;
 3947		case IRQ_MODE_INTX:
 3948			pqi_configure_legacy_intx(ctrl_info, true);
 3949			sis_enable_intx(ctrl_info);
 3950			break;
 3951		case IRQ_MODE_NONE:
 3952			break;
 3953		}
 3954		break;
 3955	}
 3956
 3957	ctrl_info->irq_mode = new_mode;
 3958}
 3959
 3960#define PQI_LEGACY_INTX_PENDING		0x1
 3961
 3962static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
 3963{
 3964	bool valid_irq;
 3965	u32 intx_status;
 3966
 3967	switch (ctrl_info->irq_mode) {
 3968	case IRQ_MODE_MSIX:
 3969		valid_irq = true;
 3970		break;
 3971	case IRQ_MODE_INTX:
 3972		intx_status = readl(&ctrl_info->pqi_registers->legacy_intx_status);
 3973		if (intx_status & PQI_LEGACY_INTX_PENDING)
 3974			valid_irq = true;
 3975		else
 3976			valid_irq = false;
 3977		break;
 3978	case IRQ_MODE_NONE:
 3979	default:
 3980		valid_irq = false;
 3981		break;
 3982	}
 3983
 3984	return valid_irq;
 3985}
 3986
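/*
 * Per-vector interrupt handler: process I/O responses for the owning
 * queue group, process events only on the vector that owns the event
 * queue, then restart I/O submission on both paths in case requests
 * were waiting for queue space.
 */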
 3987static irqreturn_t pqi_irq_handler(int irq, void *data)
 3988{
 3989	struct pqi_ctrl_info *ctrl_info;
 3990	struct pqi_queue_group *queue_group;
 3991	int num_io_responses_handled;
 3992	int num_events_handled;
 3993
 3994	queue_group = data;
 3995	ctrl_info = queue_group->ctrl_info;
 3996
 3997	if (!pqi_is_valid_irq(ctrl_info))
 3998		return IRQ_NONE;
 3999
 4000	num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
 4001	if (num_io_responses_handled < 0)
 4002		goto out;
 4003
 4004	if (irq == ctrl_info->event_irq) {
 4005		num_events_handled = pqi_process_event_intr(ctrl_info);
 4006		if (num_events_handled < 0)
 4007			goto out;
 4008	} else {
 4009		num_events_handled = 0;
 4010	}
 4011
 4012	if (num_io_responses_handled + num_events_handled > 0)
 4013		atomic_inc(&ctrl_info->num_interrupts);
 4014
 4015	pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
 4016	pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
 4017
 4018out:
 4019	return IRQ_HANDLED;
 4020}
 4021
 4022static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
 4023{
 4024	struct pci_dev *pci_dev = ctrl_info->pci_dev;
 4025	int i;
 4026	int rc;
 4027
 4028	ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);
 4029
 4030	for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
 4031		rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
 4032			DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
 4033		if (rc) {
 4034			dev_err(&pci_dev->dev,
 4035				"irq %u init failed with error %d\n",
 4036				pci_irq_vector(pci_dev, i), rc);
 4037			return rc;
 4038		}
 4039		ctrl_info->num_msix_vectors_initialized++;
 4040	}
 4041
 4042	return 0;
 4043}
 4044
 4045static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
 4046{
 4047	int i;
 4048
 4049	for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
 4050		free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
 4051			&ctrl_info->queue_groups[i]);
 4052
 4053	ctrl_info->num_msix_vectors_initialized = 0;
 4054}
 4055
 4056static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
 4057{
 4058	int num_vectors_enabled;
 4059	unsigned int flags = PCI_IRQ_MSIX;
 4060
 4061	if (!pqi_disable_managed_interrupts)
 4062		flags |= PCI_IRQ_AFFINITY;
 4063
 4064	num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
 4065			PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
 4066			flags);
 4067	if (num_vectors_enabled < 0) {
 4068		dev_err(&ctrl_info->pci_dev->dev,
 4069			"MSI-X init failed with error %d\n",
 4070			num_vectors_enabled);
 4071		return num_vectors_enabled;
 4072	}
 4073
 4074	ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
 4075	ctrl_info->irq_mode = IRQ_MODE_MSIX;
 4076	return 0;
 4077}
 4078
 4079static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
 4080{
 4081	if (ctrl_info->num_msix_vectors_enabled) {
 4082		pci_free_irq_vectors(ctrl_info->pci_dev);
 4083		ctrl_info->num_msix_vectors_enabled = 0;
 4084	}
 4085}
 4086
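/*
 * Lay out all operational queue element arrays (two inbound queues and
 * one outbound queue per queue group, plus the event queue) and the
 * queue index words inside a single DMA-coherent allocation.  A first
 * pass walks a NULL-based pointer through the same alignment steps to
 * compute the required allocation length.
 */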
 4087static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
 4088{
 4089	unsigned int i;
 4090	size_t alloc_length;
 4091	size_t element_array_length_per_iq;
 4092	size_t element_array_length_per_oq;
 4093	void *element_array;
 4094	void __iomem *next_queue_index;
 4095	void *aligned_pointer;
 4096	unsigned int num_inbound_queues;
 4097	unsigned int num_outbound_queues;
 4098	unsigned int num_queue_indexes;
 4099	struct pqi_queue_group *queue_group;
 4100
 4101	element_array_length_per_iq =
 4102		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
 4103		ctrl_info->num_elements_per_iq;
 4104	element_array_length_per_oq =
 4105		PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
 4106		ctrl_info->num_elements_per_oq;
 4107	num_inbound_queues = ctrl_info->num_queue_groups * 2;
 4108	num_outbound_queues = ctrl_info->num_queue_groups;
 4109	num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
 4110
 4111	aligned_pointer = NULL;
 4112
 4113	for (i = 0; i < num_inbound_queues; i++) {
 4114		aligned_pointer = PTR_ALIGN(aligned_pointer,
 4115			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
 4116		aligned_pointer += element_array_length_per_iq;
 4117	}
 4118
 4119	for (i = 0; i < num_outbound_queues; i++) {
 4120		aligned_pointer = PTR_ALIGN(aligned_pointer,
 4121			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
 4122		aligned_pointer += element_array_length_per_oq;
 4123	}
 4124
 4125	aligned_pointer = PTR_ALIGN(aligned_pointer,
 4126		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
 4127	aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
 4128		PQI_EVENT_OQ_ELEMENT_LENGTH;
 4129
 4130	for (i = 0; i < num_queue_indexes; i++) {
 4131		aligned_pointer = PTR_ALIGN(aligned_pointer,
 4132			PQI_OPERATIONAL_INDEX_ALIGNMENT);
 4133		aligned_pointer += sizeof(pqi_index_t);
 4134	}
 4135
 4136	alloc_length = (size_t)aligned_pointer +
 4137		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
 4138
 4139	alloc_length += PQI_EXTRA_SGL_MEMORY;
 4140
 4141	ctrl_info->queue_memory_base =
 4142		dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
 4143				   &ctrl_info->queue_memory_base_dma_handle,
 4144				   GFP_KERNEL);
 4145
 4146	if (!ctrl_info->queue_memory_base)
 4147		return -ENOMEM;
 4148
 4149	ctrl_info->queue_memory_length = alloc_length;
 4150
 4151	element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
 4152		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
 4153
 4154	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
 4155		queue_group = &ctrl_info->queue_groups[i];
 4156		queue_group->iq_element_array[RAID_PATH] = element_array;
 4157		queue_group->iq_element_array_bus_addr[RAID_PATH] =
 4158			ctrl_info->queue_memory_base_dma_handle +
 4159				(element_array - ctrl_info->queue_memory_base);
 4160		element_array += element_array_length_per_iq;
 4161		element_array = PTR_ALIGN(element_array,
 4162			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
 4163		queue_group->iq_element_array[AIO_PATH] = element_array;
 4164		queue_group->iq_element_array_bus_addr[AIO_PATH] =
 4165			ctrl_info->queue_memory_base_dma_handle +
 4166			(element_array - ctrl_info->queue_memory_base);
 4167		element_array += element_array_length_per_iq;
 4168		element_array = PTR_ALIGN(element_array,
 4169			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
 4170	}
 4171
 4172	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
 4173		queue_group = &ctrl_info->queue_groups[i];
 4174		queue_group->oq_element_array = element_array;
 4175		queue_group->oq_element_array_bus_addr =
 4176			ctrl_info->queue_memory_base_dma_handle +
 4177			(element_array - ctrl_info->queue_memory_base);
 4178		element_array += element_array_length_per_oq;
 4179		element_array = PTR_ALIGN(element_array,
 4180			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
 4181	}
 4182
 4183	ctrl_info->event_queue.oq_element_array = element_array;
 4184	ctrl_info->event_queue.oq_element_array_bus_addr =
 4185		ctrl_info->queue_memory_base_dma_handle +
 4186		(element_array - ctrl_info->queue_memory_base);
 4187	element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
 4188		PQI_EVENT_OQ_ELEMENT_LENGTH;
 4189
 4190	next_queue_index = (void __iomem *)PTR_ALIGN(element_array,
 4191		PQI_OPERATIONAL_INDEX_ALIGNMENT);
 4192
 4193	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
 4194		queue_group = &ctrl_info->queue_groups[i];
 4195		queue_group->iq_ci[RAID_PATH] = next_queue_index;
 4196		queue_group->iq_ci_bus_addr[RAID_PATH] =
 4197			ctrl_info->queue_memory_base_dma_handle +
 4198			(next_queue_index -
 4199			(void __iomem *)ctrl_info->queue_memory_base);
 4200		next_queue_index += sizeof(pqi_index_t);
 4201		next_queue_index = PTR_ALIGN(next_queue_index,
 4202			PQI_OPERATIONAL_INDEX_ALIGNMENT);
 4203		queue_group->iq_ci[AIO_PATH] = next_queue_index;
 4204		queue_group->iq_ci_bus_addr[AIO_PATH] =
 4205			ctrl_info->queue_memory_base_dma_handle +
 4206			(next_queue_index -
 4207			(void __iomem *)ctrl_info->queue_memory_base);
 4208		next_queue_index += sizeof(pqi_index_t);
 4209		next_queue_index = PTR_ALIGN(next_queue_index,
 4210			PQI_OPERATIONAL_INDEX_ALIGNMENT);
 4211		queue_group->oq_pi = next_queue_index;
 4212		queue_group->oq_pi_bus_addr =
 4213			ctrl_info->queue_memory_base_dma_handle +
 4214			(next_queue_index -
 4215			(void __iomem *)ctrl_info->queue_memory_base);
 4216		next_queue_index += sizeof(pqi_index_t);
 4217		next_queue_index = PTR_ALIGN(next_queue_index,
 4218			PQI_OPERATIONAL_INDEX_ALIGNMENT);
 4219	}
 4220
 4221	ctrl_info->event_queue.oq_pi = next_queue_index;
 4222	ctrl_info->event_queue.oq_pi_bus_addr =
 4223		ctrl_info->queue_memory_base_dma_handle +
 4224		(next_queue_index -
 4225		(void __iomem *)ctrl_info->queue_memory_base);
 4226
 4227	return 0;
 4228}
 4229
 4230static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
 4231{
 4232	unsigned int i;
 4233	u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
 4234	u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
 4235
 4236	/*
 4237	 * Initialize the backpointers to the controller structure in
 4238	 * each operational queue group structure.
 4239	 */
 4240	for (i = 0; i < ctrl_info->num_queue_groups; i++)
 4241		ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
 4242
 4243	/*
 4244	 * Assign IDs to all operational queues.  Note that the IDs
 4245	 * assigned to operational IQs are independent of the IDs
 4246	 * assigned to operational OQs.
 4247	 */
 4248	ctrl_info->event_queue.oq_id = next_oq_id++;
 4249	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
 4250		ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
 4251		ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
 4252		ctrl_info->queue_groups[i].oq_id = next_oq_id++;
 4253	}
 4254
 4255	/*
 4256	 * Assign MSI-X table entry indexes to all queues.  Note that the
 4257	 * interrupt for the event queue is shared with the first queue group.
 4258	 */
 4259	ctrl_info->event_queue.int_msg_num = 0;
 4260	for (i = 0; i < ctrl_info->num_queue_groups; i++)
 4261		ctrl_info->queue_groups[i].int_msg_num = i;
 4262
 4263	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
 4264		spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
 4265		spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
 4266		INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
 4267		INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
 4268	}
 4269}
 4270
 4271static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
 4272{
 4273	size_t alloc_length;
 4274	struct pqi_admin_queues_aligned *admin_queues_aligned;
 4275	struct pqi_admin_queues *admin_queues;
 4276
 4277	alloc_length = sizeof(struct pqi_admin_queues_aligned) +
 4278		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
 4279
 4280	ctrl_info->admin_queue_memory_base =
 4281		dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
 4282				   &ctrl_info->admin_queue_memory_base_dma_handle,
 4283				   GFP_KERNEL);
 4284
 4285	if (!ctrl_info->admin_queue_memory_base)
 4286		return -ENOMEM;
 4287
 4288	ctrl_info->admin_queue_memory_length = alloc_length;
 4289
 4290	admin_queues = &ctrl_info->admin_queues;
 4291	admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
 4292		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
 4293	admin_queues->iq_element_array =
 4294		&admin_queues_aligned->iq_element_array;
 4295	admin_queues->oq_element_array =
 4296		&admin_queues_aligned->oq_element_array;
 4297	admin_queues->iq_ci =
 4298		(pqi_index_t __iomem *)&admin_queues_aligned->iq_ci;
 4299	admin_queues->oq_pi =
 4300		(pqi_index_t __iomem *)&admin_queues_aligned->oq_pi;
 4301
 4302	admin_queues->iq_element_array_bus_addr =
 4303		ctrl_info->admin_queue_memory_base_dma_handle +
 4304		(admin_queues->iq_element_array -
 4305		ctrl_info->admin_queue_memory_base);
 4306	admin_queues->oq_element_array_bus_addr =
 4307		ctrl_info->admin_queue_memory_base_dma_handle +
 4308		(admin_queues->oq_element_array -
 4309		ctrl_info->admin_queue_memory_base);
 4310	admin_queues->iq_ci_bus_addr =
 4311		ctrl_info->admin_queue_memory_base_dma_handle +
 4312		((void __iomem *)admin_queues->iq_ci -
 4313		(void __iomem *)ctrl_info->admin_queue_memory_base);
 4314	admin_queues->oq_pi_bus_addr =
 4315		ctrl_info->admin_queue_memory_base_dma_handle +
 4316		((void __iomem *)admin_queues->oq_pi -
 4317		(void __iomem *)ctrl_info->admin_queue_memory_base);
 4318
 4319	return 0;
 4320}
 4321
 4322#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES		HZ
 4323#define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS	1
 4324
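/*
 * Program the admin queue pair addresses into the PQI registers, issue
 * the "create admin queue pair" function, and poll until the function
 * and status code register returns to idle or the timeout expires.
 */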
 4325static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
 4326{
 4327	struct pqi_device_registers __iomem *pqi_registers;
 4328	struct pqi_admin_queues *admin_queues;
 4329	unsigned long timeout;
 4330	u8 status;
 4331	u32 reg;
 4332
 4333	pqi_registers = ctrl_info->pqi_registers;
 4334	admin_queues = &ctrl_info->admin_queues;
 4335
 4336	writeq((u64)admin_queues->iq_element_array_bus_addr,
 4337		&pqi_registers->admin_iq_element_array_addr);
 4338	writeq((u64)admin_queues->oq_element_array_bus_addr,
 4339		&pqi_registers->admin_oq_element_array_addr);
 4340	writeq((u64)admin_queues->iq_ci_bus_addr,
 4341		&pqi_registers->admin_iq_ci_addr);
 4342	writeq((u64)admin_queues->oq_pi_bus_addr,
 4343		&pqi_registers->admin_oq_pi_addr);
 4344
 4345	reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
 4346		(PQI_ADMIN_OQ_NUM_ELEMENTS << 8) |
 4347		(admin_queues->int_msg_num << 16);
 4348	writel(reg, &pqi_registers->admin_iq_num_elements);
 4349
 4350	writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
 4351		&pqi_registers->function_and_status_code);
 4352
 4353	timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
 4354	while (1) {
 4355		msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
 4356		status = readb(&pqi_registers->function_and_status_code);
 4357		if (status == PQI_STATUS_IDLE)
 4358			break;
 4359		if (time_after(jiffies, timeout))
 4360			return -ETIMEDOUT;
 4361	}
 4362
 4363	/*
 4364	 * The offset registers are not initialized to the correct
 4365	 * offsets until *after* the create admin queue pair command
 4366	 * completes successfully.
 4367	 */
 4368	admin_queues->iq_pi = ctrl_info->iomem_base +
 4369		PQI_DEVICE_REGISTERS_OFFSET +
 4370		readq(&pqi_registers->admin_iq_pi_offset);
 4371	admin_queues->oq_ci = ctrl_info->iomem_base +
 4372		PQI_DEVICE_REGISTERS_OFFSET +
 4373		readq(&pqi_registers->admin_oq_ci_offset);
 4374
 4375	return 0;
 4376}
 4377
 4378static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
 4379	struct pqi_general_admin_request *request)
 4380{
 4381	struct pqi_admin_queues *admin_queues;
 4382	void *next_element;
 4383	pqi_index_t iq_pi;
 4384
 4385	admin_queues = &ctrl_info->admin_queues;
 4386	iq_pi = admin_queues->iq_pi_copy;
 4387
 4388	next_element = admin_queues->iq_element_array +
 4389		(iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
 4390
 4391	memcpy(next_element, request, sizeof(*request));
 4392
 4393	iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
 4394	admin_queues->iq_pi_copy = iq_pi;
 4395
 4396	/*
 4397	 * This write notifies the controller that an IU is available to be
 4398	 * processed.
 4399	 */
 4400	writel(iq_pi, admin_queues->iq_pi);
 4401}
 4402
 4403#define PQI_ADMIN_REQUEST_TIMEOUT_SECS	60
 4404
 4405static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
 4406	struct pqi_general_admin_response *response)
 4407{
 4408	struct pqi_admin_queues *admin_queues;
 4409	pqi_index_t oq_pi;
 4410	pqi_index_t oq_ci;
 4411	unsigned long timeout;
 4412
 4413	admin_queues = &ctrl_info->admin_queues;
 4414	oq_ci = admin_queues->oq_ci_copy;
 4415
 4416	timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies;
 4417
 4418	while (1) {
 4419		oq_pi = readl(admin_queues->oq_pi);
 4420		if (oq_pi != oq_ci)
 4421			break;
 4422		if (time_after(jiffies, timeout)) {
 4423			dev_err(&ctrl_info->pci_dev->dev,
 4424				"timed out waiting for admin response\n");
 4425			return -ETIMEDOUT;
 4426		}
 4427		if (!sis_is_firmware_running(ctrl_info))
 4428			return -ENXIO;
 4429		usleep_range(1000, 2000);
 4430	}
 4431
 4432	memcpy(response, admin_queues->oq_element_array +
 4433		(oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
 4434
 4435	oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
 4436	admin_queues->oq_ci_copy = oq_ci;
 4437	writel(oq_ci, admin_queues->oq_ci);
 4438
 4439	return 0;
 4440}
 4441
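/*
 * Queue the I/O request (if one is supplied) on the chosen path, then
 * copy as many queued IUs as currently fit into the inbound queue,
 * wrapping spanned IUs around the end of the element array, and ring
 * the producer index doorbell once for the whole batch.
 */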
 4442static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
 4443	struct pqi_queue_group *queue_group, enum pqi_io_path path,
 4444	struct pqi_io_request *io_request)
 4445{
 4446	struct pqi_io_request *next;
 4447	void *next_element;
 4448	pqi_index_t iq_pi;
 4449	pqi_index_t iq_ci;
 4450	size_t iu_length;
 4451	unsigned long flags;
 4452	unsigned int num_elements_needed;
 4453	unsigned int num_elements_to_end_of_queue;
 4454	size_t copy_count;
 4455	struct pqi_iu_header *request;
 4456
 4457	spin_lock_irqsave(&queue_group->submit_lock[path], flags);
 4458
 4459	if (io_request) {
 4460		io_request->queue_group = queue_group;
 4461		list_add_tail(&io_request->request_list_entry,
 4462			&queue_group->request_list[path]);
 4463	}
 4464
 4465	iq_pi = queue_group->iq_pi_copy[path];
 4466
 4467	list_for_each_entry_safe(io_request, next,
 4468		&queue_group->request_list[path], request_list_entry) {
 4469
 4470		request = io_request->iu;
 4471
 4472		iu_length = get_unaligned_le16(&request->iu_length) +
 4473			PQI_REQUEST_HEADER_LENGTH;
 4474		num_elements_needed =
 4475			DIV_ROUND_UP(iu_length,
 4476				PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
 4477
 4478		iq_ci = readl(queue_group->iq_ci[path]);
 4479
 4480		if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
 4481			ctrl_info->num_elements_per_iq))
 4482			break;
 4483
 4484		put_unaligned_le16(queue_group->oq_id,
 4485			&request->response_queue_id);
 4486
 4487		next_element = queue_group->iq_element_array[path] +
 4488			(iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
 4489
 4490		num_elements_to_end_of_queue =
 4491			ctrl_info->num_elements_per_iq - iq_pi;
 4492
 4493		if (num_elements_needed <= num_elements_to_end_of_queue) {
 4494			memcpy(next_element, request, iu_length);
 4495		} else {
 4496			copy_count = num_elements_to_end_of_queue *
 4497				PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
 4498			memcpy(next_element, request, copy_count);
 4499			memcpy(queue_group->iq_element_array[path],
 4500				(u8 *)request + copy_count,
 4501				iu_length - copy_count);
 4502		}
 4503
 4504		iq_pi = (iq_pi + num_elements_needed) %
 4505			ctrl_info->num_elements_per_iq;
 4506
 4507		list_del(&io_request->request_list_entry);
 4508	}
 4509
 4510	if (iq_pi != queue_group->iq_pi_copy[path]) {
 4511		queue_group->iq_pi_copy[path] = iq_pi;
 4512		/*
 4513		 * This write notifies the controller that one or more IUs are
 4514		 * available to be processed.
 4515		 */
 4516		writel(iq_pi, queue_group->iq_pi[path]);
 4517	}
 4518
 4519	spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
 4520}
 4521
 4522#define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS		10
 4523
 4524static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
 4525	struct completion *wait)
 4526{
 4527	int rc;
 4528
 4529	while (1) {
 4530		if (wait_for_completion_io_timeout(wait,
 4531			PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) {
 4532			rc = 0;
 4533			break;
 4534		}
 4535
 4536		pqi_check_ctrl_health(ctrl_info);
 4537		if (pqi_ctrl_offline(ctrl_info)) {
 4538			rc = -ENXIO;
 4539			break;
 4540		}
 4541	}
 4542
 4543	return rc;
 4544}
 4545
 4546static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
 4547	void *context)
 4548{
 4549	struct completion *waiting = context;
 4550
 4551	complete(waiting);
 4552}
 4553
 4554static int pqi_process_raid_io_error_synchronous(
 4555	struct pqi_raid_error_info *error_info)
 4556{
 4557	int rc = -EIO;
 4558
 4559	switch (error_info->data_out_result) {
 4560	case PQI_DATA_IN_OUT_GOOD:
 4561		if (error_info->status == SAM_STAT_GOOD)
 4562			rc = 0;
 4563		break;
 4564	case PQI_DATA_IN_OUT_UNDERFLOW:
 4565		if (error_info->status == SAM_STAT_GOOD ||
 4566			error_info->status == SAM_STAT_CHECK_CONDITION)
 4567			rc = 0;
 4568		break;
 4569	case PQI_DATA_IN_OUT_ABORTED:
 4570		rc = PQI_CMD_STATUS_ABORTED;
 4571		break;
 4572	}
 4573
 4574	return rc;
 4575}
 4576
 4577static inline bool pqi_is_blockable_request(struct pqi_iu_header *request)
 4578{
 4579	return (request->driver_flags & PQI_DRIVER_NONBLOCKABLE_REQUEST) == 0;
 4580}
 4581
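/*
 * Synchronous RAID path submission used for driver-internal requests.
 * Submissions are serialized by sync_request_sem, optionally wait for
 * the controller to become unblocked, and then sleep on a completion
 * until the request finishes or the controller goes offline.
 */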
 4582static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
 4583	struct pqi_iu_header *request, unsigned int flags,
 4584	struct pqi_raid_error_info *error_info)
 4585{
 4586	int rc = 0;
 4587	struct pqi_io_request *io_request;
 4588	size_t iu_length;
 4589	DECLARE_COMPLETION_ONSTACK(wait);
 4590
 4591	if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
 4592		if (down_interruptible(&ctrl_info->sync_request_sem))
 4593			return -ERESTARTSYS;
 4594	} else {
 4595		down(&ctrl_info->sync_request_sem);
 4596	}
 4597
 4598	pqi_ctrl_busy(ctrl_info);
 4599	/*
 4600	 * Wait for other admin queue updates such as:
 4601	 * config table changes, OFA memory updates, ...
 4602	 */
 4603	if (pqi_is_blockable_request(request))
 4604		pqi_wait_if_ctrl_blocked(ctrl_info);
 4605
 4606	if (pqi_ctrl_offline(ctrl_info)) {
 4607		rc = -ENXIO;
 4608		goto out;
 4609	}
 4610
 4611	io_request = pqi_alloc_io_request(ctrl_info, NULL);
 4612
 4613	put_unaligned_le16(io_request->index,
 4614		&(((struct pqi_raid_path_request *)request)->request_id));
 4615
 4616	if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
 4617		((struct pqi_raid_path_request *)request)->error_index =
 4618			((struct pqi_raid_path_request *)request)->request_id;
 4619
 4620	iu_length = get_unaligned_le16(&request->iu_length) +
 4621		PQI_REQUEST_HEADER_LENGTH;
 4622	memcpy(io_request->iu, request, iu_length);
 4623
 4624	io_request->io_complete_callback = pqi_raid_synchronous_complete;
 4625	io_request->context = &wait;
 4626
 4627	pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
 4628		io_request);
 4629
 4630	pqi_wait_for_completion_io(ctrl_info, &wait);
 4631
 4632	if (error_info) {
 4633		if (io_request->error_info)
 4634			memcpy(error_info, io_request->error_info, sizeof(*error_info));
 4635		else
 4636			memset(error_info, 0, sizeof(*error_info));
 4637	} else if (rc == 0 && io_request->error_info) {
 4638		rc = pqi_process_raid_io_error_synchronous(io_request->error_info);
 4639	}
 4640
 4641	pqi_free_io_request(io_request);
 4642
 4643out:
 4644	pqi_ctrl_unbusy(ctrl_info);
 4645	up(&ctrl_info->sync_request_sem);
 4646
 4647	return rc;
 4648}
 4649
 4650static int pqi_validate_admin_response(
 4651	struct pqi_general_admin_response *response, u8 expected_function_code)
 4652{
 4653	if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
 4654		return -EINVAL;
 4655
 4656	if (get_unaligned_le16(&response->header.iu_length) !=
 4657		PQI_GENERAL_ADMIN_IU_LENGTH)
 4658		return -EINVAL;
 4659
 4660	if (response->function_code != expected_function_code)
 4661		return -EINVAL;
 4662
 4663	if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
 4664		return -EINVAL;
 4665
 4666	return 0;
 4667}
 4668
 4669static int pqi_submit_admin_request_synchronous(
 4670	struct pqi_ctrl_info *ctrl_info,
 4671	struct pqi_general_admin_request *request,
 4672	struct pqi_general_admin_response *response)
 4673{
 4674	int rc;
 4675
 4676	pqi_submit_admin_request(ctrl_info, request);
 4677
 4678	rc = pqi_poll_for_admin_response(ctrl_info, response);
 4679
 4680	if (rc == 0)
 4681		rc = pqi_validate_admin_response(response, request->function_code);
 4682
 4683	return rc;
 4684}
 4685
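/*
 * Issue the REPORT DEVICE CAPABILITY admin function and cache the
 * controller's queue count, element length, and inbound IU length
 * limits along with the SOP IU layer's spanning capabilities.
 */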
 4686static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
 4687{
 4688	int rc;
 4689	struct pqi_general_admin_request request;
 4690	struct pqi_general_admin_response response;
 4691	struct pqi_device_capability *capability;
 4692	struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
 4693
 4694	capability = kmalloc(sizeof(*capability), GFP_KERNEL);
 4695	if (!capability)
 4696		return -ENOMEM;
 4697
 4698	memset(&request, 0, sizeof(request));
 4699
 4700	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
 4701	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
 4702		&request.header.iu_length);
 4703	request.function_code =
 4704		PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
 4705	put_unaligned_le32(sizeof(*capability),
 4706		&request.data.report_device_capability.buffer_length);
 4707
 4708	rc = pqi_map_single(ctrl_info->pci_dev,
 4709		&request.data.report_device_capability.sg_descriptor,
 4710		capability, sizeof(*capability),
 4711		DMA_FROM_DEVICE);
 4712	if (rc)
 4713		goto out;
 4714
 4715	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, &response);
 4716
 4717	pqi_pci_unmap(ctrl_info->pci_dev,
 4718		&request.data.report_device_capability.sg_descriptor, 1,
 4719		DMA_FROM_DEVICE);
 4720
 4721	if (rc)
 4722		goto out;
 4723
 4724	if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
 4725		rc = -EIO;
 4726		goto out;
 4727	}
 4728
 4729	ctrl_info->max_inbound_queues =
 4730		get_unaligned_le16(&capability->max_inbound_queues);
 4731	ctrl_info->max_elements_per_iq =
 4732		get_unaligned_le16(&capability->max_elements_per_iq);
 4733	ctrl_info->max_iq_element_length =
 4734		get_unaligned_le16(&capability->max_iq_element_length)
 4735		* 16;
 4736	ctrl_info->max_outbound_queues =
 4737		get_unaligned_le16(&capability->max_outbound_queues);
 4738	ctrl_info->max_elements_per_oq =
 4739		get_unaligned_le16(&capability->max_elements_per_oq);
 4740	ctrl_info->max_oq_element_length =
 4741		get_unaligned_le16(&capability->max_oq_element_length)
 4742		* 16;
 4743
 4744	sop_iu_layer_descriptor =
 4745		&capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
 4746
 4747	ctrl_info->max_inbound_iu_length_per_firmware =
 4748		get_unaligned_le16(
 4749			&sop_iu_layer_descriptor->max_inbound_iu_length);
 4750	ctrl_info->inbound_spanning_supported =
 4751		sop_iu_layer_descriptor->inbound_spanning_supported;
 4752	ctrl_info->outbound_spanning_supported =
 4753		sop_iu_layer_descriptor->outbound_spanning_supported;
 4754
 4755out:
 4756	kfree(capability);
 4757
 4758	return rc;
 4759}
 4760
 4761static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
 4762{
 4763	if (ctrl_info->max_iq_element_length <
 4764		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
 4765		dev_err(&ctrl_info->pci_dev->dev,
 4766			"max. inbound queue element length of %d is less than the required length of %d\n",
 4767			ctrl_info->max_iq_element_length,
 4768			PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
 4769		return -EINVAL;
 4770	}
 4771
 4772	if (ctrl_info->max_oq_element_length <
 4773		PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
 4774		dev_err(&ctrl_info->pci_dev->dev,
 4775			"max. outbound queue element length of %d is less than the required length of %d\n",
 4776			ctrl_info->max_oq_element_length,
 4777			PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
 4778		return -EINVAL;
 4779	}
 4780
 4781	if (ctrl_info->max_inbound_iu_length_per_firmware <
 4782		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
 4783		dev_err(&ctrl_info->pci_dev->dev,
 4784			"max. inbound IU length of %u is less than the min. required length of %d\n",
 4785			ctrl_info->max_inbound_iu_length_per_firmware,
 4786			PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
 4787		return -EINVAL;
 4788	}
 4789
 4790	if (!ctrl_info->inbound_spanning_supported) {
 4791		dev_err(&ctrl_info->pci_dev->dev,
 4792			"the controller does not support inbound spanning\n");
 4793		return -EINVAL;
 4794	}
 4795
 4796	if (ctrl_info->outbound_spanning_supported) {
 4797		dev_err(&ctrl_info->pci_dev->dev,
 4798			"the controller supports outbound spanning but this driver does not\n");
 4799		return -EINVAL;
 4800	}
 4801
 4802	return 0;
 4803}
 4804
 4805static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
 4806{
 4807	int rc;
 4808	struct pqi_event_queue *event_queue;
 4809	struct pqi_general_admin_request request;
 4810	struct pqi_general_admin_response response;
 4811
 4812	event_queue = &ctrl_info->event_queue;
 4813
 4814	/*
 4815	 * Create an OQ (Outbound Queue - device to host queue) dedicated
 4816	 * to events.
 4817	 */
 4818	memset(&request, 0, sizeof(request));
 4819	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
 4820	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
 4821		&request.header.iu_length);
 4822	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
 4823	put_unaligned_le16(event_queue->oq_id,
 4824		&request.data.create_operational_oq.queue_id);
 4825	put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
 4826		&request.data.create_operational_oq.element_array_addr);
 4827	put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
 4828		&request.data.create_operational_oq.pi_addr);
 4829	put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
 4830		&request.data.create_operational_oq.num_elements);
 4831	put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
 4832		&request.data.create_operational_oq.element_length);
 4833	request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
 4834	put_unaligned_le16(event_queue->int_msg_num,
 4835		&request.data.create_operational_oq.int_msg_num);
 4836
 4837	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
 4838		&response);
 4839	if (rc)
 4840		return rc;
 4841
 4842	event_queue->oq_ci = ctrl_info->iomem_base +
 4843		PQI_DEVICE_REGISTERS_OFFSET +
 4844		get_unaligned_le64(
 4845			&response.data.create_operational_oq.oq_ci_offset);
 4846
 4847	return 0;
 4848}
 4849
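/*
 * Create one queue group: an inbound queue for the RAID path, a second
 * inbound queue that is then flagged as the AIO path via the change IQ
 * property function, and the outbound queue shared by both paths.
 */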
 4850static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
 4851	unsigned int group_number)
 4852{
 4853	int rc;
 4854	struct pqi_queue_group *queue_group;
 4855	struct pqi_general_admin_request request;
 4856	struct pqi_general_admin_response response;
 4857
 4858	queue_group = &ctrl_info->queue_groups[group_number];
 4859
 4860	/*
 4861	 * Create IQ (Inbound Queue - host to device queue) for
 4862	 * RAID path.
 4863	 */
 4864	memset(&request, 0, sizeof(request));
 4865	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
 4866	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
 4867		&request.header.iu_length);
 4868	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
 4869	put_unaligned_le16(queue_group->iq_id[RAID_PATH],
 4870		&request.data.create_operational_iq.queue_id);
 4871	put_unaligned_le64(
 4872		(u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
 4873		&request.data.create_operational_iq.element_array_addr);
 4874	put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
 4875		&request.data.create_operational_iq.ci_addr);
 4876	put_unaligned_le16(ctrl_info->num_elements_per_iq,
 4877		&request.data.create_operational_iq.num_elements);
 4878	put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
 4879		&request.data.create_operational_iq.element_length);
 4880	request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
 4881
 4882	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
 4883		&response);
 4884	if (rc) {
 4885		dev_err(&ctrl_info->pci_dev->dev,
 4886			"error creating inbound RAID queue\n");
 4887		return rc;
 4888	}
 4889
 4890	queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
 4891		PQI_DEVICE_REGISTERS_OFFSET +
 4892		get_unaligned_le64(
 4893			&response.data.create_operational_iq.iq_pi_offset);
 4894
 4895	/*
 4896	 * Create IQ (Inbound Queue - host to device queue) for
 4897	 * Advanced I/O (AIO) path.
 4898	 */
 4899	memset(&request, 0, sizeof(request));
 4900	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
 4901	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
 4902		&request.header.iu_length);
 4903	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
 4904	put_unaligned_le16(queue_group->iq_id[AIO_PATH],
 4905		&request.data.create_operational_iq.queue_id);
 4906	put_unaligned_le64(
 4907		(u64)queue_group->iq_element_array_bus_addr[AIO_PATH],
 4908		&request.data.create_operational_iq.element_array_addr);
 4909	put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
 4910		&request.data.create_operational_iq.ci_addr);
 4911	put_unaligned_le16(ctrl_info->num_elements_per_iq,
 4912		&request.data.create_operational_iq.num_elements);
 4913	put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
 4914		&request.data.create_operational_iq.element_length);
 4915	request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
 4916
 4917	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
 4918		&response);
 4919	if (rc) {
 4920		dev_err(&ctrl_info->pci_dev->dev,
 4921			"error creating inbound AIO queue\n");
 4922		return rc;
 4923	}
 4924
 4925	queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
 4926		PQI_DEVICE_REGISTERS_OFFSET +
 4927		get_unaligned_le64(
 4928			&response.data.create_operational_iq.iq_pi_offset);
 4929
 4930	/*
 4931	 * Designate the 2nd IQ as the AIO path.  By default, all IQs are
 4932	 * assumed to be for RAID path I/O unless we change the queue's
 4933	 * property.
 4934	 */
 4935	memset(&request, 0, sizeof(request));
 4936	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
 4937	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
 4938		&request.header.iu_length);
 4939	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
 4940	put_unaligned_le16(queue_group->iq_id[AIO_PATH],
 4941		&request.data.change_operational_iq_properties.queue_id);
 4942	put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
 4943		&request.data.change_operational_iq_properties.vendor_specific);
 4944
 4945	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
 4946		&response);
 4947	if (rc) {
 4948		dev_err(&ctrl_info->pci_dev->dev,
 4949			"error changing queue property\n");
 4950		return rc;
 4951	}
 4952
 4953	/*
 4954	 * Create OQ (Outbound Queue - device to host queue).
 4955	 */
 4956	memset(&request, 0, sizeof(request));
 4957	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
 4958	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
 4959		&request.header.iu_length);
 4960	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
 4961	put_unaligned_le16(queue_group->oq_id,
 4962		&request.data.create_operational_oq.queue_id);
 4963	put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
 4964		&request.data.create_operational_oq.element_array_addr);
 4965	put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
 4966		&request.data.create_operational_oq.pi_addr);
 4967	put_unaligned_le16(ctrl_info->num_elements_per_oq,
 4968		&request.data.create_operational_oq.num_elements);
 4969	put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
 4970		&request.data.create_operational_oq.element_length);
 4971	request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
 4972	put_unaligned_le16(queue_group->int_msg_num,
 4973		&request.data.create_operational_oq.int_msg_num);
 4974
 4975	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
 4976		&response);
 4977	if (rc) {
 4978		dev_err(&ctrl_info->pci_dev->dev,
 4979			"error creating outbound queue\n");
 4980		return rc;
 4981	}
 4982
 4983	queue_group->oq_ci = ctrl_info->iomem_base +
 4984		PQI_DEVICE_REGISTERS_OFFSET +
 4985		get_unaligned_le64(
 4986			&response.data.create_operational_oq.oq_ci_offset);
 4987
 4988	return 0;
 4989}
 4990
 4991static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
 4992{
 4993	int rc;
 4994	unsigned int i;
 4995
 4996	rc = pqi_create_event_queue(ctrl_info);
 4997	if (rc) {
 4998		dev_err(&ctrl_info->pci_dev->dev,
 4999			"error creating event queue\n");
 5000		return rc;
 5001	}
 5002
 5003	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
 5004		rc = pqi_create_queue_group(ctrl_info, i);
 5005		if (rc) {
 5006			dev_err(&ctrl_info->pci_dev->dev,
 5007				"error creating queue group number %u/%u\n",
 5008				i, ctrl_info->num_queue_groups);
 5009			return rc;
 5010		}
 5011	}
 5012
 5013	return 0;
 5014}
 5015
 5016#define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH	\
 5017	struct_size((struct pqi_event_config *)0, descriptors, PQI_MAX_EVENT_DESCRIPTORS)
 5018
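/*
 * Read the controller's event configuration, point every event type
 * supported by the driver at the event queue (or at queue 0 when
 * disabling events), and write the modified configuration back.
 */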
 5019static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
 5020	bool enable_events)
 5021{
 5022	int rc;
 5023	unsigned int i;
 5024	struct pqi_event_config *event_config;
 5025	struct pqi_event_descriptor *event_descriptor;
 5026	struct pqi_general_management_request request;
 5027
 5028	event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
 5029		GFP_KERNEL);
 5030	if (!event_config)
 5031		return -ENOMEM;
 5032
 5033	memset(&request, 0, sizeof(request));
 5034
 5035	request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
 5036	put_unaligned_le16(offsetof(struct pqi_general_management_request,
 5037		data.report_event_configuration.sg_descriptors[1]) -
 5038		PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
 5039	put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
 5040		&request.data.report_event_configuration.buffer_length);
 5041
 5042	rc = pqi_map_single(ctrl_info->pci_dev,
 5043		request.data.report_event_configuration.sg_descriptors,
 5044		event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
 5045		DMA_FROM_DEVICE);
 5046	if (rc)
 5047		goto out;
 5048
 5049	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
 5050
 5051	pqi_pci_unmap(ctrl_info->pci_dev,
 5052		request.data.report_event_configuration.sg_descriptors, 1,
 5053		DMA_FROM_DEVICE);
 5054
 5055	if (rc)
 5056		goto out;
 5057
 5058	for (i = 0; i < event_config->num_event_descriptors; i++) {
 5059		event_descriptor = &event_config->descriptors[i];
 5060		if (enable_events &&
 5061			pqi_is_supported_event(event_descriptor->event_type))
 5062				put_unaligned_le16(ctrl_info->event_queue.oq_id,
 5063					&event_descriptor->oq_id);
 5064		else
 5065			put_unaligned_le16(0, &event_descriptor->oq_id);
 5066	}
 5067
 5068	memset(&request, 0, sizeof(request));
 5069
 5070	request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
 5071	put_unaligned_le16(offsetof(struct pqi_general_management_request,
 5072		data.report_event_configuration.sg_descriptors[1]) -
 5073		PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
 5074	put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
 5075		&request.data.report_event_configuration.buffer_length);
 5076
 5077	rc = pqi_map_single(ctrl_info->pci_dev,
 5078		request.data.report_event_configuration.sg_descriptors,
 5079		event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
 5080		DMA_TO_DEVICE);
 5081	if (rc)
 5082		goto out;
 5083
 5084	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
 5085
 5086	pqi_pci_unmap(ctrl_info->pci_dev,
 5087		request.data.report_event_configuration.sg_descriptors, 1,
 5088		DMA_TO_DEVICE);
 5089
 5090out:
 5091	kfree(event_config);
 5092
 5093	return rc;
 5094}
 5095
 5096static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
 5097{
 5098	return pqi_configure_events(ctrl_info, true);
 5099}
 5100
 5101static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
 5102{
 5103	unsigned int i;
 5104	struct device *dev;
 5105	size_t sg_chain_buffer_length;
 5106	struct pqi_io_request *io_request;
 5107
 5108	if (!ctrl_info->io_request_pool)
 5109		return;
 5110
 5111	dev = &ctrl_info->pci_dev->dev;
 5112	sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
 5113	io_request = ctrl_info->io_request_pool;
 5114
 5115	for (i = 0; i < ctrl_info->max_io_slots; i++) {
 5116		kfree(io_request->iu);
 5117		if (!io_request->sg_chain_buffer)
 5118			break;
 5119		dma_free_coherent(dev, sg_chain_buffer_length,
 5120			io_request->sg_chain_buffer,
 5121			io_request->sg_chain_buffer_dma_handle);
 5122		io_request++;
 5123	}
 5124
 5125	kfree(ctrl_info->io_request_pool);
 5126	ctrl_info->io_request_pool = NULL;
 5127}
 5128
 5129static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
 5130{
 5131	ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
 5132				     ctrl_info->error_buffer_length,
 5133				     &ctrl_info->error_buffer_dma_handle,
 5134				     GFP_KERNEL);
 5135	if (!ctrl_info->error_buffer)
 5136		return -ENOMEM;
 5137
 5138	return 0;
 5139}
 5140
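/*
 * Allocate one pqi_io_request slot per possible outstanding command, each
 * with a kmalloc'ed IU buffer and a DMA-coherent SG chain buffer.  On
 * failure, pqi_free_all_io_requests() unwinds the partially built pool; it
 * stops at the first slot whose sg_chain_buffer was never allocated.
 */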
 5141static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
 5142{
 5143	unsigned int i;
 5144	void *sg_chain_buffer;
 5145	size_t sg_chain_buffer_length;
 5146	dma_addr_t sg_chain_buffer_dma_handle;
 5147	struct device *dev;
 5148	struct pqi_io_request *io_request;
 5149
 5150	ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots,
 5151		sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
 5152
 5153	if (!ctrl_info->io_request_pool) {
 5154		dev_err(&ctrl_info->pci_dev->dev,
 5155			"failed to allocate I/O request pool\n");
 5156		goto error;
 5157	}
 5158
 5159	dev = &ctrl_info->pci_dev->dev;
 5160	sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
 5161	io_request = ctrl_info->io_request_pool;
 5162
 5163	for (i = 0; i < ctrl_info->max_io_slots; i++) {
 5164		io_request->iu = kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
 5165
 5166		if (!io_request->iu) {
 5167			dev_err(&ctrl_info->pci_dev->dev,
 5168				"failed to allocate IU buffers\n");
 5169			goto error;
 5170		}
 5171
 5172		sg_chain_buffer = dma_alloc_coherent(dev,
 5173			sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
 5174			GFP_KERNEL);
 5175
 5176		if (!sg_chain_buffer) {
 5177			dev_err(&ctrl_info->pci_dev->dev,
 5178				"failed to allocate PQI scatter-gather chain buffers\n");
 5179			goto error;
 5180		}
 5181
 5182		io_request->index = i;
 5183		io_request->sg_chain_buffer = sg_chain_buffer;
 5184		io_request->sg_chain_buffer_dma_handle = sg_chain_buffer_dma_handle;
 5185		io_request++;
 5186	}
 5187
 5188	return 0;
 5189
 5190error:
 5191	pqi_free_all_io_requests(ctrl_info);
 5192
 5193	return -ENOMEM;
 5194}
 5195
 5196/*
 5197 * Calculate the required resources that are sized based on the maximum
 5198 * number of outstanding requests and the maximum transfer size.
 5199 */
 5200
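/*
 * Worked example with hypothetical values: given 4 KiB pages and a 1 MiB
 * max_transfer_size, max_sg_entries starts at 256 + 1 = 257; if the
 * controller allows that many, max_transfer_size is rounded back down to
 * (257 - 1) * 4096 = 1 MiB, giving sg_tablesize = 257 and max_sectors = 2048.
 */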
 5201static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
 5202{
 5203	u32 max_transfer_size;
 5204	u32 max_sg_entries;
 5205
 5206	ctrl_info->scsi_ml_can_queue =
 5207		ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
 5208	ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
 5209
 5210	ctrl_info->error_buffer_length =
 5211		ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
 5212
 5213	if (reset_devices)
 5214		max_transfer_size = min(ctrl_info->max_transfer_size,
 5215			PQI_MAX_TRANSFER_SIZE_KDUMP);
 5216	else
 5217		max_transfer_size = min(ctrl_info->max_transfer_size,
 5218			PQI_MAX_TRANSFER_SIZE);
 5219
 5220	max_sg_entries = max_transfer_size / PAGE_SIZE;
 5221
 5222	/* +1 to cover when the buffer is not page-aligned. */
 5223	max_sg_entries++;
 5224
 5225	max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
 5226
 5227	max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
 5228
 5229	ctrl_info->sg_chain_buffer_length =
 5230		(max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
 5231		PQI_EXTRA_SGL_MEMORY;
 5232	ctrl_info->sg_tablesize = max_sg_entries;
 5233	ctrl_info->max_sectors = max_transfer_size / 512;
 5234}
 5235
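/*
 * Queue-group sizing, roughly: one queue group per online CPU, capped by the
 * number of MSI-X vectors and by what the controller advertises (each group
 * uses two inbound queues and one outbound queue; the "- 1" below appears to
 * leave one outbound queue for the event queue).  Kdump (reset_devices) uses
 * a single queue group.
 */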
 5236static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
 5237{
 5238	int num_queue_groups;
 5239	u16 num_elements_per_iq;
 5240	u16 num_elements_per_oq;
 5241
 5242	if (reset_devices) {
 5243		num_queue_groups = 1;
 5244	} else {
 5245		int num_cpus;
 5246		int max_queue_groups;
 5247
 5248		max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
 5249			ctrl_info->max_outbound_queues - 1);
 5250		max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
 5251
 5252		num_cpus = num_online_cpus();
 5253		num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
 5254		num_queue_groups = min(num_queue_groups, max_queue_groups);
 5255	}
 5256
 5257	ctrl_info->num_queue_groups = num_queue_groups;
 5258
 5259	/*
 5260	 * Make sure that the max. inbound IU length is an even multiple
 5261	 * of our inbound element length.
 5262	 */
 5263	ctrl_info->max_inbound_iu_length =
 5264		(ctrl_info->max_inbound_iu_length_per_firmware /
 5265		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
 5266		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
 5267
 5268	num_elements_per_iq =
 5269		(ctrl_info->max_inbound_iu_length /
 5270		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
 5271
 5272	/* Add one because one element in each queue is unusable. */
 5273	num_elements_per_iq++;
 5274
 5275	num_elements_per_iq = min(num_elements_per_iq,
 5276		ctrl_info->max_elements_per_iq);
 5277
 5278	num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
 5279	num_elements_per_oq = min(num_elements_per_oq,
 5280		ctrl_info->max_elements_per_oq);
 5281
 5282	ctrl_info->num_elements_per_iq = num_elements_per_iq;
 5283	ctrl_info->num_elements_per_oq = num_elements_per_oq;
 5284
 5285	ctrl_info->max_sg_per_iu =
 5286		((ctrl_info->max_inbound_iu_length -
 5287		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
 5288		sizeof(struct pqi_sg_descriptor)) +
 5289		PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
 5290
 5291	ctrl_info->max_sg_per_r56_iu =
 5292		((ctrl_info->max_inbound_iu_length -
 5293		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
 5294		sizeof(struct pqi_sg_descriptor)) +
 5295		PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS;
 5296}
 5297
 5298static inline void pqi_set_sg_descriptor(struct pqi_sg_descriptor *sg_descriptor,
 5299	struct scatterlist *sg)
 5300{
 5301	u64 address = (u64)sg_dma_address(sg);
 5302	unsigned int length = sg_dma_len(sg);
 5303
 5304	put_unaligned_le64(address, &sg_descriptor->address);
 5305	put_unaligned_le32(length, &sg_descriptor->length);
 5306	put_unaligned_le32(0, &sg_descriptor->flags);
 5307}
 5308
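/*
 * Fill SG descriptors in-line in the IU until only the reserved slot remains;
 * that slot becomes a CISS_SG_CHAIN descriptor pointing at the request's
 * DMA-mapped chain buffer, and the remaining elements continue there.  The
 * returned count covers only the descriptors embedded in the IU (including
 * the chain descriptor), which is what callers add to the IU length.
 */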
 5309static unsigned int pqi_build_sg_list(struct pqi_sg_descriptor *sg_descriptor,
 5310	struct scatterlist *sg, int sg_count, struct pqi_io_request *io_request,
 5311	int max_sg_per_iu, bool *chained)
 5312{
 5313	int i;
 5314	unsigned int num_sg_in_iu;
 5315
 5316	*chained = false;
 5317	i = 0;
 5318	num_sg_in_iu = 0;
 5319	max_sg_per_iu--;	/* Subtract 1 to leave room for chain marker. */
 5320
 5321	while (1) {
 5322		pqi_set_sg_descriptor(sg_descriptor, sg);
 5323		if (!*chained)
 5324			num_sg_in_iu++;
 5325		i++;
 5326		if (i == sg_count)
 5327			break;
 5328		sg_descriptor++;
 5329		if (i == max_sg_per_iu) {
 5330			put_unaligned_le64((u64)io_request->sg_chain_buffer_dma_handle,
 5331				&sg_descriptor->address);
 5332			put_unaligned_le32((sg_count - num_sg_in_iu) * sizeof(*sg_descriptor),
 5333				&sg_descriptor->length);
 5334			put_unaligned_le32(CISS_SG_CHAIN, &sg_descriptor->flags);
 5335			*chained = true;
 5336			num_sg_in_iu++;
 5337			sg_descriptor = io_request->sg_chain_buffer;
 5338		}
 5339		sg = sg_next(sg);
 5340	}
 5341
 5342	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
 5343
 5344	return num_sg_in_iu;
 5345}
 5346
 5347static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
 5348	struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
 5349	struct pqi_io_request *io_request)
 5350{
 5351	u16 iu_length;
 5352	int sg_count;
 5353	bool chained;
 5354	unsigned int num_sg_in_iu;
 5355	struct scatterlist *sg;
 5356	struct pqi_sg_descriptor *sg_descriptor;
 5357
 5358	sg_count = scsi_dma_map(scmd);
 5359	if (sg_count < 0)
 5360		return sg_count;
 5361
 5362	iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
 5363		PQI_REQUEST_HEADER_LENGTH;
 5364
 5365	if (sg_count == 0)
 5366		goto out;
 5367
 5368	sg = scsi_sglist(scmd);
 5369	sg_descriptor = request->sg_descriptors;
 5370
 5371	num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
 5372		ctrl_info->max_sg_per_iu, &chained);
 5373
 5374	request->partial = chained;
 5375	iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
 5376
 5377out:
 5378	put_unaligned_le16(iu_length, &request->header.iu_length);
 5379
 5380	return 0;
 5381}
 5382
 5383static int pqi_build_aio_r1_sg_list(struct pqi_ctrl_info *ctrl_info,
 5384	struct pqi_aio_r1_path_request *request, struct scsi_cmnd *scmd,
 5385	struct pqi_io_request *io_request)
 5386{
 5387	u16 iu_length;
 5388	int sg_count;
 5389	bool chained;
 5390	unsigned int num_sg_in_iu;
 5391	struct scatterlist *sg;
 5392	struct pqi_sg_descriptor *sg_descriptor;
 5393
 5394	sg_count = scsi_dma_map(scmd);
 5395	if (sg_count < 0)
 5396		return sg_count;
 5397
 5398	iu_length = offsetof(struct pqi_aio_r1_path_request, sg_descriptors) -
 5399		PQI_REQUEST_HEADER_LENGTH;
 5400	num_sg_in_iu = 0;
 5401
 5402	if (sg_count == 0)
 5403		goto out;
 5404
 5405	sg = scsi_sglist(scmd);
 5406	sg_descriptor = request->sg_descriptors;
 5407
 5408	num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
 5409		ctrl_info->max_sg_per_iu, &chained);
 5410
 5411	request->partial = chained;
 5412	iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
 5413
 5414out:
 5415	put_unaligned_le16(iu_length, &request->header.iu_length);
 5416	request->num_sg_descriptors = num_sg_in_iu;
 5417
 5418	return 0;
 5419}
 5420
 5421static int pqi_build_aio_r56_sg_list(struct pqi_ctrl_info *ctrl_info,
 5422	struct pqi_aio_r56_path_request *request, struct scsi_cmnd *scmd,
 5423	struct pqi_io_request *io_request)
 5424{
 5425	u16 iu_length;
 5426	int sg_count;
 5427	bool chained;
 5428	unsigned int num_sg_in_iu;
 5429	struct scatterlist *sg;
 5430	struct pqi_sg_descriptor *sg_descriptor;
 5431
 5432	sg_count = scsi_dma_map(scmd);
 5433	if (sg_count < 0)
 5434		return sg_count;
 5435
 5436	iu_length = offsetof(struct pqi_aio_r56_path_request, sg_descriptors) -
 5437		PQI_REQUEST_HEADER_LENGTH;
 5438	num_sg_in_iu = 0;
 5439
 5440	if (sg_count != 0) {
 5441		sg = scsi_sglist(scmd);
 5442		sg_descriptor = request->sg_descriptors;
 5443
 5444		num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
 5445			ctrl_info->max_sg_per_r56_iu, &chained);
 5446
 5447		request->partial = chained;
 5448		iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
 5449	}
 5450
 5451	put_unaligned_le16(iu_length, &request->header.iu_length);
 5452	request->num_sg_descriptors = num_sg_in_iu;
 5453
 5454	return 0;
 5455}
 5456
 5457static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
 5458	struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
 5459	struct pqi_io_request *io_request)
 5460{
 5461	u16 iu_length;
 5462	int sg_count;
 5463	bool chained;
 5464	unsigned int num_sg_in_iu;
 5465	struct scatterlist *sg;
 5466	struct pqi_sg_descriptor *sg_descriptor;
 5467
 5468	sg_count = scsi_dma_map(scmd);
 5469	if (sg_count < 0)
 5470		return sg_count;
 5471
 5472	iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
 5473		PQI_REQUEST_HEADER_LENGTH;
 5474	num_sg_in_iu = 0;
 5475
 5476	if (sg_count == 0)
 5477		goto out;
 5478
 5479	sg = scsi_sglist(scmd);
 5480	sg_descriptor = request->sg_descriptors;
 5481
 5482	num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
 5483		ctrl_info->max_sg_per_iu, &chained);
 5484
 5485	request->partial = chained;
 5486	iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
 5487
 5488out:
 5489	put_unaligned_le16(iu_length, &request->header.iu_length);
 5490	request->num_sg_descriptors = num_sg_in_iu;
 5491
 5492	return 0;
 5493}
 5494
 5495static void pqi_raid_io_complete(struct pqi_io_request *io_request,
 5496	void *context)
 5497{
 5498	struct scsi_cmnd *scmd;
 5499
 5500	scmd = io_request->scmd;
 5501	pqi_free_io_request(io_request);
 5502	scsi_dma_unmap(scmd);
 5503	pqi_scsi_done(scmd);
 5504}
 5505
 5506static int pqi_raid_submit_scsi_cmd_with_io_request(
 5507	struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
 5508	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
 5509	struct pqi_queue_group *queue_group)
 5510{
 5511	int rc;
 5512	size_t cdb_length;
 5513	struct pqi_raid_path_request *request;
 5514
 5515	io_request->io_complete_callback = pqi_raid_io_complete;
 5516	io_request->scmd = scmd;
 5517
 5518	request = io_request->iu;
 5519	memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors));
 5520
 5521	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
 5522	put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
 5523	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
 5524	put_unaligned_le16(io_request->index, &request->request_id);
 5525	request->error_index = request->request_id;
 5526	memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number));
 5527	request->ml_device_lun_number = (u8)scmd->device->lun;
 5528
 5529	cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
 5530	memcpy(request->cdb, scmd->cmnd, cdb_length);
 5531
 5532	switch (cdb_length) {
 5533	case 6:
 5534	case 10:
 5535	case 12:
 5536	case 16:
 5537		request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
 5538		break;
 5539	case 20:
 5540		request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_4;
 5541		break;
 5542	case 24:
 5543		request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_8;
 5544		break;
 5545	case 28:
 5546		request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_12;
 5547		break;
 5548	case 32:
 5549	default:
 5550		request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_16;
 5551		break;
 5552	}
 5553
 5554	switch (scmd->sc_data_direction) {
 5555	case DMA_FROM_DEVICE:
 5556		request->data_direction = SOP_READ_FLAG;
 5557		break;
 5558	case DMA_TO_DEVICE:
 5559		request->data_direction = SOP_WRITE_FLAG;
 5560		break;
 5561	case DMA_NONE:
 5562		request->data_direction = SOP_NO_DIRECTION_FLAG;
 5563		break;
 5564	case DMA_BIDIRECTIONAL:
 5565		request->data_direction = SOP_BIDIRECTIONAL;
 5566		break;
 5567	default:
 5568		dev_err(&ctrl_info->pci_dev->dev,
 5569			"unknown data direction: %d\n",
 5570			scmd->sc_data_direction);
 5571		break;
 5572	}
 5573
 5574	rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
 5575	if (rc) {
 5576		pqi_free_io_request(io_request);
 5577		return SCSI_MLQUEUE_HOST_BUSY;
 5578	}
 5579
 5580	pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
 5581
 5582	return 0;
 5583}
 5584
 5585static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
 5586	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
 5587	struct pqi_queue_group *queue_group)
 5588{
 5589	struct pqi_io_request *io_request;
 5590
 5591	io_request = pqi_alloc_io_request(ctrl_info, scmd);
 5592	if (!io_request)
 5593		return SCSI_MLQUEUE_HOST_BUSY;
 5594
 5595	return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
 5596		device, scmd, queue_group);
 5597}
 5598
 5599static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
 5600{
 5601	struct scsi_cmnd *scmd;
 5602	struct pqi_scsi_dev *device;
 5603	struct pqi_ctrl_info *ctrl_info;
 5604
 5605	if (!io_request->raid_bypass)
 5606		return false;
 5607
 5608	scmd = io_request->scmd;
 5609	if ((scmd->result & 0xff) == SAM_STAT_GOOD)
 5610		return false;
 5611	if (host_byte(scmd->result) == DID_NO_CONNECT)
 5612		return false;
 5613
 5614	device = scmd->device->hostdata;
 5615	if (pqi_device_offline(device) || pqi_device_in_remove(device))
 5616		return false;
 5617
 5618	ctrl_info = shost_to_hba(scmd->device->host);
 5619	if (pqi_ctrl_offline(ctrl_info))
 5620		return false;
 5621
 5622	return true;
 5623}
 5624
 5625static void pqi_aio_io_complete(struct pqi_io_request *io_request,
 5626	void *context)
 5627{
 5628	struct scsi_cmnd *scmd;
 5629
 5630	scmd = io_request->scmd;
 5631	scsi_dma_unmap(scmd);
 5632	if (io_request->status == -EAGAIN || pqi_raid_bypass_retry_needed(io_request)) {
 5633		set_host_byte(scmd, DID_IMM_RETRY);
 5634		pqi_cmd_priv(scmd)->this_residual++;
 5635	}
 5636
 5637	pqi_free_io_request(io_request);
 5638	pqi_scsi_done(scmd);
 5639}
 5640
 5641static inline bool pqi_is_io_high_priority(struct pqi_ctrl_info *ctrl_info,
 5642	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd)
 5643{
 5644	bool io_high_prio;
 5645	int priority_class;
 5646
 5647	io_high_prio = false;
 5648
 5649	if (device->ncq_prio_enable) {
 5650		priority_class =
 5651			IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd)));
 5652		if (priority_class == IOPRIO_CLASS_RT) {
 5653			/* Set NCQ priority for read/write commands. */
 5654			switch (scmd->cmnd[0]) {
 5655			case WRITE_16:
 5656			case READ_16:
 5657			case WRITE_12:
 5658			case READ_12:
 5659			case WRITE_10:
 5660			case READ_10:
 5661			case WRITE_6:
 5662			case READ_6:
 5663				io_high_prio = true;
 5664				break;
 5665			}
 5666		}
 5667	}
 5668
 5669	return io_high_prio;
 5670}
 5671
 5672static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
 5673	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
 5674	struct pqi_queue_group *queue_group)
 5675{
 5676	bool io_high_prio;
 5677
 5678	io_high_prio = pqi_is_io_high_priority(ctrl_info, device, scmd);
 5679
 5680	return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
 5681		scmd->cmnd, scmd->cmd_len, queue_group, NULL,
 5682		false, io_high_prio);
 5683}
 5684
 5685static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
 5686	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
 5687	unsigned int cdb_length, struct pqi_queue_group *queue_group,
 5688	struct pqi_encryption_info *encryption_info, bool raid_bypass,
 5689	bool io_high_prio)
 5690{
 5691	int rc;
 5692	struct pqi_io_request *io_request;
 5693	struct pqi_aio_path_request *request;
 5694	struct pqi_scsi_dev *device;
 5695
 5696	device = scmd->device->hostdata;
 5697	io_request = pqi_alloc_io_request(ctrl_info, scmd);
 5698	if (!io_request)
 5699		return SCSI_MLQUEUE_HOST_BUSY;
 5700	io_request->io_complete_callback = pqi_aio_io_complete;
 5701	io_request->scmd = scmd;
 5702	io_request->raid_bypass = raid_bypass;
 5703
 5704	request = io_request->iu;
 5705	memset(request, 0, offsetof(struct pqi_aio_path_request, sg_descriptors));
 5706
 5707	request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
 5708	put_unaligned_le32(aio_handle, &request->nexus_id);
 5709	put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
 5710	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
 5711	request->command_priority = io_high_prio;
 5712	put_unaligned_le16(io_request->index, &request->request_id);
 5713	request->error_index = request->request_id;
 5714	if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
 5715		put_unaligned_le64(((scmd->device->lun) << 8), &request->lun_number);
 5716	if (cdb_length > sizeof(request->cdb))
 5717		cdb_length = sizeof(request->cdb);
 5718	request->cdb_length = cdb_length;
 5719	memcpy(request->cdb, cdb, cdb_length);
 5720
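	/*
	 * Note: the AIO path maps DMA_TO_DEVICE to SOP_READ_FLAG and
	 * DMA_FROM_DEVICE to SOP_WRITE_FLAG, the opposite of the RAID path
	 * above; presumably the direction here is expressed from the
	 * controller's point of view.  The RAID 1/5/6 write paths below do
	 * the same (SOP_READ_FLAG for writes).
	 */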
 5721	switch (scmd->sc_data_direction) {
 5722	case DMA_TO_DEVICE:
 5723		request->data_direction = SOP_READ_FLAG;
 5724		break;
 5725	case DMA_FROM_DEVICE:
 5726		request->data_direction = SOP_WRITE_FLAG;
 5727		break;
 5728	case DMA_NONE:
 5729		request->data_direction = SOP_NO_DIRECTION_FLAG;
 5730		break;
 5731	case DMA_BIDIRECTIONAL:
 5732		request->data_direction = SOP_BIDIRECTIONAL;
 5733		break;
 5734	default:
 5735		dev_err(&ctrl_info->pci_dev->dev,
 5736			"unknown data direction: %d\n",
 5737			scmd->sc_data_direction);
 5738		break;
 5739	}
 5740
 5741	if (encryption_info) {
 5742		request->encryption_enable = true;
 5743		put_unaligned_le16(encryption_info->data_encryption_key_index,
 5744			&request->data_encryption_key_index);
 5745		put_unaligned_le32(encryption_info->encrypt_tweak_lower,
 5746			&request->encrypt_tweak_lower);
 5747		put_unaligned_le32(encryption_info->encrypt_tweak_upper,
 5748			&request->encrypt_tweak_upper);
 5749	}
 5750
 5751	rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
 5752	if (rc) {
 5753		pqi_free_io_request(io_request);
 5754		return SCSI_MLQUEUE_HOST_BUSY;
 5755	}
 5756
 5757	pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
 5758
 5759	return 0;
 5760}
 5761
 5762static  int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
 5763	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
 5764	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
 5765	struct pqi_scsi_dev_raid_map_data *rmd)
 5766{
 5767	int rc;
 5768	struct pqi_io_request *io_request;
 5769	struct pqi_aio_r1_path_request *r1_request;
 5770
 5771	io_request = pqi_alloc_io_request(ctrl_info, scmd);
 5772	if (!io_request)
 5773		return SCSI_MLQUEUE_HOST_BUSY;
 5774
 5775	io_request->io_complete_callback = pqi_aio_io_complete;
 5776	io_request->scmd = scmd;
 5777	io_request->raid_bypass = true;
 5778
 5779	r1_request = io_request->iu;
 5780	memset(r1_request, 0, offsetof(struct pqi_aio_r1_path_request, sg_descriptors));
 5781
 5782	r1_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID1_IO;
 5783	put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r1_request->volume_id);
 5784	r1_request->num_drives = rmd->num_it_nexus_entries;
 5785	put_unaligned_le32(rmd->it_nexus[0], &r1_request->it_nexus_1);
 5786	put_unaligned_le32(rmd->it_nexus[1], &r1_request->it_nexus_2);
 5787	if (rmd->num_it_nexus_entries == 3)
 5788		put_unaligned_le32(rmd->it_nexus[2], &r1_request->it_nexus_3);
 5789
 5790	put_unaligned_le32(scsi_bufflen(scmd), &r1_request->data_length);
 5791	r1_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
 5792	put_unaligned_le16(io_request->index, &r1_request->request_id);
 5793	r1_request->error_index = r1_request->request_id;
 5794	if (rmd->cdb_length > sizeof(r1_request->cdb))
 5795		rmd->cdb_length = sizeof(r1_request->cdb);
 5796	r1_request->cdb_length = rmd->cdb_length;
 5797	memcpy(r1_request->cdb, rmd->cdb, rmd->cdb_length);
 5798
 5799	/* The direction is always write (expressed as SOP_READ_FLAG on the AIO path). */
 5800	r1_request->data_direction = SOP_READ_FLAG;
 5801
 5802	if (encryption_info) {
 5803		r1_request->encryption_enable = true;
 5804		put_unaligned_le16(encryption_info->data_encryption_key_index,
 5805				&r1_request->data_encryption_key_index);
 5806		put_unaligned_le32(encryption_info->encrypt_tweak_lower,
 5807				&r1_request->encrypt_tweak_lower);
 5808		put_unaligned_le32(encryption_info->encrypt_tweak_upper,
 5809				&r1_request->encrypt_tweak_upper);
 5810	}
 5811
 5812	rc = pqi_build_aio_r1_sg_list(ctrl_info, r1_request, scmd, io_request);
 5813	if (rc) {
 5814		pqi_free_io_request(io_request);
 5815		return SCSI_MLQUEUE_HOST_BUSY;
 5816	}
 5817
 5818	pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
 5819
 5820	return 0;
 5821}
 5822
 5823static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
 5824	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
 5825	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
 5826	struct pqi_scsi_dev_raid_map_data *rmd)
 5827{
 5828	int rc;
 5829	struct pqi_io_request *io_request;
 5830	struct pqi_aio_r56_path_request *r56_request;
 5831
 5832	io_request = pqi_alloc_io_request(ctrl_info, scmd);
 5833	if (!io_request)
 5834		return SCSI_MLQUEUE_HOST_BUSY;
 5835	io_request->io_complete_callback = pqi_aio_io_complete;
 5836	io_request->scmd = scmd;
 5837	io_request->raid_bypass = true;
 5838
 5839	r56_request = io_request->iu;
 5840	memset(r56_request, 0, offsetof(struct pqi_aio_r56_path_request, sg_descriptors));
 5841
 5842	if (device->raid_level == SA_RAID_5 || device->raid_level == SA_RAID_51)
 5843		r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID5_IO;
 5844	else
 5845		r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID6_IO;
 5846
 5847	put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r56_request->volume_id);
 5848	put_unaligned_le32(rmd->aio_handle, &r56_request->data_it_nexus);
 5849	put_unaligned_le32(rmd->p_parity_it_nexus, &r56_request->p_parity_it_nexus);
 5850	if (rmd->raid_level == SA_RAID_6) {
 5851		put_unaligned_le32(rmd->q_parity_it_nexus, &r56_request->q_parity_it_nexus);
 5852		r56_request->xor_multiplier = rmd->xor_mult;
 5853	}
 5854	put_unaligned_le32(scsi_bufflen(scmd), &r56_request->data_length);
 5855	r56_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
 5856	put_unaligned_le64(rmd->row, &r56_request->row);
 5857
 5858	put_unaligned_le16(io_request->index, &r56_request->request_id);
 5859	r56_request->error_index = r56_request->request_id;
 5860
 5861	if (rmd->cdb_length > sizeof(r56_request->cdb))
 5862		rmd->cdb_length = sizeof(r56_request->cdb);
 5863	r56_request->cdb_length = rmd->cdb_length;
 5864	memcpy(r56_request->cdb, rmd->cdb, rmd->cdb_length);
 5865
 5866	/* The direction is always write (expressed as SOP_READ_FLAG on the AIO path). */
 5867	r56_request->data_direction = SOP_READ_FLAG;
 5868
 5869	if (encryption_info) {
 5870		r56_request->encryption_enable = true;
 5871		put_unaligned_le16(encryption_info->data_encryption_key_index,
 5872				&r56_request->data_encryption_key_index);
 5873		put_unaligned_le32(encryption_info->encrypt_tweak_lower,
 5874				&r56_request->encrypt_tweak_lower);
 5875		put_unaligned_le32(encryption_info->encrypt_tweak_upper,
 5876				&r56_request->encrypt_tweak_upper);
 5877	}
 5878
 5879	rc = pqi_build_aio_r56_sg_list(ctrl_info, r56_request, scmd, io_request);
 5880	if (rc) {
 5881		pqi_free_io_request(io_request);
 5882		return SCSI_MLQUEUE_HOST_BUSY;
 5883	}
 5884
 5885	pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
 5886
 5887	return 0;
 5888}
 5889
 5890static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
 5891	struct scsi_cmnd *scmd)
 5892{
 5893	/*
 5894	 * host_tagset = 1 is set during init, so the hw queue index is encoded in the unique tag.
 5895	 */
 5896	return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scsi_cmd_to_rq(scmd)));
 5897}
 5898
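/*
 * this_residual is incremented by pqi_aio_io_complete() when a RAID-bypass
 * request fails and is requeued with DID_IMM_RETRY, so the retried command
 * is no longer bypass-eligible and is sent down the normal RAID path.
 */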
 5899static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd)
 5900{
 5901	if (blk_rq_is_passthrough(scsi_cmd_to_rq(scmd)))
 5902		return false;
 5903
 5904	return pqi_cmd_priv(scmd)->this_residual == 0;
 5905}
 5906
 5907/*
 5908 * This function gets called just before we hand the completed SCSI request
 5909 * back to the SML.
 5910 */
 5911
 5912void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
 5913{
 5914	struct pqi_scsi_dev *device;
 5915
 5916	if (!scmd->device) {
 5917		set_host_byte(scmd, DID_NO_CONNECT);
 5918		return;
 5919	}
 5920
 5921	device = scmd->device->hostdata;
 5922	if (!device) {
 5923		set_host_byte(scmd, DID_NO_CONNECT);
 5924		return;
 5925	}
 5926
 5927	atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]);
 5928}
 5929
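/*
 * Sequential parity-write stream detection: up to NUM_STREAMS_PER_LUN write
 * streams are tracked per RAID 5/6 volume, replaced in LRU order.  Returning
 * true makes the caller skip the AIO RAID bypass and send the write down the
 * RAID path; this also happens unconditionally when the controller cannot
 * perform AIO RAID 5/6 writes at all.
 */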
 5930static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
 5931	struct scsi_cmnd *scmd)
 5932{
 5933	u32 oldest_jiffies;
 5934	u8 lru_index;
 5935	int i;
 5936	int rc;
 5937	struct pqi_scsi_dev *device;
 5938	struct pqi_stream_data *pqi_stream_data;
 5939	struct pqi_scsi_dev_raid_map_data rmd;
 5940
 5941	if (!ctrl_info->enable_stream_detection)
 5942		return false;
 5943
 5944	rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
 5945	if (rc)
 5946		return false;
 5947
 5948	/* Check writes only. */
 5949	if (!rmd.is_write)
 5950		return false;
 5951
 5952	device = scmd->device->hostdata;
 5953
 5954	/* Check for RAID 5/6 streams. */
 5955	if (device->raid_level != SA_RAID_5 && device->raid_level != SA_RAID_6)
 5956		return false;
 5957
 5958	/*
 5959	 * If the controller does not support AIO RAID 5/6 writes, these
 5960	 * requests must be sent down the non-AIO path.
 5961	 */
 5962	if ((device->raid_level == SA_RAID_5 && !ctrl_info->enable_r5_writes) ||
 5963		(device->raid_level == SA_RAID_6 && !ctrl_info->enable_r6_writes))
 5964		return true;
 5965
 5966	lru_index = 0;
 5967	oldest_jiffies = INT_MAX;
 5968	for (i = 0; i < NUM_STREAMS_PER_LUN; i++) {
 5969		pqi_stream_data = &device->stream_data[i];
 5970		/*
 5971		 * Check whether this request is adjacent to, or falls within,
 5972		 * the previous request for this stream.
 5973		 */
 5974		if ((pqi_stream_data->next_lba &&
 5975			rmd.first_block >= pqi_stream_data->next_lba) &&
 5976			rmd.first_block <= pqi_stream_data->next_lba +
 5977				rmd.block_cnt) {
 5978			pqi_stream_data->next_lba = rmd.first_block +
 5979				rmd.block_cnt;
 5980			pqi_stream_data->last_accessed = jiffies;
 5981			return true;
 5982		}
 5983
 5984		/* Unused entry. */
 5985		if (pqi_stream_data->last_accessed == 0) {
 5986			lru_index = i;
 5987			break;
 5988		}
 5989
 5990		/* Find entry with oldest last accessed time. */
 5991		if (pqi_stream_data->last_accessed <= oldest_jiffies) {
 5992			oldest_jiffies = pqi_stream_data->last_accessed;
 5993			lru_index = i;
 5994		}
 5995	}
 5996
 5997	/* Set LRU entry. */
 5998	pqi_stream_data = &device->stream_data[lru_index];
 5999	pqi_stream_data->last_accessed = jiffies;
 6000	pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt;
 6001
 6002	return false;
 6003}
 6004
 6005static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
 6006{
 6007	int rc;
 6008	struct pqi_ctrl_info *ctrl_info;
 6009	struct pqi_scsi_dev *device;
 6010	u16 hw_queue;
 6011	struct pqi_queue_group *queue_group;
 6012	bool raid_bypassed;
 6013
 6014	device = scmd->device->hostdata;
 6015
 6016	if (!device) {
 6017		set_host_byte(scmd, DID_NO_CONNECT);
 6018		pqi_scsi_done(scmd);
 6019		return 0;
 6020	}
 6021
 6022	atomic_inc(&device->scsi_cmds_outstanding[scmd->device->lun]);
 6023
 6024	ctrl_info = shost_to_hba(shost);
 6025
 6026	if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(device)) {
 6027		set_host_byte(scmd, DID_NO_CONNECT);
 6028		pqi_scsi_done(scmd);
 6029		return 0;
 6030	}
 6031
 6032	if (pqi_ctrl_blocked(ctrl_info)) {
 6033		rc = SCSI_MLQUEUE_HOST_BUSY;
 6034		goto out;
 6035	}
 6036
 6037	/*
 6038	 * This is necessary because the SML doesn't zero out this field during
 6039	 * error recovery.
 6040	 */
 6041	scmd->result = 0;
 6042
 6043	hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
 6044	queue_group = &ctrl_info->queue_groups[hw_queue];
 6045
 6046	if (pqi_is_logical_device(device)) {
 6047		raid_bypassed = false;
 6048		if (device->raid_bypass_enabled &&
 6049			pqi_is_bypass_eligible_request(scmd) &&
 6050			!pqi_is_parity_write_stream(ctrl_info, scmd)) {
 6051			rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
 6052			if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
 6053				raid_bypassed = true;
 6054				atomic_inc(&device->raid_bypass_cnt);
 6055			}
 6056		}
 6057		if (!raid_bypassed)
 6058			rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
 6059	} else {
 6060		if (device->aio_enabled)
 6061			rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
 6062		else
 6063			rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
 6064	}
 6065
 6066out:
 6067	if (rc)
 6068		atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]);
 6069
 6070	return rc;
 6071}
 6072
 6073static unsigned int pqi_queued_io_count(struct pqi_ctrl_info *ctrl_info)
 6074{
 6075	unsigned int i;
 6076	unsigned int path;
 6077	unsigned long flags;
 6078	unsigned int queued_io_count;
 6079	struct pqi_queue_group *queue_group;
 6080	struct pqi_io_request *io_request;
 6081
 6082	queued_io_count = 0;
 6083
 6084	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
 6085		queue_group = &ctrl_info->queue_groups[i];
 6086		for (path = 0; path < 2; path++) {
 6087			spin_lock_irqsave(&queue_group->submit_lock[path], flags);
 6088			list_for_each_entry(io_request, &queue_group->request_list[path], request_list_entry)
 6089				queued_io_count++;
 6090			spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
 6091		}
 6092	}
 6093
 6094	return queued_io_count;
 6095}
 6096
 6097static unsigned int pqi_nonempty_inbound_queue_count(struct pqi_ctrl_info *ctrl_info)
 6098{
 6099	unsigned int i;
 6100	unsigned int path;
 6101	unsigned int nonempty_inbound_queue_count;
 6102	struct pqi_queue_group *queue_group;
 6103	pqi_index_t iq_pi;
 6104	pqi_index_t iq_ci;
 6105
 6106	nonempty_inbound_queue_count = 0;
 6107
 6108	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
 6109		queue_group = &ctrl_info->queue_groups[i];
 6110		for (path = 0; path < 2; path++) {
 6111			iq_pi = queue_group->iq_pi_copy[path];
 6112			iq_ci = readl(queue_group->iq_ci[path]);
 6113			if (iq_ci != iq_pi)
 6114				nonempty_inbound_queue_count++;
 6115		}
 6116	}
 6117
 6118	return nonempty_inbound_queue_count;
 6119}
 6120
 6121#define PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS	10
 6122
 6123static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
 6124{
 6125	unsigned long start_jiffies;
 6126	unsigned long warning_timeout;
 6127	unsigned int queued_io_count;
 6128	unsigned int nonempty_inbound_queue_count;
 6129	bool displayed_warning;
 6130
 6131	displayed_warning = false;
 6132	start_jiffies = jiffies;
 6133	warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
 6134
 6135	while (1) {
 6136		queued_io_count = pqi_queued_io_count(ctrl_info);
 6137		nonempty_inbound_queue_count = pqi_nonempty_inbound_queue_count(ctrl_info);
 6138		if (queued_io_count == 0 && nonempty_inbound_queue_count == 0)
 6139			break;
 6140		pqi_check_ctrl_health(ctrl_info);
 6141		if (pqi_ctrl_offline(ctrl_info))
 6142			return -ENXIO;
 6143		if (time_after(jiffies, warning_timeout)) {
 6144			dev_warn(&ctrl_info->pci_dev->dev,
 6145				"waiting %u seconds for queued I/O to drain (queued I/O count: %u; non-empty inbound queue count: %u)\n",
 6146				jiffies_to_msecs(jiffies - start_jiffies) / 1000, queued_io_count, nonempty_inbound_queue_count);
 6147			displayed_warning = true;
 6148			warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + jiffies;
 6149		}
 6150		usleep_range(1000, 2000);
 6151	}
 6152
 6153	if (displayed_warning)
 6154		dev_warn(&ctrl_info->pci_dev->dev,
 6155			"queued I/O drained after waiting for %u seconds\n",
 6156			jiffies_to_msecs(jiffies - start_jiffies) / 1000);
 6157
 6158	return 0;
 6159}
 6160
 6161static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
 6162	struct pqi_scsi_dev *device)
 6163{
 6164	unsigned int i;
 6165	unsigned int path;
 6166	struct pqi_queue_group *queue_group;
 6167	unsigned long flags;
 6168	struct pqi_io_request *io_request;
 6169	struct pqi_io_request *next;
 6170	struct scsi_cmnd *scmd;
 6171	struct pqi_scsi_dev *scsi_device;
 6172
 6173	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
 6174		queue_group = &ctrl_info->queue_groups[i];
 6175
 6176		for (path = 0; path < 2; path++) {
 6177			spin_lock_irqsave(
 6178				&queue_group->submit_lock[path], flags);
 6179
 6180			list_for_each_entry_safe(io_request, next,
 6181				&queue_group->request_list[path],
 6182				request_list_entry) {
 6183
 6184				scmd = io_request->scmd;
 6185				if (!scmd)
 6186					continue;
 6187
 6188				scsi_device = scmd->device->hostdata;
 6189				if (scsi_device != device)
 6190					continue;
 6191
 6192				list_del(&io_request->request_list_entry);
 6193				set_host_byte(scmd, DID_RESET);
 6194				pqi_free_io_request(io_request);
 6195				scsi_dma_unmap(scmd);
 6196				pqi_scsi_done(scmd);
 6197			}
 6198
 6199			spin_unlock_irqrestore(
 6200				&queue_group->submit_lock[path], flags);
 6201		}
 6202	}
 6203}
 6204
 6205#define PQI_PENDING_IO_WARNING_TIMEOUT_SECS	10
 6206
 6207static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
 6208	struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs)
 6209{
 6210	int cmds_outstanding;
 6211	unsigned long start_jiffies;
 6212	unsigned long warning_timeout;
 6213	unsigned long msecs_waiting;
 6214
 6215	start_jiffies = jiffies;
 6216	warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
 6217
 6218	while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun])) > 0) {
 6219		if (ctrl_info->ctrl_removal_state != PQI_CTRL_GRACEFUL_REMOVAL) {
 6220			pqi_check_ctrl_health(ctrl_info);
 6221			if (pqi_ctrl_offline(ctrl_info))
 6222				return -ENXIO;
 6223		}
 6224		msecs_waiting = jiffies_to_msecs(jiffies - start_jiffies);
 6225		if (msecs_waiting >= timeout_msecs) {
 6226			dev_err(&ctrl_info->pci_dev->dev,
 6227				"scsi %d:%d:%d:%d: timed out after %lu seconds waiting for %d outstanding command(s)\n",
 6228				ctrl_info->scsi_host->host_no, device->bus, device->target,
 6229				lun, msecs_waiting / 1000, cmds_outstanding);
 6230			return -ETIMEDOUT;
 6231		}
 6232		if (time_after(jiffies, warning_timeout)) {
 6233			dev_warn(&ctrl_info->pci_dev->dev,
 6234				"scsi %d:%d:%d:%d: waiting %lu seconds for %d outstanding command(s)\n",
 6235				ctrl_info->scsi_host->host_no, device->bus, device->target,
 6236				lun, msecs_waiting / 1000, cmds_outstanding);
 6237			warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + jiffies;
 6238		}
 6239		usleep_range(1000, 2000);
 6240	}
 6241
 6242	return 0;
 6243}
 6244
 6245static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
 6246	void *context)
 6247{
 6248	struct completion *waiting = context;
 6249
 6250	complete(waiting);
 6251}
 6252
 6253#define PQI_LUN_RESET_POLL_COMPLETION_SECS	10
 6254
 6255static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
 6256	struct pqi_scsi_dev *device, u8 lun, struct completion *wait)
 6257{
 6258	int rc;
 6259	unsigned int wait_secs;
 6260	int cmds_outstanding;
 6261
 6262	wait_secs = 0;
 6263
 6264	while (1) {
 6265		if (wait_for_completion_io_timeout(wait,
 6266			PQI_LUN_RESET_POLL_COMPLETION_SECS * HZ)) {
 6267			rc = 0;
 6268			break;
 6269		}
 6270
 6271		pqi_check_ctrl_health(ctrl_info);
 6272		if (pqi_ctrl_offline(ctrl_info)) {
 6273			rc = -ENXIO;
 6274			break;
 6275		}
 6276
 6277		wait_secs += PQI_LUN_RESET_POLL_COMPLETION_SECS;
 6278		cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun]);
 6279		dev_warn(&ctrl_info->pci_dev->dev,
 6280			"scsi %d:%d:%d:%d: waiting %u seconds for LUN reset to complete (%d command(s) outstanding)\n",
 6281			ctrl_info->scsi_host->host_no, device->bus, device->target, lun, wait_secs, cmds_outstanding);
 6282	}
 6283
 6284	return rc;
 6285}
 6286
 6287#define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS	30
 6288
 6289static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
 6290{
 6291	int rc;
 6292	struct pqi_io_request *io_request;
 6293	DECLARE_COMPLETION_ONSTACK(wait);
 6294	struct pqi_task_management_request *request;
 6295	struct pqi_scsi_dev *device;
 6296
 6297	device = scmd->device->hostdata;
 6298	io_request = pqi_alloc_io_request(ctrl_info, NULL);
 6299	io_request->io_complete_callback = pqi_lun_reset_complete;
 6300	io_request->context = &wait;
 6301
 6302	request = io_request->iu;
 6303	memset(request, 0, sizeof(*request));
 6304
 6305	request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
 6306	put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
 6307		&request->header.iu_length);
 6308	put_unaligned_le16(io_request->index, &request->request_id);
 6309	memcpy(request->lun_number, device->scsi3addr,
 6310		sizeof(request->lun_number));
 6311	if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
 6312		request->ml_device_lun_number = (u8)scmd->device->lun;
 6313	request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
 6314	if (ctrl_info->tmf_iu_timeout_supported)
 6315		put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout);
 6316
 6317	pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
 6318		io_request);
 6319
 6320	rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, (u8)scmd->device->lun, &wait);
 6321	if (rc == 0)
 6322		rc = io_request->status;
 6323
 6324	pqi_free_io_request(io_request);
 6325
 6326	return rc;
 6327}
 6328
 6329#define PQI_LUN_RESET_RETRIES				3
 6330#define PQI_LUN_RESET_RETRY_INTERVAL_MSECS		(10 * 1000)
 6331#define PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS		(10 * 60 * 1000)
 6332#define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS	(2 * 60 * 1000)
 6333
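/*
 * A LUN reset is attempted up to PQI_LUN_RESET_RETRIES + 1 times with a
 * 10-second pause between attempts, after which we wait for the device's
 * outstanding commands to drain: up to 10 minutes if the reset succeeded,
 * but only 2 minutes if it did not.
 */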
 6334static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
 6335{
 6336	int reset_rc;
 6337	int wait_rc;
 6338	unsigned int retries;
 6339	unsigned long timeout_msecs;
 6340	struct pqi_scsi_dev *device;
 6341
 6342	device = scmd->device->hostdata;
 6343	for (retries = 0;;) {
 6344		reset_rc = pqi_lun_reset(ctrl_info, scmd);
 6345		if (reset_rc == 0 || reset_rc == -ENODEV || ++retries > PQI_LUN_RESET_RETRIES)
 6346			break;
 6347		msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
 6348	}
 6349
 6350	timeout_msecs = reset_rc ? PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS :
 6351		PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS;
 6352
 6353	wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, scmd->device->lun, timeout_msecs);
 6354	if (wait_rc && reset_rc == 0)
 6355		reset_rc = wait_rc;
 6356
 6357	return reset_rc == 0 ? SUCCESS : FAILED;
 6358}
 6359
 6360static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
 6361{
 6362	int rc;
 6363	struct pqi_scsi_dev *device;
 6364
 6365	device = scmd->device->hostdata;
 6366	pqi_ctrl_block_requests(ctrl_info);
 6367	pqi_ctrl_wait_until_quiesced(ctrl_info);
 6368	pqi_fail_io_queued_for_device(ctrl_info, device);
 6369	rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
 6370	if (rc)
 6371		rc = FAILED;
 6372	else
 6373		rc = pqi_lun_reset_with_retries(ctrl_info, scmd);
 6374	pqi_ctrl_unblock_requests(ctrl_info);
 6375
 6376	return rc;
 6377}
 6378
 6379static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
 6380{
 6381	int rc;
 6382	struct Scsi_Host *shost;
 6383	struct pqi_ctrl_info *ctrl_info;
 6384	struct pqi_scsi_dev *device;
 6385
 6386	shost = scmd->device->host;
 6387	ctrl_info = shost_to_hba(shost);
 6388	device = scmd->device->hostdata;
 6389
 6390	mutex_lock(&ctrl_info->lun_reset_mutex);
 6391
 6392	dev_err(&ctrl_info->pci_dev->dev,
 6393		"resetting scsi %d:%d:%d:%d due to cmd 0x%02x\n",
 6394		shost->host_no,
 6395		device->bus, device->target, (u32)scmd->device->lun,
 6396		scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff);
 6397
 6398	pqi_check_ctrl_health(ctrl_info);
 6399	if (pqi_ctrl_offline(ctrl_info))
 6400		rc = FAILED;
 6401	else
 6402		rc = pqi_device_reset(ctrl_info, scmd);
 6403
 6404	dev_err(&ctrl_info->pci_dev->dev,
 6405		"reset of scsi %d:%d:%d:%d: %s\n",
 6406		shost->host_no, device->bus, device->target, (u32)scmd->device->lun,
 6407		rc == SUCCESS ? "SUCCESS" : "FAILED");
 6408
 6409	mutex_unlock(&ctrl_info->lun_reset_mutex);
 6410
 6411	return rc;
 6412}
 6413
 6414static int pqi_slave_alloc(struct scsi_device *sdev)
 6415{
 6416	struct pqi_scsi_dev *device;
 6417	unsigned long flags;
 6418	struct pqi_ctrl_info *ctrl_info;
 6419	struct scsi_target *starget;
 6420	struct sas_rphy *rphy;
 6421
 6422	ctrl_info = shost_to_hba(sdev->host);
 6423
 6424	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
 6425
 6426	if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
 6427		starget = scsi_target(sdev);
 6428		rphy = target_to_rphy(starget);
 6429		device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
 6430		if (device) {
 6431			if (device->target_lun_valid) {
 6432				device->ignore_device = true;
 6433			} else {
 6434				device->target = sdev_id(sdev);
 6435				device->lun = sdev->lun;
 6436				device->target_lun_valid = true;
 6437			}
 6438		}
 6439	} else {
 6440		device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
 6441			sdev_id(sdev), sdev->lun);
 6442	}
 6443
 6444	if (device) {
 6445		sdev->hostdata = device;
 6446		device->sdev = sdev;
 6447		if (device->queue_depth) {
 6448			device->advertised_queue_depth = device->queue_depth;
 6449			scsi_change_queue_depth(sdev,
 6450				device->advertised_queue_depth);
 6451		}
 6452		if (pqi_is_logical_device(device)) {
 6453			pqi_disable_write_same(sdev);
 6454		} else {
 6455			sdev->allow_restart = 1;
 6456			if (device->device_type == SA_DEVICE_TYPE_NVME)
 6457				pqi_disable_write_same(sdev);
 6458		}
 6459	}
 6460
 6461	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
 6462
 6463	return 0;
 6464}
 6465
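/*
 * Map each blk-mq hardware context onto the MSI-X vector affinity that the
 * PCI layer assigned, so a queue's completions are handled on (or near) the
 * CPUs that submit to it.
 */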
 6466static void pqi_map_queues(struct Scsi_Host *shost)
 6467{
 6468	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
 6469
 6470	blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
 6471			      ctrl_info->pci_dev, 0);
 6472}
 6473
 6474static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
 6475{
 6476	return device->devtype == TYPE_TAPE || device->devtype == TYPE_MEDIUM_CHANGER;
 6477}
 6478
 6479static int pqi_slave_configure(struct scsi_device *sdev)
 6480{
 6481	int rc = 0;
 6482	struct pqi_scsi_dev *device;
 6483
 6484	device = sdev->hostdata;
 6485	device->devtype = sdev->type;
 6486
 6487	if (pqi_is_tape_changer_device(device) && device->ignore_device) {
 6488		rc = -ENXIO;
 6489		device->ignore_device = false;
 6490	}
 6491
 6492	return rc;
 6493}
 6494
 6495static void pqi_slave_destroy(struct scsi_device *sdev)
 6496{
 6497	struct pqi_ctrl_info *ctrl_info;
 6498	struct pqi_scsi_dev *device;
 6499	int mutex_acquired;
 6500	unsigned long flags;
 6501
 6502	ctrl_info = shost_to_hba(sdev->host);
 6503
 6504	mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex);
 6505	if (!mutex_acquired)
 6506		return;
 6507
 6508	device = sdev->hostdata;
 6509	if (!device) {
 6510		mutex_unlock(&ctrl_info->scan_mutex);
 6511		return;
 6512	}
 6513
 6514	device->lun_count--;
 6515	if (device->lun_count > 0) {
 6516		mutex_unlock(&ctrl_info->scan_mutex);
 6517		return;
 6518	}
 6519
 6520	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
 6521	list_del(&device->scsi_device_list_entry);
 6522	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
 6523
 6524	mutex_unlock(&ctrl_info->scan_mutex);
 6525
 6526	pqi_dev_info(ctrl_info, "removed", device);
 6527	pqi_free_device(device);
 6528}
 6529
 6530static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
 6531{
 6532	struct pci_dev *pci_dev;
 6533	u32 subsystem_vendor;
 6534	u32 subsystem_device;
 6535	cciss_pci_info_struct pciinfo;
 6536
 6537	if (!arg)
 6538		return -EINVAL;
 6539
 6540	pci_dev = ctrl_info->pci_dev;
 6541
 6542	pciinfo.domain = pci_domain_nr(pci_dev->bus);
 6543	pciinfo.bus = pci_dev->bus->number;
 6544	pciinfo.dev_fn = pci_dev->devfn;
 6545	subsystem_vendor = pci_dev->subsystem_vendor;
 6546	subsystem_device = pci_dev->subsystem_device;
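	/*
	 * board_id: subsystem device ID in the upper 16 bits, subsystem
	 * vendor ID in the lower 16 bits.
	 */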
 6547	pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor;
 6548
 6549	if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
 6550		return -EFAULT;
 6551
 6552	return 0;
 6553}
 6554
 6555static int pqi_getdrivver_ioctl(void __user *arg)
 6556{
 6557	u32 version;
 6558
 6559	if (!arg)
 6560		return -EINVAL;
 6561
 6562	version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
 6563		(DRIVER_RELEASE << 16) | DRIVER_REVISION;
 6564
 6565	if (copy_to_user(arg, &version, sizeof(version)))
 6566		return -EFAULT;
 6567
 6568	return 0;
 6569}
 6570
 6571struct ciss_error_info {
 6572	u8	scsi_status;
 6573	int	command_status;
 6574	size_t	sense_data_length;
 6575};
 6576
 6577static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
 6578	struct ciss_error_info *ciss_error_info)
 6579{
 6580	int ciss_cmd_status;
 6581	size_t sense_data_length;
 6582
 6583	switch (pqi_error_info->data_out_result) {
 6584	case PQI_DATA_IN_OUT_GOOD:
 6585		ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
 6586		break;
 6587	case PQI_DATA_IN_OUT_UNDERFLOW:
 6588		ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
 6589		break;
 6590	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
 6591		ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
 6592		break;
 6593	case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
 6594	case PQI_DATA_IN_OUT_BUFFER_ERROR:
 6595	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
 6596	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
 6597	case PQI_DATA_IN_OUT_ERROR:
 6598		ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
 6599		break;
 6600	case PQI_DATA_IN_OUT_HARDWARE_ERROR:
 6601	case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
 6602	case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
 6603	case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
 6604	case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
 6605	case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
 6606	case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
 6607	case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
 6608	case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
 6609	case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
 6610		ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
 6611		break;
 6612	case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
 6613		ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
 6614		break;
 6615	case PQI_DATA_IN_OUT_ABORTED:
 6616		ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
 6617		break;
 6618	case PQI_DATA_IN_OUT_TIMEOUT:
 6619		ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
 6620		break;
 6621	default:
 6622		ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
 6623		break;
 6624	}
 6625
 6626	sense_data_length =
 6627		get_unaligned_le16(&pqi_error_info->sense_data_length);
 6628	if (sense_data_length == 0)
 6629		sense_data_length =
 6630		get_unaligned_le16(&pqi_error_info->response_data_length);
 6631	if (sense_data_length)
 6632		if (sense_data_length > sizeof(pqi_error_info->data))
 6633			sense_data_length = sizeof(pqi_error_info->data);
 6634
 6635	ciss_error_info->scsi_status = pqi_error_info->status;
 6636	ciss_error_info->command_status = ciss_cmd_status;
 6637	ciss_error_info->sense_data_length = sense_data_length;
 6638}
 6639
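/*
 * CCISS_PASSTHRU: copy the IOCTL_Command_struct in from user space, validate
 * it, optionally map a kernel bounce buffer for the data phase, issue the CDB
 * as a synchronous RAID path request, translate any PQI error info into the
 * CISS error fields, and copy the results (and any read data) back out.
 */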
 6640static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
 6641{
 6642	int rc;
 6643	char *kernel_buffer = NULL;
 6644	u16 iu_length;
 6645	size_t sense_data_length;
 6646	IOCTL_Command_struct iocommand;
 6647	struct pqi_raid_path_request request;
 6648	struct pqi_raid_error_info pqi_error_info;
 6649	struct ciss_error_info ciss_error_info;
 6650
 6651	if (pqi_ctrl_offline(ctrl_info))
 6652		return -ENXIO;
 6653	if (pqi_ofa_in_progress(ctrl_info) && pqi_ctrl_blocked(ctrl_info))
 6654		return -EBUSY;
 6655	if (!arg)
 6656		return -EINVAL;
 6657	if (!capable(CAP_SYS_RAWIO))
 6658		return -EPERM;
 6659	if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
 6660		return -EFAULT;
 6661	if (iocommand.buf_size < 1 &&
 6662		iocommand.Request.Type.Direction != XFER_NONE)
 6663		return -EINVAL;
 6664	if (iocommand.Request.CDBLen > sizeof(request.cdb))
 6665		return -EINVAL;
 6666	if (iocommand.Request.Type.Type != TYPE_CMD)
 6667		return -EINVAL;
 6668
 6669	switch (iocommand.Request.Type.Direction) {
 6670	case XFER_NONE:
 6671	case XFER_WRITE:
 6672	case XFER_READ:
 6673	case XFER_READ | XFER_WRITE:
 6674		break;
 6675	default:
 6676		return -EINVAL;
 6677	}
 6678
 6679	if (iocommand.buf_size > 0) {
 6680		kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
 6681		if (!kernel_buffer)
 6682			return -ENOMEM;
 6683		if (iocommand.Request.Type.Direction & XFER_WRITE) {
 6684			if (copy_from_user(kernel_buffer, iocommand.buf,
 6685				iocommand.buf_size)) {
 6686				rc = -EFAULT;
 6687				goto out;
 6688			}
 6689		} else {
 6690			memset(kernel_buffer, 0, iocommand.buf_size);
 6691		}
 6692	}
 6693
 6694	memset(&request, 0, sizeof(request));
 6695
 6696	request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
 6697	iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
 6698		PQI_REQUEST_HEADER_LENGTH;
 6699	memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
 6700		sizeof(request.lun_number));
 6701	memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
 6702	request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
 6703
 6704	switch (iocommand.Request.Type.Direction) {
 6705	case XFER_NONE:
 6706		request.data_direction = SOP_NO_DIRECTION_FLAG;
 6707		break;
 6708	case XFER_WRITE:
 6709		request.data_direction = SOP_WRITE_FLAG;
 6710		break;
 6711	case XFER_READ:
 6712		request.data_direction = SOP_READ_FLAG;
 6713		break;
 6714	case XFER_READ | XFER_WRITE:
 6715		request.data_direction = SOP_BIDIRECTIONAL;
 6716		break;
 6717	}
 6718
 6719	request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
 6720
 6721	if (iocommand.buf_size > 0) {
 6722		put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
 6723
 6724		rc = pqi_map_single(ctrl_info->pci_dev,
 6725			&request.sg_descriptors[0], kernel_buffer,
 6726			iocommand.buf_size, DMA_BIDIRECTIONAL);
 6727		if (rc)
 6728			goto out;
 6729
 6730		iu_length += sizeof(request.sg_descriptors[0]);
 6731	}
 6732
 6733	put_unaligned_le16(iu_length, &request.header.iu_length);
 6734
 6735	if (ctrl_info->raid_iu_timeout_supported)
 6736		put_unaligned_le32(iocommand.Request.Timeout, &request.timeout);
 6737
 6738	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
 6739		PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info);
 6740
 6741	if (iocommand.buf_size > 0)
 6742		pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
 6743			DMA_BIDIRECTIONAL);
 6744
 6745	memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
 6746
 6747	if (rc == 0) {
 6748		pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
 6749		iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
 6750		iocommand.error_info.CommandStatus =
 6751			ciss_error_info.command_status;
 6752		sense_data_length = ciss_error_info.sense_data_length;
 6753		if (sense_data_length) {
 6754			if (sense_data_length >
 6755				sizeof(iocommand.error_info.SenseInfo))
 6756				sense_data_length =
 6757					sizeof(iocommand.error_info.SenseInfo);
 6758			memcpy(iocommand.error_info.SenseInfo,
 6759				pqi_error_info.data, sense_data_length);
 6760			iocommand.error_info.SenseLen = sense_data_length;
 6761		}
 6762	}
 6763
 6764	if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
 6765		rc = -EFAULT;
 6766		goto out;
 6767	}
 6768
 6769	if (rc == 0 && iocommand.buf_size > 0 &&
 6770		(iocommand.Request.Type.Direction & XFER_READ)) {
 6771		if (copy_to_user(iocommand.buf, kernel_buffer,
 6772			iocommand.buf_size)) {
 6773			rc = -EFAULT;
 6774		}
 6775	}
 6776
 6777out:
 6778	kfree(kernel_buffer);
 6779
 6780	return rc;
 6781}
 6782
 6783static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
 6784		     void __user *arg)
 6785{
 6786	int rc;
 6787	struct pqi_ctrl_info *ctrl_info;
 6788
 6789	ctrl_info = shost_to_hba(sdev->host);
 6790
 6791	switch (cmd) {
 6792	case CCISS_DEREGDISK:
 6793	case CCISS_REGNEWDISK:
 6794	case CCISS_REGNEWD:
 6795		rc = pqi_scan_scsi_devices(ctrl_info);
 6796		break;
 6797	case CCISS_GETPCIINFO:
 6798		rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
 6799		break;
 6800	case CCISS_GETDRIVVER:
 6801		rc = pqi_getdrivver_ioctl(arg);
 6802		break;
 6803	case CCISS_PASSTHRU:
 6804		rc = pqi_passthru_ioctl(ctrl_info, arg);
 6805		break;
 6806	default:
 6807		rc = -EINVAL;
 6808		break;
 6809	}
 6810
 6811	return rc;
 6812}
 6813
 6814static ssize_t pqi_firmware_version_show(struct device *dev,
 6815	struct device_attribute *attr, char *buffer)
 6816{
 6817	struct Scsi_Host *shost;
 6818	struct pqi_ctrl_info *ctrl_info;
 6819
 6820	shost = class_to_shost(dev);
 6821	ctrl_info = shost_to_hba(shost);
 6822
 6823	return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
 6824}
 6825
 6826static ssize_t pqi_driver_version_show(struct device *dev,
 6827	struct device_attribute *attr, char *buffer)
 6828{
 6829	return scnprintf(buffer, PAGE_SIZE, "%s\n", DRIVER_VERSION BUILD_TIMESTAMP);
 6830}
 6831
 6832static ssize_t pqi_serial_number_show(struct device *dev,
 6833	struct device_attribute *attr, char *buffer)
 6834{
 6835	struct Scsi_Host *shost;
 6836	struct pqi_ctrl_info *ctrl_info;
 6837
 6838	shost = class_to_shost(dev);
 6839	ctrl_info = shost_to_hba(shost);
 6840
 6841	return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
 6842}
 6843
 6844static ssize_t pqi_model_show(struct device *dev,
 6845	struct device_attribute *attr, char *buffer)
 6846{
 6847	struct Scsi_Host *shost;
 6848	struct pqi_ctrl_info *ctrl_info;
 6849
 6850	shost = class_to_shost(dev);
 6851	ctrl_info = shost_to_hba(shost);
 6852
 6853	return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
 6854}
 6855
 6856static ssize_t pqi_vendor_show(struct device *dev,
 6857	struct device_attribute *attr, char *buffer)
 6858{
 6859	struct Scsi_Host *shost;
 6860	struct pqi_ctrl_info *ctrl_info;
 6861
 6862	shost = class_to_shost(dev);
 6863	ctrl_info = shost_to_hba(shost);
 6864
 6865	return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
 6866}
 6867
 6868static ssize_t pqi_host_rescan_store(struct device *dev,
 6869	struct device_attribute *attr, const char *buffer, size_t count)
 6870{
 6871	struct Scsi_Host *shost = class_to_shost(dev);
 6872
 6873	pqi_scan_start(shost);
 6874
 6875	return count;
 6876}
 6877
 6878static ssize_t pqi_lockup_action_show(struct device *dev,
 6879	struct device_attribute *attr, char *buffer)
 6880{
 6881	int count = 0;
 6882	unsigned int i;
 6883
 6884	for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
 6885		if (pqi_lockup_actions[i].action == pqi_lockup_action)
 6886			count += scnprintf(buffer + count, PAGE_SIZE - count,
 6887				"[%s] ", pqi_lockup_actions[i].name);
 6888		else
 6889			count += scnprintf(buffer + count, PAGE_SIZE - count,
 6890				"%s ", pqi_lockup_actions[i].name);
 6891	}
 6892
 6893	count += scnprintf(buffer + count, PAGE_SIZE - count, "\n");
 6894
 6895	return count;
 6896}
 6897
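/*
 * Accepts one of the action names listed by pqi_lockup_action_show()
 * (surrounding whitespace is stripped before the comparison).  Illustrative
 * usage from userspace, assuming the action is named "reboot" in
 * pqi_lockup_actions[] and the standard scsi_host sysfs path:
 *
 *	echo reboot > /sys/class/scsi_host/host0/lockup_action
 */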
 6898static ssize_t pqi_lockup_action_store(struct device *dev,
 6899	struct device_attribute *attr, const char *buffer, size_t count)
 6900{
 6901	unsigned int i;
 6902	char *action_name;
 6903	char action_name_buffer[32];
 6904
 6905	strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer));
 6906	action_name = strstrip(action_name_buffer);
 6907
 6908	for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
 6909		if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) {
 6910			pqi_lockup_action = pqi_lockup_actions[i].action;
 6911			return count;
 6912		}
 6913	}
 6914
 6915	return -EINVAL;
 6916}
 6917
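/*
 * The enable_stream_detection and enable_r5/r6_writes attributes below all
 * follow the same pattern: parse the input with kstrtou8(), clamp any
 * non-zero value to 1, and store the resulting flag in pqi_ctrl_info.
 */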
 6918static ssize_t pqi_host_enable_stream_detection_show(struct device *dev,
 6919	struct device_attribute *attr, char *buffer)
 6920{
 6921	struct Scsi_Host *shost = class_to_shost(dev);
 6922	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
 6923
 6924	return scnprintf(buffer, 10, "%x\n",
 6925			ctrl_info->enable_stream_detection);
 6926}
 6927
 6928static ssize_t pqi_host_enable_stream_detection_store(struct device *dev,
 6929	struct device_attribute *attr, const char *buffer, size_t count)
 6930{
 6931	struct Scsi_Host *shost = class_to_shost(dev);
 6932	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
 6933	u8 set_stream_detection = 0;
 6934
 6935	if (kstrtou8(buffer, 0, &set_stream_detection))
 6936		return -EINVAL;
 6937
 6938	if (set_stream_detection > 0)
 6939		set_stream_detection = 1;
 6940
 6941	ctrl_info->enable_stream_detection = set_stream_detection;
 6942
 6943	return count;
 6944}
 6945
 6946static ssize_t pqi_host_enable_r5_writes_show(struct device *dev,
 6947	struct device_attribute *attr, char *buffer)
 6948{
 6949	struct Scsi_Host *shost = class_to_shost(dev);
 6950	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
 6951
 6952	return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r5_writes);
 6953}
 6954
 6955static ssize_t pqi_host_enable_r5_writes_store(struct device *dev,
 6956	struct device_attribute *attr, const char *buffer, size_t count)
 6957{
 6958	struct Scsi_Host *shost = class_to_shost(dev);
 6959	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
 6960	u8 set_r5_writes = 0;
 6961
 6962	if (kstrtou8(buffer, 0, &set_r5_writes))
 6963		return -EINVAL;
 6964
 6965	if (set_r5_writes > 0)
 6966		set_r5_writes = 1;
 6967
 6968	ctrl_info->enable_r5_writes = set_r5_writes;
 6969
 6970	return count;
 6971}
 6972
 6973static ssize_t pqi_host_enable_r6_writes_show(struct device *dev,
 6974	struct device_attribute *attr, char *buffer)
 6975{
 6976	struct Scsi_Host *shost = class_to_shost(dev);
 6977	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
 6978
 6979	return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r6_writes);
 6980}
 6981
 6982static ssize_t pqi_host_enable_r6_writes_store(struct device *dev,
 6983	struct device_attribute *attr, const char *buffer, size_t count)
 6984{
 6985	struct Scsi_Host *shost = class_to_shost(dev);
 6986	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
 6987	u8 set_r6_writes = 0;
 6988
 6989	if (kstrtou8(buffer, 0, &set_r6_writes))
 6990		return -EINVAL;
 6991
 6992	if (set_r6_writes > 0)
 6993		set_r6_writes = 1;
 6994
 6995	ctrl_info->enable_r6_writes = set_r6_writes;
 6996
 6997	return count;
 6998}
 6999
 7000static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL);
 7001static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL);
 7002static DEVICE_ATTR(model, 0444, pqi_model_show, NULL);
 7003static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL);
 7004static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL);
 7005static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
 7006static DEVICE_ATTR(lockup_action, 0644, pqi_lockup_action_show,
 7007	pqi_lockup_action_store);
 7008static DEVICE_ATTR(enable_stream_detection, 0644,
 7009	pqi_host_enable_stream_detection_show,
 7010	pqi_host_enable_stream_detection_store);
 7011static DEVICE_ATTR(enable_r5_writes, 0644,
 7012	pqi_host_enable_r5_writes_show, pqi_host_enable_r5_writes_store);
 7013static DEVICE_ATTR(enable_r6_writes, 0644,
 7014	pqi_host_enable_r6_writes_show, pqi_host_enable_r6_writes_store);
 7015
 7016static struct attribute *pqi_shost_attrs[] = {
 7017	&dev_attr_driver_version.attr,
 7018	&dev_attr_firmware_version.attr,
 7019	&dev_attr_model.attr,
 7020	&dev_attr_serial_number.attr,
 7021	&dev_attr_vendor.attr,
 7022	&dev_attr_rescan.attr,
 7023	&dev_attr_lockup_action.attr,
 7024	&dev_attr_enable_stream_detection.attr,
 7025	&dev_attr_enable_r5_writes.attr,
 7026	&dev_attr_enable_r6_writes.attr,
 7027	NULL
 7028};
 7029
 7030ATTRIBUTE_GROUPS(pqi_shost);
 7031
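/*
 * Per-device sysfs attributes.  Each handler below takes
 * scsi_device_list_lock, resolves the pqi_scsi_dev hanging off
 * sdev->hostdata (returning -ENODEV if the controller is offline or the
 * device has already gone away), and reports a single device property.
 */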
 7032static ssize_t pqi_unique_id_show(struct device *dev,
 7033	struct device_attribute *attr, char *buffer)
 7034{
 7035	struct pqi_ctrl_info *ctrl_info;
 7036	struct scsi_device *sdev;
 7037	struct pqi_scsi_dev *device;
 7038	unsigned long flags;
 7039	u8 unique_id[16];
 7040
 7041	sdev = to_scsi_device(dev);
 7042	ctrl_info = shost_to_hba(sdev->host);
 7043
 7044	if (pqi_ctrl_offline(ctrl_info))
 7045		return -ENODEV;
 7046
 7047	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
 7048
 7049	device = sdev->hostdata;
 7050	if (!device) {
 7051		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
 7052		return -ENODEV;
 7053	}
 7054
 7055	if (device->is_physical_device)
 7056		memcpy(unique_id, device->wwid, sizeof(device->wwid));
 7057	else
 7058		memcpy(unique_id, device->volume_id, sizeof(device->volume_id));
 7059
 7060	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
 7061
 7062	return scnprintf(buffer, PAGE_SIZE,
 7063		"%02X%02X%02X%02X%02X%02X%02X%02X"
 7064		"%02X%02X%02X%02X%02X%02X%02X%02X\n",
 7065		unique_id[0], unique_id[1], unique_id[2], unique_id[3],
 7066		unique_id[4], unique_id[5], unique_id[6], unique_id[7],
 7067		unique_id[8], unique_id[9], unique_id[10], unique_id[11],
 7068		unique_id[12], unique_id[13], unique_id[14], unique_id[15]);
 7069}
 7070
 7071static ssize_t pqi_lunid_show(struct device *dev,
 7072	struct device_attribute *attr, char *buffer)
 7073{
 7074	struct pqi_ctrl_info *ctrl_info;
 7075	struct scsi_device *sdev;
 7076	struct pqi_scsi_dev *device;
 7077	unsigned long flags;
 7078	u8 lunid[8];
 7079
 7080	sdev = to_scsi_device(dev);
 7081	ctrl_info = shost_to_hba(sdev->host);
 7082
 7083	if (pqi_ctrl_offline(ctrl_info))
 7084		return -ENODEV;
 7085
 7086	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
 7087
 7088	device = sdev->hostdata;
 7089	if (!device) {
 7090		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
 7091		return -ENODEV;
 7092	}
 7093
 7094	memcpy(lunid, device->scsi3addr, sizeof(lunid));
 7095
 7096	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
 7097
 7098	return scnprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
 7099}
 7100
 7101#define MAX_PATHS	8
 7102
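/*
 * Emits one line per path present in the device's path_map, marked Active
 * or Inactive.  For exposed physical disks the physical connector, box and
 * bay are appended, e.g. (illustrative output only):
 *
 *	[0:1:0:0]        Direct-Access PORT: 0C BOX: 1 BAY: 3 Active
 */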
 7103static ssize_t pqi_path_info_show(struct device *dev,
 7104	struct device_attribute *attr, char *buf)
 7105{
 7106	struct pqi_ctrl_info *ctrl_info;
 7107	struct scsi_device *sdev;
 7108	struct pqi_scsi_dev *device;
 7109	unsigned long flags;
 7110	int i;
 7111	int output_len = 0;
 7112	u8 box;
 7113	u8 bay;
 7114	u8 path_map_index;
 7115	char *active;
 7116	u8 phys_connector[2];
 7117
 7118	sdev = to_scsi_device(dev);
 7119	ctrl_info = shost_to_hba(sdev->host);
 7120
 7121	if (pqi_ctrl_offline(ctrl_info))
 7122		return -ENODEV;
 7123
 7124	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
 7125
 7126	device = sdev->hostdata;
 7127	if (!device) {
 7128		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
 7129		return -ENODEV;
 7130	}
 7131
 7132	bay = device->bay;
 7133	for (i = 0; i < MAX_PATHS; i++) {
 7134		path_map_index = 1 << i;
 7135		if (i == device->active_path_index)
 7136			active = "Active";
 7137		else if (device->path_map & path_map_index)
 7138			active = "Inactive";
 7139		else
 7140			continue;
 7141
 7142		output_len += scnprintf(buf + output_len,
 7143					PAGE_SIZE - output_len,
 7144					"[%d:%d:%d:%d] %20.20s ",
 7145					ctrl_info->scsi_host->host_no,
 7146					device->bus, device->target,
 7147					device->lun,
 7148					scsi_device_type(device->devtype));
 7149
 7150		if (device->devtype == TYPE_RAID ||
 7151			pqi_is_logical_device(device))
 7152			goto end_buffer;
 7153
 7154		memcpy(&phys_connector, &device->phys_connector[i],
 7155			sizeof(phys_connector));
 7156		if (phys_connector[0] < '0')
 7157			phys_connector[0] = '0';
 7158		if (phys_connector[1] < '0')
 7159			phys_connector[1] = '0';
 7160
 7161		output_len += scnprintf(buf + output_len,
 7162					PAGE_SIZE - output_len,
 7163					"PORT: %.2s ", phys_connector);
 7164
 7165		box = device->box[i];
 7166		if (box != 0 && box != 0xFF)
 7167			output_len += scnprintf(buf + output_len,
 7168						PAGE_SIZE - output_len,
 7169						"BOX: %hhu ", box);
 7170
 7171		if ((device->devtype == TYPE_DISK ||
 7172			device->devtype == TYPE_ZBC) &&
 7173			pqi_expose_device(device))
 7174			output_len += scnprintf(buf + output_len,
 7175						PAGE_SIZE - output_len,
 7176						"BAY: %hhu ", bay);
 7177
 7178end_buffer:
 7179		output_len += scnprintf(buf + output_len,
 7180					PAGE_SIZE - output_len,
 7181					"%s\n", active);
 7182	}
 7183
 7184	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
 7185
 7186	return output_len;
 7187}
 7188
 7189static ssize_t pqi_sas_address_show(struct device *dev,
 7190	struct device_attribute *attr, char *buffer)
 7191{
 7192	struct pqi_ctrl_info *ctrl_info;
 7193	struct scsi_device *sdev;
 7194	struct pqi_scsi_dev *device;
 7195	unsigned long flags;
 7196	u64 sas_address;
 7197
 7198	sdev = to_scsi_device(dev);
 7199	ctrl_info = shost_to_hba(sdev->host);
 7200
 7201	if (pqi_ctrl_offline(ctrl_info))
 7202		return -ENODEV;
 7203
 7204	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
 7205
 7206	device = sdev->hostdata;
 7207	if (!device) {
 7208		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
 7209		return -ENODEV;
 7210	}
 7211
 7212	sas_address = device->sas_address;
 7213
 7214	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
 7215
 7216	return scnprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
 7217}
 7218
 7219static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
 7220	struct device_attribute *attr, char *buffer)
 7221{
 7222	struct pqi_ctrl_info *ctrl_info;
 7223	struct scsi_device *sdev;
 7224	struct pqi_scsi_dev *device;
 7225	unsigned long flags;
 7226
 7227	sdev = to_scsi_device(dev);
 7228	ctrl_info = shost_to_hba(sdev->host);
 7229
 7230	if (pqi_ctrl_offline(ctrl_info))
 7231		return -ENODEV;
 7232
 7233	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
 7234
 7235	device = sdev->hostdata;
 7236	if (!device) {
 7237		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
 7238		return -ENODEV;
 7239	}
 7240
 7241	buffer[0] = device->raid_bypass_enabled ? '1' : '0';
 7242	buffer[1] = '\n';
 7243	buffer[2] = '\0';
 7244
 7245	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
 7246
 7247	return 2;
 7248}
 7249
 7250static ssize_t pqi_raid_level_show(struct device *dev,
 7251	struct device_attribute *attr, char *buffer)
 7252{
 7253	struct pqi_ctrl_info *ctrl_info;
 7254	struct scsi_device *sdev;
 7255	struct pqi_scsi_dev *device;
 7256	unsigned long flags;
 7257	char *raid_level;
 7258
 7259	sdev = to_scsi_device(dev);
 7260	ctrl_info = shost_to_hba(sdev->host);
 7261
 7262	if (pqi_ctrl_offline(ctrl_info))
 7263		return -ENODEV;
 7264
 7265	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
 7266
 7267	device = sdev->hostdata;
 7268	if (!device) {
 7269		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
 7270		return -ENODEV;
 7271	}
 7272
 7273	if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK)
 7274		raid_level = pqi_raid_level_to_string(device->raid_level);
 7275	else
 7276		raid_level = "N/A";
 7277
 7278	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
 7279
 7280	return scnprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
 7281}
 7282
 7283static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
 7284	struct device_attribute *attr, char *buffer)
 7285{
 7286	struct pqi_ctrl_info *ctrl_info;
 7287	struct scsi_device *sdev;
 7288	struct pqi_scsi_dev *device;
 7289	unsigned long flags;
 7290	int raid_bypass_cnt;
 7291
 7292	sdev = to_scsi_device(dev);
 7293	ctrl_info = shost_to_hba(sdev->host);
 7294
 7295	if (pqi_ctrl_offline(ctrl_info))
 7296		return -ENODEV;
 7297
 7298	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
 7299
 7300	device = sdev->hostdata;
 7301	if (!device) {
 7302		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
 7303		return -ENODEV;
 7304	}
 7305
 7306	raid_bypass_cnt = atomic_read(&device->raid_bypass_cnt);
 7307
 7308	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
 7309
 7310	return scnprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt);
 7311}
 7312
 7313static ssize_t pqi_sas_ncq_prio_enable_show(struct device *dev,
 7314		struct device_attribute *attr, char *buf)
 7315{
 7316	struct pqi_ctrl_info *ctrl_info;
 7317	struct scsi_device *sdev;
 7318	struct pqi_scsi_dev *device;
 7319	unsigned long flags;
 7320	int output_len = 0;
 7321
 7322	sdev = to_scsi_device(dev);
 7323	ctrl_info = shost_to_hba(sdev->host);
 7324
 7325	if (pqi_ctrl_offline(ctrl_info))
 7326		return -ENODEV;
 7327
 7328	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
 7329
 7330	device = sdev->hostdata;
 7331	if (!device) {
 7332		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
 7333		return -ENODEV;
 7334	}
 7335
 7336	output_len = snprintf(buf, PAGE_SIZE, "%d\n",
 7337				device->ncq_prio_enable);
 7338	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
 7339
 7340	return output_len;
 7341}
 7342
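/*
 * NCQ priority can only be toggled for physical devices that report
 * ncq_prio_support; requests against anything else fail with -EINVAL.
 */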
 7343static ssize_t pqi_sas_ncq_prio_enable_store(struct device *dev,
 7344			struct device_attribute *attr,
 7345			const char *buf, size_t count)
 7346{
 7347	struct pqi_ctrl_info *ctrl_info;
 7348	struct scsi_device *sdev;
 7349	struct pqi_scsi_dev *device;
 7350	unsigned long flags;
 7351	u8 ncq_prio_enable = 0;
 7352
 7353	if (kstrtou8(buf, 0, &ncq_prio_enable))
 7354		return -EINVAL;
 7355
 7356	sdev = to_scsi_device(dev);
 7357	ctrl_info = shost_to_hba(sdev->host);
 7358
 7359	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
 7360
 7361	device = sdev->hostdata;
 7362
 7363	if (!device) {
 7364		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
 7365		return -ENODEV;
 7366	}
 7367
 7368	if (!device->ncq_prio_support ||
 7369		!device->is_physical_device) {
 7370		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
 7371		return -EINVAL;
 7372	}
 7373
 7374	device->ncq_prio_enable = ncq_prio_enable;
 7375
 7376	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
 7377
  7378	return strlen(buf);
 7379}
 7380
 7381static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
 7382static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
 7383static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
 7384static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
 7385static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL);
 7386static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
 7387static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
 7388static DEVICE_ATTR(sas_ncq_prio_enable, 0644,
 7389		pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store);
 7390
 7391static struct attribute *pqi_sdev_attrs[] = {
 7392	&dev_attr_lunid.attr,
 7393	&dev_attr_unique_id.attr,
 7394	&dev_attr_path_info.attr,
 7395	&dev_attr_sas_address.attr,
 7396	&dev_attr_ssd_smart_path_enabled.attr,
 7397	&dev_attr_raid_level.attr,
 7398	&dev_attr_raid_bypass_cnt.attr,
 7399	&dev_attr_sas_ncq_prio_enable.attr,
 7400	NULL
 7401};
 7402
 7403ATTRIBUTE_GROUPS(pqi_sdev);
 7404
 7405static struct scsi_host_template pqi_driver_template = {
 7406	.module = THIS_MODULE,
 7407	.name = DRIVER_NAME_SHORT,
 7408	.proc_name = DRIVER_NAME_SHORT,
 7409	.queuecommand = pqi_scsi_queue_command,
 7410	.scan_start = pqi_scan_start,
 7411	.scan_finished = pqi_scan_finished,
 7412	.this_id = -1,
 7413	.eh_device_reset_handler = pqi_eh_device_reset_handler,
 7414	.ioctl = pqi_ioctl,
 7415	.slave_alloc = pqi_slave_alloc,
 7416	.slave_configure = pqi_slave_configure,
 7417	.slave_destroy = pqi_slave_destroy,
 7418	.map_queues = pqi_map_queues,
 7419	.sdev_groups = pqi_sdev_groups,
 7420	.shost_groups = pqi_shost_groups,
 7421	.cmd_size = sizeof(struct pqi_cmd_priv),
 7422};
 7423
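/*
 * Allocates the Scsi_Host, sizes it from the capabilities negotiated
 * earlier (queue depth, SGL size, hardware queue count, max sectors), and
 * registers it with both the SCSI midlayer and the SAS transport class.
 * On failure the host is torn down again in reverse order.
 */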
 7424static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
 7425{
 7426	int rc;
 7427	struct Scsi_Host *shost;
 7428
 7429	shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
 7430	if (!shost) {
 7431		dev_err(&ctrl_info->pci_dev->dev, "scsi_host_alloc failed\n");
 7432		return -ENOMEM;
 7433	}
 7434
 7435	shost->io_port = 0;
 7436	shost->n_io_port = 0;
 7437	shost->this_id = -1;
 7438	shost->max_channel = PQI_MAX_BUS;
 7439	shost->max_cmd_len = MAX_COMMAND_SIZE;
 7440	shost->max_lun = PQI_MAX_LUNS_PER_DEVICE;
 7441	shost->max_id = ~0;
 7442	shost->max_sectors = ctrl_info->max_sectors;
 7443	shost->can_queue = ctrl_info->scsi_ml_can_queue;
 7444	shost->cmd_per_lun = shost->can_queue;
 7445	shost->sg_tablesize = ctrl_info->sg_tablesize;
 7446	shost->transportt = pqi_sas_transport_template;
 7447	shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
 7448	shost->unique_id = shost->irq;
 7449	shost->nr_hw_queues = ctrl_info->num_queue_groups;
 7450	shost->host_tagset = 1;
 7451	shost->hostdata[0] = (unsigned long)ctrl_info;
 7452
 7453	rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
 7454	if (rc) {
 7455		dev_err(&ctrl_info->pci_dev->dev, "scsi_add_host failed\n");
 7456		goto free_host;
 7457	}
 7458
 7459	rc = pqi_add_sas_host(shost, ctrl_info);
 7460	if (rc) {
 7461		dev_err(&ctrl_info->pci_dev->dev, "add SAS host failed\n");
 7462		goto remove_host;
 7463	}
 7464
 7465	ctrl_info->scsi_host = shost;
 7466
 7467	return 0;
 7468
 7469remove_host:
 7470	scsi_remove_host(shost);
 7471free_host:
 7472	scsi_host_put(shost);
 7473
 7474	return rc;
 7475}
 7476
 7477static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
 7478{
 7479	struct Scsi_Host *shost;
 7480
 7481	pqi_delete_sas_host(ctrl_info);
 7482
 7483	shost = ctrl_info->scsi_host;
 7484	if (!shost)
 7485		return;
 7486
 7487	scsi_remove_host(shost);
 7488	scsi_host_put(shost);
 7489}
 7490
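/*
 * Polls the PQI device reset register until the firmware reports the reset
 * as completed.  The timeout is derived from the controller's advertised
 * max_reset_timeout value (the readw() result appears to be in units of
 * 100 ms, given the conversion below); the wait is abandoned early if the
 * SIS firmware stops running.
 */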
 7491static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
 7492{
 7493	int rc = 0;
 7494	struct pqi_device_registers __iomem *pqi_registers;
 7495	unsigned long timeout;
 7496	unsigned int timeout_msecs;
 7497	union pqi_reset_register reset_reg;
 7498
 7499	pqi_registers = ctrl_info->pqi_registers;
 7500	timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100;
 7501	timeout = msecs_to_jiffies(timeout_msecs) + jiffies;
 7502
 7503	while (1) {
 7504		msleep(PQI_RESET_POLL_INTERVAL_MSECS);
 7505		reset_reg.all_bits = readl(&pqi_registers->device_reset);
 7506		if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
 7507			break;
 7508		if (!sis_is_firmware_running(ctrl_info)) {
 7509			rc = -ENXIO;
 7510			break;
 7511		}
 7512		if (time_after(jiffies, timeout)) {
 7513			rc = -ETIMEDOUT;
 7514			break;
 7515		}
 7516	}
 7517
 7518	return rc;
 7519}
 7520
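/*
 * Issues a PQI hard reset: quiesce through the SIS interface first when the
 * firmware supports it, write the reset request to the device reset
 * register, then wait for the reset to complete.
 */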
 7521static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
 7522{
 7523	int rc;
 7524	union pqi_reset_register reset_reg;
 7525
 7526	if (ctrl_info->pqi_reset_quiesce_supported) {
 7527		rc = sis_pqi_reset_quiesce(ctrl_info);
 7528		if (rc) {
 7529			dev_err(&ctrl_info->pci_dev->dev,
 7530				"PQI reset failed during quiesce with error %d\n", rc);
 7531			return rc;
 7532		}
 7533	}
 7534
 7535	reset_reg.all_bits = 0;
 7536	reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
 7537	reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;
 7538
 7539	writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset);
 7540
 7541	rc = pqi_wait_for_pqi_reset_completion(ctrl_info);
 7542	if (rc)
 7543		dev_err(&ctrl_info->pci_dev->dev,
 7544			"PQI reset failed with error %d\n", rc);
 7545
 7546	return rc;
 7547}
 7548
 7549static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info)
 7550{
 7551	int rc;
 7552	struct bmic_sense_subsystem_info *sense_info;
 7553
 7554	sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL);
 7555	if (!sense_info)
 7556		return -ENOMEM;
 7557
 7558	rc = pqi_sense_subsystem_info(ctrl_info, sense_info);
 7559	if (rc)
 7560		goto out;
 7561
 7562	memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number,
 7563		sizeof(sense_info->ctrl_serial_number));
 7564	ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0';
 7565
 7566out:
 7567	kfree(sense_info);
 7568
 7569	return rc;
 7570}
 7571
 7572static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
 7573{
 7574	int rc;
 7575	struct bmic_identify_controller *identify;
 7576
 7577	identify = kmalloc(sizeof(*identify), GFP_KERNEL);
 7578	if (!identify)
 7579		return -ENOMEM;
 7580
 7581	rc = pqi_identify_controller(ctrl_info, identify);
 7582	if (rc)
 7583		goto out;
 7584
 7585	if (get_unaligned_le32(&identify->extra_controller_flags) &
 7586		BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED) {
 7587		memcpy(ctrl_info->firmware_version,
 7588			identify->firmware_version_long,
 7589			sizeof(identify->firmware_version_long));
 7590	} else {
 7591		memcpy(ctrl_info->firmware_version,
 7592			identify->firmware_version_short,
 7593			sizeof(identify->firmware_version_short));
 7594		ctrl_info->firmware_version
 7595			[sizeof(identify->firmware_version_short)] = '\0';
 7596		snprintf(ctrl_info->firmware_version +
 7597			strlen(ctrl_info->firmware_version),
 7598			sizeof(ctrl_info->firmware_version) -
 7599			sizeof(identify->firmware_version_short),
 7600			"-%u",
 7601			get_unaligned_le16(&identify->firmware_build_number));
 7602	}
 7603
 7604	memcpy(ctrl_info->model, identify->product_id,
 7605		sizeof(identify->product_id));
 7606	ctrl_info->model[sizeof(identify->product_id)] = '\0';
 7607
 7608	memcpy(ctrl_info->vendor, identify->vendor_id,
 7609		sizeof(identify->vendor_id));
 7610	ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0';
 7611
 7612	dev_info(&ctrl_info->pci_dev->dev,
 7613		"Firmware version: %s\n", ctrl_info->firmware_version);
 7614
 7615out:
 7616	kfree(identify);
 7617
 7618	return rc;
 7619}
 7620
 7621struct pqi_config_table_section_info {
 7622	struct pqi_ctrl_info *ctrl_info;
 7623	void		*section;
 7624	u32		section_offset;
 7625	void __iomem	*section_iomem_addr;
 7626};
 7627
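/*
 * The firmware features section holds three consecutive byte arrays of
 * num_elements bytes each: the features the firmware supports, the features
 * the host requests, and the features the firmware has enabled (this layout
 * is inferred from the byte_index arithmetic in the helpers below).  Each
 * feature is a single bit addressed by bit_position.
 */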
 7628static inline bool pqi_is_firmware_feature_supported(
 7629	struct pqi_config_table_firmware_features *firmware_features,
 7630	unsigned int bit_position)
 7631{
 7632	unsigned int byte_index;
 7633
 7634	byte_index = bit_position / BITS_PER_BYTE;
 7635
 7636	if (byte_index >= le16_to_cpu(firmware_features->num_elements))
 7637		return false;
 7638
 7639	return firmware_features->features_supported[byte_index] &
 7640		(1 << (bit_position % BITS_PER_BYTE)) ? true : false;
 7641}
 7642
 7643static inline bool pqi_is_firmware_feature_enabled(
 7644	struct pqi_config_table_firmware_features *firmware_features,
 7645	void __iomem *firmware_features_iomem_addr,
 7646	unsigned int bit_position)
 7647{
 7648	unsigned int byte_index;
 7649	u8 __iomem *features_enabled_iomem_addr;
 7650
 7651	byte_index = (bit_position / BITS_PER_BYTE) +
 7652		(le16_to_cpu(firmware_features->num_elements) * 2);
 7653
 7654	features_enabled_iomem_addr = firmware_features_iomem_addr +
 7655		offsetof(struct pqi_config_table_firmware_features,
 7656			features_supported) + byte_index;
 7657
 7658	return *((__force u8 *)features_enabled_iomem_addr) &
 7659		(1 << (bit_position % BITS_PER_BYTE)) ? true : false;
 7660}
 7661
 7662static inline void pqi_request_firmware_feature(
 7663	struct pqi_config_table_firmware_features *firmware_features,
 7664	unsigned int bit_position)
 7665{
 7666	unsigned int byte_index;
 7667
 7668	byte_index = (bit_position / BITS_PER_BYTE) +
 7669		le16_to_cpu(firmware_features->num_elements);
 7670
 7671	firmware_features->features_supported[byte_index] |=
 7672		(1 << (bit_position % BITS_PER_BYTE));
 7673}
 7674
 7675static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
 7676	u16 first_section, u16 last_section)
 7677{
 7678	struct pqi_vendor_general_request request;
 7679
 7680	memset(&request, 0, sizeof(request));
 7681
 7682	request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
 7683	put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
 7684		&request.header.iu_length);
 7685	put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE,
 7686		&request.function_code);
 7687	put_unaligned_le16(first_section,
 7688		&request.data.config_table_update.first_section);
 7689	put_unaligned_le16(last_section,
 7690		&request.data.config_table_update.last_section);
 7691
 7692	return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
 7693}
 7694
 7695static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
 7696	struct pqi_config_table_firmware_features *firmware_features,
 7697	void __iomem *firmware_features_iomem_addr)
 7698{
 7699	void *features_requested;
 7700	void __iomem *features_requested_iomem_addr;
 7701	void __iomem *host_max_known_feature_iomem_addr;
 7702
 7703	features_requested = firmware_features->features_supported +
 7704		le16_to_cpu(firmware_features->num_elements);
 7705
 7706	features_requested_iomem_addr = firmware_features_iomem_addr +
 7707		(features_requested - (void *)firmware_features);
 7708
 7709	memcpy_toio(features_requested_iomem_addr, features_requested,
 7710		le16_to_cpu(firmware_features->num_elements));
 7711
 7712	if (pqi_is_firmware_feature_supported(firmware_features,
 7713		PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE)) {
 7714		host_max_known_feature_iomem_addr =
 7715			features_requested_iomem_addr +
 7716			(le16_to_cpu(firmware_features->num_elements) * 2) +
 7717			sizeof(__le16);
 7718		writew(PQI_FIRMWARE_FEATURE_MAXIMUM,
 7719			host_max_known_feature_iomem_addr);
 7720	}
 7721
 7722	return pqi_config_table_update(ctrl_info,
 7723		PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
 7724		PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
 7725}
 7726
 7727struct pqi_firmware_feature {
 7728	char		*feature_name;
 7729	unsigned int	feature_bit;
 7730	bool		supported;
 7731	bool		enabled;
 7732	void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
 7733		struct pqi_firmware_feature *firmware_feature);
 7734};
 7735
 7736static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
 7737	struct pqi_firmware_feature *firmware_feature)
 7738{
 7739	if (!firmware_feature->supported) {
 7740		dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n",
 7741			firmware_feature->feature_name);
 7742		return;
 7743	}
 7744
 7745	if (firmware_feature->enabled) {
 7746		dev_info(&ctrl_info->pci_dev->dev,
 7747			"%s enabled\n", firmware_feature->feature_name);
 7748		return;
 7749	}
 7750
 7751	dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n",
 7752		firmware_feature->feature_name);
 7753}
 7754
 7755static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
 7756	struct pqi_firmware_feature *firmware_feature)
 7757{
 7758	switch (firmware_feature->feature_bit) {
 7759	case PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS:
 7760		ctrl_info->enable_r1_writes = firmware_feature->enabled;
 7761		break;
 7762	case PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS:
 7763		ctrl_info->enable_r5_writes = firmware_feature->enabled;
 7764		break;
 7765	case PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS:
 7766		ctrl_info->enable_r6_writes = firmware_feature->enabled;
 7767		break;
 7768	case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE:
 7769		ctrl_info->soft_reset_handshake_supported =
 7770			firmware_feature->enabled &&
 7771			pqi_read_soft_reset_status(ctrl_info);
 7772		break;
 7773	case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT:
 7774		ctrl_info->raid_iu_timeout_supported = firmware_feature->enabled;
 7775		break;
 7776	case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT:
 7777		ctrl_info->tmf_iu_timeout_supported = firmware_feature->enabled;
 7778		break;
 7779	case PQI_FIRMWARE_FEATURE_FW_TRIAGE:
 7780		ctrl_info->firmware_triage_supported = firmware_feature->enabled;
 7781		pqi_save_fw_triage_setting(ctrl_info, firmware_feature->enabled);
 7782		break;
 7783	case PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5:
 7784		ctrl_info->rpl_extended_format_4_5_supported = firmware_feature->enabled;
 7785		break;
 7786	case PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT:
 7787		ctrl_info->multi_lun_device_supported = firmware_feature->enabled;
 7788		break;
 7789	}
 7790
 7791	pqi_firmware_feature_status(ctrl_info, firmware_feature);
 7792}
 7793
 7794static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
 7795	struct pqi_firmware_feature *firmware_feature)
 7796{
 7797	if (firmware_feature->feature_status)
 7798		firmware_feature->feature_status(ctrl_info, firmware_feature);
 7799}
 7800
 7801static DEFINE_MUTEX(pqi_firmware_features_mutex);
 7802
 7803static struct pqi_firmware_feature pqi_firmware_features[] = {
 7804	{
 7805		.feature_name = "Online Firmware Activation",
 7806		.feature_bit = PQI_FIRMWARE_FEATURE_OFA,
 7807		.feature_status = pqi_firmware_feature_status,
 7808	},
 7809	{
 7810		.feature_name = "Serial Management Protocol",
 7811		.feature_bit = PQI_FIRMWARE_FEATURE_SMP,
 7812		.feature_status = pqi_firmware_feature_status,
 7813	},
 7814	{
 7815		.feature_name = "Maximum Known Feature",
 7816		.feature_bit = PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE,
 7817		.feature_status = pqi_firmware_feature_status,
 7818	},
 7819	{
 7820		.feature_name = "RAID 0 Read Bypass",
 7821		.feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS,
 7822		.feature_status = pqi_firmware_feature_status,
 7823	},
 7824	{
 7825		.feature_name = "RAID 1 Read Bypass",
 7826		.feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS,
 7827		.feature_status = pqi_firmware_feature_status,
 7828	},
 7829	{
 7830		.feature_name = "RAID 5 Read Bypass",
 7831		.feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS,
 7832		.feature_status = pqi_firmware_feature_status,
 7833	},
 7834	{
 7835		.feature_name = "RAID 6 Read Bypass",
 7836		.feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS,
 7837		.feature_status = pqi_firmware_feature_status,
 7838	},
 7839	{
 7840		.feature_name = "RAID 0 Write Bypass",
 7841		.feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS,
 7842		.feature_status = pqi_firmware_feature_status,
 7843	},
 7844	{
 7845		.feature_name = "RAID 1 Write Bypass",
 7846		.feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS,
 7847		.feature_status = pqi_ctrl_update_feature_flags,
 7848	},
 7849	{
 7850		.feature_name = "RAID 5 Write Bypass",
 7851		.feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS,
 7852		.feature_status = pqi_ctrl_update_feature_flags,
 7853	},
 7854	{
 7855		.feature_name = "RAID 6 Write Bypass",
 7856		.feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS,
 7857		.feature_status = pqi_ctrl_update_feature_flags,
 7858	},
 7859	{
 7860		.feature_name = "New Soft Reset Handshake",
 7861		.feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
 7862		.feature_status = pqi_ctrl_update_feature_flags,
 7863	},
 7864	{
 7865		.feature_name = "RAID IU Timeout",
 7866		.feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT,
 7867		.feature_status = pqi_ctrl_update_feature_flags,
 7868	},
 7869	{
 7870		.feature_name = "TMF IU Timeout",
 7871		.feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT,
 7872		.feature_status = pqi_ctrl_update_feature_flags,
 7873	},
 7874	{
 7875		.feature_name = "RAID Bypass on encrypted logical volumes on NVMe",
 7876		.feature_bit = PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME,
 7877		.feature_status = pqi_firmware_feature_status,
 7878	},
 7879	{
 7880		.feature_name = "Firmware Triage",
 7881		.feature_bit = PQI_FIRMWARE_FEATURE_FW_TRIAGE,
 7882		.feature_status = pqi_ctrl_update_feature_flags,
 7883	},
 7884	{
 7885		.feature_name = "RPL Extended Formats 4 and 5",
 7886		.feature_bit = PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5,
 7887		.feature_status = pqi_ctrl_update_feature_flags,
 7888	},
 7889	{
 7890		.feature_name = "Multi-LUN Target",
 7891		.feature_bit = PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT,
 7892		.feature_status = pqi_ctrl_update_feature_flags,
 7893	},
 7894};
 7895
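/*
 * Firmware feature negotiation: mark every entry of pqi_firmware_features[]
 * that the firmware supports, request all supported features back, ask the
 * firmware to apply the update via pqi_enable_firmware_features(), then
 * read back which features were actually enabled, invoking each feature's
 * status callback along the way.
 */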
 7896static void pqi_process_firmware_features(
 7897	struct pqi_config_table_section_info *section_info)
 7898{
 7899	int rc;
 7900	struct pqi_ctrl_info *ctrl_info;
 7901	struct pqi_config_table_firmware_features *firmware_features;
 7902	void __iomem *firmware_features_iomem_addr;
 7903	unsigned int i;
 7904	unsigned int num_features_supported;
 7905
 7906	ctrl_info = section_info->ctrl_info;
 7907	firmware_features = section_info->section;
 7908	firmware_features_iomem_addr = section_info->section_iomem_addr;
 7909
 7910	for (i = 0, num_features_supported = 0;
 7911		i < ARRAY_SIZE(pqi_firmware_features); i++) {
 7912		if (pqi_is_firmware_feature_supported(firmware_features,
 7913			pqi_firmware_features[i].feature_bit)) {
 7914			pqi_firmware_features[i].supported = true;
 7915			num_features_supported++;
 7916		} else {
 7917			pqi_firmware_feature_update(ctrl_info,
 7918				&pqi_firmware_features[i]);
 7919		}
 7920	}
 7921
 7922	if (num_features_supported == 0)
 7923		return;
 7924
 7925	for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
 7926		if (!pqi_firmware_features[i].supported)
 7927			continue;
 7928		pqi_request_firmware_feature(firmware_features,
 7929			pqi_firmware_features[i].feature_bit);
 7930	}
 7931
 7932	rc = pqi_enable_firmware_features(ctrl_info, firmware_features,
 7933		firmware_features_iomem_addr);
 7934	if (rc) {
 7935		dev_err(&ctrl_info->pci_dev->dev,
 7936			"failed to enable firmware features in PQI configuration table\n");
 7937		for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
 7938			if (!pqi_firmware_features[i].supported)
 7939				continue;
 7940			pqi_firmware_feature_update(ctrl_info,
 7941				&pqi_firmware_features[i]);
 7942		}
 7943		return;
 7944	}
 7945
 7946	for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
 7947		if (!pqi_firmware_features[i].supported)
 7948			continue;
 7949		if (pqi_is_firmware_feature_enabled(firmware_features,
 7950			firmware_features_iomem_addr,
 7951			pqi_firmware_features[i].feature_bit)) {
 7952				pqi_firmware_features[i].enabled = true;
 7953		}
 7954		pqi_firmware_feature_update(ctrl_info,
 7955			&pqi_firmware_features[i]);
 7956	}
 7957}
 7958
 7959static void pqi_init_firmware_features(void)
 7960{
 7961	unsigned int i;
 7962
 7963	for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
 7964		pqi_firmware_features[i].supported = false;
 7965		pqi_firmware_features[i].enabled = false;
 7966	}
 7967}
 7968
 7969static void pqi_process_firmware_features_section(
 7970	struct pqi_config_table_section_info *section_info)
 7971{
 7972	mutex_lock(&pqi_firmware_features_mutex);
 7973	pqi_init_firmware_features();
 7974	pqi_process_firmware_features(section_info);
 7975	mutex_unlock(&pqi_firmware_features_mutex);
 7976}
 7977
 7978/*
 7979 * Reset all controller settings that can be initialized during the processing
 7980 * of the PQI Configuration Table.
 7981 */
 7982
 7983static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info)
 7984{
 7985	ctrl_info->heartbeat_counter = NULL;
 7986	ctrl_info->soft_reset_status = NULL;
 7987	ctrl_info->soft_reset_handshake_supported = false;
 7988	ctrl_info->enable_r1_writes = false;
 7989	ctrl_info->enable_r5_writes = false;
 7990	ctrl_info->enable_r6_writes = false;
 7991	ctrl_info->raid_iu_timeout_supported = false;
 7992	ctrl_info->tmf_iu_timeout_supported = false;
 7993	ctrl_info->firmware_triage_supported = false;
 7994	ctrl_info->rpl_extended_format_4_5_supported = false;
 7995	ctrl_info->multi_lun_device_supported = false;
 7996}
 7997
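/*
 * Walks the PQI configuration table exposed in controller memory: the table
 * is copied into a temporary kernel buffer, each section is dispatched by
 * section_id, and the firmware features section (if present) is handled
 * last so its callbacks see the settings established by the other sections.
 */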
 7998static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
 7999{
 8000	u32 table_length;
 8001	u32 section_offset;
 8002	bool firmware_feature_section_present;
 8003	void __iomem *table_iomem_addr;
 8004	struct pqi_config_table *config_table;
 8005	struct pqi_config_table_section_header *section;
 8006	struct pqi_config_table_section_info section_info;
 8007	struct pqi_config_table_section_info feature_section_info = {0};
 8008
 8009	table_length = ctrl_info->config_table_length;
 8010	if (table_length == 0)
 8011		return 0;
 8012
 8013	config_table = kmalloc(table_length, GFP_KERNEL);
 8014	if (!config_table) {
 8015		dev_err(&ctrl_info->pci_dev->dev,
 8016			"failed to allocate memory for PQI configuration table\n");
 8017		return -ENOMEM;
 8018	}
 8019
 8020	/*
 8021	 * Copy the config table contents from I/O memory space into the
 8022	 * temporary buffer.
 8023	 */
 8024	table_iomem_addr = ctrl_info->iomem_base + ctrl_info->config_table_offset;
 8025	memcpy_fromio(config_table, table_iomem_addr, table_length);
 8026
 8027	firmware_feature_section_present = false;
 8028	section_info.ctrl_info = ctrl_info;
 8029	section_offset = get_unaligned_le32(&config_table->first_section_offset);
 8030
 8031	while (section_offset) {
 8032		section = (void *)config_table + section_offset;
 8033
 8034		section_info.section = section;
 8035		section_info.section_offset = section_offset;
 8036		section_info.section_iomem_addr = table_iomem_addr + section_offset;
 8037
 8038		switch (get_unaligned_le16(&section->section_id)) {
 8039		case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
 8040			firmware_feature_section_present = true;
 8041			feature_section_info = section_info;
 8042			break;
 8043		case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
 8044			if (pqi_disable_heartbeat)
 8045				dev_warn(&ctrl_info->pci_dev->dev,
 8046				"heartbeat disabled by module parameter\n");
 8047			else
 8048				ctrl_info->heartbeat_counter =
 8049					table_iomem_addr +
 8050					section_offset +
 8051					offsetof(struct pqi_config_table_heartbeat,
 8052						heartbeat_counter);
 8053			break;
 8054		case PQI_CONFIG_TABLE_SECTION_SOFT_RESET:
 8055			ctrl_info->soft_reset_status =
 8056				table_iomem_addr +
 8057				section_offset +
 8058				offsetof(struct pqi_config_table_soft_reset,
 8059					soft_reset_status);
 8060			break;
 8061		}
 8062
 8063		section_offset = get_unaligned_le16(&section->next_section_offset);
 8064	}
 8065
 8066	/*
 8067	 * We process the firmware feature section after all other sections
 8068	 * have been processed so that the feature bit callbacks can take
 8069	 * into account the settings configured by other sections.
 8070	 */
 8071	if (firmware_feature_section_present)
 8072		pqi_process_firmware_features_section(&feature_section_info);
 8073
 8074	kfree(config_table);
 8075
 8076	return 0;
 8077}
 8078
 8079/* Switches the controller from PQI mode back into SIS mode. */
 8080
 8081static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
 8082{
 8083	int rc;
 8084
 8085	pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
 8086	rc = pqi_reset(ctrl_info);
 8087	if (rc)
 8088		return rc;
 8089	rc = sis_reenable_sis_mode(ctrl_info);
 8090	if (rc) {
 8091		dev_err(&ctrl_info->pci_dev->dev,
 8092			"re-enabling SIS mode failed with error %d\n", rc);
 8093		return rc;
 8094	}
 8095	pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
 8096
 8097	return 0;
 8098}
 8099
 8100/*
 8101 * If the controller isn't already in SIS mode, this function forces it into
 8102 * SIS mode.
 8103 */
 8104
 8105static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
 8106{
 8107	if (!sis_is_firmware_running(ctrl_info))
 8108		return -ENXIO;
 8109
 8110	if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
 8111		return 0;
 8112
 8113	if (sis_is_kernel_up(ctrl_info)) {
 8114		pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
 8115		return 0;
 8116	}
 8117
 8118	return pqi_revert_to_sis_mode(ctrl_info);
 8119}
 8120
 8121static void pqi_perform_lockup_action(void)
 8122{
 8123	switch (pqi_lockup_action) {
 8124	case PANIC:
 8125		panic("FATAL: Smart Family Controller lockup detected");
 8126		break;
 8127	case REBOOT:
 8128		emergency_restart();
 8129		break;
 8130	case NONE:
 8131	default:
 8132		break;
 8133	}
 8134}
 8135
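/*
 * Full controller bring-up: get the controller into SIS mode, size the I/O
 * and queue resources from the reported capabilities, transition into PQI
 * mode, create the admin and operational queues, process the configuration
 * table, and finally register the SCSI host and kick off the initial device
 * scan.  In the kdump (reset_devices) path the outstanding request limit is
 * reduced and a failed SIS ready wait triggers the lockup action.
 */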
 8136static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
 8137{
 8138	int rc;
 8139	u32 product_id;
 8140
 8141	if (reset_devices) {
 8142		if (pqi_is_fw_triage_supported(ctrl_info)) {
 8143			rc = sis_wait_for_fw_triage_completion(ctrl_info);
 8144			if (rc)
 8145				return rc;
 8146		}
 8147		sis_soft_reset(ctrl_info);
 8148		ssleep(PQI_POST_RESET_DELAY_SECS);
 8149	} else {
 8150		rc = pqi_force_sis_mode(ctrl_info);
 8151		if (rc)
 8152			return rc;
 8153	}
 8154
 8155	/*
 8156	 * Wait until the controller is ready to start accepting SIS
 8157	 * commands.
 8158	 */
 8159	rc = sis_wait_for_ctrl_ready(ctrl_info);
 8160	if (rc) {
 8161		if (reset_devices) {
 8162			dev_err(&ctrl_info->pci_dev->dev,
 8163				"kdump init failed with error %d\n", rc);
 8164			pqi_lockup_action = REBOOT;
 8165			pqi_perform_lockup_action();
 8166		}
 8167		return rc;
 8168	}
 8169
 8170	/*
 8171	 * Get the controller properties.  This allows us to determine
 8172	 * whether or not it supports PQI mode.
 8173	 */
 8174	rc = sis_get_ctrl_properties(ctrl_info);
 8175	if (rc) {
 8176		dev_err(&ctrl_info->pci_dev->dev,
 8177			"error obtaining controller properties\n");
 8178		return rc;
 8179	}
 8180
 8181	rc = sis_get_pqi_capabilities(ctrl_info);
 8182	if (rc) {
 8183		dev_err(&ctrl_info->pci_dev->dev,
 8184			"error obtaining controller capabilities\n");
 8185		return rc;
 8186	}
 8187
 8188	product_id = sis_get_product_id(ctrl_info);
 8189	ctrl_info->product_id = (u8)product_id;
 8190	ctrl_info->product_revision = (u8)(product_id >> 8);
 8191
 8192	if (reset_devices) {
 8193		if (ctrl_info->max_outstanding_requests >
 8194			PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
 8195				ctrl_info->max_outstanding_requests =
 8196					PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
 8197	} else {
 8198		if (ctrl_info->max_outstanding_requests >
 8199			PQI_MAX_OUTSTANDING_REQUESTS)
 8200				ctrl_info->max_outstanding_requests =
 8201					PQI_MAX_OUTSTANDING_REQUESTS;
 8202	}
 8203
 8204	pqi_calculate_io_resources(ctrl_info);
 8205
 8206	rc = pqi_alloc_error_buffer(ctrl_info);
 8207	if (rc) {
 8208		dev_err(&ctrl_info->pci_dev->dev,
 8209			"failed to allocate PQI error buffer\n");
 8210		return rc;
 8211	}
 8212
  8213	/*
  8214	 * If the call to sis_init_base_struct_addr() below succeeds, the
  8215	 * controller will transition from legacy SIS mode
  8216	 * into PQI mode.
  8217	 */
 8218	rc = sis_init_base_struct_addr(ctrl_info);
 8219	if (rc) {
 8220		dev_err(&ctrl_info->pci_dev->dev,
 8221			"error initializing PQI mode\n");
 8222		return rc;
 8223	}
 8224
 8225	/* Wait for the controller to complete the SIS -> PQI transition. */
 8226	rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
 8227	if (rc) {
 8228		dev_err(&ctrl_info->pci_dev->dev,
 8229			"transition to PQI mode failed\n");
 8230		return rc;
 8231	}
 8232
 8233	/* From here on, we are running in PQI mode. */
 8234	ctrl_info->pqi_mode_enabled = true;
 8235	pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
 8236
 8237	rc = pqi_alloc_admin_queues(ctrl_info);
 8238	if (rc) {
 8239		dev_err(&ctrl_info->pci_dev->dev,
 8240			"failed to allocate admin queues\n");
 8241		return rc;
 8242	}
 8243
 8244	rc = pqi_create_admin_queues(ctrl_info);
 8245	if (rc) {
 8246		dev_err(&ctrl_info->pci_dev->dev,
 8247			"error creating admin queues\n");
 8248		return rc;
 8249	}
 8250
 8251	rc = pqi_report_device_capability(ctrl_info);
 8252	if (rc) {
 8253		dev_err(&ctrl_info->pci_dev->dev,
 8254			"obtaining device capability failed\n");
 8255		return rc;
 8256	}
 8257
 8258	rc = pqi_validate_device_capability(ctrl_info);
 8259	if (rc)
 8260		return rc;
 8261
 8262	pqi_calculate_queue_resources(ctrl_info);
 8263
 8264	rc = pqi_enable_msix_interrupts(ctrl_info);
 8265	if (rc)
 8266		return rc;
 8267
 8268	if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
 8269		ctrl_info->max_msix_vectors =
 8270			ctrl_info->num_msix_vectors_enabled;
 8271		pqi_calculate_queue_resources(ctrl_info);
 8272	}
 8273
 8274	rc = pqi_alloc_io_resources(ctrl_info);
 8275	if (rc)
 8276		return rc;
 8277
 8278	rc = pqi_alloc_operational_queues(ctrl_info);
 8279	if (rc) {
 8280		dev_err(&ctrl_info->pci_dev->dev,
 8281			"failed to allocate operational queues\n");
 8282		return rc;
 8283	}
 8284
 8285	pqi_init_operational_queues(ctrl_info);
 8286
 8287	rc = pqi_create_queues(ctrl_info);
 8288	if (rc)
 8289		return rc;
 8290
 8291	rc = pqi_request_irqs(ctrl_info);
 8292	if (rc)
 8293		return rc;
 8294
 8295	pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
 8296
 8297	ctrl_info->controller_online = true;
 8298
 8299	rc = pqi_process_config_table(ctrl_info);
 8300	if (rc)
 8301		return rc;
 8302
 8303	pqi_start_heartbeat_timer(ctrl_info);
 8304
 8305	if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
 8306		rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
 8307		if (rc) { /* Supported features not returned correctly. */
 8308			dev_err(&ctrl_info->pci_dev->dev,
 8309				"error obtaining advanced RAID bypass configuration\n");
 8310			return rc;
 8311		}
 8312		ctrl_info->ciss_report_log_flags |=
 8313			CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
 8314	}
 8315
 8316	rc = pqi_enable_events(ctrl_info);
 8317	if (rc) {
 8318		dev_err(&ctrl_info->pci_dev->dev,
 8319			"error enabling events\n");
 8320		return rc;
 8321	}
 8322
 8323	/* Register with the SCSI subsystem. */
 8324	rc = pqi_register_scsi(ctrl_info);
 8325	if (rc)
 8326		return rc;
 8327
 8328	rc = pqi_get_ctrl_product_details(ctrl_info);
 8329	if (rc) {
 8330		dev_err(&ctrl_info->pci_dev->dev,
 8331			"error obtaining product details\n");
 8332		return rc;
 8333	}
 8334
 8335	rc = pqi_get_ctrl_serial_number(ctrl_info);
 8336	if (rc) {
 8337		dev_err(&ctrl_info->pci_dev->dev,
 8338			"error obtaining ctrl serial number\n");
 8339		return rc;
 8340	}
 8341
 8342	rc = pqi_set_diag_rescan(ctrl_info);
 8343	if (rc) {
 8344		dev_err(&ctrl_info->pci_dev->dev,
 8345			"error enabling multi-lun rescan\n");
 8346		return rc;
 8347	}
 8348
 8349	rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
 8350	if (rc) {
 8351		dev_err(&ctrl_info->pci_dev->dev,
 8352			"error updating host wellness\n");
 8353		return rc;
 8354	}
 8355
 8356	pqi_schedule_update_time_worker(ctrl_info);
 8357
 8358	pqi_scan_scsi_devices(ctrl_info);
 8359
 8360	return 0;
 8361}
 8362
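/*
 * Resets the driver's cached producer/consumer indexes and the hardware
 * copies for the admin, operational and event queues so the existing queue
 * memory can be reused after a controller restart.
 */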
 8363static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
 8364{
 8365	unsigned int i;
 8366	struct pqi_admin_queues *admin_queues;
 8367	struct pqi_event_queue *event_queue;
 8368
 8369	admin_queues = &ctrl_info->admin_queues;
 8370	admin_queues->iq_pi_copy = 0;
 8371	admin_queues->oq_ci_copy = 0;
 8372	writel(0, admin_queues->oq_pi);
 8373
 8374	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
 8375		ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
 8376		ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
 8377		ctrl_info->queue_groups[i].oq_ci_copy = 0;
 8378
 8379		writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]);
 8380		writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]);
 8381		writel(0, ctrl_info->queue_groups[i].oq_pi);
 8382	}
 8383
 8384	event_queue = &ctrl_info->event_queue;
 8385	writel(0, event_queue->oq_pi);
 8386	event_queue->oq_ci_copy = 0;
 8387}
 8388
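/*
 * Lighter-weight variant of pqi_ctrl_init() used when the controller is
 * reinitialized without a full teardown (e.g. on resume or after an online
 * firmware activation): queue memory and IRQs are kept, so only the
 * SIS -> PQI transition, queue re-creation and configuration-table
 * processing are repeated.
 */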
 8389static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
 8390{
 8391	int rc;
 8392
 8393	rc = pqi_force_sis_mode(ctrl_info);
 8394	if (rc)
 8395		return rc;
 8396
 8397	/*
 8398	 * Wait until the controller is ready to start accepting SIS
 8399	 * commands.
 8400	 */
 8401	rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
 8402	if (rc)
 8403		return rc;
 8404
 8405	/*
 8406	 * Get the controller properties.  This allows us to determine
 8407	 * whether or not it supports PQI mode.
 8408	 */
 8409	rc = sis_get_ctrl_properties(ctrl_info);
 8410	if (rc) {
 8411		dev_err(&ctrl_info->pci_dev->dev,
 8412			"error obtaining controller properties\n");
 8413		return rc;
 8414	}
 8415
 8416	rc = sis_get_pqi_capabilities(ctrl_info);
 8417	if (rc) {
 8418		dev_err(&ctrl_info->pci_dev->dev,
 8419			"error obtaining controller capabilities\n");
 8420		return rc;
 8421	}
 8422
  8423	/*
  8424	 * If the call to sis_init_base_struct_addr() below succeeds, the
  8425	 * controller will transition from legacy SIS mode
  8426	 * into PQI mode.
  8427	 */
 8428	rc = sis_init_base_struct_addr(ctrl_info);
 8429	if (rc) {
 8430		dev_err(&ctrl_info->pci_dev->dev,
 8431			"error initializing PQI mode\n");
 8432		return rc;
 8433	}
 8434
 8435	/* Wait for the controller to complete the SIS -> PQI transition. */
 8436	rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
 8437	if (rc) {
 8438		dev_err(&ctrl_info->pci_dev->dev,
 8439			"transition to PQI mode failed\n");
 8440		return rc;
 8441	}
 8442
 8443	/* From here on, we are running in PQI mode. */
 8444	ctrl_info->pqi_mode_enabled = true;
 8445	pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
 8446
 8447	pqi_reinit_queues(ctrl_info);
 8448
 8449	rc = pqi_create_admin_queues(ctrl_info);
 8450	if (rc) {
 8451		dev_err(&ctrl_info->pci_dev->dev,
 8452			"error creating admin queues\n");
 8453		return rc;
 8454	}
 8455
 8456	rc = pqi_create_queues(ctrl_info);
 8457	if (rc)
 8458		return rc;
 8459
 8460	pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
 8461
 8462	ctrl_info->controller_online = true;
 8463	pqi_ctrl_unblock_requests(ctrl_info);
 8464
 8465	pqi_ctrl_reset_config(ctrl_info);
 8466
 8467	rc = pqi_process_config_table(ctrl_info);
 8468	if (rc)
 8469		return rc;
 8470
 8471	pqi_start_heartbeat_timer(ctrl_info);
 8472
 8473	if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
 8474		rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
 8475		if (rc) {
 8476			dev_err(&ctrl_info->pci_dev->dev,
 8477				"error obtaining advanced RAID bypass configuration\n");
 8478			return rc;
 8479		}
 8480		ctrl_info->ciss_report_log_flags |=
 8481			CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
 8482	}
 8483
 8484	rc = pqi_enable_events(ctrl_info);
 8485	if (rc) {
 8486		dev_err(&ctrl_info->pci_dev->dev,
 8487			"error enabling events\n");
 8488		return rc;
 8489	}
 8490
 8491	rc = pqi_get_ctrl_product_details(ctrl_info);
 8492	if (rc) {
 8493		dev_err(&ctrl_info->pci_dev->dev,
 8494			"error obtaining product details\n");
 8495		return rc;
 8496	}
 8497
 8498	rc = pqi_set_diag_rescan(ctrl_info);
 8499	if (rc) {
 8500		dev_err(&ctrl_info->pci_dev->dev,
 8501			"error enabling multi-lun rescan\n");
 8502		return rc;
 8503	}
 8504
 8505	rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
 8506	if (rc) {
 8507		dev_err(&ctrl_info->pci_dev->dev,
 8508			"error updating host wellness\n");
 8509		return rc;
 8510	}
 8511
 8512	if (pqi_ofa_in_progress(ctrl_info))
 8513		pqi_ctrl_unblock_scan(ctrl_info);
 8514
 8515	pqi_scan_scsi_devices(ctrl_info);
 8516
 8517	return 0;
 8518}
 8519
 8520static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, u16 timeout)
 8521{
 8522	int rc;
 8523
 8524	rc = pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
 8525		PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
 8526
 8527	return pcibios_err_to_errno(rc);
 8528}
 8529
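/*
 * PCI-level setup: enable the device, select a 64-bit DMA mask when
 * dma_addr_t is wider than 32 bits (32-bit otherwise), map BAR 0 for the
 * controller registers, raise the PCIe completion timeout and enable bus
 * mastering.
 */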
 8530static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
 8531{
 8532	int rc;
 8533	u64 mask;
 8534
 8535	rc = pci_enable_device(ctrl_info->pci_dev);
 8536	if (rc) {
 8537		dev_err(&ctrl_info->pci_dev->dev,
 8538			"failed to enable PCI device\n");
 8539		return rc;
 8540	}
 8541
 8542	if (sizeof(dma_addr_t) > 4)
 8543		mask = DMA_BIT_MASK(64);
 8544	else
 8545		mask = DMA_BIT_MASK(32);
 8546
 8547	rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
 8548	if (rc) {
 8549		dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
 8550		goto disable_device;
 8551	}
 8552
 8553	rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
 8554	if (rc) {
 8555		dev_err(&ctrl_info->pci_dev->dev,
 8556			"failed to obtain PCI resources\n");
 8557		goto disable_device;
 8558	}
 8559
 8560	ctrl_info->iomem_base = ioremap(pci_resource_start(
 8561		ctrl_info->pci_dev, 0),
 8562		sizeof(struct pqi_ctrl_registers));
 8563	if (!ctrl_info->iomem_base) {
 8564		dev_err(&ctrl_info->pci_dev->dev,
 8565			"failed to map memory for controller registers\n");
 8566		rc = -ENOMEM;
 8567		goto release_regions;
 8568	}
 8569
 8570#define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS		0x6
 8571
 8572	/* Increase the PCIe completion timeout. */
 8573	rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
 8574		PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
 8575	if (rc) {
 8576		dev_err(&ctrl_info->pci_dev->dev,
 8577			"failed to set PCIe completion timeout\n");
 8578		goto release_regions;
 8579	}
 8580
 8581	/* Enable bus mastering. */
 8582	pci_set_master(ctrl_info->pci_dev);
 8583
 8584	ctrl_info->registers = ctrl_info->iomem_base;
 8585	ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
 8586
 8587	pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
 8588
 8589	return 0;
 8590
 8591release_regions:
 8592	pci_release_regions(ctrl_info->pci_dev);
 8593disable_device:
 8594	pci_disable_device(ctrl_info->pci_dev);
 8595
 8596	return rc;
 8597}
 8598
 8599static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
 8600{
 8601	iounmap(ctrl_info->iomem_base);
 8602	pci_release_regions(ctrl_info->pci_dev);
 8603	if (pci_is_enabled(ctrl_info->pci_dev))
 8604		pci_disable_device(ctrl_info->pci_dev);
 8605	pci_set_drvdata(ctrl_info->pci_dev, NULL);
 8606}
 8607
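/*
 * Allocates and initializes a pqi_ctrl_info on the requested NUMA node:
 * locks, work items, timers and default limits are set up here, before any
 * hardware access takes place.
 */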
 8608static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
 8609{
 8610	struct pqi_ctrl_info *ctrl_info;
 8611
 8612	ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
 8613			GFP_KERNEL, numa_node);
 8614	if (!ctrl_info)
 8615		return NULL;
 8616
 8617	mutex_init(&ctrl_info->scan_mutex);
 8618	mutex_init(&ctrl_info->lun_reset_mutex);
 8619	mutex_init(&ctrl_info->ofa_mutex);
 8620
 8621	INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
 8622	spin_lock_init(&ctrl_info->scsi_device_list_lock);
 8623
 8624	INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
 8625	atomic_set(&ctrl_info->num_interrupts, 0);
 8626
 8627	INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
 8628	INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
 8629
 8630	timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0);
 8631	INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);
 8632
 8633	INIT_WORK(&ctrl_info->ofa_memory_alloc_work, pqi_ofa_memory_alloc_worker);
 8634	INIT_WORK(&ctrl_info->ofa_quiesce_work, pqi_ofa_quiesce_worker);
 8635
 8636	sema_init(&ctrl_info->sync_request_sem,
 8637		PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
 8638	init_waitqueue_head(&ctrl_info->block_requests_wait);
 8639
 8640	ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
 8641	ctrl_info->irq_mode = IRQ_MODE_NONE;
 8642	ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
 8643
 8644	ctrl_info->ciss_report_log_flags = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
 8645	ctrl_info->max_transfer_encrypted_sas_sata =
 8646		PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_SAS_SATA;
 8647	ctrl_info->max_transfer_encrypted_nvme =
 8648		PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME;
 8649	ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6;
 8650	ctrl_info->max_write_raid_1_10_2drive = ~0;
 8651	ctrl_info->max_write_raid_1_10_3drive = ~0;
 8652	ctrl_info->disable_managed_interrupts = pqi_disable_managed_interrupts;
 8653
 8654	return ctrl_info;
 8655}
 8656
 8657static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
 8658{
 8659	kfree(ctrl_info);
 8660}
 8661
 8662static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
 8663{
 8664	pqi_free_irqs(ctrl_info);
 8665	pqi_disable_msix_interrupts(ctrl_info);
 8666}
 8667
 8668static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
 8669{
 8670	pqi_free_interrupts(ctrl_info);
 8671	if (ctrl_info->queue_memory_base)
 8672		dma_free_coherent(&ctrl_info->pci_dev->dev,
 8673			ctrl_info->queue_memory_length,
 8674			ctrl_info->queue_memory_base,
 8675			ctrl_info->queue_memory_base_dma_handle);
 8676	if (ctrl_info->admin_queue_memory_base)
 8677		dma_free_coherent(&ctrl_info->pci_dev->dev,
 8678			ctrl_info->admin_queue_memory_length,
 8679			ctrl_info->admin_queue_memory_base,
 8680			ctrl_info->admin_queue_memory_base_dma_handle);
 8681	pqi_free_all_io_requests(ctrl_info);
 8682	if (ctrl_info->error_buffer)
 8683		dma_free_coherent(&ctrl_info->pci_dev->dev,
 8684			ctrl_info->error_buffer_length,
 8685			ctrl_info->error_buffer,
 8686			ctrl_info->error_buffer_dma_handle);
 8687	if (ctrl_info->iomem_base)
 8688		pqi_cleanup_pci_init(ctrl_info);
 8689	pqi_free_ctrl_info(ctrl_info);
 8690}
 8691
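/*
 * Common teardown path used by both probe failure and device removal: stop
 * background work, fail any outstanding requests on surprise removal,
 * unregister the SCSI host, revert the controller to SIS mode if needed,
 * and release all resources.
 */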
 8692static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
 8693{
 8694	ctrl_info->controller_online = false;
 8695	pqi_stop_heartbeat_timer(ctrl_info);
 8696	pqi_ctrl_block_requests(ctrl_info);
 8697	pqi_cancel_rescan_worker(ctrl_info);
 8698	pqi_cancel_update_time_worker(ctrl_info);
 8699	if (ctrl_info->ctrl_removal_state == PQI_CTRL_SURPRISE_REMOVAL) {
 8700		pqi_fail_all_outstanding_requests(ctrl_info);
 8701		ctrl_info->pqi_mode_enabled = false;
 8702	}
 8703	pqi_unregister_scsi(ctrl_info);
 8704	if (ctrl_info->pqi_mode_enabled)
 8705		pqi_revert_to_sis_mode(ctrl_info);
 8706	pqi_free_ctrl_resources(ctrl_info);
 8707}
 8708
 8709static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info)
 8710{
 8711	pqi_ctrl_block_scan(ctrl_info);
 8712	pqi_scsi_block_requests(ctrl_info);
 8713	pqi_ctrl_block_device_reset(ctrl_info);
 8714	pqi_ctrl_block_requests(ctrl_info);
 8715	pqi_ctrl_wait_until_quiesced(ctrl_info);
 8716	pqi_stop_heartbeat_timer(ctrl_info);
 8717}
 8718
 8719static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
 8720{
 8721	pqi_start_heartbeat_timer(ctrl_info);
 8722	pqi_ctrl_unblock_requests(ctrl_info);
 8723	pqi_ctrl_unblock_device_reset(ctrl_info);
 8724	pqi_scsi_unblock_requests(ctrl_info);
 8725	pqi_ctrl_unblock_scan(ctrl_info);
 8726}
 8727
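/*
 * Allocate the OFA host buffer as sg_count DMA-coherent chunks of chunk_size
 * bytes each and record each chunk in the OFA memory descriptor list; on
 * failure, free any chunks already allocated.
 */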
 8728static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, u32 total_size, u32 chunk_size)
 8729{
 8730	int i;
 8731	u32 sg_count;
 8732	struct device *dev;
 8733	struct pqi_ofa_memory *ofap;
 8734	struct pqi_sg_descriptor *mem_descriptor;
 8735	dma_addr_t dma_handle;
 8736
 8737	ofap = ctrl_info->pqi_ofa_mem_virt_addr;
 8738
 8739	sg_count = DIV_ROUND_UP(total_size, chunk_size);
 8740	if (sg_count == 0 || sg_count > PQI_OFA_MAX_SG_DESCRIPTORS)
 8741		goto out;
 8742
 8743	ctrl_info->pqi_ofa_chunk_virt_addr = kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL);
 8744	if (!ctrl_info->pqi_ofa_chunk_virt_addr)
 8745		goto out;
 8746
 8747	dev = &ctrl_info->pci_dev->dev;
 8748
 8749	for (i = 0; i < sg_count; i++) {
 8750		ctrl_info->pqi_ofa_chunk_virt_addr[i] =
 8751			dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL);
 8752		if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
 8753			goto out_free_chunks;
 8754		mem_descriptor = &ofap->sg_descriptor[i];
 8755		put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
 8756		put_unaligned_le32(chunk_size, &mem_descriptor->length);
 8757	}
 8758
 8759	put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
 8760	put_unaligned_le16(sg_count, &ofap->num_memory_descriptors);
 8761	put_unaligned_le32(sg_count * chunk_size, &ofap->bytes_allocated);
 8762
 8763	return 0;
 8764
 8765out_free_chunks:
 8766	while (--i >= 0) {
 8767		mem_descriptor = &ofap->sg_descriptor[i];
 8768		dma_free_coherent(dev, chunk_size,
 8769			ctrl_info->pqi_ofa_chunk_virt_addr[i],
 8770			get_unaligned_le64(&mem_descriptor->address));
 8771	}
 8772	kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
 8773
 8774out:
 8775	return -ENOMEM;
 8776}
 8777
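/*
 * Allocate the host buffer requested by the controller for Online Firmware
 * Activation. Start with a single chunk covering the entire request and keep
 * halving the (page-aligned) chunk size until the allocation succeeds or the
 * chunks become too small to describe the buffer within
 * PQI_OFA_MAX_SG_DESCRIPTORS entries.
 */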
 8778static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
 8779{
 8780	u32 total_size;
 8781	u32 chunk_size;
 8782	u32 min_chunk_size;
 8783
 8784	if (ctrl_info->ofa_bytes_requested == 0)
 8785		return 0;
 8786
 8787	total_size = PAGE_ALIGN(ctrl_info->ofa_bytes_requested);
 8788	min_chunk_size = DIV_ROUND_UP(total_size, PQI_OFA_MAX_SG_DESCRIPTORS);
 8789	min_chunk_size = PAGE_ALIGN(min_chunk_size);
 8790
 8791	for (chunk_size = total_size; chunk_size >= min_chunk_size;) {
 8792		if (pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_size) == 0)
 8793			return 0;
 8794		chunk_size /= 2;
 8795		chunk_size = PAGE_ALIGN(chunk_size);
 8796	}
 8797
 8798	return -ENOMEM;
 8799}
 8800
 8801static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info)
 8802{
 8803	struct device *dev;
 8804	struct pqi_ofa_memory *ofap;
 8805
 8806	dev = &ctrl_info->pci_dev->dev;
 8807
 8808	ofap = dma_alloc_coherent(dev, sizeof(*ofap),
 8809		&ctrl_info->pqi_ofa_mem_dma_handle, GFP_KERNEL);
 8810	if (!ofap)
 8811		return;
 8812
 8813	ctrl_info->pqi_ofa_mem_virt_addr = ofap;
 8814
 8815	if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) {
 8816		dev_err(dev,
 8817			"failed to allocate host buffer for Online Firmware Activation\n");
 8818		dma_free_coherent(dev, sizeof(*ofap), ofap, ctrl_info->pqi_ofa_mem_dma_handle);
 8819		ctrl_info->pqi_ofa_mem_virt_addr = NULL;
 8820		return;
 8821	}
 8822
 8823	put_unaligned_le16(PQI_OFA_VERSION, &ofap->version);
 8824	memcpy(&ofap->signature, PQI_OFA_SIGNATURE, sizeof(ofap->signature));
 8825}
 8826
 8827static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
 8828{
 8829	unsigned int i;
 8830	struct device *dev;
 8831	struct pqi_ofa_memory *ofap;
 8832	struct pqi_sg_descriptor *mem_descriptor;
 8833	unsigned int num_memory_descriptors;
 8834
 8835	ofap = ctrl_info->pqi_ofa_mem_virt_addr;
 8836	if (!ofap)
 8837		return;
 8838
 8839	dev = &ctrl_info->pci_dev->dev;
 8840
 8841	if (get_unaligned_le32(&ofap->bytes_allocated) == 0)
 8842		goto out;
 8843
 8844	mem_descriptor = ofap->sg_descriptor;
 8845	num_memory_descriptors =
 8846		get_unaligned_le16(&ofap->num_memory_descriptors);
 8847
 8848	for (i = 0; i < num_memory_descriptors; i++) {
 8849		dma_free_coherent(dev,
 8850			get_unaligned_le32(&mem_descriptor[i].length),
 8851			ctrl_info->pqi_ofa_chunk_virt_addr[i],
 8852			get_unaligned_le64(&mem_descriptor[i].address));
 8853	}
 8854	kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
 8855
 8856out:
 8857	dma_free_coherent(dev, sizeof(*ofap), ofap,
 8858		ctrl_info->pqi_ofa_mem_dma_handle);
 8859	ctrl_info->pqi_ofa_mem_virt_addr = NULL;
 8860}
 8861
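/*
 * Tell the controller where the OFA host buffer lives via a vendor-general
 * host memory update request; if no buffer was allocated, the request is
 * sent with a zero buffer address and length.
 */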
 8862static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
 8863{
 8864	u32 buffer_length;
 8865	struct pqi_vendor_general_request request;
 8866	struct pqi_ofa_memory *ofap;
 8867
 8868	memset(&request, 0, sizeof(request));
 8869
 8870	request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
 8871	put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
 8872		&request.header.iu_length);
 8873	put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
 8874		&request.function_code);
 8875
 8876	ofap = ctrl_info->pqi_ofa_mem_virt_addr;
 8877
 8878	if (ofap) {
 8879		buffer_length = offsetof(struct pqi_ofa_memory, sg_descriptor) +
 8880			get_unaligned_le16(&ofap->num_memory_descriptors) *
 8881			sizeof(struct pqi_sg_descriptor);
 8882
 8883		put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
 8884			&request.data.ofa_memory_allocation.buffer_address);
 8885		put_unaligned_le32(buffer_length,
 8886			&request.data.ofa_memory_allocation.buffer_length);
 8887	}
 8888
 8889	return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
 8890}
 8891
 8892static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs)
 8893{
 8894	ssleep(delay_secs);
 8895
 8896	return pqi_ctrl_init_resume(ctrl_info);
 8897}
 8898
 8899static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
 8900	.data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
 8901	.status = SAM_STAT_CHECK_CONDITION,
 8902};
 8903
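/*
 * Complete every in-flight request in the I/O request pool: SCSI commands are
 * failed with DID_NO_CONNECT (or simply freed if their device is already
 * offline) and driver-internal requests are completed with a hardware-error
 * status.
 */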
 8904static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
 8905{
 8906	unsigned int i;
 8907	struct pqi_io_request *io_request;
 8908	struct scsi_cmnd *scmd;
 8909	struct scsi_device *sdev;
 8910
 8911	for (i = 0; i < ctrl_info->max_io_slots; i++) {
 8912		io_request = &ctrl_info->io_request_pool[i];
 8913		if (atomic_read(&io_request->refcount) == 0)
 8914			continue;
 8915
 8916		scmd = io_request->scmd;
 8917		if (scmd) {
 8918			sdev = scmd->device;
 8919			if (!sdev || !scsi_device_online(sdev)) {
 8920				pqi_free_io_request(io_request);
 8921				continue;
 8922			} else {
 8923				set_host_byte(scmd, DID_NO_CONNECT);
 8924			}
 8925		} else {
 8926			io_request->status = -ENXIO;
 8927			io_request->error_info =
 8928				&pqi_ctrl_offline_raid_error_info;
 8929		}
 8930
 8931		io_request->io_complete_callback(io_request,
 8932			io_request->context);
 8933	}
 8934}
 8935
 8936static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
 8937{
 8938	pqi_perform_lockup_action();
 8939	pqi_stop_heartbeat_timer(ctrl_info);
 8940	pqi_free_interrupts(ctrl_info);
 8941	pqi_cancel_rescan_worker(ctrl_info);
 8942	pqi_cancel_update_time_worker(ctrl_info);
 8943	pqi_ctrl_wait_until_quiesced(ctrl_info);
 8944	pqi_fail_all_outstanding_requests(ctrl_info);
 8945	pqi_ctrl_unblock_requests(ctrl_info);
 8946}
 8947
 8948static void pqi_ctrl_offline_worker(struct work_struct *work)
 8949{
 8950	struct pqi_ctrl_info *ctrl_info;
 8951
 8952	ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
 8953	pqi_take_ctrl_offline_deferred(ctrl_info);
 8954}
 8955
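/*
 * Mark the controller offline, stop accepting new requests, optionally shut
 * the controller down through the SIS interface, and defer the remaining
 * cleanup (failing outstanding requests, freeing interrupts) to
 * ctrl_offline_work.
 */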
 8956static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
 8957	enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
 8958{
 8959	if (!ctrl_info->controller_online)
 8960		return;
 8961
 8962	ctrl_info->controller_online = false;
 8963	ctrl_info->pqi_mode_enabled = false;
 8964	pqi_ctrl_block_requests(ctrl_info);
 8965	if (!pqi_disable_ctrl_shutdown)
 8966		sis_shutdown_ctrl(ctrl_info, ctrl_shutdown_reason);
 8967	pci_disable_device(ctrl_info->pci_dev);
 8968	dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
 8969	schedule_work(&ctrl_info->ctrl_offline_work);
 8970}
 8971
 8972static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
 8973	const struct pci_device_id *id)
 8974{
 8975	char *ctrl_description;
 8976
 8977	if (id->driver_data)
 8978		ctrl_description = (char *)id->driver_data;
 8979	else
 8980		ctrl_description = "Microchip Smart Family Controller";
 8981
 8982	dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
 8983}
 8984
 8985static int pqi_pci_probe(struct pci_dev *pci_dev,
 8986	const struct pci_device_id *id)
 8987{
 8988	int rc;
 8989	int node;
 8990	struct pqi_ctrl_info *ctrl_info;
 8991
 8992	pqi_print_ctrl_info(pci_dev, id);
 8993
 8994	if (pqi_disable_device_id_wildcards &&
 8995		id->subvendor == PCI_ANY_ID &&
 8996		id->subdevice == PCI_ANY_ID) {
 8997		dev_warn(&pci_dev->dev,
 8998			"controller not probed because device ID wildcards are disabled\n");
 8999		return -ENODEV;
 9000	}
 9001
 9002	if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
 9003		dev_warn(&pci_dev->dev,
 9004			"controller device ID matched using wildcards\n");
 9005
 9006	node = dev_to_node(&pci_dev->dev);
 9007	if (node == NUMA_NO_NODE) {
 9008		node = cpu_to_node(0);
 9009		if (node == NUMA_NO_NODE)
 9010			node = 0;
 9011		set_dev_node(&pci_dev->dev, node);
 9012	}
 9013
 9014	ctrl_info = pqi_alloc_ctrl_info(node);
 9015	if (!ctrl_info) {
 9016		dev_err(&pci_dev->dev,
 9017			"failed to allocate controller info block\n");
 9018		return -ENOMEM;
 9019	}
 9020
 9021	ctrl_info->pci_dev = pci_dev;
 9022
 9023	rc = pqi_pci_init(ctrl_info);
 9024	if (rc)
 9025		goto error;
 9026
 9027	rc = pqi_ctrl_init(ctrl_info);
 9028	if (rc)
 9029		goto error;
 9030
 9031	return 0;
 9032
 9033error:
 9034	pqi_remove_ctrl(ctrl_info);
 9035
 9036	return rc;
 9037}
 9038
 9039static void pqi_pci_remove(struct pci_dev *pci_dev)
 9040{
 9041	struct pqi_ctrl_info *ctrl_info;
 9042	u16 vendor_id;
 9043	int rc;
 9044
 9045	ctrl_info = pci_get_drvdata(pci_dev);
 9046	if (!ctrl_info)
 9047		return;
 9048
 9049	pci_read_config_word(ctrl_info->pci_dev, PCI_SUBSYSTEM_VENDOR_ID, &vendor_id);
 9050	if (vendor_id == 0xffff)
 9051		ctrl_info->ctrl_removal_state = PQI_CTRL_SURPRISE_REMOVAL;
 9052	else
 9053		ctrl_info->ctrl_removal_state = PQI_CTRL_GRACEFUL_REMOVAL;
 9054
 9055	if (ctrl_info->ctrl_removal_state == PQI_CTRL_GRACEFUL_REMOVAL) {
 9056		rc = pqi_flush_cache(ctrl_info, RESTART);
 9057		if (rc)
 9058			dev_err(&pci_dev->dev,
 9059				"unable to flush controller cache during remove\n");
 9060	}
 9061
 9062	pqi_remove_ctrl(ctrl_info);
 9063}
 9064
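/*
 * Used in the shutdown and suspend paths after the controller has been
 * quiesced: any I/O slot that is still referenced indicates a stuck request,
 * so one of the two WARN_ONs below fires and identifies whether it was a
 * SCSI midlayer command or a driver-initiated request.
 */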
 9065static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info)
 9066{
 9067	unsigned int i;
 9068	struct pqi_io_request *io_request;
 9069	struct scsi_cmnd *scmd;
 9070
 9071	for (i = 0; i < ctrl_info->max_io_slots; i++) {
 9072		io_request = &ctrl_info->io_request_pool[i];
 9073		if (atomic_read(&io_request->refcount) == 0)
 9074			continue;
 9075		scmd = io_request->scmd;
  9076		WARN_ON(scmd != NULL); /* pending I/O command from the SCSI midlayer (SML) */
  9077		WARN_ON(scmd == NULL); /* pending non-I/O or driver-initiated request */
 9078	}
 9079}
 9080
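/*
 * PCI shutdown handler: wait for any OFA in progress, quiesce the controller,
 * flush the controller's write cache, and reset the controller.
 */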
 9081static void pqi_shutdown(struct pci_dev *pci_dev)
 9082{
 9083	int rc;
 9084	struct pqi_ctrl_info *ctrl_info;
 9085	enum bmic_flush_cache_shutdown_event shutdown_event;
 9086
 9087	ctrl_info = pci_get_drvdata(pci_dev);
 9088	if (!ctrl_info) {
 9089		dev_err(&pci_dev->dev,
 9090			"cache could not be flushed\n");
 9091		return;
 9092	}
 9093
 9094	pqi_wait_until_ofa_finished(ctrl_info);
 9095
 9096	pqi_scsi_block_requests(ctrl_info);
 9097	pqi_ctrl_block_device_reset(ctrl_info);
 9098	pqi_ctrl_block_requests(ctrl_info);
 9099	pqi_ctrl_wait_until_quiesced(ctrl_info);
 9100
 9101	if (system_state == SYSTEM_RESTART)
 9102		shutdown_event = RESTART;
 9103	else
 9104		shutdown_event = SHUTDOWN;
 9105
 9106	/*
 9107	 * Write all data in the controller's battery-backed cache to
 9108	 * storage.
 9109	 */
 9110	rc = pqi_flush_cache(ctrl_info, shutdown_event);
 9111	if (rc)
 9112		dev_err(&pci_dev->dev,
 9113			"unable to flush controller cache\n");
 9114
 9115	pqi_crash_if_pending_command(ctrl_info);
 9116	pqi_reset(ctrl_info);
 9117}
 9118
 9119static void pqi_process_lockup_action_param(void)
 9120{
 9121	unsigned int i;
 9122
 9123	if (!pqi_lockup_action_param)
 9124		return;
 9125
 9126	for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
 9127		if (strcmp(pqi_lockup_action_param,
 9128			pqi_lockup_actions[i].name) == 0) {
 9129			pqi_lockup_action = pqi_lockup_actions[i].action;
 9130			return;
 9131		}
 9132	}
 9133
 9134	pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n",
 9135		DRIVER_NAME_SHORT, pqi_lockup_action_param);
 9136}
 9137
 9138#define PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS		30
 9139#define PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS		(30 * 60)
 9140
 9141static void pqi_process_ctrl_ready_timeout_param(void)
 9142{
 9143	if (pqi_ctrl_ready_timeout_secs == 0)
 9144		return;
 9145
 9146	if (pqi_ctrl_ready_timeout_secs < PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS) {
  9147		pr_warn("%s: ctrl_ready_timeout parameter of %u second(s) is less than the minimum timeout of %d seconds - setting timeout to %d seconds\n",
 9148			DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS);
 9149		pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS;
 9150	} else if (pqi_ctrl_ready_timeout_secs > PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS) {
  9151		pr_warn("%s: ctrl_ready_timeout parameter of %u seconds is greater than the maximum timeout of %d seconds - setting timeout to %d seconds\n",
 9152			DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS);
 9153		pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS;
 9154	}
 9155
 9156	sis_ctrl_ready_timeout_secs = pqi_ctrl_ready_timeout_secs;
 9157}
 9158
 9159static void pqi_process_module_params(void)
 9160{
 9161	pqi_process_lockup_action_param();
 9162	pqi_process_ctrl_ready_timeout_param();
 9163}
 9164
 9165#if defined(CONFIG_PM)
 9166
 9167static inline enum bmic_flush_cache_shutdown_event pqi_get_flush_cache_shutdown_event(struct pci_dev *pci_dev)
 9168{
 9169	if (pci_dev->subsystem_vendor == PCI_VENDOR_ID_ADAPTEC2 && pci_dev->subsystem_device == 0x1304)
 9170		return RESTART;
 9171
 9172	return SUSPEND;
 9173}
 9174
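/*
 * Common handler for suspend and hibernation freeze: quiesce the controller,
 * flush the controller cache only for a true suspend, then stop the
 * heartbeat timer and release the interrupts.
 */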
 9175static int pqi_suspend_or_freeze(struct device *dev, bool suspend)
 9176{
 9177	struct pci_dev *pci_dev;
 9178	struct pqi_ctrl_info *ctrl_info;
 9179
 9180	pci_dev = to_pci_dev(dev);
 9181	ctrl_info = pci_get_drvdata(pci_dev);
 9182
 9183	pqi_wait_until_ofa_finished(ctrl_info);
 9184
 9185	pqi_ctrl_block_scan(ctrl_info);
 9186	pqi_scsi_block_requests(ctrl_info);
 9187	pqi_ctrl_block_device_reset(ctrl_info);
 9188	pqi_ctrl_block_requests(ctrl_info);
 9189	pqi_ctrl_wait_until_quiesced(ctrl_info);
 9190
 9191	if (suspend) {
 9192		enum bmic_flush_cache_shutdown_event shutdown_event;
 9193
 9194		shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev);
 9195		pqi_flush_cache(ctrl_info, shutdown_event);
 9196	}
 9197
 9198	pqi_stop_heartbeat_timer(ctrl_info);
 9199	pqi_crash_if_pending_command(ctrl_info);
 9200	pqi_free_irqs(ctrl_info);
 9201
 9202	ctrl_info->controller_online = false;
 9203	ctrl_info->pqi_mode_enabled = false;
 9204
 9205	return 0;
 9206}
 9207
 9208static __maybe_unused int pqi_suspend(struct device *dev)
 9209{
 9210	return pqi_suspend_or_freeze(dev, true);
 9211}
 9212
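/*
 * Common handler for resume and restore: re-request the interrupts, unblock
 * I/O, wait out the post-reset delay, and re-initialize the controller.
 */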
 9213static int pqi_resume_or_restore(struct device *dev)
 9214{
 9215	int rc;
 9216	struct pci_dev *pci_dev;
 9217	struct pqi_ctrl_info *ctrl_info;
 9218
 9219	pci_dev = to_pci_dev(dev);
 9220	ctrl_info = pci_get_drvdata(pci_dev);
 9221
 9222	rc = pqi_request_irqs(ctrl_info);
 9223	if (rc)
 9224		return rc;
 9225
 9226	pqi_ctrl_unblock_device_reset(ctrl_info);
 9227	pqi_ctrl_unblock_requests(ctrl_info);
 9228	pqi_scsi_unblock_requests(ctrl_info);
 9229	pqi_ctrl_unblock_scan(ctrl_info);
 9230
 9231	ssleep(PQI_POST_RESET_DELAY_SECS);
 9232
 9233	return pqi_ctrl_init_resume(ctrl_info);
 9234}
 9235
 9236static int pqi_freeze(struct device *dev)
 9237{
 9238	return pqi_suspend_or_freeze(dev, false);
 9239}
 9240
 9241static int pqi_thaw(struct device *dev)
 9242{
 9243	int rc;
 9244	struct pci_dev *pci_dev;
 9245	struct pqi_ctrl_info *ctrl_info;
 9246
 9247	pci_dev = to_pci_dev(dev);
 9248	ctrl_info = pci_get_drvdata(pci_dev);
 9249
 9250	rc = pqi_request_irqs(ctrl_info);
 9251	if (rc)
 9252		return rc;
 9253
 9254	ctrl_info->controller_online = true;
 9255	ctrl_info->pqi_mode_enabled = true;
 9256
 9257	pqi_ctrl_unblock_device_reset(ctrl_info);
 9258	pqi_ctrl_unblock_requests(ctrl_info);
 9259	pqi_scsi_unblock_requests(ctrl_info);
 9260	pqi_ctrl_unblock_scan(ctrl_info);
 9261
 9262	return 0;
 9263}
 9264
 9265static int pqi_poweroff(struct device *dev)
 9266{
 9267	struct pci_dev *pci_dev;
 9268	struct pqi_ctrl_info *ctrl_info;
 9269	enum bmic_flush_cache_shutdown_event shutdown_event;
 9270
 9271	pci_dev = to_pci_dev(dev);
 9272	ctrl_info = pci_get_drvdata(pci_dev);
 9273
 9274	shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev);
 9275	pqi_flush_cache(ctrl_info, shutdown_event);
 9276
 9277	return 0;
 9278}
 9279
 9280static const struct dev_pm_ops pqi_pm_ops = {
 9281	.suspend = pqi_suspend,
 9282	.resume = pqi_resume_or_restore,
 9283	.freeze = pqi_freeze,
 9284	.thaw = pqi_thaw,
 9285	.poweroff = pqi_poweroff,
 9286	.restore = pqi_resume_or_restore,
 9287};
 9288
 9289#endif /* CONFIG_PM */
 9290
 9291/* Define the PCI IDs for the controllers that we support. */
 9292static const struct pci_device_id pqi_pci_id_table[] = {
 9293	{
 9294		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9295			       0x105b, 0x1211)
 9296	},
 9297	{
 9298		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9299			       0x105b, 0x1321)
 9300	},
 9301	{
 9302		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9303			       0x152d, 0x8a22)
 9304	},
 9305	{
 9306		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9307			       0x152d, 0x8a23)
 9308	},
 9309	{
 9310		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9311			       0x152d, 0x8a24)
 9312	},
 9313	{
 9314		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9315			       0x152d, 0x8a36)
 9316	},
 9317	{
 9318		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9319			       0x152d, 0x8a37)
 9320	},
 9321	{
 9322		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9323			       0x193d, 0x1104)
 9324	},
 9325	{
 9326		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9327			       0x193d, 0x1105)
 9328	},
 9329	{
 9330		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9331			       0x193d, 0x1106)
 9332	},
 9333	{
 9334		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9335			       0x193d, 0x1107)
 9336	},
 9337	{
 9338		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9339			       0x193d, 0x1108)
 9340	},
 9341	{
 9342		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9343			       0x193d, 0x1109)
 9344	},
 9345	{
 9346		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9347			       0x193d, 0x110b)
 9348	},
 9349	{
 9350		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9351			       0x193d, 0x8460)
 9352	},
 9353	{
 9354		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9355			       0x193d, 0x8461)
 9356	},
 9357	{
 9358		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9359			       0x193d, 0xc460)
 9360	},
 9361	{
 9362		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9363			       0x193d, 0xc461)
 9364	},
 9365	{
 9366		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9367			       0x193d, 0xf460)
 9368	},
 9369	{
 9370		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9371			       0x193d, 0xf461)
 9372	},
 9373	{
 9374		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9375			       0x1bd4, 0x0045)
 9376	},
 9377	{
 9378		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9379			       0x1bd4, 0x0046)
 9380	},
 9381	{
 9382		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9383			       0x1bd4, 0x0047)
 9384	},
 9385	{
 9386		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9387			       0x1bd4, 0x0048)
 9388	},
 9389	{
 9390		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9391			       0x1bd4, 0x004a)
 9392	},
 9393	{
 9394		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9395			       0x1bd4, 0x004b)
 9396	},
 9397	{
 9398		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9399			       0x1bd4, 0x004c)
 9400	},
 9401	{
 9402		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9403			       0x1bd4, 0x004f)
 9404	},
 9405	{
 9406		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9407			       0x1bd4, 0x0051)
 9408	},
 9409	{
 9410		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9411			       0x1bd4, 0x0052)
 9412	},
 9413	{
 9414		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9415			       0x1bd4, 0x0053)
 9416	},
 9417	{
 9418		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9419			       0x1bd4, 0x0054)
 9420	},
 9421	{
 9422		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9423			       0x1bd4, 0x006b)
 9424	},
 9425	{
 9426		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9427			       0x1bd4, 0x006c)
 9428	},
 9429	{
 9430		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9431			       0x1bd4, 0x006d)
 9432	},
 9433	{
 9434		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9435			       0x1bd4, 0x006f)
 9436	},
 9437	{
 9438		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9439			       0x1bd4, 0x0070)
 9440	},
 9441	{
 9442		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9443			       0x1bd4, 0x0071)
 9444	},
 9445	{
 9446		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9447			       0x1bd4, 0x0072)
 9448	},
 9449	{
 9450		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9451			       0x1bd4, 0x0086)
 9452	},
 9453	{
 9454		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9455			       0x1bd4, 0x0087)
 9456	},
 9457	{
 9458		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9459			       0x1bd4, 0x0088)
 9460	},
 9461	{
 9462		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9463			       0x1bd4, 0x0089)
 9464	},
 9465	{
 9466		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9467			       0x19e5, 0xd227)
 9468	},
 9469	{
 9470		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9471			       0x19e5, 0xd228)
 9472	},
 9473	{
 9474		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9475			       0x19e5, 0xd229)
 9476	},
 9477	{
 9478		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9479			       0x19e5, 0xd22a)
 9480	},
 9481	{
 9482		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9483			       0x19e5, 0xd22b)
 9484	},
 9485	{
 9486		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9487			       0x19e5, 0xd22c)
 9488	},
 9489	{
 9490		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9491			       PCI_VENDOR_ID_ADAPTEC2, 0x0110)
 9492	},
 9493	{
 9494		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9495			       PCI_VENDOR_ID_ADAPTEC2, 0x0608)
 9496	},
 9497	{
 9498		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9499			       PCI_VENDOR_ID_ADAPTEC2, 0x0659)
 9500	},
 9501	{
 9502		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9503			       PCI_VENDOR_ID_ADAPTEC2, 0x0800)
 9504	},
 9505	{
 9506		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9507			       PCI_VENDOR_ID_ADAPTEC2, 0x0801)
 9508	},
 9509	{
 9510		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9511			       PCI_VENDOR_ID_ADAPTEC2, 0x0802)
 9512	},
 9513	{
 9514		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9515			       PCI_VENDOR_ID_ADAPTEC2, 0x0803)
 9516	},
 9517	{
 9518		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9519			       PCI_VENDOR_ID_ADAPTEC2, 0x0804)
 9520	},
 9521	{
 9522		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9523			       PCI_VENDOR_ID_ADAPTEC2, 0x0805)
 9524	},
 9525	{
 9526		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9527			       PCI_VENDOR_ID_ADAPTEC2, 0x0806)
 9528	},
 9529	{
 9530		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9531			       PCI_VENDOR_ID_ADAPTEC2, 0x0807)
 9532	},
 9533	{
 9534		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9535			       PCI_VENDOR_ID_ADAPTEC2, 0x0808)
 9536	},
 9537	{
 9538		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9539			       PCI_VENDOR_ID_ADAPTEC2, 0x0809)
 9540	},
 9541	{
 9542		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9543			       PCI_VENDOR_ID_ADAPTEC2, 0x080a)
 9544	},
 9545	{
 9546		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9547			       PCI_VENDOR_ID_ADAPTEC2, 0x0900)
 9548	},
 9549	{
 9550		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9551			       PCI_VENDOR_ID_ADAPTEC2, 0x0901)
 9552	},
 9553	{
 9554		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9555			       PCI_VENDOR_ID_ADAPTEC2, 0x0902)
 9556	},
 9557	{
 9558		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9559			       PCI_VENDOR_ID_ADAPTEC2, 0x0903)
 9560	},
 9561	{
 9562		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9563			       PCI_VENDOR_ID_ADAPTEC2, 0x0904)
 9564	},
 9565	{
 9566		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9567			       PCI_VENDOR_ID_ADAPTEC2, 0x0905)
 9568	},
 9569	{
 9570		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9571			       PCI_VENDOR_ID_ADAPTEC2, 0x0906)
 9572	},
 9573	{
 9574		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9575			       PCI_VENDOR_ID_ADAPTEC2, 0x0907)
 9576	},
 9577	{
 9578		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9579			       PCI_VENDOR_ID_ADAPTEC2, 0x0908)
 9580	},
 9581	{
 9582		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9583			       PCI_VENDOR_ID_ADAPTEC2, 0x090a)
 9584	},
 9585	{
 9586		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9587			       PCI_VENDOR_ID_ADAPTEC2, 0x1200)
 9588	},
 9589	{
 9590		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9591			       PCI_VENDOR_ID_ADAPTEC2, 0x1201)
 9592	},
 9593	{
 9594		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9595			       PCI_VENDOR_ID_ADAPTEC2, 0x1202)
 9596	},
 9597	{
 9598		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9599			       PCI_VENDOR_ID_ADAPTEC2, 0x1280)
 9600	},
 9601	{
 9602		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9603			       PCI_VENDOR_ID_ADAPTEC2, 0x1281)
 9604	},
 9605	{
 9606		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9607			       PCI_VENDOR_ID_ADAPTEC2, 0x1282)
 9608	},
 9609	{
 9610		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9611			       PCI_VENDOR_ID_ADAPTEC2, 0x1300)
 9612	},
 9613	{
 9614		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9615			       PCI_VENDOR_ID_ADAPTEC2, 0x1301)
 9616	},
 9617	{
 9618		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9619			       PCI_VENDOR_ID_ADAPTEC2, 0x1302)
 9620	},
 9621	{
 9622		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9623			       PCI_VENDOR_ID_ADAPTEC2, 0x1303)
 9624	},
 9625	{
 9626		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9627			       PCI_VENDOR_ID_ADAPTEC2, 0x1304)
 9628	},
 9629	{
 9630		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9631			       PCI_VENDOR_ID_ADAPTEC2, 0x1380)
 9632	},
 9633	{
 9634		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9635			       PCI_VENDOR_ID_ADAPTEC2, 0x1400)
 9636	},
 9637	{
 9638		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9639			       PCI_VENDOR_ID_ADAPTEC2, 0x1402)
 9640	},
 9641	{
 9642		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9643			       PCI_VENDOR_ID_ADAPTEC2, 0x1410)
 9644	},
 9645	{
 9646		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9647			       PCI_VENDOR_ID_ADAPTEC2, 0x1411)
 9648	},
 9649	{
 9650		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9651			       PCI_VENDOR_ID_ADAPTEC2, 0x1412)
 9652	},
 9653	{
 9654		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9655			       PCI_VENDOR_ID_ADAPTEC2, 0x1420)
 9656	},
 9657	{
 9658		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9659			       PCI_VENDOR_ID_ADAPTEC2, 0x1430)
 9660	},
 9661	{
 9662		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9663			       PCI_VENDOR_ID_ADAPTEC2, 0x1440)
 9664	},
 9665	{
 9666		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9667			       PCI_VENDOR_ID_ADAPTEC2, 0x1441)
 9668	},
 9669	{
 9670		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9671			       PCI_VENDOR_ID_ADAPTEC2, 0x1450)
 9672	},
 9673	{
 9674		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9675			       PCI_VENDOR_ID_ADAPTEC2, 0x1452)
 9676	},
 9677	{
 9678		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9679			       PCI_VENDOR_ID_ADAPTEC2, 0x1460)
 9680	},
 9681	{
 9682		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9683			       PCI_VENDOR_ID_ADAPTEC2, 0x1461)
 9684	},
 9685	{
 9686		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9687			       PCI_VENDOR_ID_ADAPTEC2, 0x1462)
 9688	},
 9689	{
 9690		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9691			       PCI_VENDOR_ID_ADAPTEC2, 0x1463)
 9692	},
 9693	{
 9694		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9695			       PCI_VENDOR_ID_ADAPTEC2, 0x1470)
 9696	},
 9697	{
 9698		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9699			       PCI_VENDOR_ID_ADAPTEC2, 0x1471)
 9700	},
 9701	{
 9702		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9703			       PCI_VENDOR_ID_ADAPTEC2, 0x1472)
 9704	},
 9705	{
 9706		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9707			       PCI_VENDOR_ID_ADAPTEC2, 0x1473)
 9708	},
 9709	{
 9710		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9711			       PCI_VENDOR_ID_ADAPTEC2, 0x1474)
 9712	},
 9713	{
 9714		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9715			       PCI_VENDOR_ID_ADAPTEC2, 0x1475)
 9716	},
 9717	{
 9718		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9719			       PCI_VENDOR_ID_ADAPTEC2, 0x1480)
 9720	},
 9721	{
 9722		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9723			       PCI_VENDOR_ID_ADAPTEC2, 0x1490)
 9724	},
 9725	{
 9726		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9727			       PCI_VENDOR_ID_ADAPTEC2, 0x1491)
 9728	},
 9729	{
 9730		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9731			       PCI_VENDOR_ID_ADAPTEC2, 0x14a0)
 9732	},
 9733	{
 9734		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9735			       PCI_VENDOR_ID_ADAPTEC2, 0x14a1)
 9736	},
 9737	{
 9738		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9739			       PCI_VENDOR_ID_ADAPTEC2, 0x14a2)
 9740	},
 9741	{
 9742		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9743			       PCI_VENDOR_ID_ADAPTEC2, 0x14a4)
 9744	},
 9745	{
 9746		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9747			       PCI_VENDOR_ID_ADAPTEC2, 0x14a5)
 9748	},
 9749	{
 9750		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9751			       PCI_VENDOR_ID_ADAPTEC2, 0x14a6)
 9752	},
 9753	{
 9754		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9755			       PCI_VENDOR_ID_ADAPTEC2, 0x14b0)
 9756	},
 9757	{
 9758		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9759			       PCI_VENDOR_ID_ADAPTEC2, 0x14b1)
 9760	},
 9761	{
 9762		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9763			       PCI_VENDOR_ID_ADAPTEC2, 0x14c0)
 9764	},
 9765	{
 9766		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9767			       PCI_VENDOR_ID_ADAPTEC2, 0x14c1)
 9768	},
 9769	{
 9770		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9771			       PCI_VENDOR_ID_ADAPTEC2, 0x14c2)
 9772	},
 9773	{
 9774		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9775			       PCI_VENDOR_ID_ADAPTEC2, 0x14c3)
 9776	},
 9777	{
 9778		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9779			       PCI_VENDOR_ID_ADAPTEC2, 0x14c4)
 9780	},
 9781	{
 9782		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9783			       PCI_VENDOR_ID_ADAPTEC2, 0x14d0)
 9784	},
 9785	{
 9786		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9787			       PCI_VENDOR_ID_ADAPTEC2, 0x14e0)
 9788	},
 9789	{
 9790		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9791			       PCI_VENDOR_ID_ADAPTEC2, 0x14f0)
 9792	},
 9793	{
 9794		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9795			       PCI_VENDOR_ID_ADVANTECH, 0x8312)
 9796	},
 9797	{
 9798		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9799			       PCI_VENDOR_ID_DELL, 0x1fe0)
 9800	},
 9801	{
 9802		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9803			       PCI_VENDOR_ID_HP, 0x0600)
 9804	},
 9805	{
 9806		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9807			       PCI_VENDOR_ID_HP, 0x0601)
 9808	},
 9809	{
 9810		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9811			       PCI_VENDOR_ID_HP, 0x0602)
 9812	},
 9813	{
 9814		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9815			       PCI_VENDOR_ID_HP, 0x0603)
 9816	},
 9817	{
 9818		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9819			       PCI_VENDOR_ID_HP, 0x0609)
 9820	},
 9821	{
 9822		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9823			       PCI_VENDOR_ID_HP, 0x0650)
 9824	},
 9825	{
 9826		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9827			       PCI_VENDOR_ID_HP, 0x0651)
 9828	},
 9829	{
 9830		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9831			       PCI_VENDOR_ID_HP, 0x0652)
 9832	},
 9833	{
 9834		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9835			       PCI_VENDOR_ID_HP, 0x0653)
 9836	},
 9837	{
 9838		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9839			       PCI_VENDOR_ID_HP, 0x0654)
 9840	},
 9841	{
 9842		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9843			       PCI_VENDOR_ID_HP, 0x0655)
 9844	},
 9845	{
 9846		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9847			       PCI_VENDOR_ID_HP, 0x0700)
 9848	},
 9849	{
 9850		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9851			       PCI_VENDOR_ID_HP, 0x0701)
 9852	},
 9853	{
 9854		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9855			       PCI_VENDOR_ID_HP, 0x1001)
 9856	},
 9857	{
 9858		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9859			       PCI_VENDOR_ID_HP, 0x1002)
 9860	},
 9861	{
 9862		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9863			       PCI_VENDOR_ID_HP, 0x1100)
 9864	},
 9865	{
 9866		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9867			       PCI_VENDOR_ID_HP, 0x1101)
 9868	},
 9869	{
 9870		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9871			       0x1590, 0x0294)
 9872	},
 9873	{
 9874		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9875			       0x1590, 0x02db)
 9876	},
 9877	{
 9878		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9879			       0x1590, 0x02dc)
 9880	},
 9881	{
 9882		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9883			       0x1590, 0x032e)
 9884	},
 9885	{
 9886		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9887			       0x1590, 0x036f)
 9888	},
 9889	{
 9890		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9891			       0x1590, 0x0381)
 9892	},
 9893	{
 9894		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9895			       0x1590, 0x0382)
 9896	},
 9897	{
 9898		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9899			       0x1590, 0x0383)
 9900	},
 9901	{
 9902		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9903			       0x1d8d, 0x0800)
 9904	},
 9905	{
 9906		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9907			       0x1d8d, 0x0908)
 9908	},
 9909	{
 9910		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9911			       0x1d8d, 0x0806)
 9912	},
 9913	{
 9914		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9915			       0x1d8d, 0x0916)
 9916	},
 9917	{
 9918		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9919			       PCI_VENDOR_ID_GIGABYTE, 0x1000)
 9920	},
 9921	{
 9922		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9923			       0x1dfc, 0x3161)
 9924	},
 9925	{
 9926		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9927			       0x1f0c, 0x3161)
 9928	},
 9929	{
 9930		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9931			       0x1cf2, 0x5445)
 9932	},
 9933	{
 9934		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9935			       0x1cf2, 0x5446)
 9936	},
 9937	{
 9938		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9939			       0x1cf2, 0x5447)
 9940	},
 9941	{
 9942		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9943			       0x1cf2, 0x5449)
 9944	},
 9945	{
 9946		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9947			       0x1cf2, 0x544a)
 9948	},
 9949	{
 9950		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9951			       0x1cf2, 0x544b)
 9952	},
 9953	{
 9954		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9955			       0x1cf2, 0x544d)
 9956	},
 9957	{
 9958		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9959			       0x1cf2, 0x544e)
 9960	},
 9961	{
 9962		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9963			       0x1cf2, 0x544f)
 9964	},
 9965	{
 9966		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9967			       0x1cf2, 0x0b27)
 9968	},
 9969	{
 9970		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9971			       0x1cf2, 0x0b29)
 9972	},
 9973	{
 9974		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9975			       0x1cf2, 0x0b45)
 9976	},
 9977	{
 9978		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9979			       0x1cc4, 0x0101)
 9980	},
 9981	{
 9982		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9983			       0x1cc4, 0x0201)
 9984	},
 9985	{
 9986		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9987			       PCI_VENDOR_ID_LENOVO, 0x0220)
 9988	},
 9989	{
 9990		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9991			       PCI_VENDOR_ID_LENOVO, 0x0221)
 9992	},
 9993	{
 9994		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9995			       PCI_VENDOR_ID_LENOVO, 0x0520)
 9996	},
 9997	{
 9998		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 9999			       PCI_VENDOR_ID_LENOVO, 0x0522)
10000	},
10001	{
10002		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10003			       PCI_VENDOR_ID_LENOVO, 0x0620)
10004	},
10005	{
10006		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10007			       PCI_VENDOR_ID_LENOVO, 0x0621)
10008	},
10009	{
10010		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10011			       PCI_VENDOR_ID_LENOVO, 0x0622)
10012	},
10013	{
10014		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10015			       PCI_VENDOR_ID_LENOVO, 0x0623)
10016	},
10017	{
10018		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10019				0x1e93, 0x1000)
10020	},
10021	{
10022		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10023				0x1e93, 0x1001)
10024	},
10025	{
10026		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10027				0x1e93, 0x1002)
10028	},
10029	{
10030		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10031			       PCI_ANY_ID, PCI_ANY_ID)
10032	},
10033	{ 0 }
10034};
10035
10036MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
10037
10038static struct pci_driver pqi_pci_driver = {
10039	.name = DRIVER_NAME_SHORT,
10040	.id_table = pqi_pci_id_table,
10041	.probe = pqi_pci_probe,
10042	.remove = pqi_pci_remove,
10043	.shutdown = pqi_shutdown,
10044#if defined(CONFIG_PM)
10045	.driver = {
10046		.pm = &pqi_pm_ops
10047	},
10048#endif
10049};
10050
10051static int __init pqi_init(void)
10052{
10053	int rc;
10054
10055	pr_info(DRIVER_NAME "\n");
10056	pqi_verify_structures();
10057	sis_verify_structures();
10058
10059	pqi_sas_transport_template = sas_attach_transport(&pqi_sas_transport_functions);
10060	if (!pqi_sas_transport_template)
10061		return -ENODEV;
10062
10063	pqi_process_module_params();
10064
10065	rc = pci_register_driver(&pqi_pci_driver);
10066	if (rc)
10067		sas_release_transport(pqi_sas_transport_template);
10068
10069	return rc;
10070}
10071
10072static void __exit pqi_cleanup(void)
10073{
10074	pci_unregister_driver(&pqi_pci_driver);
10075	sas_release_transport(pqi_sas_transport_template);
10076}
10077
10078module_init(pqi_init);
10079module_exit(pqi_cleanup);
10080
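/*
 * Compile-time checks that the driver's structure layouts match the offsets
 * and sizes required by the SIS/PQI interface; any mismatch breaks the build
 * via BUILD_BUG_ON().
 */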
10081static void pqi_verify_structures(void)
10082{
10083	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10084		sis_host_to_ctrl_doorbell) != 0x20);
10085	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10086		sis_interrupt_mask) != 0x34);
10087	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10088		sis_ctrl_to_host_doorbell) != 0x9c);
10089	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10090		sis_ctrl_to_host_doorbell_clear) != 0xa0);
10091	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10092		sis_driver_scratch) != 0xb0);
10093	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10094		sis_product_identifier) != 0xb4);
10095	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10096		sis_firmware_status) != 0xbc);
10097	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10098		sis_ctrl_shutdown_reason_code) != 0xcc);
10099	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10100		sis_mailbox) != 0x1000);
10101	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10102		pqi_registers) != 0x4000);
10103
10104	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
10105		iu_type) != 0x0);
10106	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
10107		iu_length) != 0x2);
10108	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
10109		response_queue_id) != 0x4);
10110	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
10111		driver_flags) != 0x6);
10112	BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
10113
10114	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10115		status) != 0x0);
10116	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10117		service_response) != 0x1);
10118	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10119		data_present) != 0x2);
10120	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10121		reserved) != 0x3);
10122	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10123		residual_count) != 0x4);
10124	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10125		data_length) != 0x8);
10126	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10127		reserved1) != 0xa);
10128	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10129		data) != 0xc);
10130	BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
10131
10132	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10133		data_in_result) != 0x0);
10134	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10135		data_out_result) != 0x1);
10136	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10137		reserved) != 0x2);
10138	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10139		status) != 0x5);
10140	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10141		status_qualifier) != 0x6);
10142	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10143		sense_data_length) != 0x8);
10144	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10145		response_data_length) != 0xa);
10146	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10147		data_in_transferred) != 0xc);
10148	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10149		data_out_transferred) != 0x10);
10150	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10151		data) != 0x14);
10152	BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
10153
10154	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10155		signature) != 0x0);
10156	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10157		function_and_status_code) != 0x8);
10158	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10159		max_admin_iq_elements) != 0x10);
10160	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10161		max_admin_oq_elements) != 0x11);
10162	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10163		admin_iq_element_length) != 0x12);
10164	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10165		admin_oq_element_length) != 0x13);
10166	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10167		max_reset_timeout) != 0x14);
10168	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10169		legacy_intx_status) != 0x18);
10170	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10171		legacy_intx_mask_set) != 0x1c);
10172	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10173		legacy_intx_mask_clear) != 0x20);
10174	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10175		device_status) != 0x40);
10176	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10177		admin_iq_pi_offset) != 0x48);
10178	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10179		admin_oq_ci_offset) != 0x50);
10180	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10181		admin_iq_element_array_addr) != 0x58);
10182	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10183		admin_oq_element_array_addr) != 0x60);
10184	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10185		admin_iq_ci_addr) != 0x68);
10186	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10187		admin_oq_pi_addr) != 0x70);
10188	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10189		admin_iq_num_elements) != 0x78);
10190	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10191		admin_oq_num_elements) != 0x79);
10192	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10193		admin_queue_int_msg_num) != 0x7a);
10194	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10195		device_error) != 0x80);
10196	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10197		error_details) != 0x88);
10198	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10199		device_reset) != 0x90);
10200	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10201		power_action) != 0x94);
10202	BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
10203
10204	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10205		header.iu_type) != 0);
10206	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10207		header.iu_length) != 2);
10208	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10209		header.driver_flags) != 6);
10210	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10211		request_id) != 8);
10212	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10213		function_code) != 10);
10214	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10215		data.report_device_capability.buffer_length) != 44);
10216	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10217		data.report_device_capability.sg_descriptor) != 48);
10218	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10219		data.create_operational_iq.queue_id) != 12);
10220	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10221		data.create_operational_iq.element_array_addr) != 16);
10222	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10223		data.create_operational_iq.ci_addr) != 24);
10224	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10225		data.create_operational_iq.num_elements) != 32);
10226	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10227		data.create_operational_iq.element_length) != 34);
10228	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10229		data.create_operational_iq.queue_protocol) != 36);
10230	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10231		data.create_operational_oq.queue_id) != 12);
10232	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10233		data.create_operational_oq.element_array_addr) != 16);
10234	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10235		data.create_operational_oq.pi_addr) != 24);
10236	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10237		data.create_operational_oq.num_elements) != 32);
10238	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10239		data.create_operational_oq.element_length) != 34);
10240	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10241		data.create_operational_oq.queue_protocol) != 36);
10242	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10243		data.create_operational_oq.int_msg_num) != 40);
10244	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10245		data.create_operational_oq.coalescing_count) != 42);
10246	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10247		data.create_operational_oq.min_coalescing_time) != 44);
10248	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10249		data.create_operational_oq.max_coalescing_time) != 48);
10250	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10251		data.delete_operational_queue.queue_id) != 12);
10252	BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
10253	BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
10254		data.create_operational_iq) != 64 - 11);
10255	BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
10256		data.create_operational_oq) != 64 - 11);
10257	BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
10258		data.delete_operational_queue) != 64 - 11);
10259
10260	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10261		header.iu_type) != 0);
10262	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10263		header.iu_length) != 2);
10264	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10265		header.driver_flags) != 6);
10266	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10267		request_id) != 8);
10268	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10269		function_code) != 10);
10270	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10271		status) != 11);
10272	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10273		data.create_operational_iq.status_descriptor) != 12);
10274	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10275		data.create_operational_iq.iq_pi_offset) != 16);
10276	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10277		data.create_operational_oq.status_descriptor) != 12);
10278	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10279		data.create_operational_oq.oq_ci_offset) != 16);
10280	BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
10281
10282	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10283		header.iu_type) != 0);
10284	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10285		header.iu_length) != 2);
10286	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10287		header.response_queue_id) != 4);
10288	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10289		header.driver_flags) != 6);
10290	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10291		request_id) != 8);
10292	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10293		nexus_id) != 10);
10294	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10295		buffer_length) != 12);
10296	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10297		lun_number) != 16);
10298	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10299		protocol_specific) != 24);
10300	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10301		error_index) != 27);
10302	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10303		cdb) != 32);
10304	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10305		timeout) != 60);
10306	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10307		sg_descriptors) != 64);
10308	BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
10309		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
10310
10311	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10312		header.iu_type) != 0);
10313	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10314		header.iu_length) != 2);
10315	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10316		header.response_queue_id) != 4);
10317	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10318		header.driver_flags) != 6);
10319	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10320		request_id) != 8);
10321	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10322		nexus_id) != 12);
10323	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10324		buffer_length) != 16);
10325	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10326		data_encryption_key_index) != 22);
10327	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10328		encrypt_tweak_lower) != 24);
10329	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10330		encrypt_tweak_upper) != 28);
10331	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10332		cdb) != 32);
10333	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10334		error_index) != 48);
10335	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10336		num_sg_descriptors) != 50);
10337	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10338		cdb_length) != 51);
10339	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10340		lun_number) != 52);
10341	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10342		sg_descriptors) != 64);
10343	BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
10344		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
10345
10346	BUILD_BUG_ON(offsetof(struct pqi_io_response,
10347		header.iu_type) != 0);
10348	BUILD_BUG_ON(offsetof(struct pqi_io_response,
10349		header.iu_length) != 2);
10350	BUILD_BUG_ON(offsetof(struct pqi_io_response,
10351		request_id) != 8);
10352	BUILD_BUG_ON(offsetof(struct pqi_io_response,
10353		error_index) != 10);
10354
10355	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10356		header.iu_type) != 0);
10357	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10358		header.iu_length) != 2);
10359	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10360		header.response_queue_id) != 4);
10361	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10362		request_id) != 8);
10363	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10364		data.report_event_configuration.buffer_length) != 12);
10365	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10366		data.report_event_configuration.sg_descriptors) != 16);
10367	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10368		data.set_event_configuration.global_event_oq_id) != 10);
10369	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10370		data.set_event_configuration.buffer_length) != 12);
10371	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10372		data.set_event_configuration.sg_descriptors) != 16);
10373
	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
		max_inbound_iu_length) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
		max_outbound_iu_length) != 14);
	BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		data_length) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iq_arbitration_priority_support_bitmask) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_a) != 9);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_b) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_c) != 11);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_inbound_queues) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_iq) != 18);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_iq_element_length) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_iq_element_length) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_outbound_queues) != 30);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_oq) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		intr_coalescing_time_granularity) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_oq_element_length) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_oq_element_length) != 38);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iu_layer_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);

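	/*
	 * Event descriptors, the event configuration buffer, and the
	 * event response/acknowledge IUs.
	 */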
	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
		event_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
		oq_id) != 2);
	BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);

	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		num_event_descriptors) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		descriptors) != 4);

	BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
		ARRAY_SIZE(pqi_supported_event_types));

	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		additional_event_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		data) != 16);
	BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		additional_event_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);

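	/* Task management request and response IUs. */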
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		timeout) != 14);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		lun_number) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		outbound_queue_id_to_manage) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id_to_manage) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		task_management_function) != 30);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		additional_response_info) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		response_code) != 15);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);

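	/*
	 * BMIC structures: identify controller, identify physical device,
	 * and the sense feature buffer/page headers and AIO subpage.
	 */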
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configured_logical_drive_count) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configuration_signature) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_version_short) != 5);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		extended_logical_unit_count) != 154);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_build_number) != 190);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		vendor_id) != 200);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		product_id) != 208);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		extra_controller_flags) != 286);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		controller_mode) != 292);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		spare_part_number) != 293);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_version_long) != 325);

	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		phys_bay_in_box) != 115);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		device_type) != 120);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		redundant_path_present_map) != 1736);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		active_path_number) != 1738);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_connector) != 1739);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_box_on_port) != 1755);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		current_queue_depth_limit) != 1796);
	BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);

	BUILD_BUG_ON(sizeof(struct bmic_sense_feature_buffer_header) != 4);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
		page_code) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
		subpage_code) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
		buffer_length) != 2);

	BUILD_BUG_ON(sizeof(struct bmic_sense_feature_page_header) != 4);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
		page_code) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
		subpage_code) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
		page_length) != 2);

	BUILD_BUG_ON(sizeof(struct bmic_sense_feature_io_page_aio_subpage)
		!= 18);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		header) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		firmware_read_support) != 4);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		driver_read_support) != 5);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		firmware_write_support) != 6);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		driver_write_support) != 7);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_transfer_encrypted_sas_sata) != 8);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_transfer_encrypted_nvme) != 10);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_write_raid_5_6) != 12);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_write_raid_1_10_2drive) != 14);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_write_raid_1_10_3drive) != 16);

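	/* Admin and operational queue element count, length, and alignment limits. */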
	BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);

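	/* Reserved I/O slots must be fewer than the maximum outstanding requests. */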
	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
		PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);
}