// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
*/
#include "qla_def.h"
#include <linux/bitfield.h>
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/kobject.h>
#include <linux/slab.h>
#include <linux/blk-mq-pci.h>
#include <linux/refcount.h>
#include <linux/crash_dump.h>
#include <linux/trace_events.h>
#include <linux/trace.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>
#include "qla_target.h"
/*
* Driver version
*/
char qla2x00_version_str[40];
static int apidev_major;
/*
* SRB allocation cache
*/
struct kmem_cache *srb_cachep;
static struct trace_array *qla_trc_array;
int ql2xfulldump_on_mpifail;
module_param(ql2xfulldump_on_mpifail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql2xfulldump_on_mpifail,
"Set this to take full dump on MPI hang.");
int ql2xenforce_iocb_limit = 2;
module_param(ql2xenforce_iocb_limit, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql2xenforce_iocb_limit,
"Enforce IOCB throttling, to avoid FW congestion. (default: 2) "
"1: track usage per queue, 2: track usage per adapter");
/*
* CT6 CTX allocation cache
*/
static struct kmem_cache *ctx_cachep;
/*
* error level for logging
*/
uint ql_errlev = 0x8001;
int ql2xsecenable;
module_param(ql2xsecenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xsecenable,
"Enable/disable security. 0(Default) - Security disabled. 1 - Security enabled.");
static int ql2xenableclass2;
module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xenableclass2,
"Specify if Class 2 operations are supported from the very "
"beginning. Default is 0 - class 2 not supported.");
int ql2xlogintimeout = 20;
module_param(ql2xlogintimeout, int, S_IRUGO);
MODULE_PARM_DESC(ql2xlogintimeout,
"Login timeout value in seconds.");
int qlport_down_retry;
module_param(qlport_down_retry, int, S_IRUGO);
MODULE_PARM_DESC(qlport_down_retry,
"Maximum number of command retries to a port that returns "
"a PORT-DOWN status.");
int ql2xplogiabsentdevice;
module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xplogiabsentdevice,
"Option to enable PLOGI to devices that are not present after "
"a Fabric scan. This is needed for several broken switches. "
"Default is 0 - no PLOGI. 1 - perform PLOGI.");
int ql2xloginretrycount;
module_param(ql2xloginretrycount, int, S_IRUGO);
MODULE_PARM_DESC(ql2xloginretrycount,
"Specify an alternate value for the NVRAM login retry count.");
int ql2xallocfwdump = 1;
module_param(ql2xallocfwdump, int, S_IRUGO);
MODULE_PARM_DESC(ql2xallocfwdump,
"Option to enable allocation of memory for a firmware dump "
"during HBA initialization. Memory allocation requirements "
"vary by ISP type. Default is 1 - allocate memory.");
int ql2xextended_error_logging;
module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
module_param_named(logging, ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xextended_error_logging,
"Option to enable extended error logging,\n"
"\t\tDefault is 0 - no logging. 0x40000000 - Module Init & Probe.\n"
"\t\t0x20000000 - Mailbox Cmnds. 0x10000000 - Device Discovery.\n"
"\t\t0x08000000 - IO tracing. 0x04000000 - DPC Thread.\n"
"\t\t0x02000000 - Async events. 0x01000000 - Timer routines.\n"
"\t\t0x00800000 - User space. 0x00400000 - Task Management.\n"
"\t\t0x00200000 - AER/EEH. 0x00100000 - Multi Q.\n"
"\t\t0x00080000 - P3P Specific. 0x00040000 - Virtual Port.\n"
"\t\t0x00020000 - Buffer Dump. 0x00010000 - Misc.\n"
"\t\t0x00008000 - Verbose. 0x00004000 - Target.\n"
"\t\t0x00002000 - Target Mgmt. 0x00001000 - Target TMF.\n"
"\t\t0x7fffffff - For enabling all logs, can be too many logs.\n"
"\t\t0x1e400000 - Preferred value for capturing essential "
"debug information (equivalent to old "
"ql2xextended_error_logging=1).\n"
"\t\tDo LOGICAL OR of the value to enable more than one level");
int ql2xextended_error_logging_ktrace = 1;
module_param(ql2xextended_error_logging_ktrace, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xextended_error_logging_ktrace,
"Same BIT definition as ql2xextended_error_logging, but used to control logging to kernel trace buffer (default=1).\n");
int ql2xshiftctondsd = 6;
module_param(ql2xshiftctondsd, int, S_IRUGO);
MODULE_PARM_DESC(ql2xshiftctondsd,
"Set to control shifting of command type processing "
"based on total number of SG elements.");
int ql2xfdmienable = 1;
module_param(ql2xfdmienable, int, S_IRUGO|S_IWUSR);
module_param_named(fdmi, ql2xfdmienable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xfdmienable,
"Enables FDMI registrations. "
"0 - no FDMI registrations. "
"1 - provide FDMI registrations (default).");
#define MAX_Q_DEPTH 64
static int ql2xmaxqdepth = MAX_Q_DEPTH;
module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xmaxqdepth,
"Maximum queue depth to set for each LUN. "
"Default is 64.");
int ql2xenabledif = 2;
module_param(ql2xenabledif, int, S_IRUGO);
MODULE_PARM_DESC(ql2xenabledif,
" Enable T10-CRC-DIF:\n"
" Default is 2.\n"
" 0 -- No DIF Support\n"
" 1 -- Enable DIF for all types\n"
" 2 -- Enable DIF for all types, except Type 0.\n");
#if (IS_ENABLED(CONFIG_NVME_FC))
int ql2xnvmeenable = 1;
#else
int ql2xnvmeenable;
#endif
module_param(ql2xnvmeenable, int, 0644);
MODULE_PARM_DESC(ql2xnvmeenable,
"Enables NVME support. "
"0 - no NVMe. Default is Y");
int ql2xenablehba_err_chk = 2;
module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xenablehba_err_chk,
" Enable T10-CRC-DIF Error isolation by HBA:\n"
" Default is 2.\n"
" 0 -- Error isolation disabled\n"
" 1 -- Error isolation enabled only for DIX Type 0\n"
" 2 -- Error isolation enabled for all Types\n");
int ql2xiidmaenable = 1;
module_param(ql2xiidmaenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xiidmaenable,
"Enables iIDMA settings "
"Default is 1 - perform iIDMA. 0 - no iIDMA.");
int ql2xmqsupport = 1;
module_param(ql2xmqsupport, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmqsupport,
"Enable on demand multiple queue pairs support "
"Default is 1 for supported. "
"Set it to 0 to turn off mq qpair support.");
int ql2xfwloadbin;
module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
module_param_named(fwload, ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xfwloadbin,
"Option to specify location from which to load ISP firmware:.\n"
" 2 -- load firmware via the request_firmware() (hotplug).\n"
" interface.\n"
" 1 -- load firmware from flash.\n"
" 0 -- use default semantics.\n");
int ql2xetsenable;
module_param(ql2xetsenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xetsenable,
"Enables firmware ETS burst."
"Default is 0 - skip ETS enablement.");
int ql2xdbwr = 1;
module_param(ql2xdbwr, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xdbwr,
"Option to specify scheme for request queue posting.\n"
" 0 -- Regular doorbell.\n"
" 1 -- CAMRAM doorbell (faster).\n");
int ql2xgffidenable;
module_param(ql2xgffidenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xgffidenable,
"Enables GFF_ID checks of port type. "
"Default is 0 - Do not use GFF_ID information.");
int ql2xasynctmfenable = 1;
module_param(ql2xasynctmfenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xasynctmfenable,
"Enables issue of TM IOCBs asynchronously via IOCB mechanism"
"Default is 1 - Issue TM IOCBs via mailbox mechanism.");
int ql2xdontresethba;
module_param(ql2xdontresethba, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xdontresethba,
"Option to specify reset behaviour.\n"
" 0 (Default) -- Reset on failure.\n"
" 1 -- Do not reset on failure.\n");
uint64_t ql2xmaxlun = MAX_LUNS;
module_param(ql2xmaxlun, ullong, S_IRUGO);
MODULE_PARM_DESC(ql2xmaxlun,
"Defines the maximum LU number to register with the SCSI "
"midlayer. Default is 65535.");
int ql2xmdcapmask = 0x1F;
module_param(ql2xmdcapmask, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmdcapmask,
"Set the Minidump driver capture mask level. "
"Default is 0x1F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");
int ql2xmdenable = 1;
module_param(ql2xmdenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmdenable,
"Enable/disable MiniDump. "
"0 - MiniDump disabled. "
"1 (Default) - MiniDump enabled.");
int ql2xexlogins;
module_param(ql2xexlogins, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xexlogins,
"Number of extended Logins. "
"0 (Default)- Disabled.");
int ql2xexchoffld = 1024;
module_param(ql2xexchoffld, uint, 0644);
MODULE_PARM_DESC(ql2xexchoffld,
"Number of target exchanges.");
int ql2xiniexchg = 1024;
module_param(ql2xiniexchg, uint, 0644);
MODULE_PARM_DESC(ql2xiniexchg,
"Number of initiator exchanges.");
int ql2xfwholdabts;
module_param(ql2xfwholdabts, int, S_IRUGO);
MODULE_PARM_DESC(ql2xfwholdabts,
"Allow FW to hold status IOCB until ABTS rsp received. "
"0 (Default) Do not set fw option. "
"1 - Set fw option to hold ABTS.");
int ql2xmvasynctoatio = 1;
module_param(ql2xmvasynctoatio, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xmvasynctoatio,
"Move PUREX, ABTS RX and RIDA IOCBs to ATIOQ"
"0 (Default). Do not move IOCBs"
"1 - Move IOCBs.");
int ql2xautodetectsfp = 1;
module_param(ql2xautodetectsfp, int, 0444);
MODULE_PARM_DESC(ql2xautodetectsfp,
"Detect SFP range and set appropriate distance.\n"
"1 (Default): Enable\n");
int ql2xenablemsix = 1;
module_param(ql2xenablemsix, int, 0444);
MODULE_PARM_DESC(ql2xenablemsix,
"Set to enable MSI or MSI-X interrupt mechanism.\n"
" Default is 1, enable MSI-X interrupt mechanism.\n"
" 0 -- enable traditional pin-based mechanism.\n"
" 1 -- enable MSI-X interrupt mechanism.\n"
" 2 -- enable MSI interrupt mechanism.\n");
int qla2xuseresexchforels;
module_param(qla2xuseresexchforels, int, 0444);
MODULE_PARM_DESC(qla2xuseresexchforels,
"Reserve 1/2 of emergency exchanges for ELS.\n"
" 0 (default): disabled");
static int ql2xprotmask;
module_param(ql2xprotmask, int, 0644);
MODULE_PARM_DESC(ql2xprotmask,
"Override DIF/DIX protection capabilities mask\n"
"Default is 0 which sets protection mask based on "
"capabilities reported by HBA firmware.\n");
static int ql2xprotguard;
module_param(ql2xprotguard, int, 0644);
MODULE_PARM_DESC(ql2xprotguard, "Override choice of DIX checksum\n"
" 0 -- Let HBA firmware decide\n"
" 1 -- Force T10 CRC\n"
" 2 -- Force IP checksum\n");
int ql2xdifbundlinginternalbuffers;
module_param(ql2xdifbundlinginternalbuffers, int, 0644);
MODULE_PARM_DESC(ql2xdifbundlinginternalbuffers,
"Force using internal buffers for DIF information\n"
"0 (Default). Based on check.\n"
"1 Force using internal buffers\n");
int ql2xsmartsan;
module_param(ql2xsmartsan, int, 0444);
module_param_named(smartsan, ql2xsmartsan, int, 0444);
MODULE_PARM_DESC(ql2xsmartsan,
"Send SmartSAN Management Attributes for FDMI Registration."
" Default is 0 - No SmartSAN registration,"
" 1 - Register SmartSAN Management Attributes.");
int ql2xrdpenable;
module_param(ql2xrdpenable, int, 0444);
module_param_named(rdpenable, ql2xrdpenable, int, 0444);
MODULE_PARM_DESC(ql2xrdpenable,
"Enables RDP responses. "
"0 - no RDP responses (default). "
"1 - provide RDP responses.");
int ql2xabts_wait_nvme = 1;
module_param(ql2xabts_wait_nvme, int, 0444);
MODULE_PARM_DESC(ql2xabts_wait_nvme,
"To wait for ABTS response on I/O timeouts for NVMe. (default: 1)");
static u32 ql2xdelay_before_pci_error_handling = 5;
module_param(ql2xdelay_before_pci_error_handling, uint, 0644);
MODULE_PARM_DESC(ql2xdelay_before_pci_error_handling,
"Number of seconds delayed before qla begin PCI error self-handling (default: 5).\n");
static void qla2x00_clear_drv_active(struct qla_hw_data *);
static void qla2x00_free_device(scsi_qla_host_t *);
static void qla2xxx_map_queues(struct Scsi_Host *shost);
static void qla2x00_destroy_deferred_work(struct qla_hw_data *);
u32 ql2xnvme_queues = DEF_NVME_HW_QUEUES;
module_param(ql2xnvme_queues, uint, S_IRUGO);
MODULE_PARM_DESC(ql2xnvme_queues,
"Number of NVMe Queues that can be configured.\n"
"Final value will be min(ql2xnvme_queues, num_cpus,num_chip_queues)\n"
"1 - Minimum number of queues supported\n"
"8 - Default value");
int ql2xfc2target = 1;
module_param(ql2xfc2target, int, 0444);
MODULE_PARM_DESC(ql2xfc2target,
"Enables FC2 Target support. "
"0 - FC2 Target support is disabled. "
"1 - FC2 Target support is enabled (default).");
static struct scsi_transport_template *qla2xxx_transport_template = NULL;
struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;
/* TODO Convert to inlines
*
* Timer routines
*/
__inline__ void
qla2x00_start_timer(scsi_qla_host_t *vha, unsigned long interval)
{
timer_setup(&vha->timer, qla2x00_timer, 0);
vha->timer.expires = jiffies + interval * HZ;
add_timer(&vha->timer);
vha->timer_active = 1;
}
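/*
 * Illustrative usage (assuming the WATCH_INTERVAL heartbeat defined in
 * qla_def.h):
 *
 *	qla2x00_start_timer(base_vha, WATCH_INTERVAL);
 *
 * The interval is given in seconds and converted to jiffies above.
 */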
static inline void
qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
{
/* Currently used for 82XX only. */
if (vha->device_flags & DFLG_DEV_FAILED) {
ql_dbg(ql_dbg_timer, vha, 0x600d,
"Device in a failed state, returning.\n");
return;
}
mod_timer(&vha->timer, jiffies + interval * HZ);
}
static __inline__ void
qla2x00_stop_timer(scsi_qla_host_t *vha)
{
del_timer_sync(&vha->timer);
vha->timer_active = 0;
}
static int qla2x00_do_dpc(void *data);
static void qla2x00_rst_aen(scsi_qla_host_t *);
static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
struct req_que **, struct rsp_que **);
static void qla2x00_free_fw_dump(struct qla_hw_data *);
static void qla2x00_mem_free(struct qla_hw_data *);
int qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
struct qla_qpair *qpair);
/* -------------------------------------------------------------------------- */
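/*
 * Set up the base (default) queue pair around request/response queue 0.
 * Unlike dynamically created queue pairs, the base qpair shares the
 * legacy hardware_lock (qp_lock_ptr) and the default response-queue
 * MSI-X vector (QLA_MSIX_RSP_Q).
 */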
static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req,
struct rsp_que *rsp)
{
struct qla_hw_data *ha = vha->hw;
rsp->qpair = ha->base_qpair;
rsp->req = req;
ha->base_qpair->hw = ha;
ha->base_qpair->req = req;
ha->base_qpair->rsp = rsp;
ha->base_qpair->vha = vha;
ha->base_qpair->qp_lock_ptr = &ha->hardware_lock;
ha->base_qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q];
ha->base_qpair->srb_mempool = ha->srb_mempool;
INIT_LIST_HEAD(&ha->base_qpair->hints_list);
INIT_LIST_HEAD(&ha->base_qpair->dsd_list);
ha->base_qpair->enable_class_2 = ql2xenableclass2;
/* init qpair to this cpu. Will adjust at run time. */
qla_cpu_update(rsp->qpair, raw_smp_processor_id());
ha->base_qpair->pdev = ha->pdev;
if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))
ha->base_qpair->reqq_start_iocbs = qla_83xx_start_iocbs;
}
static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
struct rsp_que *rsp)
{
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
ha->req_q_map = kcalloc(ha->max_req_queues, sizeof(struct req_que *),
GFP_KERNEL);
if (!ha->req_q_map) {
ql_log(ql_log_fatal, vha, 0x003b,
"Unable to allocate memory for request queue ptrs.\n");
goto fail_req_map;
}
ha->rsp_q_map = kcalloc(ha->max_rsp_queues, sizeof(struct rsp_que *),
GFP_KERNEL);
if (!ha->rsp_q_map) {
ql_log(ql_log_fatal, vha, 0x003c,
"Unable to allocate memory for response queue ptrs.\n");
goto fail_rsp_map;
}
ha->base_qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
if (ha->base_qpair == NULL) {
ql_log(ql_log_warn, vha, 0x00e0,
"Failed to allocate base queue pair memory.\n");
goto fail_base_qpair;
}
qla_init_base_qpair(vha, req, rsp);
if ((ql2xmqsupport || ql2xnvmeenable) && ha->max_qpairs) {
ha->queue_pair_map = kcalloc(ha->max_qpairs, sizeof(struct qla_qpair *),
GFP_KERNEL);
if (!ha->queue_pair_map) {
ql_log(ql_log_fatal, vha, 0x0180,
"Unable to allocate memory for queue pair ptrs.\n");
goto fail_qpair_map;
}
if (qla_mapq_alloc_qp_cpu_map(ha) != 0) {
kfree(ha->queue_pair_map);
ha->queue_pair_map = NULL;
goto fail_qpair_map;
}
}
/*
* Make sure we record at least the request and response queue zero in
* case we need to free them if part of the probe fails.
*/
ha->rsp_q_map[0] = rsp;
ha->req_q_map[0] = req;
set_bit(0, ha->rsp_qid_map);
set_bit(0, ha->req_qid_map);
return 0;
fail_qpair_map:
kfree(ha->base_qpair);
ha->base_qpair = NULL;
fail_base_qpair:
kfree(ha->rsp_q_map);
ha->rsp_q_map = NULL;
fail_rsp_map:
kfree(ha->req_q_map);
ha->req_q_map = NULL;
fail_req_map:
return -ENOMEM;
}
static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
{
if (IS_QLAFX00(ha)) {
if (req && req->ring_fx00)
dma_free_coherent(&ha->pdev->dev,
(req->length_fx00 + 1) * sizeof(request_t),
req->ring_fx00, req->dma_fx00);
} else if (req && req->ring)
dma_free_coherent(&ha->pdev->dev,
(req->length + 1) * sizeof(request_t),
req->ring, req->dma);
if (req)
kfree(req->outstanding_cmds);
kfree(req);
}
static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
{
if (IS_QLAFX00(ha)) {
if (rsp && rsp->ring_fx00)
dma_free_coherent(&ha->pdev->dev,
(rsp->length_fx00 + 1) * sizeof(request_t),
rsp->ring_fx00, rsp->dma_fx00);
} else if (rsp && rsp->ring) {
dma_free_coherent(&ha->pdev->dev,
(rsp->length + 1) * sizeof(response_t),
rsp->ring, rsp->dma);
}
kfree(rsp);
}
static void qla2x00_free_queues(struct qla_hw_data *ha)
{
struct req_que *req;
struct rsp_que *rsp;
int cnt;
unsigned long flags;
if (ha->queue_pair_map) {
kfree(ha->queue_pair_map);
ha->queue_pair_map = NULL;
}
if (ha->base_qpair) {
kfree(ha->base_qpair);
ha->base_qpair = NULL;
}
qla_mapq_free_qp_cpu_map(ha);
spin_lock_irqsave(&ha->hardware_lock, flags);
for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
if (!test_bit(cnt, ha->req_qid_map))
continue;
req = ha->req_q_map[cnt];
clear_bit(cnt, ha->req_qid_map);
ha->req_q_map[cnt] = NULL;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
qla2x00_free_req_que(ha, req);
spin_lock_irqsave(&ha->hardware_lock, flags);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
kfree(ha->req_q_map);
ha->req_q_map = NULL;
spin_lock_irqsave(&ha->hardware_lock, flags);
for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
if (!test_bit(cnt, ha->rsp_qid_map))
continue;
rsp = ha->rsp_q_map[cnt];
clear_bit(cnt, ha->rsp_qid_map);
ha->rsp_q_map[cnt] = NULL;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
qla2x00_free_rsp_que(ha, rsp);
spin_lock_irqsave(&ha->hardware_lock, flags);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
kfree(ha->rsp_q_map);
ha->rsp_q_map = NULL;
}
static char *
qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
{
struct qla_hw_data *ha = vha->hw;
static const char *const pci_bus_modes[] = {
"33", "66", "100", "133",
};
uint16_t pci_bus;
pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9;
if (pci_bus) {
snprintf(str, str_len, "PCI-X (%s MHz)",
pci_bus_modes[pci_bus]);
} else {
pci_bus = (ha->pci_attr & BIT_8) >> 8;
snprintf(str, str_len, "PCI (%s MHz)", pci_bus_modes[pci_bus]);
}
return str;
}
static char *
qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
{
static const char *const pci_bus_modes[] = {
"33", "66", "100", "133",
};
struct qla_hw_data *ha = vha->hw;
uint32_t pci_bus;
if (pci_is_pcie(ha->pdev)) {
uint32_t lstat, lspeed, lwidth;
const char *speed_str;
pcie_capability_read_dword(ha->pdev, PCI_EXP_LNKCAP, &lstat);
lspeed = FIELD_GET(PCI_EXP_LNKCAP_SLS, lstat);
lwidth = FIELD_GET(PCI_EXP_LNKCAP_MLW, lstat);
switch (lspeed) {
case 1:
speed_str = "2.5GT/s";
break;
case 2:
speed_str = "5.0GT/s";
break;
case 3:
speed_str = "8.0GT/s";
break;
case 4:
speed_str = "16.0GT/s";
break;
default:
speed_str = "<unknown>";
break;
}
snprintf(str, str_len, "PCIe (%s x%d)", speed_str, lwidth);
return str;
}
pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8;
if (pci_bus == 0 || pci_bus == 8)
snprintf(str, str_len, "PCI (%s MHz)",
pci_bus_modes[pci_bus >> 3]);
else
snprintf(str, str_len, "PCI-X Mode %d (%s MHz)",
pci_bus & 4 ? 2 : 1,
pci_bus_modes[pci_bus & 3]);
return str;
}
static char *
qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
{
char un_str[10];
struct qla_hw_data *ha = vha->hw;
snprintf(str, size, "%d.%02d.%02d ", ha->fw_major_version,
ha->fw_minor_version, ha->fw_subminor_version);
if (ha->fw_attributes & BIT_9) {
strcat(str, "FLX");
return (str);
}
switch (ha->fw_attributes & 0xFF) {
case 0x7:
strcat(str, "EF");
break;
case 0x17:
strcat(str, "TP");
break;
case 0x37:
strcat(str, "IP");
break;
case 0x77:
strcat(str, "VI");
break;
default:
sprintf(un_str, "(%x)", ha->fw_attributes);
strcat(str, un_str);
break;
}
if (ha->fw_attributes & 0x100)
strcat(str, "X");
return (str);
}
static char *
qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
{
struct qla_hw_data *ha = vha->hw;
snprintf(str, size, "%d.%02d.%02d (%x)", ha->fw_major_version,
ha->fw_minor_version, ha->fw_subminor_version, ha->fw_attributes);
return str;
}
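/*
 * Release any per-command DMA state recorded in sp->flags: the data
 * scatter-gather mapping, the DIF protection mapping, DSD pool entries,
 * the CRC context, the CT6 FCP command buffer and any borrowed buffer.
 */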
void qla2x00_sp_free_dma(srb_t *sp)
{
struct qla_hw_data *ha = sp->vha->hw;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
if (sp->flags & SRB_DMA_VALID) {
scsi_dma_unmap(cmd);
sp->flags &= ~SRB_DMA_VALID;
}
if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
scsi_prot_sg_count(cmd), cmd->sc_data_direction);
sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
}
if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
/* List is assured to have elements. */
qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx);
sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
}
if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
struct crc_context *ctx0 = sp->u.scmd.crc_ctx;
dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
}
if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
struct ct6_dsd *ctx1 = &sp->u.scmd.ct6_ctx;
dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
ctx1->fcp_cmnd_dma);
list_splice(&ctx1->dsd_list, &sp->qpair->dsd_list);
sp->qpair->dsd_inuse -= ctx1->dsd_use_cnt;
sp->qpair->dsd_avail += ctx1->dsd_use_cnt;
}
if (sp->flags & SRB_GOT_BUF)
qla_put_buf(sp->qpair, &sp->u.scmd.buf_dsc);
}
void qla2x00_sp_compl(srb_t *sp, int res)
{
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
struct completion *comp = sp->comp;
/* kref: INIT */
kref_put(&sp->cmd_kref, qla2x00_sp_release);
cmd->result = res;
sp->type = 0;
scsi_done(cmd);
if (comp)
complete(comp);
}
void qla2xxx_qpair_sp_free_dma(srb_t *sp)
{
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
struct qla_hw_data *ha = sp->fcport->vha->hw;
if (sp->flags & SRB_DMA_VALID) {
scsi_dma_unmap(cmd);
sp->flags &= ~SRB_DMA_VALID;
}
if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
scsi_prot_sg_count(cmd), cmd->sc_data_direction);
sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
}
if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
/* List is assured to have elements. */
qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx);
sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
}
if (sp->flags & SRB_DIF_BUNDL_DMA_VALID) {
struct crc_context *difctx = sp->u.scmd.crc_ctx;
struct dsd_dma *dif_dsd, *nxt_dsd;
list_for_each_entry_safe(dif_dsd, nxt_dsd,
&difctx->ldif_dma_hndl_list, list) {
list_del(&dif_dsd->list);
dma_pool_free(ha->dif_bundl_pool, dif_dsd->dsd_addr,
dif_dsd->dsd_list_dma);
kfree(dif_dsd);
difctx->no_dif_bundl--;
}
list_for_each_entry_safe(dif_dsd, nxt_dsd,
&difctx->ldif_dsd_list, list) {
list_del(&dif_dsd->list);
dma_pool_free(ha->dl_dma_pool, dif_dsd->dsd_addr,
dif_dsd->dsd_list_dma);
kfree(dif_dsd);
difctx->no_ldif_dsd--;
}
if (difctx->no_ldif_dsd) {
ql_dbg(ql_dbg_tgt+ql_dbg_verbose, sp->vha, 0xe022,
"%s: difctx->no_ldif_dsd=%x\n",
__func__, difctx->no_ldif_dsd);
}
if (difctx->no_dif_bundl) {
ql_dbg(ql_dbg_tgt+ql_dbg_verbose, sp->vha, 0xe022,
"%s: difctx->no_dif_bundl=%x\n",
__func__, difctx->no_dif_bundl);
}
sp->flags &= ~SRB_DIF_BUNDL_DMA_VALID;
}
if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
struct ct6_dsd *ctx1 = &sp->u.scmd.ct6_ctx;
dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
ctx1->fcp_cmnd_dma);
list_splice(&ctx1->dsd_list, &sp->qpair->dsd_list);
sp->qpair->dsd_inuse -= ctx1->dsd_use_cnt;
sp->qpair->dsd_avail += ctx1->dsd_use_cnt;
sp->flags &= ~SRB_FCP_CMND_DMA_VALID;
}
if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
struct crc_context *ctx0 = sp->u.scmd.crc_ctx;
dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
}
if (sp->flags & SRB_GOT_BUF)
qla_put_buf(sp->qpair, &sp->u.scmd.buf_dsc);
}
void qla2xxx_qpair_sp_compl(srb_t *sp, int res)
{
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
struct completion *comp = sp->comp;
/* ref: INIT */
kref_put(&sp->cmd_kref, qla2x00_sp_release);
cmd->result = res;
sp->type = 0;
scsi_done(cmd);
if (comp)
complete(comp);
}
static int
qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
scsi_qla_host_t *vha = shost_priv(host);
fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
struct qla_hw_data *ha = vha->hw;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
srb_t *sp;
int rval;
if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)) ||
WARN_ON_ONCE(!rport)) {
cmd->result = DID_NO_CONNECT << 16;
goto qc24_fail_command;
}
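/*
 * With MQ enabled, dispatch straight to the qpair backing the command's
 * hardware queue: blk_mq_unique_tag() encodes the hwq index in its upper
 * 16 bits and blk_mq_unique_tag_to_hwq() extracts it for the
 * queue_pair_map lookup.
 */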
if (ha->mqenable) {
uint32_t tag;
uint16_t hwq;
struct qla_qpair *qpair = NULL;
tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd));
hwq = blk_mq_unique_tag_to_hwq(tag);
qpair = ha->queue_pair_map[hwq];
if (qpair)
return qla2xxx_mqueuecommand(host, cmd, qpair);
}
if (ha->flags.eeh_busy) {
if (ha->flags.pci_channel_io_perm_failure) {
ql_dbg(ql_dbg_aer, vha, 0x9010,
"PCI Channel IO permanent failure, exiting "
"cmd=%p.\n", cmd);
cmd->result = DID_NO_CONNECT << 16;
} else {
ql_dbg(ql_dbg_aer, vha, 0x9011,
"EEH_Busy, Requeuing the cmd=%p.\n", cmd);
cmd->result = DID_REQUEUE << 16;
}
goto qc24_fail_command;
}
rval = fc_remote_port_chkready(rport);
if (rval) {
cmd->result = rval;
ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3003,
"fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
cmd, rval);
goto qc24_fail_command;
}
if (!vha->flags.difdix_supported &&
scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
ql_dbg(ql_dbg_io, vha, 0x3004,
"DIF Cap not reg, fail DIF capable cmd's:%p.\n",
cmd);
cmd->result = DID_NO_CONNECT << 16;
goto qc24_fail_command;
}
if (!fcport || fcport->deleted) {
cmd->result = DID_IMM_RETRY << 16;
goto qc24_fail_command;
}
if (atomic_read(&fcport->state) != FCS_ONLINE || fcport->deleted) {
if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
ql_dbg(ql_dbg_io, vha, 0x3005,
"Returning DNC, fcport_state=%d loop_state=%d.\n",
atomic_read(&fcport->state),
atomic_read(&base_vha->loop_state));
cmd->result = DID_NO_CONNECT << 16;
goto qc24_fail_command;
}
goto qc24_target_busy;
}
/*
* Return target busy if we've received a non-zero retry_delay_timer
* in a FCP_RSP.
*/
if (fcport->retry_delay_timestamp == 0) {
/* retry delay not set */
} else if (time_after(jiffies, fcport->retry_delay_timestamp))
fcport->retry_delay_timestamp = 0;
else
goto qc24_target_busy;
sp = scsi_cmd_priv(cmd);
/* ref: INIT */
qla2xxx_init_sp(sp, vha, vha->hw->base_qpair, fcport);
sp->u.scmd.cmd = cmd;
sp->type = SRB_SCSI_CMD;
sp->free = qla2x00_sp_free_dma;
sp->done = qla2x00_sp_compl;
rval = ha->isp_ops->start_scsi(sp);
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3013,
"Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
goto qc24_host_busy_free_sp;
}
return 0;
qc24_host_busy_free_sp:
/* ref: INIT */
kref_put(&sp->cmd_kref, qla2x00_sp_release);
qc24_target_busy:
return SCSI_MLQUEUE_TARGET_BUSY;
qc24_fail_command:
scsi_done(cmd);
return 0;
}
/* For MQ supported I/O */
int
qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
struct qla_qpair *qpair)
{
scsi_qla_host_t *vha = shost_priv(host);
fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
struct qla_hw_data *ha = vha->hw;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
srb_t *sp;
int rval;
rval = rport ? fc_remote_port_chkready(rport) : (DID_NO_CONNECT << 16);
if (rval) {
cmd->result = rval;
ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3076,
"fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
cmd, rval);
goto qc24_fail_command;
}
if (!qpair->online) {
ql_dbg(ql_dbg_io, vha, 0x3077,
"qpair not online. eeh_busy=%d.\n", ha->flags.eeh_busy);
cmd->result = DID_NO_CONNECT << 16;
goto qc24_fail_command;
}
if (!fcport || fcport->deleted) {
cmd->result = DID_IMM_RETRY << 16;
goto qc24_fail_command;
}
if (atomic_read(&fcport->state) != FCS_ONLINE || fcport->deleted) {
if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
ql_dbg(ql_dbg_io, vha, 0x3077,
"Returning DNC, fcport_state=%d loop_state=%d.\n",
atomic_read(&fcport->state),
atomic_read(&base_vha->loop_state));
cmd->result = DID_NO_CONNECT << 16;
goto qc24_fail_command;
}
goto qc24_target_busy;
}
/*
* Return target busy if we've received a non-zero retry_delay_timer
* in a FCP_RSP.
*/
if (fcport->retry_delay_timestamp == 0) {
/* retry delay not set */
} else if (time_after(jiffies, fcport->retry_delay_timestamp))
fcport->retry_delay_timestamp = 0;
else
goto qc24_target_busy;
sp = scsi_cmd_priv(cmd);
/* ref: INIT */
qla2xxx_init_sp(sp, vha, qpair, fcport);
sp->u.scmd.cmd = cmd;
sp->type = SRB_SCSI_CMD;
sp->free = qla2xxx_qpair_sp_free_dma;
sp->done = qla2xxx_qpair_sp_compl;
rval = ha->isp_ops->start_scsi_mq(sp);
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078,
"Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
goto qc24_host_busy_free_sp;
}
return 0;
qc24_host_busy_free_sp:
/* ref: INIT */
kref_put(&sp->cmd_kref, qla2x00_sp_release);
qc24_target_busy:
return SCSI_MLQUEUE_TARGET_BUSY;
qc24_fail_command:
scsi_done(cmd);
return 0;
}
/*
* qla2x00_wait_for_hba_online
* Wait until the HBA is online: the adapter goes through at most
* MAX_RETRIES_OF_ISP_ABORT retries before it is finally disabled,
* i.e. marked offline.
*
* Input:
* ha - pointer to host adapter structure
*
* Note:
* This routine may sleep. Release any spinlock held
* before calling it.
*
* Return:
* Success (Adapter is online) : QLA_SUCCESS
* Failed (Adapter is offline/disabled) : QLA_FUNCTION_FAILED
*/
int
qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
{
int return_status;
unsigned long wait_online;
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
ha->dpc_active) && time_before(jiffies, wait_online)) {
msleep(1000);
}
if (base_vha->flags.online)
return_status = QLA_SUCCESS;
else
return_status = QLA_FUNCTION_FAILED;
return (return_status);
}
static inline int test_fcport_count(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
int res;
/* Return 0 = sleep, x=wake */
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
ql_dbg(ql_dbg_init, vha, 0x00ec,
"tgt %p, fcport_count=%d\n",
vha, vha->fcport_count);
res = (vha->fcport_count == 0);
if (res) {
struct fc_port *fcport;
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (fcport->deleted != QLA_SESS_DELETED) {
/* session(s) may not be fully logged in
* (ie fcport_count=0), but session
* deletion thread(s) may be inflight.
*/
res = 0;
break;
}
}
}
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
return res;
}
/*
* qla2x00_wait_for_sess_deletion can only be called from remove_one.
* It depends on the UNLOADING flag to stop device discovery.
*/
void
qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
{
u8 i;
qla2x00_mark_all_devices_lost(vha);
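/* Poll up to ~10 s (10 x 1 s waits) for all sessions to finish deletion. */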
for (i = 0; i < 10; i++) {
if (wait_event_timeout(vha->fcport_waitQ,
test_fcport_count(vha), HZ) > 0)
break;
}
flush_workqueue(vha->hw->wq);
}
/*
* qla2x00_wait_for_hba_ready
* Wait till the HBA is ready before doing driver unload
*
* Input:
* ha - pointer to host adapter structure
*
* Note:
* This routine may sleep. Release any spinlock held
* before calling it.
*
*/
static void
qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
while ((qla2x00_reset_active(vha) || ha->dpc_active ||
ha->flags.mbox_busy) ||
test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags) ||
test_bit(FX00_TARGET_SCAN, &vha->dpc_flags)) {
if (test_bit(UNLOADING, &base_vha->dpc_flags))
break;
msleep(1000);
}
}
int
qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
{
int return_status;
unsigned long wait_reset;
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
ha->dpc_active) && time_before(jiffies, wait_reset)) {
msleep(1000);
if (!test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
ha->flags.chip_reset_done)
break;
}
if (ha->flags.chip_reset_done)
return_status = QLA_SUCCESS;
else
return_status = QLA_FUNCTION_FAILED;
return return_status;
}
/**************************************************************************
* qla2xxx_eh_abort
*
* Description:
* The abort function will abort the specified command.
*
* Input:
* cmd = Linux SCSI command packet to be aborted.
*
* Returns:
* Either SUCCESS or FAILED.
*
* Note:
* Only return FAILED if command not returned by firmware.
**************************************************************************/
static int
qla2xxx_eh_abort(struct scsi_cmnd *cmd)
{
scsi_qla_host_t *vha = shost_priv(cmd->device->host);
DECLARE_COMPLETION_ONSTACK(comp);
srb_t *sp;
int ret;
unsigned int id;
uint64_t lun;
int rval;
struct qla_hw_data *ha = vha->hw;
uint32_t ratov_j;
struct qla_qpair *qpair;
unsigned long flags;
int fast_fail_status = SUCCESS;
if (qla2x00_isp_reg_stat(ha)) {
ql_log(ql_log_info, vha, 0x8042,
"PCI/Register disconnect, exiting.\n");
qla_pci_set_eeh_busy(vha);
return FAILED;
}
/* Save any FAST_IO_FAIL value to return later if abort succeeds */
ret = fc_block_scsi_eh(cmd);
if (ret != 0)
fast_fail_status = ret;
sp = scsi_cmd_priv(cmd);
qpair = sp->qpair;
vha->cmd_timeout_cnt++;
if ((sp->fcport && sp->fcport->deleted) || !qpair)
return fast_fail_status != SUCCESS ? fast_fail_status : FAILED;
spin_lock_irqsave(qpair->qp_lock_ptr, flags);
sp->comp = &comp;
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
id = cmd->device->id;
lun = cmd->device->lun;
ql_dbg(ql_dbg_taskm, vha, 0x8002,
"Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n",
vha->host_no, id, lun, sp, cmd, sp->handle);
/*
* Abort will release the original Command/sp from FW. Let the
* original command call scsi_done. In return, it will wake up
* this sleeping thread.
*/
rval = ha->isp_ops->abort_command(sp);
ql_dbg(ql_dbg_taskm, vha, 0x8003,
"Abort command mbx cmd=%p, rval=%x.\n", cmd, rval);
/* Wait for the command completion. */
ratov_j = ha->r_a_tov/10 * 4 * 1000;
ratov_j = msecs_to_jiffies(ratov_j);
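/*
 * ha->r_a_tov is kept in 100 ms units here, so r_a_tov / 10 is seconds
 * and ratov_j is 4 * R_A_TOV expressed in jiffies.
 */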
switch (rval) {
case QLA_SUCCESS:
if (!wait_for_completion_timeout(&comp, ratov_j)) {
ql_dbg(ql_dbg_taskm, vha, 0xffff,
"%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n",
__func__, ha->r_a_tov/10);
ret = FAILED;
} else {
ret = fast_fail_status;
}
break;
default:
ret = FAILED;
break;
}
sp->comp = NULL;
ql_log(ql_log_info, vha, 0x801c,
"Abort command issued nexus=%ld:%d:%llu -- %x.\n",
vha->host_no, id, lun, ret);
return ret;
}
#define ABORT_POLLING_PERIOD 1000
#define ABORT_WAIT_ITER ((2 * 1000) / (ABORT_POLLING_PERIOD))
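/* Poll once per ABORT_POLLING_PERIOD ms; ABORT_WAIT_ITER caps the wait at ~2 s. */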
/*
* Returns: QLA_SUCCESS or QLA_FUNCTION_FAILED.
*/
static int
__qla2x00_eh_wait_for_pending_commands(struct qla_qpair *qpair, unsigned int t,
uint64_t l, enum nexus_wait_type type)
{
int cnt, match, status;
unsigned long flags;
scsi_qla_host_t *vha = qpair->vha;
struct req_que *req = qpair->req;
srb_t *sp;
struct scsi_cmnd *cmd;
unsigned long wait_iter = ABORT_WAIT_ITER;
bool found;
struct qla_hw_data *ha = vha->hw;
status = QLA_SUCCESS;
while (wait_iter--) {
found = false;
spin_lock_irqsave(qpair->qp_lock_ptr, flags);
for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if (!sp)
continue;
if (sp->type != SRB_SCSI_CMD)
continue;
if (vha->vp_idx != sp->vha->vp_idx)
continue;
match = 0;
cmd = GET_CMD_SP(sp);
switch (type) {
case WAIT_HOST:
match = 1;
break;
case WAIT_TARGET:
if (sp->fcport)
match = sp->fcport->d_id.b24 == t;
else
match = 0;
break;
case WAIT_LUN:
if (sp->fcport)
match = (sp->fcport->d_id.b24 == t &&
cmd->device->lun == l);
else
match = 0;
break;
}
if (!match)
continue;
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
if (unlikely(pci_channel_offline(ha->pdev)) ||
ha->flags.eeh_busy) {
ql_dbg(ql_dbg_taskm, vha, 0x8005,
"Return:eh_wait.\n");
return status;
}
/*
* SRB_SCSI_CMD is still in the outstanding_cmds array.
* It means scsi_done has not been called. Wait for it to
* clear from outstanding_cmds.
*/
msleep(ABORT_POLLING_PERIOD);
spin_lock_irqsave(qpair->qp_lock_ptr, flags);
found = true;
}
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
if (!found)
break;
}
if (wait_iter == -1)
status = QLA_FUNCTION_FAILED;
return status;
}
int
qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
uint64_t l, enum nexus_wait_type type)
{
struct qla_qpair *qpair;
struct qla_hw_data *ha = vha->hw;
int i, status = QLA_SUCCESS;
status = __qla2x00_eh_wait_for_pending_commands(ha->base_qpair, t, l,
type);
for (i = 0; status == QLA_SUCCESS && i < ha->max_qpairs; i++) {
qpair = ha->queue_pair_map[i];
if (!qpair)
continue;
status = __qla2x00_eh_wait_for_pending_commands(qpair, t, l,
type);
}
return status;
}
static char *reset_errors[] = {
"HBA not online",
"HBA not ready",
"Task management failed",
"Waiting for command completions",
};
static int
qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
{
struct scsi_device *sdev = cmd->device;
scsi_qla_host_t *vha = shost_priv(sdev->host);
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
fc_port_t *fcport = (struct fc_port *) sdev->hostdata;
struct qla_hw_data *ha = vha->hw;
int err;
if (qla2x00_isp_reg_stat(ha)) {
ql_log(ql_log_info, vha, 0x803e,
"PCI/Register disconnect, exiting.\n");
qla_pci_set_eeh_busy(vha);
return FAILED;
}
if (!fcport) {
return FAILED;
}
err = fc_block_rport(rport);
if (err != 0)
return err;
if (fcport->deleted)
return FAILED;
ql_log(ql_log_info, vha, 0x8009,
"DEVICE RESET ISSUED nexus=%ld:%d:%llu cmd=%p.\n", vha->host_no,
sdev->id, sdev->lun, cmd);
err = 0;
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x800a,
"Wait for hba online failed for cmd=%p.\n", cmd);
goto eh_reset_failed;
}
err = 2;
if (ha->isp_ops->lun_reset(fcport, sdev->lun, 1)
!= QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x800c,
"do_reset failed for cmd=%p.\n", cmd);
goto eh_reset_failed;
}
err = 3;
if (qla2x00_eh_wait_for_pending_commands(vha, fcport->d_id.b24,
cmd->device->lun,
WAIT_LUN) != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x800d,
"wait for pending cmds failed for cmd=%p.\n", cmd);
goto eh_reset_failed;
}
ql_log(ql_log_info, vha, 0x800e,
"DEVICE RESET SUCCEEDED nexus:%ld:%d:%llu cmd=%p.\n",
vha->host_no, sdev->id, sdev->lun, cmd);
return SUCCESS;
eh_reset_failed:
ql_log(ql_log_info, vha, 0x800f,
"DEVICE RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n",
reset_errors[err], vha->host_no, sdev->id, sdev->lun,
cmd);
vha->reset_cmd_err_cnt++;
return FAILED;
}
static int
qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
{
struct scsi_device *sdev = cmd->device;
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
scsi_qla_host_t *vha = shost_priv(rport_to_shost(rport));
struct qla_hw_data *ha = vha->hw;
fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
int err;
if (qla2x00_isp_reg_stat(ha)) {
ql_log(ql_log_info, vha, 0x803f,
"PCI/Register disconnect, exiting.\n");
qla_pci_set_eeh_busy(vha);
return FAILED;
}
if (!fcport) {
return FAILED;
}
err = fc_block_rport(rport);
if (err != 0)
return err;
if (fcport->deleted)
return FAILED;
ql_log(ql_log_info, vha, 0x8009,
"TARGET RESET ISSUED nexus=%ld:%d cmd=%p.\n", vha->host_no,
sdev->id, cmd);
err = 0;
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x800a,
"Wait for hba online failed for cmd=%p.\n", cmd);
goto eh_reset_failed;
}
err = 2;
if (ha->isp_ops->target_reset(fcport, 0, 0) != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x800c,
"target_reset failed for cmd=%p.\n", cmd);
goto eh_reset_failed;
}
err = 3;
if (qla2x00_eh_wait_for_pending_commands(vha, fcport->d_id.b24, 0,
WAIT_TARGET) != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x800d,
"wait for pending cmds failed for cmd=%p.\n", cmd);
goto eh_reset_failed;
}
ql_log(ql_log_info, vha, 0x800e,
"TARGET RESET SUCCEEDED nexus:%ld:%d cmd=%p.\n",
vha->host_no, sdev->id, cmd);
return SUCCESS;
eh_reset_failed:
ql_log(ql_log_info, vha, 0x800f,
"TARGET RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n",
reset_errors[err], vha->host_no, cmd->device->id, cmd->device->lun,
cmd);
vha->reset_cmd_err_cnt++;
return FAILED;
}
/**************************************************************************
* qla2xxx_eh_bus_reset
*
* Description:
* The bus reset function will reset the bus and abort any executing
* commands.
*
* Input:
* cmd = Linux SCSI command packet of the command that caused the
* bus reset.
*
* Returns:
* SUCCESS/FAILED (defined as macros in scsi.h).
*
**************************************************************************/
static int
qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
{
scsi_qla_host_t *vha = shost_priv(cmd->device->host);
int ret = FAILED;
unsigned int id;
uint64_t lun;
struct qla_hw_data *ha = vha->hw;
if (qla2x00_isp_reg_stat(ha)) {
ql_log(ql_log_info, vha, 0x8040,
"PCI/Register disconnect, exiting.\n");
qla_pci_set_eeh_busy(vha);
return FAILED;
}
id = cmd->device->id;
lun = cmd->device->lun;
if (qla2x00_chip_is_down(vha))
return ret;
ql_log(ql_log_info, vha, 0x8012,
"BUS RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun);
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
ql_log(ql_log_fatal, vha, 0x8013,
"Wait for hba online failed board disabled.\n");
goto eh_bus_reset_done;
}
if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
ret = SUCCESS;
if (ret == FAILED)
goto eh_bus_reset_done;
/* Flush outstanding commands. */
if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) !=
QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x8014,
"Wait for pending commands failed.\n");
ret = FAILED;
}
eh_bus_reset_done:
ql_log(ql_log_warn, vha, 0x802b,
"BUS RESET %s nexus=%ld:%d:%llu.\n",
(ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);
return ret;
}
/**************************************************************************
* qla2xxx_eh_host_reset
*
* Description:
* The reset function will reset the Adapter.
*
* Input:
* cmd = Linux SCSI command packet of the command that caused the
* adapter reset.
*
* Returns:
* Either SUCCESS or FAILED.
*
* Note:
**************************************************************************/
static int
qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
{
scsi_qla_host_t *vha = shost_priv(cmd->device->host);
struct qla_hw_data *ha = vha->hw;
int ret = FAILED;
unsigned int id;
uint64_t lun;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
if (qla2x00_isp_reg_stat(ha)) {
ql_log(ql_log_info, vha, 0x8041,
"PCI/Register disconnect, exiting.\n");
qla_pci_set_eeh_busy(vha);
return SUCCESS;
}
id = cmd->device->id;
lun = cmd->device->lun;
ql_log(ql_log_info, vha, 0x8018,
"ADAPTER RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun);
/*
* No point in issuing another reset if one is active. Also do not
* attempt a reset if we are updating flash.
*/
if (qla2x00_reset_active(vha) || ha->optrom_state != QLA_SWAITING)
goto eh_host_reset_lock;
if (vha != base_vha) {
if (qla2x00_vp_abort_isp(vha))
goto eh_host_reset_lock;
} else {
if (IS_P3P_TYPE(vha->hw)) {
if (!qla82xx_fcoe_ctx_reset(vha)) {
/* Ctx reset success */
ret = SUCCESS;
goto eh_host_reset_lock;
}
/* fall thru if ctx reset failed */
}
if (ha->wq)
flush_workqueue(ha->wq);
set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
if (ha->isp_ops->abort_isp(base_vha)) {
clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
/* failed. schedule dpc to try */
set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x802a,
"wait for hba online failed.\n");
goto eh_host_reset_lock;
}
}
clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
}
/* Waiting for command to be returned to OS.*/
if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) ==
QLA_SUCCESS)
ret = SUCCESS;
eh_host_reset_lock:
ql_log(ql_log_info, vha, 0x8017,
"ADAPTER RESET %s nexus=%ld:%d:%llu.\n",
(ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);
return ret;
}
/*
* qla2x00_loop_reset
* Issue loop reset.
*
* Input:
* ha = adapter block pointer.
*
* Returns:
* 0 = success
*/
int
qla2x00_loop_reset(scsi_qla_host_t *vha)
{
int ret;
struct qla_hw_data *ha = vha->hw;
if (IS_QLAFX00(ha))
return QLA_SUCCESS;
if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
atomic_set(&vha->loop_state, LOOP_DOWN);
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
qla2x00_mark_all_devices_lost(vha);
ret = qla2x00_full_login_lip(vha);
if (ret != QLA_SUCCESS) {
ql_dbg(ql_dbg_taskm, vha, 0x802d,
"full_login_lip=%d.\n", ret);
}
}
if (ha->flags.enable_lip_reset) {
ret = qla2x00_lip_reset(vha);
if (ret != QLA_SUCCESS)
ql_dbg(ql_dbg_taskm, vha, 0x802e,
"lip_reset failed (%d).\n", ret);
}
/* Issue marker command only when we are going to start the I/O */
vha->marker_needed = 1;
return QLA_SUCCESS;
}
/*
* The caller must ensure that no completion interrupts will happen
* while this function is in progress.
*/
static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
unsigned long *flags)
__releases(qp->qp_lock_ptr)
__acquires(qp->qp_lock_ptr)
{
DECLARE_COMPLETION_ONSTACK(comp);
scsi_qla_host_t *vha = qp->vha;
struct qla_hw_data *ha = vha->hw;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
int rval;
bool ret_cmd;
uint32_t ratov_j;
lockdep_assert_held(qp->qp_lock_ptr);
if (qla2x00_chip_is_down(vha)) {
sp->done(sp, res);
return;
}
if (sp->type == SRB_NVME_CMD || sp->type == SRB_NVME_LS ||
(sp->type == SRB_SCSI_CMD && !ha->flags.eeh_busy &&
!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
!qla2x00_isp_reg_stat(ha))) {
if (sp->comp) {
sp->done(sp, res);
return;
}
sp->comp = &comp;
spin_unlock_irqrestore(qp->qp_lock_ptr, *flags);
rval = ha->isp_ops->abort_command(sp);
/* Wait for command completion. */
ret_cmd = false;
ratov_j = ha->r_a_tov/10 * 4 * 1000;
ratov_j = msecs_to_jiffies(ratov_j);
switch (rval) {
case QLA_SUCCESS:
if (wait_for_completion_timeout(&comp, ratov_j)) {
ql_dbg(ql_dbg_taskm, vha, 0xffff,
"%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n",
__func__, ha->r_a_tov/10);
ret_cmd = true;
}
/* else FW returns SP to driver */
break;
default:
ret_cmd = true;
break;
}
spin_lock_irqsave(qp->qp_lock_ptr, *flags);
switch (sp->type) {
case SRB_SCSI_CMD:
if (ret_cmd && blk_mq_request_started(scsi_cmd_to_rq(cmd)))
sp->done(sp, res);
break;
default:
if (ret_cmd)
sp->done(sp, res);
break;
}
} else {
sp->done(sp, res);
}
}
/*
* The caller must ensure that no completion interrupts will happen
* while this function is in progress.
*/
static void
__qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
{
int cnt;
unsigned long flags;
srb_t *sp;
scsi_qla_host_t *vha = qp->vha;
struct qla_hw_data *ha = vha->hw;
struct req_que *req;
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
struct qla_tgt_cmd *cmd;
if (!ha->req_q_map)
return;
spin_lock_irqsave(qp->qp_lock_ptr, flags);
req = qp->req;
for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
if (qla2x00_chip_is_down(vha)) {
req->outstanding_cmds[cnt] = NULL;
sp->done(sp, res);
continue;
}
switch (sp->cmd_type) {
case TYPE_SRB:
qla2x00_abort_srb(qp, sp, res, &flags);
break;
case TYPE_TGT_CMD:
if (!vha->hw->tgt.tgt_ops || !tgt ||
qla_ini_mode_enabled(vha)) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
"HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n",
vha->dpc_flags);
continue;
}
cmd = (struct qla_tgt_cmd *)sp;
cmd->aborted = 1;
break;
case TYPE_TGT_TMCMD:
/* Skip task management functions. */
break;
default:
break;
}
req->outstanding_cmds[cnt] = NULL;
}
}
spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
}
/*
* The caller must ensure that no completion interrupts will happen
* while this function is in progress.
*/
void
qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
{
int que;
struct qla_hw_data *ha = vha->hw;
/* Continue only if initialization complete. */
if (!ha->base_qpair)
return;
__qla2x00_abort_all_cmds(ha->base_qpair, res);
if (!ha->queue_pair_map)
return;
for (que = 0; que < ha->max_qpairs; que++) {
if (!ha->queue_pair_map[que])
continue;
__qla2x00_abort_all_cmds(ha->queue_pair_map[que], res);
}
}
static int
qla2xxx_slave_alloc(struct scsi_device *sdev)
{
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
if (!rport || fc_remote_port_chkready(rport))
return -ENXIO;
sdev->hostdata = *(fc_port_t **)rport->dd_data;
return 0;
}
static int
qla2xxx_slave_configure(struct scsi_device *sdev)
{
scsi_qla_host_t *vha = shost_priv(sdev->host);
struct req_que *req = vha->req;
scsi_change_queue_depth(sdev, req->max_q_depth);
return 0;
}
static void
qla2xxx_slave_destroy(struct scsi_device *sdev)
{
sdev->hostdata = NULL;
}
/**
* qla2x00_config_dma_addressing() - Configure OS DMA addressing method.
* @ha: HA context
*
* At exit, @ha's flags.enable_64bit_addressing is set to indicate the
* supported addressing method.
*/
static void
qla2x00_config_dma_addressing(struct qla_hw_data *ha)
{
/* Assume a 32bit DMA mask. */
ha->flags.enable_64bit_addressing = 0;
if (!dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
/* Any upper-dword bits set? */
if (MSD(dma_get_required_mask(&ha->pdev->dev)) &&
!dma_set_coherent_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
/* Ok, a 64bit DMA mask is applicable. */
ha->flags.enable_64bit_addressing = 1;
ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
return;
}
}
dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
dma_set_coherent_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
}
static void
qla2x00_enable_intrs(struct qla_hw_data *ha)
{
unsigned long flags = 0;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->interrupts_on = 1;
/* enable risc and host interrupts */
wrt_reg_word(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
rd_reg_word(&reg->ictrl);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static void
qla2x00_disable_intrs(struct qla_hw_data *ha)
{
unsigned long flags = 0;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->interrupts_on = 0;
/* disable risc and host interrupts */
wrt_reg_word(&reg->ictrl, 0);
rd_reg_word(&reg->ictrl);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static void
qla24xx_enable_intrs(struct qla_hw_data *ha)
{
unsigned long flags = 0;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->interrupts_on = 1;
wrt_reg_dword(&reg->ictrl, ICRX_EN_RISC_INT);
rd_reg_dword(&reg->ictrl);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static void
qla24xx_disable_intrs(struct qla_hw_data *ha)
{
unsigned long flags = 0;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
if (IS_NOPOLLING_TYPE(ha))
return;
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->interrupts_on = 0;
wrt_reg_dword(&reg->ictrl, 0);
rd_reg_dword(&reg->ictrl);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static int
qla2x00_iospace_config(struct qla_hw_data *ha)
{
resource_size_t pio;
uint16_t msix;
if (pci_request_selected_regions(ha->pdev, ha->bars,
QLA2XXX_DRIVER_NAME)) {
ql_log_pci(ql_log_fatal, ha->pdev, 0x0011,
"Failed to reserve PIO/MMIO regions (%s), aborting.\n",
pci_name(ha->pdev));
goto iospace_error_exit;
}
if (!(ha->bars & 1))
goto skip_pio;
/* We only need PIO for Flash operations on ISP2312 v2 chips. */
pio = pci_resource_start(ha->pdev, 0);
if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
ql_log_pci(ql_log_warn, ha->pdev, 0x0012,
"Invalid pci I/O region size (%s).\n",
pci_name(ha->pdev));
pio = 0;
}
} else {
ql_log_pci(ql_log_warn, ha->pdev, 0x0013,
"Region #0 no a PIO resource (%s).\n",
pci_name(ha->pdev));
pio = 0;
}
ha->pio_address = pio;
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014,
"PIO address=%llu.\n",
(unsigned long long)ha->pio_address);
skip_pio:
/* Use MMIO operations for all accesses. */
if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
ql_log_pci(ql_log_fatal, ha->pdev, 0x0015,
"Region #1 not an MMIO resource (%s), aborting.\n",
pci_name(ha->pdev));
goto iospace_error_exit;
}
if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
ql_log_pci(ql_log_fatal, ha->pdev, 0x0016,
"Invalid PCI mem region size (%s), aborting.\n",
pci_name(ha->pdev));
goto iospace_error_exit;
}
ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
if (!ha->iobase) {
ql_log_pci(ql_log_fatal, ha->pdev, 0x0017,
"Cannot remap MMIO (%s), aborting.\n",
pci_name(ha->pdev));
goto iospace_error_exit;
}
/* Determine queue resources */
ha->max_req_queues = ha->max_rsp_queues = 1;
ha->msix_count = QLA_BASE_VECTORS;
/* Check if FW supports MQ or not */
if (!(ha->fw_attributes & BIT_6))
goto mqiobase_exit;
if (!ql2xmqsupport || !ql2xnvmeenable ||
(!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
goto mqiobase_exit;
ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
pci_resource_len(ha->pdev, 3));
if (ha->mqiobase) {
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018,
"MQIO Base=%p.\n", ha->mqiobase);
/* Read MSIX vector size of the board */
pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
ha->msix_count = msix + 1;
/* Max queues are bounded by available msix vectors */
/* MB interrupt uses 1 vector */
ha->max_req_queues = ha->msix_count - 1;
ha->max_rsp_queues = ha->max_req_queues;
/* Queue pair count is the max value minus the base queue pair */
ha->max_qpairs = ha->max_rsp_queues - 1;
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0188,
"Max no of queues pairs: %d.\n", ha->max_qpairs);
ql_log_pci(ql_log_info, ha->pdev, 0x001a,
"MSI-X vector count: %d.\n", ha->msix_count);
} else
ql_log_pci(ql_log_info, ha->pdev, 0x001b,
"BAR 3 not enabled.\n");
mqiobase_exit:
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c,
"MSIX Count: %d.\n", ha->msix_count);
return (0);
iospace_error_exit:
return (-ENOMEM);
}
static int
qla83xx_iospace_config(struct qla_hw_data *ha)
{
uint16_t msix;
if (pci_request_selected_regions(ha->pdev, ha->bars,
QLA2XXX_DRIVER_NAME)) {
ql_log_pci(ql_log_fatal, ha->pdev, 0x0117,
"Failed to reserve PIO/MMIO regions (%s), aborting.\n",
pci_name(ha->pdev));
goto iospace_error_exit;
}
/* Use MMIO operations for all accesses. */
if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
ql_log_pci(ql_log_warn, ha->pdev, 0x0118,
"Invalid pci I/O region size (%s).\n",
pci_name(ha->pdev));
goto iospace_error_exit;
}
if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
ql_log_pci(ql_log_warn, ha->pdev, 0x0119,
"Invalid PCI mem region size (%s), aborting\n",
pci_name(ha->pdev));
goto iospace_error_exit;
}
ha->iobase = ioremap(pci_resource_start(ha->pdev, 0), MIN_IOBASE_LEN);
if (!ha->iobase) {
ql_log_pci(ql_log_fatal, ha->pdev, 0x011a,
"Cannot remap MMIO (%s), aborting.\n",
pci_name(ha->pdev));
goto iospace_error_exit;
}
/* 64bit PCI BAR - BAR2 will correspond to region 4 */
/* 83XX 26XX always use MQ type access for queues
* - mbar 2, a.k.a region 4 */
ha->max_req_queues = ha->max_rsp_queues = 1;
ha->msix_count = QLA_BASE_VECTORS;
ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 4),
pci_resource_len(ha->pdev, 4));
if (!ha->mqiobase) {
ql_log_pci(ql_log_fatal, ha->pdev, 0x011d,
"BAR2/region4 not enabled\n");
goto mqiobase_exit;
}
ha->msixbase = ioremap(pci_resource_start(ha->pdev, 2),
pci_resource_len(ha->pdev, 2));
if (ha->msixbase) {
/* Read MSIX vector size of the board */
pci_read_config_word(ha->pdev,
QLA_83XX_PCI_MSIX_CONTROL, &msix);
ha->msix_count = (msix & PCI_MSIX_FLAGS_QSIZE) + 1;
/*
* By default, driver uses at least two msix vectors
* (default & rspq)
*/
if (ql2xmqsupport || ql2xnvmeenable) {
/* MB interrupt uses 1 vector */
ha->max_req_queues = ha->msix_count - 1;
/* ATIOQ needs 1 vector. That's 1 less QPair */
if (QLA_TGT_MODE_ENABLED())
ha->max_req_queues--;
ha->max_rsp_queues = ha->max_req_queues;
/* Queue pair count is the max value minus
* the base queue pair */
ha->max_qpairs = ha->max_req_queues - 1;
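/*
 * Worked example: with 32 MSI-X vectors and target mode enabled,
 * max_req_queues = 32 - 1 (MB vector) - 1 (ATIOQ) = 30, so
 * max_qpairs = 29.
 */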
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x00e3,
"Max no of queues pairs: %d.\n", ha->max_qpairs);
}
ql_log_pci(ql_log_info, ha->pdev, 0x011c,
"MSI-X vector count: %d.\n", ha->msix_count);
} else
ql_log_pci(ql_log_info, ha->pdev, 0x011e,
"BAR 1 not enabled.\n");
mqiobase_exit:
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011f,
"MSIX Count: %d.\n", ha->msix_count);
return 0;
iospace_error_exit:
return -ENOMEM;
}
static struct isp_operations qla2100_isp_ops = {
.pci_config = qla2100_pci_config,
.reset_chip = qla2x00_reset_chip,
.chip_diag = qla2x00_chip_diag,
.config_rings = qla2x00_config_rings,
.reset_adapter = qla2x00_reset_adapter,
.nvram_config = qla2x00_nvram_config,
.update_fw_options = qla2x00_update_fw_options,
.load_risc = qla2x00_load_risc,
.pci_info_str = qla2x00_pci_info_str,
.fw_version_str = qla2x00_fw_version_str,
.intr_handler = qla2100_intr_handler,
.enable_intrs = qla2x00_enable_intrs,
.disable_intrs = qla2x00_disable_intrs,
.abort_command = qla2x00_abort_command,
.target_reset = qla2x00_abort_target,
.lun_reset = qla2x00_lun_reset,
.fabric_login = qla2x00_login_fabric,
.fabric_logout = qla2x00_fabric_logout,
.calc_req_entries = qla2x00_calc_iocbs_32,
.build_iocbs = qla2x00_build_scsi_iocbs_32,
.prep_ms_iocb = qla2x00_prep_ms_iocb,
.prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb,
.read_nvram = qla2x00_read_nvram_data,
.write_nvram = qla2x00_write_nvram_data,
.fw_dump = qla2100_fw_dump,
.beacon_on = NULL,
.beacon_off = NULL,
.beacon_blink = NULL,
.read_optrom = qla2x00_read_optrom_data,
.write_optrom = qla2x00_write_optrom_data,
.get_flash_version = qla2x00_get_flash_version,
.start_scsi = qla2x00_start_scsi,
.start_scsi_mq = NULL,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla2x00_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
};
static struct isp_operations qla2300_isp_ops = {
.pci_config = qla2300_pci_config,
.reset_chip = qla2x00_reset_chip,
.chip_diag = qla2x00_chip_diag,
.config_rings = qla2x00_config_rings,
.reset_adapter = qla2x00_reset_adapter,
.nvram_config = qla2x00_nvram_config,
.update_fw_options = qla2x00_update_fw_options,
.load_risc = qla2x00_load_risc,
.pci_info_str = qla2x00_pci_info_str,
.fw_version_str = qla2x00_fw_version_str,
.intr_handler = qla2300_intr_handler,
.enable_intrs = qla2x00_enable_intrs,
.disable_intrs = qla2x00_disable_intrs,
.abort_command = qla2x00_abort_command,
.target_reset = qla2x00_abort_target,
.lun_reset = qla2x00_lun_reset,
.fabric_login = qla2x00_login_fabric,
.fabric_logout = qla2x00_fabric_logout,
.calc_req_entries = qla2x00_calc_iocbs_32,
.build_iocbs = qla2x00_build_scsi_iocbs_32,
.prep_ms_iocb = qla2x00_prep_ms_iocb,
.prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb,
.read_nvram = qla2x00_read_nvram_data,
.write_nvram = qla2x00_write_nvram_data,
.fw_dump = qla2300_fw_dump,
.beacon_on = qla2x00_beacon_on,
.beacon_off = qla2x00_beacon_off,
.beacon_blink = qla2x00_beacon_blink,
.read_optrom = qla2x00_read_optrom_data,
.write_optrom = qla2x00_write_optrom_data,
.get_flash_version = qla2x00_get_flash_version,
.start_scsi = qla2x00_start_scsi,
.start_scsi_mq = NULL,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla2x00_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
};
static struct isp_operations qla24xx_isp_ops = {
.pci_config = qla24xx_pci_config,
.reset_chip = qla24xx_reset_chip,
.chip_diag = qla24xx_chip_diag,
.config_rings = qla24xx_config_rings,
.reset_adapter = qla24xx_reset_adapter,
.nvram_config = qla24xx_nvram_config,
.update_fw_options = qla24xx_update_fw_options,
.load_risc = qla24xx_load_risc,
.pci_info_str = qla24xx_pci_info_str,
.fw_version_str = qla24xx_fw_version_str,
.intr_handler = qla24xx_intr_handler,
.enable_intrs = qla24xx_enable_intrs,
.disable_intrs = qla24xx_disable_intrs,
.abort_command = qla24xx_abort_command,
.target_reset = qla24xx_abort_target,
.lun_reset = qla24xx_lun_reset,
.fabric_login = qla24xx_login_fabric,
.fabric_logout = qla24xx_fabric_logout,
.calc_req_entries = NULL,
.build_iocbs = NULL,
.prep_ms_iocb = qla24xx_prep_ms_iocb,
.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
.read_nvram = qla24xx_read_nvram_data,
.write_nvram = qla24xx_write_nvram_data,
.fw_dump = qla24xx_fw_dump,
.beacon_on = qla24xx_beacon_on,
.beacon_off = qla24xx_beacon_off,
.beacon_blink = qla24xx_beacon_blink,
.read_optrom = qla24xx_read_optrom_data,
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_start_scsi,
.start_scsi_mq = NULL,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla2x00_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
};
static struct isp_operations qla25xx_isp_ops = {
.pci_config = qla25xx_pci_config,
.reset_chip = qla24xx_reset_chip,
.chip_diag = qla24xx_chip_diag,
.config_rings = qla24xx_config_rings,
.reset_adapter = qla24xx_reset_adapter,
.nvram_config = qla24xx_nvram_config,
.update_fw_options = qla24xx_update_fw_options,
.load_risc = qla24xx_load_risc,
.pci_info_str = qla24xx_pci_info_str,
.fw_version_str = qla24xx_fw_version_str,
.intr_handler = qla24xx_intr_handler,
.enable_intrs = qla24xx_enable_intrs,
.disable_intrs = qla24xx_disable_intrs,
.abort_command = qla24xx_abort_command,
.target_reset = qla24xx_abort_target,
.lun_reset = qla24xx_lun_reset,
.fabric_login = qla24xx_login_fabric,
.fabric_logout = qla24xx_fabric_logout,
.calc_req_entries = NULL,
.build_iocbs = NULL,
.prep_ms_iocb = qla24xx_prep_ms_iocb,
.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
.read_nvram = qla25xx_read_nvram_data,
.write_nvram = qla25xx_write_nvram_data,
.fw_dump = qla25xx_fw_dump,
.beacon_on = qla24xx_beacon_on,
.beacon_off = qla24xx_beacon_off,
.beacon_blink = qla24xx_beacon_blink,
.read_optrom = qla25xx_read_optrom_data,
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_dif_start_scsi,
.start_scsi_mq = qla2xxx_dif_start_scsi_mq,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla2x00_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
};
static struct isp_operations qla81xx_isp_ops = {
.pci_config = qla25xx_pci_config,
.reset_chip = qla24xx_reset_chip,
.chip_diag = qla24xx_chip_diag,
.config_rings = qla24xx_config_rings,
.reset_adapter = qla24xx_reset_adapter,
.nvram_config = qla81xx_nvram_config,
.update_fw_options = qla24xx_update_fw_options,
.load_risc = qla81xx_load_risc,
.pci_info_str = qla24xx_pci_info_str,
.fw_version_str = qla24xx_fw_version_str,
.intr_handler = qla24xx_intr_handler,
.enable_intrs = qla24xx_enable_intrs,
.disable_intrs = qla24xx_disable_intrs,
.abort_command = qla24xx_abort_command,
.target_reset = qla24xx_abort_target,
.lun_reset = qla24xx_lun_reset,
.fabric_login = qla24xx_login_fabric,
.fabric_logout = qla24xx_fabric_logout,
.calc_req_entries = NULL,
.build_iocbs = NULL,
.prep_ms_iocb = qla24xx_prep_ms_iocb,
.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
.read_nvram = NULL,
.write_nvram = NULL,
.fw_dump = qla81xx_fw_dump,
.beacon_on = qla24xx_beacon_on,
.beacon_off = qla24xx_beacon_off,
.beacon_blink = qla83xx_beacon_blink,
.read_optrom = qla25xx_read_optrom_data,
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_dif_start_scsi,
.start_scsi_mq = qla2xxx_dif_start_scsi_mq,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla2x00_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
};
static struct isp_operations qla82xx_isp_ops = {
.pci_config = qla82xx_pci_config,
.reset_chip = qla82xx_reset_chip,
.chip_diag = qla24xx_chip_diag,
.config_rings = qla82xx_config_rings,
.reset_adapter = qla24xx_reset_adapter,
.nvram_config = qla81xx_nvram_config,
.update_fw_options = qla24xx_update_fw_options,
.load_risc = qla82xx_load_risc,
.pci_info_str = qla24xx_pci_info_str,
.fw_version_str = qla24xx_fw_version_str,
.intr_handler = qla82xx_intr_handler,
.enable_intrs = qla82xx_enable_intrs,
.disable_intrs = qla82xx_disable_intrs,
.abort_command = qla24xx_abort_command,
.target_reset = qla24xx_abort_target,
.lun_reset = qla24xx_lun_reset,
.fabric_login = qla24xx_login_fabric,
.fabric_logout = qla24xx_fabric_logout,
.calc_req_entries = NULL,
.build_iocbs = NULL,
.prep_ms_iocb = qla24xx_prep_ms_iocb,
.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
.read_nvram = qla24xx_read_nvram_data,
.write_nvram = qla24xx_write_nvram_data,
.fw_dump = qla82xx_fw_dump,
.beacon_on = qla82xx_beacon_on,
.beacon_off = qla82xx_beacon_off,
.beacon_blink = NULL,
.read_optrom = qla82xx_read_optrom_data,
.write_optrom = qla82xx_write_optrom_data,
.get_flash_version = qla82xx_get_flash_version,
.start_scsi = qla82xx_start_scsi,
.start_scsi_mq = NULL,
.abort_isp = qla82xx_abort_isp,
.iospace_config = qla82xx_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
};
static struct isp_operations qla8044_isp_ops = {
.pci_config = qla82xx_pci_config,
.reset_chip = qla82xx_reset_chip,
.chip_diag = qla24xx_chip_diag,
.config_rings = qla82xx_config_rings,
.reset_adapter = qla24xx_reset_adapter,
.nvram_config = qla81xx_nvram_config,
.update_fw_options = qla24xx_update_fw_options,
.load_risc = qla82xx_load_risc,
.pci_info_str = qla24xx_pci_info_str,
.fw_version_str = qla24xx_fw_version_str,
.intr_handler = qla8044_intr_handler,
.enable_intrs = qla82xx_enable_intrs,
.disable_intrs = qla82xx_disable_intrs,
.abort_command = qla24xx_abort_command,
.target_reset = qla24xx_abort_target,
.lun_reset = qla24xx_lun_reset,
.fabric_login = qla24xx_login_fabric,
.fabric_logout = qla24xx_fabric_logout,
.calc_req_entries = NULL,
.build_iocbs = NULL,
.prep_ms_iocb = qla24xx_prep_ms_iocb,
.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
.read_nvram = NULL,
.write_nvram = NULL,
.fw_dump = qla8044_fw_dump,
.beacon_on = qla82xx_beacon_on,
.beacon_off = qla82xx_beacon_off,
.beacon_blink = NULL,
.read_optrom = qla8044_read_optrom_data,
.write_optrom = qla8044_write_optrom_data,
.get_flash_version = qla82xx_get_flash_version,
.start_scsi = qla82xx_start_scsi,
.start_scsi_mq = NULL,
.abort_isp = qla8044_abort_isp,
.iospace_config = qla82xx_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
};
static struct isp_operations qla83xx_isp_ops = {
.pci_config = qla25xx_pci_config,
.reset_chip = qla24xx_reset_chip,
.chip_diag = qla24xx_chip_diag,
.config_rings = qla24xx_config_rings,
.reset_adapter = qla24xx_reset_adapter,
.nvram_config = qla81xx_nvram_config,
.update_fw_options = qla24xx_update_fw_options,
.load_risc = qla81xx_load_risc,
.pci_info_str = qla24xx_pci_info_str,
.fw_version_str = qla24xx_fw_version_str,
.intr_handler = qla24xx_intr_handler,
.enable_intrs = qla24xx_enable_intrs,
.disable_intrs = qla24xx_disable_intrs,
.abort_command = qla24xx_abort_command,
.target_reset = qla24xx_abort_target,
.lun_reset = qla24xx_lun_reset,
.fabric_login = qla24xx_login_fabric,
.fabric_logout = qla24xx_fabric_logout,
.calc_req_entries = NULL,
.build_iocbs = NULL,
.prep_ms_iocb = qla24xx_prep_ms_iocb,
.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
.read_nvram = NULL,
.write_nvram = NULL,
.fw_dump = qla83xx_fw_dump,
.beacon_on = qla24xx_beacon_on,
.beacon_off = qla24xx_beacon_off,
.beacon_blink = qla83xx_beacon_blink,
.read_optrom = qla25xx_read_optrom_data,
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_dif_start_scsi,
.start_scsi_mq = qla2xxx_dif_start_scsi_mq,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla83xx_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
};
static struct isp_operations qlafx00_isp_ops = {
.pci_config = qlafx00_pci_config,
.reset_chip = qlafx00_soft_reset,
.chip_diag = qlafx00_chip_diag,
.config_rings = qlafx00_config_rings,
.reset_adapter = qlafx00_soft_reset,
.nvram_config = NULL,
.update_fw_options = NULL,
.load_risc = NULL,
.pci_info_str = qlafx00_pci_info_str,
.fw_version_str = qlafx00_fw_version_str,
.intr_handler = qlafx00_intr_handler,
.enable_intrs = qlafx00_enable_intrs,
.disable_intrs = qlafx00_disable_intrs,
.abort_command = qla24xx_async_abort_command,
.target_reset = qlafx00_abort_target,
.lun_reset = qlafx00_lun_reset,
.fabric_login = NULL,
.fabric_logout = NULL,
.calc_req_entries = NULL,
.build_iocbs = NULL,
.prep_ms_iocb = qla24xx_prep_ms_iocb,
.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
.read_nvram = qla24xx_read_nvram_data,
.write_nvram = qla24xx_write_nvram_data,
.fw_dump = NULL,
.beacon_on = qla24xx_beacon_on,
.beacon_off = qla24xx_beacon_off,
.beacon_blink = NULL,
.read_optrom = qla24xx_read_optrom_data,
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qlafx00_start_scsi,
.start_scsi_mq = NULL,
.abort_isp = qlafx00_abort_isp,
.iospace_config = qlafx00_iospace_config,
.initialize_adapter = qlafx00_initialize_adapter,
};
static struct isp_operations qla27xx_isp_ops = {
.pci_config = qla25xx_pci_config,
.reset_chip = qla24xx_reset_chip,
.chip_diag = qla24xx_chip_diag,
.config_rings = qla24xx_config_rings,
.reset_adapter = qla24xx_reset_adapter,
.nvram_config = qla81xx_nvram_config,
.update_fw_options = qla24xx_update_fw_options,
.load_risc = qla81xx_load_risc,
.pci_info_str = qla24xx_pci_info_str,
.fw_version_str = qla24xx_fw_version_str,
.intr_handler = qla24xx_intr_handler,
.enable_intrs = qla24xx_enable_intrs,
.disable_intrs = qla24xx_disable_intrs,
.abort_command = qla24xx_abort_command,
.target_reset = qla24xx_abort_target,
.lun_reset = qla24xx_lun_reset,
.fabric_login = qla24xx_login_fabric,
.fabric_logout = qla24xx_fabric_logout,
.calc_req_entries = NULL,
.build_iocbs = NULL,
.prep_ms_iocb = qla24xx_prep_ms_iocb,
.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
.read_nvram = NULL,
.write_nvram = NULL,
.fw_dump = qla27xx_fwdump,
.mpi_fw_dump = qla27xx_mpi_fwdump,
.beacon_on = qla24xx_beacon_on,
.beacon_off = qla24xx_beacon_off,
.beacon_blink = qla83xx_beacon_blink,
.read_optrom = qla25xx_read_optrom_data,
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_dif_start_scsi,
.start_scsi_mq = qla2xxx_dif_start_scsi_mq,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla83xx_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
};
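/*
 * Decode the PCI device ID into ISP-type and feature flags (FWI-2
 * interface, ZIO, iIDMA, T10-PI, ...) plus the RISC firmware load
 * address, then derive the adapter's physical port number.
 */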
static inline void
qla2x00_set_isp_flags(struct qla_hw_data *ha)
{
ha->device_type = DT_EXTENDED_IDS;
switch (ha->pdev->device) {
case PCI_DEVICE_ID_QLOGIC_ISP2100:
ha->isp_type |= DT_ISP2100;
ha->device_type &= ~DT_EXTENDED_IDS;
ha->fw_srisc_address = RISC_START_ADDRESS_2100;
break;
case PCI_DEVICE_ID_QLOGIC_ISP2200:
ha->isp_type |= DT_ISP2200;
ha->device_type &= ~DT_EXTENDED_IDS;
ha->fw_srisc_address = RISC_START_ADDRESS_2100;
break;
case PCI_DEVICE_ID_QLOGIC_ISP2300:
ha->isp_type |= DT_ISP2300;
ha->device_type |= DT_ZIO_SUPPORTED;
ha->fw_srisc_address = RISC_START_ADDRESS_2300;
break;
case PCI_DEVICE_ID_QLOGIC_ISP2312:
ha->isp_type |= DT_ISP2312;
ha->device_type |= DT_ZIO_SUPPORTED;
ha->fw_srisc_address = RISC_START_ADDRESS_2300;
break;
case PCI_DEVICE_ID_QLOGIC_ISP2322:
ha->isp_type |= DT_ISP2322;
ha->device_type |= DT_ZIO_SUPPORTED;
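/* Subsystem vendor 0x1028 is Dell; flag this OEM variant */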
if (ha->pdev->subsystem_vendor == 0x1028 &&
ha->pdev->subsystem_device == 0x0170)
ha->device_type |= DT_OEM_001;
ha->fw_srisc_address = RISC_START_ADDRESS_2300;
break;
case PCI_DEVICE_ID_QLOGIC_ISP6312:
ha->isp_type |= DT_ISP6312;
ha->fw_srisc_address = RISC_START_ADDRESS_2300;
break;
case PCI_DEVICE_ID_QLOGIC_ISP6322:
ha->isp_type |= DT_ISP6322;
ha->fw_srisc_address = RISC_START_ADDRESS_2300;
break;
case PCI_DEVICE_ID_QLOGIC_ISP2422:
ha->isp_type |= DT_ISP2422;
ha->device_type |= DT_ZIO_SUPPORTED;
ha->device_type |= DT_FWI2;
ha->device_type |= DT_IIDMA;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
case PCI_DEVICE_ID_QLOGIC_ISP2432:
ha->isp_type |= DT_ISP2432;
ha->device_type |= DT_ZIO_SUPPORTED;
ha->device_type |= DT_FWI2;
ha->device_type |= DT_IIDMA;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
case PCI_DEVICE_ID_QLOGIC_ISP8432:
ha->isp_type |= DT_ISP8432;
ha->device_type |= DT_ZIO_SUPPORTED;
ha->device_type |= DT_FWI2;
ha->device_type |= DT_IIDMA;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
case PCI_DEVICE_ID_QLOGIC_ISP5422:
ha->isp_type |= DT_ISP5422;
ha->device_type |= DT_FWI2;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
case PCI_DEVICE_ID_QLOGIC_ISP5432:
ha->isp_type |= DT_ISP5432;
ha->device_type |= DT_FWI2;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
case PCI_DEVICE_ID_QLOGIC_ISP2532:
ha->isp_type |= DT_ISP2532;
ha->device_type |= DT_ZIO_SUPPORTED;
ha->device_type |= DT_FWI2;
ha->device_type |= DT_IIDMA;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
case PCI_DEVICE_ID_QLOGIC_ISP8001:
ha->isp_type |= DT_ISP8001;
ha->device_type |= DT_ZIO_SUPPORTED;
ha->device_type |= DT_FWI2;
ha->device_type |= DT_IIDMA;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
case PCI_DEVICE_ID_QLOGIC_ISP8021:
ha->isp_type |= DT_ISP8021;
ha->device_type |= DT_ZIO_SUPPORTED;
ha->device_type |= DT_FWI2;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
/* Initialize 82XX ISP flags */
qla82xx_init_flags(ha);
break;
case PCI_DEVICE_ID_QLOGIC_ISP8044:
ha->isp_type |= DT_ISP8044;
ha->device_type |= DT_ZIO_SUPPORTED;
ha->device_type |= DT_FWI2;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
/* Initialize 82XX ISP flags */
qla82xx_init_flags(ha);
break;
case PCI_DEVICE_ID_QLOGIC_ISP2031:
ha->isp_type |= DT_ISP2031;
ha->device_type |= DT_ZIO_SUPPORTED;
ha->device_type |= DT_FWI2;
ha->device_type |= DT_IIDMA;
ha->device_type |= DT_T10_PI;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
case PCI_DEVICE_ID_QLOGIC_ISP8031:
ha->isp_type |= DT_ISP8031;
ha->device_type |= DT_ZIO_SUPPORTED;
ha->device_type |= DT_FWI2;
ha->device_type |= DT_IIDMA;
ha->device_type |= DT_T10_PI;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
case PCI_DEVICE_ID_QLOGIC_ISPF001:
ha->isp_type |= DT_ISPFX00;
break;
case PCI_DEVICE_ID_QLOGIC_ISP2071:
ha->isp_type |= DT_ISP2071;
ha->device_type |= DT_ZIO_SUPPORTED;
ha->device_type |= DT_FWI2;
ha->device_type |= DT_IIDMA;
ha->device_type |= DT_T10_PI;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
case PCI_DEVICE_ID_QLOGIC_ISP2271:
ha->isp_type |= DT_ISP2271;
ha->device_type |= DT_ZIO_SUPPORTED;
ha->device_type |= DT_FWI2;
ha->device_type |= DT_IIDMA;
ha->device_type |= DT_T10_PI;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
case PCI_DEVICE_ID_QLOGIC_ISP2261:
ha->isp_type |= DT_ISP2261;
ha->device_type |= DT_ZIO_SUPPORTED;
ha->device_type |= DT_FWI2;
ha->device_type |= DT_IIDMA;
ha->device_type |= DT_T10_PI;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
case PCI_DEVICE_ID_QLOGIC_ISP2081:
case PCI_DEVICE_ID_QLOGIC_ISP2089:
ha->isp_type |= DT_ISP2081;
ha->device_type |= DT_ZIO_SUPPORTED;
ha->device_type |= DT_FWI2;
ha->device_type |= DT_IIDMA;
ha->device_type |= DT_T10_PI;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
case PCI_DEVICE_ID_QLOGIC_ISP2281:
case PCI_DEVICE_ID_QLOGIC_ISP2289:
ha->isp_type |= DT_ISP2281;
ha->device_type |= DT_ZIO_SUPPORTED;
ha->device_type |= DT_FWI2;
ha->device_type |= DT_IIDMA;
ha->device_type |= DT_T10_PI;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
}
if (IS_QLA82XX(ha)) {
ha->port_no = ha->portnum & 1;
} else {
/* Get adapter physical port no from interrupt pin register. */
pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
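/*
 * The interrupt-pin register is 1-based (INTA# == 1): newer ISPs
 * subtract one to get a zero-based port number, older parts derive
 * it from the pin's low bit.
 */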
if (IS_QLA25XX(ha) || IS_QLA2031(ha) ||
IS_QLA27XX(ha) || IS_QLA28XX(ha))
ha->port_no--;
else
ha->port_no = !(ha->port_no & 1);
}
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b,
"device_type=0x%x port=%d fw_srisc_address=0x%x.\n",
ha->device_type, ha->port_no, ha->fw_srisc_address);
}
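/*
 * scsi_host_template async-scan hooks: scan_start kicks off loop/fabric
 * (re)discovery via the DPC flags; scan_finished tells the midlayer when
 * discovery is done (or has timed out after loop_reset_delay seconds).
 */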
static void
qla2xxx_scan_start(struct Scsi_Host *shost)
{
scsi_qla_host_t *vha = shost_priv(shost);
if (vha->hw->flags.running_gold_fw)
return;
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
set_bit(RSCN_UPDATE, &vha->dpc_flags);
set_bit(NPIV_CONFIG_NEEDED, &vha->dpc_flags);
}
static int
qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
scsi_qla_host_t *vha = shost_priv(shost);
if (test_bit(UNLOADING, &vha->dpc_flags))
return 1;
if (!vha->host)
return 1;
if (time > vha->hw->loop_reset_delay * HZ)
return 1;
return atomic_read(&vha->loop_state) == LOOP_READY;
}
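/*
 * Periodic heartbeat: issue a no-op mailbox command to confirm the
 * firmware is still responding, but only once initialization is done
 * and no other mailbox command is in flight.
 */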
static void qla_heartbeat_work_fn(struct work_struct *work)
{
struct qla_hw_data *ha = container_of(work,
struct qla_hw_data, heartbeat_work);
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
if (!ha->flags.mbox_busy && base_vha->flags.init_done)
qla_no_op_mb(base_vha);
}
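/*
 * Deferred IOCB work: drain vha->work_list (at most two passes, so one
 * invocation cannot monopolize the CPU), then clear IOCB_WORK_ACTIVE
 * under work_lock so the work can be requeued.
 */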
static void qla2x00_iocb_work_fn(struct work_struct *work)
{
struct scsi_qla_host *vha = container_of(work,
struct scsi_qla_host, iocb_work);
struct qla_hw_data *ha = vha->hw;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
int i = 2;
unsigned long flags;
if (test_bit(UNLOADING, &base_vha->dpc_flags))
return;
while (!list_empty(&vha->work_list) && i > 0) {
qla2x00_do_work(vha);
i--;
}
spin_lock_irqsave(&vha->work_lock, flags);
clear_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags);
spin_unlock_irqrestore(&vha->work_lock, flags);
}
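/*
 * Create (or look up) a dedicated "qla2xxx" trace array instance so the
 * driver can emit messages into its own ftrace buffer.
 */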
static void
qla_trace_init(void)
{
qla_trc_array = trace_array_get_by_name("qla2xxx", NULL);
if (!qla_trc_array) {
ql_log(ql_log_fatal, NULL, 0x0001,
"Unable to create qla2xxx trace instance, instance logging will be disabled.\n");
return;
}
QLA_TRACE_ENABLE(qla_trc_array);
}
static void
qla_trace_uninit(void)
{
if (!qla_trc_array)
return;
trace_array_put(qla_trc_array);
}
/*
* PCI driver interface
*/
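/*
 * qla2x00_probe_one() - PCI probe entry point: enables the device,
 * allocates and initializes the per-HBA qla_hw_data, selects the
 * isp_operations table for the detected ISP, and brings the adapter up.
 */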
static int
qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
int ret = -ENODEV;
struct Scsi_Host *host;
scsi_qla_host_t *base_vha = NULL;
struct qla_hw_data *ha;
char pci_info[30];
char fw_str[30], wq_name[30];
struct scsi_host_template *sht;
int bars, mem_only = 0;
uint16_t req_length = 0, rsp_length = 0;
struct req_que *req = NULL;
struct rsp_que *rsp = NULL;
int i;
bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
sht = &qla2xxx_driver_template;
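/* These ISPs are MMIO-only: request just the memory BARs */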
if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8432 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2261 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2081 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2281 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2089 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2289) {
bars = pci_select_bars(pdev, IORESOURCE_MEM);
mem_only = 1;
ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
"Mem only adapter.\n");
}
ql_dbg_pci(ql_dbg_init, pdev, 0x0008,
"Bars=%d.\n", bars);
if (mem_only) {
if (pci_enable_device_mem(pdev))
return ret;
} else {
if (pci_enable_device(pdev))
return ret;
}
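/*
 * In a kdump (crash) kernel keep the memory footprint small: no
 * multiqueue and no firmware-dump buffer.
 */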
if (is_kdump_kernel()) {
ql2xmqsupport = 0;
ql2xallocfwdump = 0;
}
ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
if (!ha) {
ql_log_pci(ql_log_fatal, pdev, 0x0009,
"Unable to allocate memory for ha.\n");
goto disable_device;
}
ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
"Memory allocated for ha=%p.\n", ha);
ha->pdev = pdev;
INIT_LIST_HEAD(&ha->tgt.q_full_list);
spin_lock_init(&ha->tgt.q_full_lock);
spin_lock_init(&ha->tgt.sess_lock);
spin_lock_init(&ha->tgt.atio_lock);
spin_lock_init(&ha->sadb_lock);
INIT_LIST_HEAD(&ha->sadb_tx_index_list);
INIT_LIST_HEAD(&ha->sadb_rx_index_list);
spin_lock_init(&ha->sadb_fp_lock);
if (qla_edif_sadb_build_free_pool(ha)) {
kfree(ha);
goto disable_device;
}
atomic_set(&ha->nvme_active_aen_cnt, 0);
/* Clear our data area */
ha->bars = bars;
ha->mem_only = mem_only;
spin_lock_init(&ha->hardware_lock);
spin_lock_init(&ha->vport_slock);
mutex_init(&ha->selflogin_lock);
mutex_init(&ha->optrom_mutex);
/* Set ISP-type information. */
qla2x00_set_isp_flags(ha);
/* Set EEH reset type to fundamental if required by hba */
if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) ||
IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
pdev->needs_freset = 1;
ha->prev_topology = 0;
ha->init_cb_size = sizeof(init_cb_t);
ha->link_data_rate = PORT_SPEED_UNKNOWN;