/*
* PMC-Sierra SPCv/ve 8088/8089 SAS/SATA based host adapters driver
*
* Copyright (c) 2008-2009 PMC-Sierra, Inc.,
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*
*/
#include <linux/slab.h>
#include "pm8001_sas.h"
#include "pm80xx_hwi.h"
#include "pm8001_chips.h"
#include "pm8001_ctl.h"
#define SMP_DIRECT 1
#define SMP_INDIRECT 2
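/**
 * pm80xx_bar4_shift - program the MEMBASE II shift register and confirm
 * that the controller has latched the new shift value.
 * @pm8001_ha: our hba card information
 * @shift_value: shift value to program
 *
 * Returns 0 on success, -1 if the register does not reflect the requested
 * value within one second.
 */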
int pm80xx_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shift_value)
{
u32 reg_val;
unsigned long start;
pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER, shift_value);
/* confirm the setting is written */
start = jiffies + HZ; /* 1 sec */
do {
reg_val = pm8001_cr32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER);
} while ((reg_val != shift_value) && time_before(jiffies, start));
if (reg_val != shift_value) {
pm8001_dbg(pm8001_ha, FAIL, "TIMEOUT:MEMBASE_II_SHIFT_REGISTER = 0x%x\n",
reg_val);
return -1;
}
return 0;
}
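/**
 * pm80xx_pci_mem_copy - copy debug data from a PCI BAR window into a host
 * memory buffer, one dword at a time.
 * @pm8001_ha: our hba card information
 * @soffset: source offset within the BAR window
 * @destination: host buffer to copy into
 * @dw_count: length in bytes of the area to copy
 * @bus_base_number: PCI BAR index to read from
 *
 * Reads are limited to the first 64 KiB of the BAR window.
 */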
static void pm80xx_pci_mem_copy(struct pm8001_hba_info *pm8001_ha, u32 soffset,
void *destination,
u32 dw_count, u32 bus_base_number)
{
u32 index, value, offset;
__le32 *destination1;
destination1 = (__le32 *)destination;
for (index = 0; index < dw_count; index += 4, destination1++) {
offset = (soffset + index);
if (offset < (64 * 1024)) {
value = pm8001_cr32(pm8001_ha, bus_base_number, offset);
*destination1 = cpu_to_le32(value);
}
}
return;
}
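/**
 * pm80xx_get_fatal_dump - sysfs read handler that returns the next chunk of
 * the forensic (fatal error) dump captured by the SPCv/SPCve controller.
 * @cdev: the device to which the sysfs attribute belongs
 * @attr: device attribute (unused)
 * @buf: the sysfs output buffer
 */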
ssize_t pm80xx_get_fatal_dump(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
void __iomem *fatal_table_address = pm8001_ha->fatal_tbl_addr;
u32 accum_len, reg_val, index, *temp;
u32 status = 1;
unsigned long start;
u8 *direct_data;
char *fatal_error_data = buf;
u32 length_to_read;
u32 offset;
pm8001_ha->forensic_info.data_buf.direct_data = buf;
if (pm8001_ha->chip_id == chip_8001) {
pm8001_ha->forensic_info.data_buf.direct_data +=
sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
"Not supported for SPC controller");
return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
(char *)buf;
}
/* initialize variables for very first call from host application */
if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) {
pm8001_dbg(pm8001_ha, IO,
"forensic_info TYPE_NON_FATAL..............\n");
direct_data = (u8 *)fatal_error_data;
pm8001_ha->forensic_info.data_type = TYPE_NON_FATAL;
pm8001_ha->forensic_info.data_buf.direct_len = SYSFS_OFFSET;
pm8001_ha->forensic_info.data_buf.direct_offset = 0;
pm8001_ha->forensic_info.data_buf.read_len = 0;
pm8001_ha->forensic_preserved_accumulated_transfer = 0;
/* Write signature to fatal dump table */
pm8001_mw32(fatal_table_address,
MPI_FATAL_EDUMP_TABLE_SIGNATURE, 0x1234abcd);
pm8001_ha->forensic_info.data_buf.direct_data = direct_data;
pm8001_dbg(pm8001_ha, IO, "ossaHwCB: status1 %d\n", status);
pm8001_dbg(pm8001_ha, IO, "ossaHwCB: read_len 0x%x\n",
pm8001_ha->forensic_info.data_buf.read_len);
pm8001_dbg(pm8001_ha, IO, "ossaHwCB: direct_len 0x%x\n",
pm8001_ha->forensic_info.data_buf.direct_len);
pm8001_dbg(pm8001_ha, IO, "ossaHwCB: direct_offset 0x%x\n",
pm8001_ha->forensic_info.data_buf.direct_offset);
}
if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) {
/* start to get data */
/* Program the MEMBASE II Shifting Register with 0x00.*/
pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
pm8001_ha->fatal_forensic_shift_offset);
pm8001_ha->forensic_last_offset = 0;
pm8001_ha->forensic_fatal_step = 0;
pm8001_ha->fatal_bar_loc = 0;
}
/* Read until accum_len is retrieved */
accum_len = pm8001_mr32(fatal_table_address,
MPI_FATAL_EDUMP_TABLE_ACCUM_LEN);
/* Determine length of data between previously stored transfer length
* and current accumulated transfer length
*/
length_to_read =
accum_len - pm8001_ha->forensic_preserved_accumulated_transfer;
pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv: accum_len 0x%x\n",
accum_len);
pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv: length_to_read 0x%x\n",
length_to_read);
pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv: last_offset 0x%x\n",
pm8001_ha->forensic_last_offset);
pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv: read_len 0x%x\n",
pm8001_ha->forensic_info.data_buf.read_len);
pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv:: direct_len 0x%x\n",
pm8001_ha->forensic_info.data_buf.direct_len);
pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv:: direct_offset 0x%x\n",
pm8001_ha->forensic_info.data_buf.direct_offset);
/* If the accumulated length could not be read correctly, fail the attempt. */
if (accum_len == 0xFFFFFFFF) {
pm8001_dbg(pm8001_ha, IO,
"Possible PCI issue 0x%x not expected\n",
accum_len);
return status;
}
/* If the accumulated length is zero, fail the attempt */
if (accum_len == 0) {
pm8001_ha->forensic_info.data_buf.direct_data +=
sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
"%08x ", 0xFFFFFFFF);
return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
(char *)buf;
}
/* Accumulated length is good so start capturing the first data */
temp = (u32 *)pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr;
if (pm8001_ha->forensic_fatal_step == 0) {
moreData:
/* If data to read is less than SYSFS_OFFSET then reduce the
* length of dataLen
*/
if (pm8001_ha->forensic_last_offset + SYSFS_OFFSET
> length_to_read) {
pm8001_ha->forensic_info.data_buf.direct_len =
length_to_read -
pm8001_ha->forensic_last_offset;
} else {
pm8001_ha->forensic_info.data_buf.direct_len =
SYSFS_OFFSET;
}
if (pm8001_ha->forensic_info.data_buf.direct_data) {
/* Data is in bar, copy to host memory */
pm80xx_pci_mem_copy(pm8001_ha,
pm8001_ha->fatal_bar_loc,
pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr,
pm8001_ha->forensic_info.data_buf.direct_len, 1);
}
pm8001_ha->fatal_bar_loc +=
pm8001_ha->forensic_info.data_buf.direct_len;
pm8001_ha->forensic_info.data_buf.direct_offset +=
pm8001_ha->forensic_info.data_buf.direct_len;
pm8001_ha->forensic_last_offset +=
pm8001_ha->forensic_info.data_buf.direct_len;
pm8001_ha->forensic_info.data_buf.read_len =
pm8001_ha->forensic_info.data_buf.direct_len;
if (pm8001_ha->forensic_last_offset >= length_to_read) {
pm8001_ha->forensic_info.data_buf.direct_data +=
sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
"%08x ", 3);
for (index = 0; index <
(pm8001_ha->forensic_info.data_buf.direct_len
/ 4); index++) {
pm8001_ha->forensic_info.data_buf.direct_data +=
sprintf(
pm8001_ha->forensic_info.data_buf.direct_data,
"%08x ", *(temp + index));
}
pm8001_ha->fatal_bar_loc = 0;
pm8001_ha->forensic_fatal_step = 1;
pm8001_ha->fatal_forensic_shift_offset = 0;
pm8001_ha->forensic_last_offset = 0;
status = 0;
offset = (int)
((char *)pm8001_ha->forensic_info.data_buf.direct_data
- (char *)buf);
pm8001_dbg(pm8001_ha, IO,
"get_fatal_spcv:return1 0x%x\n", offset);
return (char *)pm8001_ha->
forensic_info.data_buf.direct_data -
(char *)buf;
}
if (pm8001_ha->fatal_bar_loc < (64 * 1024)) {
pm8001_ha->forensic_info.data_buf.direct_data +=
sprintf(pm8001_ha->
forensic_info.data_buf.direct_data,
"%08x ", 2);
for (index = 0; index <
(pm8001_ha->forensic_info.data_buf.direct_len
/ 4); index++) {
pm8001_ha->forensic_info.data_buf.direct_data
+= sprintf(pm8001_ha->
forensic_info.data_buf.direct_data,
"%08x ", *(temp + index));
}
status = 0;
offset = (int)
((char *)pm8001_ha->forensic_info.data_buf.direct_data
- (char *)buf);
pm8001_dbg(pm8001_ha, IO,
"get_fatal_spcv:return2 0x%x\n", offset);
return (char *)pm8001_ha->
forensic_info.data_buf.direct_data -
(char *)buf;
}
/* Increment the MEMBASE II Shifting Register value by 0x100.*/
pm8001_ha->forensic_info.data_buf.direct_data +=
sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
"%08x ", 2);
for (index = 0; index <
(pm8001_ha->forensic_info.data_buf.direct_len
/ 4) ; index++) {
pm8001_ha->forensic_info.data_buf.direct_data +=
sprintf(pm8001_ha->
forensic_info.data_buf.direct_data,
"%08x ", *(temp + index));
}
pm8001_ha->fatal_forensic_shift_offset += 0x100;
pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
pm8001_ha->fatal_forensic_shift_offset);
pm8001_ha->fatal_bar_loc = 0;
status = 0;
offset = (int)
((char *)pm8001_ha->forensic_info.data_buf.direct_data
- (char *)buf);
pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv: return3 0x%x\n",
offset);
return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
(char *)buf;
}
if (pm8001_ha->forensic_fatal_step == 1) {
/* store previous accumulated length before triggering next
* accumulated length update
*/
pm8001_ha->forensic_preserved_accumulated_transfer =
pm8001_mr32(fatal_table_address,
MPI_FATAL_EDUMP_TABLE_ACCUM_LEN);
/* continue capturing the fatal log until Dump status is 0x3 */
if (pm8001_mr32(fatal_table_address,
MPI_FATAL_EDUMP_TABLE_STATUS) <
MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE) {
/* reset the FDDSTAT bit by writing zero */
pm8001_mw32(fatal_table_address,
MPI_FATAL_EDUMP_TABLE_STATUS, 0x0);
/* set dump control value to '1' so that new data will
* be transferred to shared memory
*/
pm8001_mw32(fatal_table_address,
MPI_FATAL_EDUMP_TABLE_HANDSHAKE,
MPI_FATAL_EDUMP_HANDSHAKE_RDY);
/* Poll FDDHSHK until clear */
start = jiffies + (2 * HZ); /* 2 sec */
do {
reg_val = pm8001_mr32(fatal_table_address,
MPI_FATAL_EDUMP_TABLE_HANDSHAKE);
} while ((reg_val) && time_before(jiffies, start));
if (reg_val != 0) {
pm8001_dbg(pm8001_ha, FAIL,
"TIMEOUT:MPI_FATAL_EDUMP_TABLE_HDSHAKE 0x%x\n",
reg_val);
/* Fail the dump if a timeout occurs */
pm8001_ha->forensic_info.data_buf.direct_data +=
sprintf(
pm8001_ha->forensic_info.data_buf.direct_data,
"%08x ", 0xFFFFFFFF);
return((char *)
pm8001_ha->forensic_info.data_buf.direct_data
- (char *)buf);
}
/* Poll status register until set to 2 or
* 3 for up to 2 seconds
*/
start = jiffies + (2 * HZ); /* 2 sec */
do {
reg_val = pm8001_mr32(fatal_table_address,
MPI_FATAL_EDUMP_TABLE_STATUS);
} while (((reg_val != 2) && (reg_val != 3)) &&
time_before(jiffies, start));
if (reg_val < 2) {
pm8001_dbg(pm8001_ha, FAIL,
"TIMEOUT:MPI_FATAL_EDUMP_TABLE_STATUS = 0x%x\n",
reg_val);
/* Fail the dump if a timeout occurs */
pm8001_ha->forensic_info.data_buf.direct_data +=
sprintf(
pm8001_ha->forensic_info.data_buf.direct_data,
"%08x ", 0xFFFFFFFF);
return((char *)pm8001_ha->forensic_info.data_buf.direct_data -
(char *)buf);
}
/* reset fatal_forensic_shift_offset back to zero and reset MEMBASE 2 register to zero */
pm8001_ha->fatal_forensic_shift_offset = 0; /* location in 64k region */
pm8001_cw32(pm8001_ha, 0,
MEMBASE_II_SHIFT_REGISTER,
pm8001_ha->fatal_forensic_shift_offset);
}
/* Read the next block of the debug data.*/
length_to_read = pm8001_mr32(fatal_table_address,
MPI_FATAL_EDUMP_TABLE_ACCUM_LEN) -
pm8001_ha->forensic_preserved_accumulated_transfer;
if (length_to_read != 0x0) {
pm8001_ha->forensic_fatal_step = 0;
goto moreData;
} else {
pm8001_ha->forensic_info.data_buf.direct_data +=
sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
"%08x ", 4);
pm8001_ha->forensic_info.data_buf.read_len = 0xFFFFFFFF;
pm8001_ha->forensic_info.data_buf.direct_len = 0;
pm8001_ha->forensic_info.data_buf.direct_offset = 0;
pm8001_ha->forensic_info.data_buf.read_len = 0;
}
}
offset = (int)((char *)pm8001_ha->forensic_info.data_buf.direct_data
- (char *)buf);
pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv: return4 0x%x\n", offset);
return ((char *)pm8001_ha->forensic_info.data_buf.direct_data -
(char *)buf);
}
/**
 * pm80xx_get_non_fatal_dump - dump the non-fatal debug data DMAed to host
 * memory by the firmware.
 * @cdev: the device to which the sysfs attribute belongs
 * @attr: device attribute (unused)
 * @buf: the sysfs output buffer
 */
ssize_t pm80xx_get_non_fatal_dump(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
void __iomem *nonfatal_table_address = pm8001_ha->fatal_tbl_addr;
u32 accum_len = 0;
u32 total_len = 0;
u32 reg_val = 0;
u32 *temp = NULL;
u32 index = 0;
u32 output_length;
unsigned long start = 0;
char *buf_copy = buf;
temp = (u32 *)pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr;
if (++pm8001_ha->non_fatal_count == 1) {
if (pm8001_ha->chip_id == chip_8001) {
snprintf(pm8001_ha->forensic_info.data_buf.direct_data,
PAGE_SIZE, "Not supported for SPC controller");
return 0;
}
pm8001_dbg(pm8001_ha, IO, "forensic_info TYPE_NON_FATAL...\n");
/*
* Step 1: Write the host buffer parameters in the MPI Fatal and
 * Non-Fatal Error Dump Capture Table. This is the buffer
* where debug data will be DMAed to.
*/
pm8001_mw32(nonfatal_table_address,
MPI_FATAL_EDUMP_TABLE_LO_OFFSET,
pm8001_ha->memoryMap.region[FORENSIC_MEM].phys_addr_lo);
pm8001_mw32(nonfatal_table_address,
MPI_FATAL_EDUMP_TABLE_HI_OFFSET,
pm8001_ha->memoryMap.region[FORENSIC_MEM].phys_addr_hi);
pm8001_mw32(nonfatal_table_address,
MPI_FATAL_EDUMP_TABLE_LENGTH, SYSFS_OFFSET);
/* Optionally, set the DUMPCTRL bit to 1 if the host
* keeps sending active I/Os while capturing the non-fatal
* debug data. Otherwise, leave this bit set to zero
*/
pm8001_mw32(nonfatal_table_address,
MPI_FATAL_EDUMP_TABLE_HANDSHAKE, MPI_FATAL_EDUMP_HANDSHAKE_RDY);
/*
* Step 2: Clear Accumulative Length of Debug Data Transferred
* [ACCDDLEN] field in the MPI Fatal and Non-Fatal Error Dump
* Capture Table to zero.
*/
pm8001_mw32(nonfatal_table_address,
MPI_FATAL_EDUMP_TABLE_ACCUM_LEN, 0);
/* initialize previous accumulated length to 0 */
pm8001_ha->forensic_preserved_accumulated_transfer = 0;
pm8001_ha->non_fatal_read_length = 0;
}
total_len = pm8001_mr32(nonfatal_table_address,
MPI_FATAL_EDUMP_TABLE_TOTAL_LEN);
/*
 * Step 3: Clear Fatal/Non-Fatal Debug Data Transfer Status [FDDTSTAT]
* field and then request that the SPCv controller transfer the debug
* data by setting bit 7 of the Inbound Doorbell Set Register.
*/
pm8001_mw32(nonfatal_table_address, MPI_FATAL_EDUMP_TABLE_STATUS, 0);
pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET,
SPCv_MSGU_CFG_TABLE_NONFATAL_DUMP);
/*
* Step 4.1: Read back the Inbound Doorbell Set Register (by polling for
* 2 seconds) until register bit 7 is cleared.
* This step only indicates the request is accepted by the controller.
*/
start = jiffies + (2 * HZ); /* 2 sec */
do {
reg_val = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET) &
SPCv_MSGU_CFG_TABLE_NONFATAL_DUMP;
} while ((reg_val != 0) && time_before(jiffies, start));
/* Step 4.2: To check the completion of the transfer, poll the Fatal/Non
* Fatal Debug Data Transfer Status [FDDTSTAT] field for 2 seconds in
* the MPI Fatal and Non-Fatal Error Dump Capture Table.
*/
start = jiffies + (2 * HZ); /* 2 sec */
do {
reg_val = pm8001_mr32(nonfatal_table_address,
MPI_FATAL_EDUMP_TABLE_STATUS);
} while ((!reg_val) && time_before(jiffies, start));
if ((reg_val == 0x00) ||
(reg_val == MPI_FATAL_EDUMP_TABLE_STAT_DMA_FAILED) ||
(reg_val > MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE)) {
pm8001_ha->non_fatal_read_length = 0;
buf_copy += snprintf(buf_copy, PAGE_SIZE, "%08x ", 0xFFFFFFFF);
pm8001_ha->non_fatal_count = 0;
return (buf_copy - buf);
} else if (reg_val ==
MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_MORE_DATA) {
buf_copy += snprintf(buf_copy, PAGE_SIZE, "%08x ", 2);
} else if ((reg_val == MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE) ||
(pm8001_ha->non_fatal_read_length >= total_len)) {
pm8001_ha->non_fatal_read_length = 0;
buf_copy += snprintf(buf_copy, PAGE_SIZE, "%08x ", 4);
pm8001_ha->non_fatal_count = 0;
}
accum_len = pm8001_mr32(nonfatal_table_address,
MPI_FATAL_EDUMP_TABLE_ACCUM_LEN);
output_length = accum_len -
pm8001_ha->forensic_preserved_accumulated_transfer;
for (index = 0; index < output_length/4; index++)
buf_copy += snprintf(buf_copy, PAGE_SIZE,
"%08x ", *(temp+index));
pm8001_ha->non_fatal_read_length += output_length;
/* store current accumulated length to use in next iteration as
* the previous accumulated length
*/
pm8001_ha->forensic_preserved_accumulated_transfer = accum_len;
return (buf_copy - buf);
}
/**
* read_main_config_table - read the configure table and save it.
* @pm8001_ha: our hba card information
*/
static void read_main_config_table(struct pm8001_hba_info *pm8001_ha)
{
void __iomem *address = pm8001_ha->main_cfg_tbl_addr;
pm8001_ha->main_cfg_tbl.pm80xx_tbl.signature =
pm8001_mr32(address, MAIN_SIGNATURE_OFFSET);
pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev =
pm8001_mr32(address, MAIN_INTERFACE_REVISION);
pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev =
pm8001_mr32(address, MAIN_FW_REVISION);
pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io =
pm8001_mr32(address, MAIN_MAX_OUTSTANDING_IO_OFFSET);
pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl =
pm8001_mr32(address, MAIN_MAX_SGL_OFFSET);
pm8001_ha->main_cfg_tbl.pm80xx_tbl.ctrl_cap_flag =
pm8001_mr32(address, MAIN_CNTRL_CAP_OFFSET);
pm8001_ha->main_cfg_tbl.pm80xx_tbl.gst_offset =
pm8001_mr32(address, MAIN_GST_OFFSET);
pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_queue_offset =
pm8001_mr32(address, MAIN_IBQ_OFFSET);
pm8001_ha->main_cfg_tbl.pm80xx_tbl.outbound_queue_offset =
pm8001_mr32(address, MAIN_OBQ_OFFSET);
/* read Error Dump Offset and Length */
pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_offset0 =
pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_OFFSET);
pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_length0 =
pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_LENGTH);
pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_offset1 =
pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_OFFSET);
pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_length1 =
pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_LENGTH);
/* read GPIO LED settings from the configuration table */
pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping =
pm8001_mr32(address, MAIN_GPIO_LED_FLAGS_OFFSET);
/* read analog Setting offset from the configuration table */
pm8001_ha->main_cfg_tbl.pm80xx_tbl.analog_setup_table_offset =
pm8001_mr32(address, MAIN_ANALOG_SETUP_OFFSET);
pm8001_ha->main_cfg_tbl.pm80xx_tbl.int_vec_table_offset =
pm8001_mr32(address, MAIN_INT_VECTOR_TABLE_OFFSET);
pm8001_ha->main_cfg_tbl.pm80xx_tbl.phy_attr_table_offset =
pm8001_mr32(address, MAIN_SAS_PHY_ATTR_TABLE_OFFSET);
/* read port recovery and reset timeout */
pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer =
pm8001_mr32(address, MAIN_PORT_RECOVERY_TIMER);
/* read ILA and inactive firmware version */
pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version =
pm8001_mr32(address, MAIN_MPI_ILA_RELEASE_TYPE);
pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version =
pm8001_mr32(address, MAIN_MPI_INACTIVE_FW_VERSION);
pm8001_dbg(pm8001_ha, DEV,
"Main cfg table: sign:%x interface rev:%x fw_rev:%x\n",
pm8001_ha->main_cfg_tbl.pm80xx_tbl.signature,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev);
pm8001_dbg(pm8001_ha, DEV,
"table offset: gst:%x iq:%x oq:%x int vec:%x phy attr:%x\n",
pm8001_ha->main_cfg_tbl.pm80xx_tbl.gst_offset,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_queue_offset,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.outbound_queue_offset,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.int_vec_table_offset,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.phy_attr_table_offset);
pm8001_dbg(pm8001_ha, DEV,
"Main cfg table; ila rev:%x Inactive fw rev:%x\n",
pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version);
}
/**
* read_general_status_table - read the general status table and save it.
* @pm8001_ha: our hba card information
*/
static void read_general_status_table(struct pm8001_hba_info *pm8001_ha)
{
void __iomem *address = pm8001_ha->general_stat_tbl_addr;
pm8001_ha->gs_tbl.pm80xx_tbl.gst_len_mpistate =
pm8001_mr32(address, GST_GSTLEN_MPIS_OFFSET);
pm8001_ha->gs_tbl.pm80xx_tbl.iq_freeze_state0 =
pm8001_mr32(address, GST_IQ_FREEZE_STATE0_OFFSET);
pm8001_ha->gs_tbl.pm80xx_tbl.iq_freeze_state1 =
pm8001_mr32(address, GST_IQ_FREEZE_STATE1_OFFSET);
pm8001_ha->gs_tbl.pm80xx_tbl.msgu_tcnt =
pm8001_mr32(address, GST_MSGUTCNT_OFFSET);
pm8001_ha->gs_tbl.pm80xx_tbl.iop_tcnt =
pm8001_mr32(address, GST_IOPTCNT_OFFSET);
pm8001_ha->gs_tbl.pm80xx_tbl.gpio_input_val =
pm8001_mr32(address, GST_GPIO_INPUT_VAL);
pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[0] =
pm8001_mr32(address, GST_RERRINFO_OFFSET0);
pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[1] =
pm8001_mr32(address, GST_RERRINFO_OFFSET1);
pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[2] =
pm8001_mr32(address, GST_RERRINFO_OFFSET2);
pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[3] =
pm8001_mr32(address, GST_RERRINFO_OFFSET3);
pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[4] =
pm8001_mr32(address, GST_RERRINFO_OFFSET4);
pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[5] =
pm8001_mr32(address, GST_RERRINFO_OFFSET5);
pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[6] =
pm8001_mr32(address, GST_RERRINFO_OFFSET6);
pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[7] =
pm8001_mr32(address, GST_RERRINFO_OFFSET7);
}
/**
* read_phy_attr_table - read the phy attribute table and save it.
* @pm8001_ha: our hba card information
*/
static void read_phy_attr_table(struct pm8001_hba_info *pm8001_ha)
{
void __iomem *address = pm8001_ha->pspa_q_tbl_addr;
pm8001_ha->phy_attr_table.phystart1_16[0] =
pm8001_mr32(address, PSPA_PHYSTATE0_OFFSET);
pm8001_ha->phy_attr_table.phystart1_16[1] =
pm8001_mr32(address, PSPA_PHYSTATE1_OFFSET);
pm8001_ha->phy_attr_table.phystart1_16[2] =
pm8001_mr32(address, PSPA_PHYSTATE2_OFFSET);
pm8001_ha->phy_attr_table.phystart1_16[3] =
pm8001_mr32(address, PSPA_PHYSTATE3_OFFSET);
pm8001_ha->phy_attr_table.phystart1_16[4] =
pm8001_mr32(address, PSPA_PHYSTATE4_OFFSET);
pm8001_ha->phy_attr_table.phystart1_16[5] =
pm8001_mr32(address, PSPA_PHYSTATE5_OFFSET);
pm8001_ha->phy_attr_table.phystart1_16[6] =
pm8001_mr32(address, PSPA_PHYSTATE6_OFFSET);
pm8001_ha->phy_attr_table.phystart1_16[7] =
pm8001_mr32(address, PSPA_PHYSTATE7_OFFSET);
pm8001_ha->phy_attr_table.phystart1_16[8] =
pm8001_mr32(address, PSPA_PHYSTATE8_OFFSET);
pm8001_ha->phy_attr_table.phystart1_16[9] =
pm8001_mr32(address, PSPA_PHYSTATE9_OFFSET);
pm8001_ha->phy_attr_table.phystart1_16[10] =
pm8001_mr32(address, PSPA_PHYSTATE10_OFFSET);
pm8001_ha->phy_attr_table.phystart1_16[11] =
pm8001_mr32(address, PSPA_PHYSTATE11_OFFSET);
pm8001_ha->phy_attr_table.phystart1_16[12] =
pm8001_mr32(address, PSPA_PHYSTATE12_OFFSET);
pm8001_ha->phy_attr_table.phystart1_16[13] =
pm8001_mr32(address, PSPA_PHYSTATE13_OFFSET);
pm8001_ha->phy_attr_table.phystart1_16[14] =
pm8001_mr32(address, PSPA_PHYSTATE14_OFFSET);
pm8001_ha->phy_attr_table.phystart1_16[15] =
pm8001_mr32(address, PSPA_PHYSTATE15_OFFSET);
pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[0] =
pm8001_mr32(address, PSPA_OB_HW_EVENT_PID0_OFFSET);
pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[1] =
pm8001_mr32(address, PSPA_OB_HW_EVENT_PID1_OFFSET);
pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[2] =
pm8001_mr32(address, PSPA_OB_HW_EVENT_PID2_OFFSET);
pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[3] =
pm8001_mr32(address, PSPA_OB_HW_EVENT_PID3_OFFSET);
pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[4] =
pm8001_mr32(address, PSPA_OB_HW_EVENT_PID4_OFFSET);
pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[5] =
pm8001_mr32(address, PSPA_OB_HW_EVENT_PID5_OFFSET);
pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[6] =
pm8001_mr32(address, PSPA_OB_HW_EVENT_PID6_OFFSET);
pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[7] =
pm8001_mr32(address, PSPA_OB_HW_EVENT_PID7_OFFSET);
pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[8] =
pm8001_mr32(address, PSPA_OB_HW_EVENT_PID8_OFFSET);
pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[9] =
pm8001_mr32(address, PSPA_OB_HW_EVENT_PID9_OFFSET);
pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[10] =
pm8001_mr32(address, PSPA_OB_HW_EVENT_PID10_OFFSET);
pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[11] =
pm8001_mr32(address, PSPA_OB_HW_EVENT_PID11_OFFSET);
pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[12] =
pm8001_mr32(address, PSPA_OB_HW_EVENT_PID12_OFFSET);
pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[13] =
pm8001_mr32(address, PSPA_OB_HW_EVENT_PID13_OFFSET);
pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[14] =
pm8001_mr32(address, PSPA_OB_HW_EVENT_PID14_OFFSET);
pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[15] =
pm8001_mr32(address, PSPA_OB_HW_EVENT_PID15_OFFSET);
}
/**
* read_inbnd_queue_table - read the inbound queue table and save it.
* @pm8001_ha: our hba card information
*/
static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
{
int i;
void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
for (i = 0; i < PM8001_MAX_INB_NUM; i++) {
u32 offset = i * 0x20;
pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
get_pci_bar_index(pm8001_mr32(address,
(offset + IB_PIPCI_BAR)));
pm8001_ha->inbnd_q_tbl[i].pi_offset =
pm8001_mr32(address, (offset + IB_PIPCI_BAR_OFFSET));
}
}
/**
* read_outbnd_queue_table - read the outbound queue table and save it.
* @pm8001_ha: our hba card information
*/
static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
{
int i;
void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;
for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) {
u32 offset = i * 0x24;
pm8001_ha->outbnd_q_tbl[i].ci_pci_bar =
get_pci_bar_index(pm8001_mr32(address,
(offset + OB_CIPCI_BAR)));
pm8001_ha->outbnd_q_tbl[i].ci_offset =
pm8001_mr32(address, (offset + OB_CIPCI_BAR_OFFSET));
}
}
/**
* init_default_table_values - init the default table.
* @pm8001_ha: our hba card information
*/
static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
{
int i;
u32 offsetib, offsetob;
void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr;
void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr;
u32 ib_offset = pm8001_ha->ib_offset;
u32 ob_offset = pm8001_ha->ob_offset;
u32 ci_offset = pm8001_ha->ci_offset;
u32 pi_offset = pm8001_ha->pi_offset;
pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_event_log_addr =
pm8001_ha->memoryMap.region[AAP1].phys_addr_hi;
pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_event_log_addr =
pm8001_ha->memoryMap.region[AAP1].phys_addr_lo;
pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size =
PM8001_EVENT_LOG_SIZE;
pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_severity = 0x01;
pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_pcs_event_log_addr =
pm8001_ha->memoryMap.region[IOP].phys_addr_hi;
pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_pcs_event_log_addr =
pm8001_ha->memoryMap.region[IOP].phys_addr_lo;
pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size =
PM8001_EVENT_LOG_SIZE;
pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity = 0x01;
pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt = 0x01;
/* Disable end to end CRC checking */
pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump = (0x1 << 16);
for (i = 0; i < pm8001_ha->max_q_num; i++) {
pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt =
PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x00<<30);
pm8001_ha->inbnd_q_tbl[i].upper_base_addr =
pm8001_ha->memoryMap.region[ib_offset + i].phys_addr_hi;
pm8001_ha->inbnd_q_tbl[i].lower_base_addr =
pm8001_ha->memoryMap.region[ib_offset + i].phys_addr_lo;
pm8001_ha->inbnd_q_tbl[i].base_virt =
(u8 *)pm8001_ha->memoryMap.region[ib_offset + i].virt_ptr;
pm8001_ha->inbnd_q_tbl[i].total_length =
pm8001_ha->memoryMap.region[ib_offset + i].total_len;
pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr =
pm8001_ha->memoryMap.region[ci_offset + i].phys_addr_hi;
pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr =
pm8001_ha->memoryMap.region[ci_offset + i].phys_addr_lo;
pm8001_ha->inbnd_q_tbl[i].ci_virt =
pm8001_ha->memoryMap.region[ci_offset + i].virt_ptr;
pm8001_write_32(pm8001_ha->inbnd_q_tbl[i].ci_virt, 0, 0);
offsetib = i * 0x20;
pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
get_pci_bar_index(pm8001_mr32(addressib,
(offsetib + 0x14)));
pm8001_ha->inbnd_q_tbl[i].pi_offset =
pm8001_mr32(addressib, (offsetib + 0x18));
pm8001_ha->inbnd_q_tbl[i].producer_idx = 0;
pm8001_ha->inbnd_q_tbl[i].consumer_index = 0;
pm8001_dbg(pm8001_ha, DEV,
"IQ %d pi_bar 0x%x pi_offset 0x%x\n", i,
pm8001_ha->inbnd_q_tbl[i].pi_pci_bar,
pm8001_ha->inbnd_q_tbl[i].pi_offset);
}
for (i = 0; i < pm8001_ha->max_q_num; i++) {
pm8001_ha->outbnd_q_tbl[i].element_size_cnt =
PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x01<<30);
pm8001_ha->outbnd_q_tbl[i].upper_base_addr =
pm8001_ha->memoryMap.region[ob_offset + i].phys_addr_hi;
pm8001_ha->outbnd_q_tbl[i].lower_base_addr =
pm8001_ha->memoryMap.region[ob_offset + i].phys_addr_lo;
pm8001_ha->outbnd_q_tbl[i].base_virt =
(u8 *)pm8001_ha->memoryMap.region[ob_offset + i].virt_ptr;
pm8001_ha->outbnd_q_tbl[i].total_length =
pm8001_ha->memoryMap.region[ob_offset + i].total_len;
pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr =
pm8001_ha->memoryMap.region[pi_offset + i].phys_addr_hi;
pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr =
pm8001_ha->memoryMap.region[pi_offset + i].phys_addr_lo;
/* interrupt vector based on oq */
pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay = (i << 24);
pm8001_ha->outbnd_q_tbl[i].pi_virt =
pm8001_ha->memoryMap.region[pi_offset + i].virt_ptr;
pm8001_write_32(pm8001_ha->outbnd_q_tbl[i].pi_virt, 0, 0);
offsetob = i * 0x24;
pm8001_ha->outbnd_q_tbl[i].ci_pci_bar =
get_pci_bar_index(pm8001_mr32(addressob,
offsetob + 0x14));
pm8001_ha->outbnd_q_tbl[i].ci_offset =
pm8001_mr32(addressob, (offsetob + 0x18));
pm8001_ha->outbnd_q_tbl[i].consumer_idx = 0;
pm8001_ha->outbnd_q_tbl[i].producer_index = 0;
pm8001_dbg(pm8001_ha, DEV,
"OQ %d ci_bar 0x%x ci_offset 0x%x\n", i,
pm8001_ha->outbnd_q_tbl[i].ci_pci_bar,
pm8001_ha->outbnd_q_tbl[i].ci_offset);
}
}
/**
* update_main_config_table - update the main default table to the HBA.
* @pm8001_ha: our hba card information
*/
static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
{
void __iomem *address = pm8001_ha->main_cfg_tbl_addr;
pm8001_mw32(address, MAIN_IQNPPD_HPPD_OFFSET,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_q_nppd_hppd);
pm8001_mw32(address, MAIN_EVENT_LOG_ADDR_HI,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_event_log_addr);
pm8001_mw32(address, MAIN_EVENT_LOG_ADDR_LO,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_event_log_addr);
pm8001_mw32(address, MAIN_EVENT_LOG_BUFF_SIZE,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size);
pm8001_mw32(address, MAIN_EVENT_LOG_OPTION,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_severity);
pm8001_mw32(address, MAIN_PCS_EVENT_LOG_ADDR_HI,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_pcs_event_log_addr);
pm8001_mw32(address, MAIN_PCS_EVENT_LOG_ADDR_LO,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_pcs_event_log_addr);
pm8001_mw32(address, MAIN_PCS_EVENT_LOG_BUFF_SIZE,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size);
pm8001_mw32(address, MAIN_PCS_EVENT_LOG_OPTION,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity);
/* Update Fatal error interrupt vector */
pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt |=
((pm8001_ha->max_q_num - 1) << 8);
pm8001_mw32(address, MAIN_FATAL_ERROR_INTERRUPT,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt);
pm8001_dbg(pm8001_ha, DEV,
"Updated Fatal error interrupt vector 0x%x\n",
pm8001_mr32(address, MAIN_FATAL_ERROR_INTERRUPT));
pm8001_mw32(address, MAIN_EVENT_CRC_CHECK,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump);
/* SPCv specific */
pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping &= 0xCFFFFFFF;
/* Set GPIOLED to 0x2 for LED indicator */
pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping |= 0x20000000;
pm8001_mw32(address, MAIN_GPIO_LED_FLAGS_OFFSET,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping);
pm8001_dbg(pm8001_ha, DEV,
"Programming DW 0x21 in main cfg table with 0x%x\n",
pm8001_mr32(address, MAIN_GPIO_LED_FLAGS_OFFSET));
pm8001_mw32(address, MAIN_PORT_RECOVERY_TIMER,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer);
pm8001_mw32(address, MAIN_INT_REASSERTION_DELAY,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.interrupt_reassertion_delay);
pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer &= 0xffff0000;
pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer |=
PORT_RECOVERY_TIMEOUT;
if (pm8001_ha->chip_id == chip_8006) {
pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer &=
0x0000ffff;
pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer |=
CHIP_8006_PORT_RECOVERY_TIMEOUT;
}
pm8001_mw32(address, MAIN_PORT_RECOVERY_TIMER,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer);
}
/**
* update_inbnd_queue_table - update the inbound queue table to the HBA.
* @pm8001_ha: our hba card information
* @number: entry in the queue
*/
static void update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
int number)
{
void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
u16 offset = number * 0x20;
pm8001_mw32(address, offset + IB_PROPERITY_OFFSET,
pm8001_ha->inbnd_q_tbl[number].element_pri_size_cnt);
pm8001_mw32(address, offset + IB_BASE_ADDR_HI_OFFSET,
pm8001_ha->inbnd_q_tbl[number].upper_base_addr);
pm8001_mw32(address, offset + IB_BASE_ADDR_LO_OFFSET,
pm8001_ha->inbnd_q_tbl[number].lower_base_addr);
pm8001_mw32(address, offset + IB_CI_BASE_ADDR_HI_OFFSET,
pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr);
pm8001_mw32(address, offset + IB_CI_BASE_ADDR_LO_OFFSET,
pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr);
pm8001_dbg(pm8001_ha, DEV,
"IQ %d: Element pri size 0x%x\n",
number,
pm8001_ha->inbnd_q_tbl[number].element_pri_size_cnt);
pm8001_dbg(pm8001_ha, DEV,
"IQ upr base addr 0x%x IQ lwr base addr 0x%x\n",
pm8001_ha->inbnd_q_tbl[number].upper_base_addr,
pm8001_ha->inbnd_q_tbl[number].lower_base_addr);
pm8001_dbg(pm8001_ha, DEV,
"CI upper base addr 0x%x CI lower base addr 0x%x\n",
pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr,
pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr);
}
/**
* update_outbnd_queue_table - update the outbound queue table to the HBA.
* @pm8001_ha: our hba card information
* @number: entry in the queue
*/
static void update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
int number)
{
void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;
u16 offset = number * 0x24;
pm8001_mw32(address, offset + OB_PROPERITY_OFFSET,
pm8001_ha->outbnd_q_tbl[number].element_size_cnt);
pm8001_mw32(address, offset + OB_BASE_ADDR_HI_OFFSET,
pm8001_ha->outbnd_q_tbl[number].upper_base_addr);
pm8001_mw32(address, offset + OB_BASE_ADDR_LO_OFFSET,
pm8001_ha->outbnd_q_tbl[number].lower_base_addr);
pm8001_mw32(address, offset + OB_PI_BASE_ADDR_HI_OFFSET,
pm8001_ha->outbnd_q_tbl[number].pi_upper_base_addr);
pm8001_mw32(address, offset + OB_PI_BASE_ADDR_LO_OFFSET,
pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr);
pm8001_mw32(address, offset + OB_INTERRUPT_COALES_OFFSET,
pm8001_ha->outbnd_q_tbl[number].interrup_vec_cnt_delay);
pm8001_dbg(pm8001_ha, DEV,
"OQ %d: Element pri size 0x%x\n",
number,
pm8001_ha->outbnd_q_tbl[number].element_size_cnt);
pm8001_dbg(pm8001_ha, DEV,
"OQ upr base addr 0x%x OQ lwr base addr 0x%x\n",
pm8001_ha->outbnd_q_tbl[number].upper_base_addr,
pm8001_ha->outbnd_q_tbl[number].lower_base_addr);
pm8001_dbg(pm8001_ha, DEV,
"PI upper base addr 0x%x PI lower base addr 0x%x\n",
pm8001_ha->outbnd_q_tbl[number].pi_upper_base_addr,
pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr);
}
/**
* mpi_init_check - check firmware initialization status.
* @pm8001_ha: our hba card information
*/
static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
{
u32 max_wait_count;
u32 value;
u32 gst_len_mpistate;
/* Write bit0=1 to Inbound DoorBell Register to tell the SPC FW the
 * table is updated
 */
pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_UPDATE);
/* wait until Inbound DoorBell Clear Register toggled */
if (IS_SPCV_12G(pm8001_ha->pdev)) {
max_wait_count = SPCV_DOORBELL_CLEAR_TIMEOUT;
} else {
max_wait_count = SPC_DOORBELL_CLEAR_TIMEOUT;
}
do {
msleep(FW_READY_INTERVAL);
value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
value &= SPCv_MSGU_CFG_TABLE_UPDATE;
} while ((value != 0) && (--max_wait_count));
if (!max_wait_count) {
/* additional check */
pm8001_dbg(pm8001_ha, FAIL,
"Inb doorbell clear not toggled[value:%x]\n",
value);
return -EBUSY;
}
/* check the MPI-State for initialization for up to 100 ms */
max_wait_count = 5; /* 100 msec */
do {
msleep(FW_READY_INTERVAL);
gst_len_mpistate =
pm8001_mr32(pm8001_ha->general_stat_tbl_addr,
GST_GSTLEN_MPIS_OFFSET);
} while ((GST_MPI_STATE_INIT !=
(gst_len_mpistate & GST_MPI_STATE_MASK)) && (--max_wait_count));
if (!max_wait_count)
return -EBUSY;
/* check MPI Initialization error */
gst_len_mpistate = gst_len_mpistate >> 16;
if (gst_len_mpistate != 0x0000)
return -EBUSY;
return 0;
}
/**
 * check_fw_ready - check whether the FW is ready; if not, return an error.
 * @pm8001_ha: our hba card information
 *
 * This function sleeps, hence it must not be used in atomic context.
 */
static int check_fw_ready(struct pm8001_hba_info *pm8001_ha)
{
u32 value;
u32 max_wait_count;
u32 max_wait_time;
u32 expected_mask;
int ret = 0;
/* reset / PCIe ready */
max_wait_time = max_wait_count = 5; /* 100 milli sec */
do {
msleep(FW_READY_INTERVAL);
value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
} while ((value == 0xFFFFFFFF) && (--max_wait_count));
/* check ila, RAAE and iops status */
if ((pm8001_ha->chip_id != chip_8008) &&
(pm8001_ha->chip_id != chip_8009)) {
max_wait_time = max_wait_count = 180; /* 3600 milli sec */
expected_mask = SCRATCH_PAD_ILA_READY |
SCRATCH_PAD_RAAE_READY |
SCRATCH_PAD_IOP0_READY |
SCRATCH_PAD_IOP1_READY;
} else {
max_wait_time = max_wait_count = 170; /* 3400 milli sec */
expected_mask = SCRATCH_PAD_ILA_READY |
SCRATCH_PAD_RAAE_READY |
SCRATCH_PAD_IOP0_READY;
}
do {
msleep(FW_READY_INTERVAL);
value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
} while (((value & expected_mask) !=
expected_mask) && (--max_wait_count));
if (!max_wait_count) {
pm8001_dbg(pm8001_ha, INIT,
"At least one FW component failed to load within %d millisec: Scratchpad1: 0x%x\n",
max_wait_time * FW_READY_INTERVAL, value);
ret = -1;
} else {
pm8001_dbg(pm8001_ha, MSG,
"All FW components ready by %d ms\n",
(max_wait_time - max_wait_count) * FW_READY_INTERVAL);
}
return ret;
}
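/**
 * init_pci_device_addresses - locate the MPI configuration table through
 * scratchpad 0 and derive the addresses of the other MPI tables from it.
 * @pm8001_ha: our hba card information
 */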
static int init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
{
void __iomem *base_addr;
u32 value;
u32 offset;
u32 pcibar;
u32 pcilogic;
value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0);
/*
* lower 26 bits of SCRATCHPAD0 register describes offset within the
* PCIe BAR where the MPI configuration table is present
*/
offset = value & 0x03FFFFFF; /* scratch pad 0 TBL address */
pm8001_dbg(pm8001_ha, DEV, "Scratchpad 0 Offset: 0x%x value 0x%x\n",
offset, value);
/*
* Upper 6 bits describe the offset within PCI config space where BAR
* is located.
*/
pcilogic = (value & 0xFC000000) >> 26;
pcibar = get_pci_bar_index(pcilogic);
pm8001_dbg(pm8001_ha, INIT, "Scratchpad 0 PCI BAR: %d\n", pcibar);
/*
* Make sure the offset falls inside the ioremapped PCI BAR
*/
if (offset > pm8001_ha->io_mem[pcibar].memsize) {
pm8001_dbg(pm8001_ha, FAIL,
"Main cfg tbl offset outside %u > %u\n",
offset, pm8001_ha->io_mem[pcibar].memsize);
return -EBUSY;
}
pm8001_ha->main_cfg_tbl_addr = base_addr =
pm8001_ha->io_mem[pcibar].memvirtaddr + offset;
/*
* Validate main configuration table address: first DWord should read
* "PMCS"
*/
value = pm8001_mr32(pm8001_ha->main_cfg_tbl_addr, 0);
if (memcmp(&value, "PMCS", 4) != 0) {
pm8001_dbg(pm8001_ha, FAIL,
"BAD main config signature 0x%x\n",
value);
return -EBUSY;
}
pm8001_dbg(pm8001_ha, INIT,
"VALID main config signature 0x%x\n", value);
pm8001_ha->general_stat_tbl_addr =
base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x18) &
0xFFFFFF);
pm8001_ha->inbnd_q_tbl_addr =
base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C) &
0xFFFFFF);
pm8001_ha->outbnd_q_tbl_addr =
base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x20) &
0xFFFFFF);
pm8001_ha->ivt_tbl_addr =
base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x8C) &
0xFFFFFF);
pm8001_ha->pspa_q_tbl_addr =
base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x90) &
0xFFFFFF);
pm8001_ha->fatal_tbl_addr =
base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0xA0) &
0xFFFFFF);
pm8001_dbg(pm8001_ha, INIT, "GST OFFSET 0x%x\n",
pm8001_cr32(pm8001_ha, pcibar, offset + 0x18));
pm8001_dbg(pm8001_ha, INIT, "INBND OFFSET 0x%x\n",
pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C));
pm8001_dbg(pm8001_ha, INIT, "OBND OFFSET 0x%x\n",
pm8001_cr32(pm8001_ha, pcibar, offset + 0x20));
pm8001_dbg(pm8001_ha, INIT, "IVT OFFSET 0x%x\n",
pm8001_cr32(pm8001_ha, pcibar, offset + 0x8C));
pm8001_dbg(pm8001_ha, INIT, "PSPA OFFSET 0x%x\n",
pm8001_cr32(pm8001_ha, pcibar, offset + 0x90));
pm8001_dbg(pm8001_ha, INIT, "addr - main cfg %p general status %p\n",
pm8001_ha->main_cfg_tbl_addr,
pm8001_ha->general_stat_tbl_addr);
pm8001_dbg(pm8001_ha, INIT, "addr - inbnd %p obnd %p\n",
pm8001_ha->inbnd_q_tbl_addr,
pm8001_ha->outbnd_q_tbl_addr);
pm8001_dbg(pm8001_ha, INIT, "addr - pspa %p ivt %p\n",
pm8001_ha->pspa_q_tbl_addr,
pm8001_ha->ivt_tbl_addr);
return 0;
}
/**
* pm80xx_set_thermal_config - support the thermal configuration
* @pm8001_ha: our hba card information.
*/
int
pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha)
{
struct set_ctrl_cfg_req payload;
struct inbound_queue_table *circularQ;
int rc;
u32 tag;
u32 opc = OPC_INB_SET_CONTROLLER_CONFIG;
u32 page_code;
memset(&payload, 0, sizeof(struct set_ctrl_cfg_req));
rc = pm8001_tag_alloc(pm8001_ha, &tag);
if (rc)
return -1;
circularQ = &pm8001_ha->inbnd_q_tbl[0];
payload.tag = cpu_to_le32(tag);
if (IS_SPCV_12G(pm8001_ha->pdev))
page_code = THERMAL_PAGE_CODE_7H;
else
page_code = THERMAL_PAGE_CODE_8H;
payload.cfg_pg[0] = (THERMAL_LOG_ENABLE << 9) |
(THERMAL_ENABLE << 8) | page_code;
payload.cfg_pg[1] = (LTEMPHIL << 24) | (RTEMPHIL << 8);
pm8001_dbg(pm8001_ha, DEV,
"Setting up thermal config. cfg_pg 0 0x%x cfg_pg 1 0x%x\n",
payload.cfg_pg[0], payload.cfg_pg[1]);
rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
return rc;
}
/**
* pm80xx_set_sas_protocol_timer_config - support the SAS Protocol
* Timer configuration page
* @pm8001_ha: our hba card information.
*/
static int
pm80xx_set_sas_protocol_timer_config(struct pm8001_hba_info *pm8001_ha)
{
struct set_ctrl_cfg_req payload;
struct inbound_queue_table *circularQ;
SASProtocolTimerConfig_t SASConfigPage;
int rc;
u32 tag;
u32 opc = OPC_INB_SET_CONTROLLER_CONFIG;
memset(&payload, 0, sizeof(struct set_ctrl_cfg_req));
memset(&SASConfigPage, 0, sizeof(SASProtocolTimerConfig_t));
rc = pm8001_tag_alloc(pm8001_ha, &tag);
if (rc)
return -1;
circularQ = &pm8001_ha->inbnd_q_tbl[0];
payload.tag = cpu_to_le32(tag);
SASConfigPage.pageCode = SAS_PROTOCOL_TIMER_CONFIG_PAGE;
SASConfigPage.MST_MSI = 3 << 15;
SASConfigPage.STP_SSP_MCT_TMO = (STP_MCT_TMO << 16) | SSP_MCT_TMO;
SASConfigPage.STP_FRM_TMO = (SAS_MAX_OPEN_TIME << 24) |
(SMP_MAX_CONN_TIMER << 16) | STP_FRM_TIMER;
SASConfigPage.STP_IDLE_TMO = STP_IDLE_TIME;
if (SASConfigPage.STP_IDLE_TMO > 0x3FFFFFF)
SASConfigPage.STP_IDLE_TMO = 0x3FFFFFF;
SASConfigPage.OPNRJT_RTRY_INTVL = (SAS_MFD << 16) |
SAS_OPNRJT_RTRY_INTVL;
SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO = (SAS_DOPNRJT_RTRY_TMO << 16)
| SAS_COPNRJT_RTRY_TMO;
SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR = (SAS_DOPNRJT_RTRY_THR << 16)
| SAS_COPNRJT_RTRY_THR;
SASConfigPage.MAX_AIP = SAS_MAX_AIP;
pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.pageCode 0x%08x\n",
SASConfigPage.pageCode);
pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.MST_MSI 0x%08x\n",
SASConfigPage.MST_MSI);
pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.STP_SSP_MCT_TMO 0x%08x\n",
SASConfigPage.STP_SSP_MCT_TMO);
pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.STP_FRM_TMO 0x%08x\n",
SASConfigPage.STP_FRM_TMO);
pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.STP_IDLE_TMO 0x%08x\n",
SASConfigPage.STP_IDLE_TMO);
pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.OPNRJT_RTRY_INTVL 0x%08x\n",
SASConfigPage.OPNRJT_RTRY_INTVL);
pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO 0x%08x\n",
SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO);
pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR 0x%08x\n",
SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR);
pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.MAX_AIP 0x%08x\n",
SASConfigPage.MAX_AIP);
memcpy(&payload.cfg_pg, &SASConfigPage,
sizeof(SASProtocolTimerConfig_t));
rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
return rc;
}
/**
* pm80xx_get_encrypt_info - Check for encryption
* @pm8001_ha: our hba card information.
*/
static int
pm80xx_get_encrypt_info(struct pm8001_hba_info *pm8001_ha)
{
u32 scratch3_value;
int ret = -1;
/* Read encryption status from SCRATCH PAD 3 */
scratch3_value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3);
if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) ==
SCRATCH_PAD3_ENC_READY) {
if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED)
pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS;
if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
SCRATCH_PAD3_SMF_ENABLED)
pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF;
if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
SCRATCH_PAD3_SMA_ENABLED)
pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA;
if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
SCRATCH_PAD3_SMB_ENABLED)
pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB;
pm8001_ha->encrypt_info.status = 0;
pm8001_dbg(pm8001_ha, INIT,
"Encryption: SCRATCH_PAD3_ENC_READY 0x%08X.Cipher mode 0x%x Sec mode 0x%x status 0x%x\n",
scratch3_value,
pm8001_ha->encrypt_info.cipher_mode,
pm8001_ha->encrypt_info.sec_mode,
pm8001_ha->encrypt_info.status);
ret = 0;
} else if ((scratch3_value & SCRATCH_PAD3_ENC_READY) ==
SCRATCH_PAD3_ENC_DISABLED) {
pm8001_dbg(pm8001_ha, INIT,
"Encryption: SCRATCH_PAD3_ENC_DISABLED 0x%08X\n",
scratch3_value);
pm8001_ha->encrypt_info.status = 0xFFFFFFFF;
pm8001_ha->encrypt_info.cipher_mode = 0;
pm8001_ha->encrypt_info.sec_mode = 0;
ret = 0;
} else if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) ==
SCRATCH_PAD3_ENC_DIS_ERR) {
pm8001_ha->encrypt_info.status =
(scratch3_value & SCRATCH_PAD3_ERR_CODE) >> 16;
if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED)
pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS;
if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
SCRATCH_PAD3_SMF_ENABLED)
pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF;
if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
SCRATCH_PAD3_SMA_ENABLED)
pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA;
if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
SCRATCH_PAD3_SMB_ENABLED)
pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB;
pm8001_dbg(pm8001_ha, INIT,
"Encryption: SCRATCH_PAD3_DIS_ERR 0x%08X.Cipher mode 0x%x sec mode 0x%x status 0x%x\n",
scratch3_value,
pm8001_ha->encrypt_info.cipher_mode,
pm8001_ha->encrypt_info.sec_mode,
pm8001_ha->encrypt_info.status);
} else if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) ==
SCRATCH_PAD3_ENC_ENA_ERR) {
pm8001_ha->encrypt_info.status =
(scratch3_value & SCRATCH_PAD3_ERR_CODE) >> 16;
if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED)
pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS;
if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
SCRATCH_PAD3_SMF_ENABLED)
pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF;
if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
SCRATCH_PAD3_SMA_ENABLED)
pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA;
if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
SCRATCH_PAD3_SMB_ENABLED)
pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB;
pm8001_dbg(pm8001_ha, INIT,
"Encryption: SCRATCH_PAD3_ENA_ERR 0x%08X.Cipher mode 0x%x sec mode 0x%x status 0x%x\n",
scratch3_value,
pm8001_ha->encrypt_info.cipher_mode,
pm8001_ha->encrypt_info.sec_mode,
pm8001_ha->encrypt_info.status);
}
return ret;
}
/**
* pm80xx_encrypt_update - update flash with encryption information
* @pm8001_ha: our hba card information.
*/
static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha)
{
struct kek_mgmt_req payload;
struct inbound_queue_table *circularQ;
int rc;
u32 tag;
u32 opc = OPC_INB_KEK_MANAGEMENT;
memset(&payload, 0, sizeof(struct kek_mgmt_req));
rc = pm8001_tag_alloc(pm8001_ha, &tag);
if (rc)
return -1;
circularQ = &pm8001_ha->inbnd_q_tbl[0];
payload.tag = cpu_to_le32(tag);
/* Currently only one key is used. New KEK index is 1.
* Current KEK index is 1. Store KEK to NVRAM is 1.
*/
payload.new_curidx_ksop = ((1 << 24) | (1 << 16) | (1 << 8) |
KEK_MGMT_SUBOP_KEYCARDUPDATE);
pm8001_dbg(pm8001_ha, DEV,
"Saving Encryption info to flash. payload 0x%x\n",
payload.new_curidx_ksop);
rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
return rc;
}
/**
 * pm80xx_chip_init - the main init function that initializes the whole PM8001 chip.
* @pm8001_ha: our hba card information
*/
static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha)
{
int ret;
u8 i = 0;
/* check the firmware status */
if (check_fw_ready(pm8001_ha) == -1) {
pm8001_dbg(pm8001_ha, FAIL, "Firmware is not ready!\n");
return -EBUSY;
}
/* Initialize the controller fatal error flag */
pm8001_ha->controller_fatal_error = false;
/* Initialize PCI space addresses, e.g. the MPI config table offset */
ret = init_pci_device_addresses(pm8001_ha);
if (ret) {
pm8001_dbg(pm8001_ha, FAIL,
"Failed to init pci addresses");
return ret;
}
init_default_table_values(pm8001_ha);
read_main_config_table(pm8001_ha);
read_general_status_table(pm8001_ha);
read_inbnd_queue_table(pm8001_ha);
read_outbnd_queue_table(pm8001_ha);
read_phy_attr_table(pm8001_ha);
/* update main config table, inbound table and outbound table */
update_main_config_table(pm8001_ha);
for (i = 0; i < pm8001_ha->max_q_num; i++) {
update_inbnd_queue_table(pm8001_ha, i);
update_outbnd_queue_table(pm8001_ha, i);
}
/* notify firmware update finished and check initialization status */
if (mpi_init_check(pm8001_ha) == 0) {
pm8001_dbg(pm8001_ha, INIT, "MPI initialize successful!\n");
} else {
return -EBUSY;
}
/* send SAS protocol timer configuration page to FW */
ret = pm80xx_set_sas_protocol_timer_config(pm8001_ha);
/* Check for encryption */
if (pm8001_ha->chip->encrypt) {
pm8001_dbg(pm8001_ha, INIT, "Checking for encryption\n");
ret = pm80xx_get_encrypt_info(pm8001_ha);
if (ret == -1) {
pm8001_dbg(pm8001_ha, INIT, "Encryption error !!\n");
if (pm8001_ha->encrypt_info.status == 0x81) {
pm8001_dbg(pm8001_ha, INIT,
"Encryption enabled with error.Saving encryption key to flash\n");
pm80xx_encrypt_update(pm8001_ha);
}
}
}
return 0;
}
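/**
 * mpi_uninit_check - request MPI termination through the inbound doorbell
 * and wait for the firmware to reach the UNINIT state.
 * @pm8001_ha: our hba card information
 */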
static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
{
u32 max_wait_count;
u32 value;
u32 gst_len_mpistate;
int ret;
ret = init_pci_device_addresses(pm8001_ha);
if (ret) {
pm8001_dbg(pm8001_ha, FAIL,
"Failed to init pci addresses");
return ret;
}
/* Write bit1=1 to Inbound DoorBell Register to tell the SPC FW the
 * table is stopped
 */
pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_RESET);
/* wait until Inbound DoorBell Clear Register toggled */
if (IS_SPCV_12G(pm8001_ha->pdev)) {
max_wait_count = SPCV_DOORBELL_CLEAR_TIMEOUT;
} else {
max_wait_count = SPC_DOORBELL_CLEAR_TIMEOUT;
}
do {
msleep(FW_READY_INTERVAL);
value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
value &= SPCv_MSGU_CFG_TABLE_RESET;
} while ((value != 0) && (--max_wait_count));
if (!max_wait_count) {
pm8001_dbg(pm8001_ha, FAIL, "TIMEOUT:IBDB value/=%x\n", value);
return -1;
}
/* check the MPI-State for termination in progress */
/* wait until the MPI state reaches UNINIT */
max_wait_count = 100; /* 2 sec for spcv/ve */
do {
msleep(FW_READY_INTERVAL);
gst_len_mpistate =
pm8001_mr32(pm8001_ha->general_stat_tbl_addr,
GST_GSTLEN_MPIS_OFFSET);
if (GST_MPI_STATE_UNINIT ==
(gst_len_mpistate & GST_MPI_STATE_MASK))
break;
} while (--max_wait_count);
if (!max_wait_count) {
pm8001_dbg(pm8001_ha, FAIL, " TIME OUT MPI State = 0x%x\n",
gst_len_mpistate & GST_MPI_STATE_MASK);
return -1;
}
return 0;
}
/**
 * pm80xx_fatal_errors - returns non-zero *ONLY* when a fatal error has occurred
* @pm8001_ha: our hba card information
*
* Fatal errors are recoverable only after a host reboot.
*/
int
pm80xx_fatal_errors(struct pm8001_hba_info *pm8001_ha)
{
int ret = 0;
u32 scratch_pad_rsvd0 = pm8001_cr32(pm8001_ha, 0,
MSGU_HOST_SCRATCH_PAD_6);
u32 scratch_pad_rsvd1 = pm8001_cr32(pm8001_ha, 0,
MSGU_HOST_SCRATCH_PAD_7);
u32 scratch_pad1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
u32 scratch_pad2 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2);
u32 scratch_pad3 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3);
if (pm8001_ha->chip_id != chip_8006 &&
pm8001_ha->chip_id != chip_8074 &&
pm8001_ha->chip_id != chip_8076) {
return 0;
}
if (MSGU_SCRATCHPAD1_STATE_FATAL_ERROR(scratch_pad1)) {
pm8001_dbg(pm8001_ha, FAIL,
"Fatal error SCRATCHPAD1 = 0x%x SCRATCHPAD2 = 0x%x SCRATCHPAD3 = 0x%x SCRATCHPAD_RSVD0 = 0x%x SCRATCHPAD_RSVD1 = 0x%x\n",
scratch_pad1, scratch_pad2, scratch_pad3,
scratch_pad_rsvd0, scratch_pad_rsvd1);
ret = 1;
}
return ret;
}
/**
 * pm80xx_chip_soft_rst - soft reset the PM8001 chip, so that all
 * FW registers are reset to their original status.
* @pm8001_ha: our hba card information
*/
static int
pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
{
u32 regval;
u32 bootloader_state;
u32 ibutton0, ibutton1;
/* Process MPI table uninitialization only if FW is ready */
if (!pm8001_ha->controller_fatal_error) {
/* Check if MPI is in ready state to reset */
if (mpi_uninit_check(pm8001_ha) != 0) {
u32 r0 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0);
u32 r1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
u32 r2 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2);
u32 r3 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3);
pm8001_dbg(pm8001_ha, FAIL,
"MPI state is not ready scratch: %x:%x:%x:%x\n",
r0, r1, r2, r3);
/* if things aren't ready but the bootloader is ok then
* try the reset anyway.
*/
if (r1 & SCRATCH_PAD1_BOOTSTATE_MASK)
return -1;
}
}
/* check that the reset register is in its normal state (0x0) */
regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET);
pm8001_dbg(pm8001_ha, INIT, "reset register before write : 0x%x\n",
regval);
pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, SPCv_NORMAL_RESET_VALUE);
msleep(500);
regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET);
pm8001_dbg(pm8001_ha, INIT, "reset register after write 0x%x\n",
regval);
if ((regval & SPCv_SOFT_RESET_READ_MASK) ==
SPCv_SOFT_RESET_NORMAL_RESET_OCCURED) {
pm8001_dbg(pm8001_ha, MSG,
" soft reset successful [regval: 0x%x]\n",
regval);
} else {
pm8001_dbg(pm8001_ha, MSG,
" soft reset failed [regval: 0x%x]\n",
regval);
/* check bootloader is successfully executed or in HDA mode */
bootloader_state =
pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1) &
SCRATCH_PAD1_BOOTSTATE_MASK;
if (bootloader_state == SCRATCH_PAD1_BOOTSTATE_HDA_SEEPROM) {
pm8001_dbg(pm8001_ha, MSG,
"Bootloader state - HDA mode SEEPROM\n");
} else if (bootloader_state ==
SCRATCH_PAD1_BOOTSTATE_HDA_BOOTSTRAP) {
pm8001_dbg(pm8001_ha, MSG,
"Bootloader state - HDA mode Bootstrap Pin\n");
} else if (bootloader_state ==
SCRATCH_PAD1_BOOTSTATE_HDA_SOFTRESET) {
pm8001_dbg(pm8001_ha, MSG,
"Bootloader state - HDA mode soft reset\n");
} else if (bootloader_state ==
SCRATCH_PAD1_BOOTSTATE_CRIT_ERROR) {
pm8001_dbg(pm8001_ha, MSG,
"Bootloader state-HDA mode critical error\n");
}
return -EBUSY;
}
/* check the firmware status after reset */
if (check_fw_ready(pm8001_ha) == -1) {
pm8001_dbg(pm8001_ha, FAIL, "Firmware is not ready!\n");
/* check iButton feature support for motherboard controller */
if (pm8001_ha->pdev->subsystem_vendor !=
PCI_VENDOR_ID_ADAPTEC2 &&
pm8001_ha->pdev->subsystem_vendor !=
PCI_VENDOR_ID_ATTO &&
pm8001_ha->pdev->subsystem_vendor != 0) {
ibutton0 = pm8001_cr32(pm8001_ha, 0,
MSGU_HOST_SCRATCH_PAD_6);
ibutton1 = pm8001_cr32(pm8001_ha, 0,
MSGU_HOST_SCRATCH_PAD_7);
if (!ibutton0 && !ibutton1) {
pm8001_dbg(pm8001_ha, FAIL,
"iButton Feature is not Available!!!\n");
return -EBUSY;
}
if (ibutton0 == 0xdeadbeef && ibutton1 == 0xdeadbeef) {
pm8001_dbg(pm8001_ha, FAIL,
"CRC Check for iButton Feature Failed!!!\n");
return -EBUSY;
}
}
}
pm8001_dbg(pm8001_ha, INIT, "SPCv soft reset Complete\n");
return 0;
}
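/**
 * pm80xx_hw_chip_rst - reset the SPCv chip via the soft reset register
 * and wait for the firmware to reload.
 * @pm8001_ha: our hba card information
 */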
static void pm80xx_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
{
u32 i;
pm8001_dbg(pm8001_ha, INIT, "chip reset start\n");
/* do SPCv chip reset. */
pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, 0x11);
pm8001_dbg(pm8001_ha, INIT, "SPC soft reset Complete\n");
/* TODO: check whether this delay is actually required */
/* delay 10 usec */
udelay(10);
/* wait for 20 msec until the firmware gets reloaded */
i = 20;
do {
mdelay(1);
} while ((--i) != 0);
pm8001_dbg(pm8001_ha, INIT, "chip reset finished\n");
}
/**
* pm80xx_chip_intx_interrupt_enable - enable PM8001 chip interrupt
* @pm8001_ha: our hba card information
*/
static void
pm80xx_chip_intx_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
{
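/* unmask all outbound doorbell interrupts and clear any pending ones */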
pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL);
pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL);
}
/**
* pm80xx_chip_intx_interrupt_disable - disable PM8001 chip interrupt
* @pm8001_ha: our hba card information
*/
static void
pm80xx_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
{
pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, ODMR_MASK_ALL);
}
/**
* pm80xx_chip_interrupt_enable - enable PM8001 chip interrupt
* @pm8001_ha: our hba card information
* @vec: interrupt number to enable
*/
static void
pm80xx_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
#ifdef PM8001_USE_MSIX
u32 mask;
mask = (u32)(1 << vec);
pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, (u32)(mask & 0xFFFFFFFF));
return;
#endif
pm80xx_chip_intx_interrupt_enable(pm8001_ha);
}
/**
* pm80xx_chip_interrupt_disable - disable PM8001 chip interrupt
* @pm8001_ha: our hba card information
* @vec: interrupt number to disable
*/
static void
pm80xx_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
#ifdef PM8001_USE_MSIX
u32 mask;
if (vec == 0xFF)
mask = 0xFFFFFFFF;
else
mask = (u32)(1 << vec);
pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, (u32)(mask & 0xFFFFFFFF));
return;
#endif
pm80xx_chip_intx_interrupt_disable(pm8001_ha);
}
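/*
 * pm80xx_send_abort_all - issue an OPC_INB_SATA_ABORT with the abort_all
 * flag set so the firmware aborts every outstanding command on the given
 * SATA device (sent once the READ LOG EXT response has been handled).
 */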
static void pm80xx_send_abort_all(struct pm8001_hba_info *pm8001_ha,
struct pm8001_device *pm8001_ha_dev)
{
int res;
u32 ccb_tag;
struct pm8001_ccb_info *ccb;
struct sas_task *task = NULL;
struct task_abort_req task_abort;
struct inbound_queue_table *circularQ;
u32 opc = OPC_INB_SATA_ABORT;
int ret;
if (!pm8001_ha_dev) {
pm8001_dbg(pm8001_ha, FAIL, "dev is null\n");
return;
}
task = sas_alloc_slow_task(GFP_ATOMIC);
if (!task) {
pm8001_dbg(pm8001_ha, FAIL, "cannot allocate task\n");
return;
}
task->task_done = pm8001_task_done;
res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
if (res) {
sas_free_task(task);
return;
}
ccb = &pm8001_ha->ccb_info[ccb_tag];
ccb->device = pm8001_ha_dev;
ccb->ccb_tag = ccb_tag;
ccb->task = task;
circularQ = &pm8001_ha->inbnd_q_tbl[0];
memset(&task_abort, 0, sizeof(task_abort));
task_abort.abort_all = cpu_to_le32(1);
task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
task_abort.tag = cpu_to_le32(ccb_tag);
ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort,
sizeof(task_abort), 0);
pm8001_dbg(pm8001_ha, FAIL, "Executing abort task end\n");
if (ret) {
sas_free_task(task);
pm8001_tag_free(pm8001_ha, ccb_tag);
}
}
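/*
 * pm80xx_send_read_log - after an NCQ error, send READ LOG EXT for log
 * page 0x10 to the device to retrieve the error details for the failed
 * queued command.
 */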
static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha,
struct pm8001_device *pm8001_ha_dev)
{
struct sata_start_req sata_cmd;
int res;
u32 ccb_tag;
struct pm8001_ccb_info *ccb;
struct sas_task *task = NULL;
struct host_to_dev_fis fis;
struct domain_device *dev;
struct inbound_queue_table *circularQ;
u32 opc = OPC_INB_SATA_HOST_OPSTART;
task = sas_alloc_slow_task(GFP_ATOMIC);
if (!task) {
pm8001_dbg(pm8001_ha, FAIL, "cannot allocate task !!!\n");
return;
}
task->task_done = pm8001_task_done;
res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
if (res) {
sas_free_task(task);
pm8001_dbg(pm8001_ha, FAIL, "cannot allocate tag !!!\n");
return;
}
/* allocate the domain device ourselves as libsas
* is not going to provide one
*/
dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC);
if (!dev) {
sas_free_task(task);
pm8001_tag_free(pm8001_ha, ccb_tag);
pm8001_dbg(pm8001_ha, FAIL,
"Domain device cannot be allocated\n");
return;
}
task->dev = dev;
task->dev->lldd_dev = pm8001_ha_dev;
ccb = &pm8001_ha->ccb_info[ccb_tag];
ccb->device = pm8001_ha_dev;
ccb->ccb_tag = ccb_tag;
ccb->task = task;
ccb->n_elem = 0;
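/*
 * Flag the device so the completion path can recognize this internally
 * generated READ LOG EXT and issue the abort-all afterwards.
 */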
pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG;
pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG;
memset(&sata_cmd, 0, sizeof(sata_cmd));
circularQ = &pm8001_ha->inbnd_q_tbl[0];
/* construct read log FIS */
memset(&fis, 0, sizeof(struct host_to_dev_fis));
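/*
 * Register - Host to Device FIS (type 0x27) with the C bit (0x80) set
 * in the flags field: READ LOG EXT of log page 0x10 (the NCQ command
 * error log), one sector.
 */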
fis.fis_type = 0x27;
fis.flags = 0x80;
fis.command = ATA_CMD_READ_LOG_EXT;
fis.lbal = 0x10;
fis.sector_count = 0x1;
sata_cmd.tag = cpu_to_le32(ccb_tag);
sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
sata_cmd.ncqtag_atap_dir_m_dad |= ((0x1 << 7) | (0x5 << 9));
memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis));
res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd,
sizeof(sata_cmd), 0);
pm8001_dbg(pm8001_ha, FAIL, "Executing read log end\n");
if (res) {
sas_free_task(task);
pm8001_tag_free(pm8001_ha, ccb_tag);
kfree(dev);
}
}
/**
* mpi_ssp_completion - process the FW response to an SSP request.
* @pm8001_ha: our hba card information
* @piomb: the message contents of this outbound message.
*
* When the FW has completed an SSP request (for example an IO request) and
* has filled the SG buffers with the requested data, it posts this event to
* signal that the job is done and the corresponding buffer can be examined.
* We then notify the caller, which may be waiting on the result, so that the
* upper layer learns that the task has finished.
*/
static void
mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
struct sas_task *t;
struct pm8001_ccb_info *ccb;
unsigned long flags;
u32 status;
u32 param;
u32 tag;
struct ssp_completion_resp *psspPayload;
struct task_status_struct *ts;
struct ssp_response_iu *iu;
struct pm8001_device *pm8001_dev;
psspPayload = (struct ssp_completion_resp *)(piomb + 4);
status = le32_to_cpu(psspPayload->status);
tag = le32_to_cpu(psspPayload->tag);
ccb = &pm8001_ha->ccb_info[tag];
if ((status == IO_ABORTED) && ccb->open_retry) {
/* Being completed by another */
ccb->open_retry = 0;
return;
}
pm8001_dev = ccb->device;
param = le32_to_cpu(psspPayload->param);
t = ccb->task;
if (status && status != IO_UNDERFLOW)
pm8001_dbg(pm8001_ha, FAIL, "sas IO status 0x%x\n", status);
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
ts = &t->task_status;
pm8001_dbg(pm8001_ha, DEV,
"tag::0x%x, status::0x%x task::0x%p\n", tag, status, t);
/* Print sas address of IO failed device */
if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
(status != IO_UNDERFLOW))
pm8001_dbg(pm8001_ha, FAIL, "SAS Address of IO Failure Drive:%016llx\n",
SAS_ADDR(t->dev->sas_addr));
switch (status) {
case IO_SUCCESS:
pm8001_dbg(pm8001_ha, IO, "IO_SUCCESS ,param = 0x%x\n",
param);
if (param == 0) {
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_SAM_STAT_GOOD;
} else {
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_PROTO_RESPONSE;
ts->residual = param;
iu = &psspPayload->ssp_resp_iu;
sas_ssp_task_response(pm8001_ha->dev, t, iu);
}
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
case IO_ABORTED:
pm8001_dbg(pm8001_ha, IO, "IO_ABORTED IOMB Tag\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_ABORTED_TASK;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
case IO_UNDERFLOW:
/* SSP Completion with error */
pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW ,param = 0x%x\n",
param);
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_UNDERRUN;
ts->residual = param;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
case IO_NO_DEVICE:
pm8001_dbg(pm8001_ha, IO, "IO_NO_DEVICE\n");
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_PHY_DOWN;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_BREAK:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
/* Force the midlayer to retry */
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_PHY_NOT_READY:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_INVALID_SSP_RSP_FRAME:
pm8001_dbg(pm8001_ha, IO,
"IO_XFER_ERROR_INVALID_SSP_RSP_FRAME\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
pm8001_dbg(pm8001_ha, IO,
"IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_EPROTO;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
pm8001_dbg(pm8001_ha, IO,
"IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_UNKNOWN;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_BREAK:
pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_UNKNOWN;
if (!t->uldd_task)
pm8001_handle_event(pm8001_ha,
pm8001_dev,
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
break;
case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
pm8001_dbg(pm8001_ha, IO,
"IO_OPEN_CNX_ERROR_BAD_DESTINATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_BAD_DEST;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
pm8001_dbg(pm8001_ha, IO,
"IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_CONN_RATE;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
pm8001_dbg(pm8001_ha, IO,
"IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n");
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_NAK_RECEIVED:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_ACK_NAK_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_NAK_R_ERR;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_DMA:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_DMA\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_OPEN_RETRY_TIMEOUT:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_OFFSET_MISMATCH:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_OFFSET_MISMATCH\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
case IO_PORT_IN_RESET:
pm8001_dbg(pm8001_ha, IO, "IO_PORT_IN_RESET\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
case IO_DS_NON_OPERATIONAL:
pm8001_dbg(pm8001_ha, IO, "IO_DS_NON_OPERATIONAL\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
if (!t->uldd_task)
pm8001_handle_event(pm8001_ha,
pm8001_dev,
IO_DS_NON_OPERATIONAL);
break;
case IO_DS_IN_RECOVERY:
pm8001_dbg(pm8001_ha, IO, "IO_DS_IN_RECOVERY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
case IO_TM_TAG_NOT_FOUND:
pm8001_dbg(pm8001_ha, IO, "IO_TM_TAG_NOT_FOUND\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
case IO_SSP_EXT_IU_ZERO_LEN_ERROR:
pm8001_dbg(pm8001_ha, IO, "IO_SSP_EXT_IU_ZERO_LEN_ERROR\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
pm8001_dbg(pm8001_ha, IO,
"IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
default:
pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", status);
/* not an allowed case; return a failed status */
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
}
pm8001_dbg(pm8001_ha, IO, "scsi_status = 0x%x\n ",
psspPayload->ssp_resp_iu.status);
spin_lock_irqsave(&t->task_state_lock, flags);
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_dbg(pm8001_ha, FAIL,
"task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat);
if (t->slow_task)
complete(&t->slow_task->completion);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/* in order to force CPU ordering */
t->task_done(t);
}
}
/* See the comments for mpi_ssp_completion */
static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
struct sas_task *t;
unsigned long flags;
struct task_status_struct *ts;
struct pm8001_ccb_info *ccb;
struct pm8001_device *pm8001_dev;
struct ssp_event_resp *psspPayload =
(struct ssp_event_resp *)(piomb + 4);
u32 event = le32_to_cpu(psspPayload->event);
u32 tag = le32_to_cpu(psspPayload->tag);
u32 port_id = le32_to_cpu(psspPayload->port_id);
ccb = &pm8001_ha->ccb_info[tag];
t = ccb->task;
pm8001_dev = ccb->device;
if (event)
pm8001_dbg(pm8001_ha, FAIL, "sas IO status 0x%x\n", event);
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
ts = &t->task_status;
pm8001_dbg(pm8001_ha, IOERR, "port_id:0x%x, tag:0x%x, event:0x%x\n",
port_id, tag, event);
switch (event) {
case IO_OVERFLOW:
pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
ts->residual = 0;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_BREAK:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
pm8001_handle_event(pm8001_ha, t, IO_XFER_ERROR_BREAK);
return;
case IO_XFER_ERROR_PHY_NOT_READY:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
pm8001_dbg(pm8001_ha, IO,
"IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_EPROTO;
break;
case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
pm8001_dbg(pm8001_ha, IO,
"IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_UNKNOWN;
break;
case IO_OPEN_CNX_ERROR_BREAK:
pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_UNKNOWN;
if (!t->uldd_task)
pm8001_handle_event(pm8001_ha,
pm8001_dev,
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
break;
case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
pm8001_dbg(pm8001_ha, IO,
"IO_OPEN_CNX_ERROR_BAD_DESTINATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_BAD_DEST;
break;
case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
pm8001_dbg(pm8001_ha, IO,
"IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_CONN_RATE;
break;
case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
pm8001_dbg(pm8001_ha, IO,
"IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
break;
case IO_XFER_ERROR_NAK_RECEIVED:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_ACK_NAK_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_NAK_R_ERR;
break;
case IO_XFER_OPEN_RETRY_TIMEOUT:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n");
pm8001_handle_event(pm8001_ha, t, IO_XFER_OPEN_RETRY_TIMEOUT);
return;
case IO_XFER_ERROR_UNEXPECTED_PHASE:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_UNEXPECTED_PHASE\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
break;
case IO_XFER_ERROR_XFER_RDY_OVERRUN:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_XFER_RDY_OVERRUN\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
break;
case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED:
pm8001_dbg(pm8001_ha, IO,
"IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
break;
case IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT:
pm8001_dbg(pm8001_ha, IO,
"IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
break;
case IO_XFER_ERROR_OFFSET_MISMATCH:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_OFFSET_MISMATCH\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
break;
case IO_XFER_ERROR_XFER_ZERO_DATA_LEN:
pm8001_dbg(pm8001_ha, IO,
"IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
break;
case IO_XFER_ERROR_INTERNAL_CRC_ERROR:
pm8001_dbg(pm8001_ha, IOERR,
"IO_XFR_ERROR_INTERNAL_CRC_ERROR\n");
/* TBC: used default set values */
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
break;
case IO_XFER_CMD_FRAME_ISSUED:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_CMD_FRAME_ISSUED\n");
return;
default:
pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", event);
/* not an allowed case; return a failed status */
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
break;
}
spin_lock_irqsave(&t->task_state_lock, flags);
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_dbg(pm8001_ha, FAIL,
"task 0x%p done with event 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, event, ts->resp, ts->stat);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/* in order to force CPU ordering */
t->task_done(t);
}
}
/* See the comments for mpi_ssp_completion */
static void
mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
struct outbound_queue_table *circularQ, void *piomb)
{
struct sas_task *t;
struct pm8001_ccb_info *ccb;
u32 param;
u32 status;
u32 tag;
int i, j;
u8 sata_addr_low[4];
u32 temp_sata_addr_low, temp_sata_addr_hi;
u8 sata_addr_hi[4];
struct sata_completion_resp *psataPayload;
struct task_status_struct *ts;
struct ata_task_resp *resp;
u32 *sata_resp;
struct pm8001_device *pm8001_dev;
unsigned long flags;
psataPayload = (struct sata_completion_resp *)(piomb + 4);
status = le32_to_cpu(psataPayload->status);
tag = le32_to_cpu(psataPayload->tag);
if (!tag) {
pm8001_dbg(pm8001_ha, FAIL, "tag null\n");
return;
}
ccb = &pm8001_ha->ccb_info[tag];
param = le32_to_cpu(psataPayload->param);
if (ccb) {
t = ccb->task;
pm8001_dev = ccb->device;
} else {
pm8001_dbg(pm8001_ha, FAIL, "ccb null\n");
return;
}
if (t) {
if (t->dev && (t->dev->lldd_dev))
pm8001_dev = t->dev->lldd_dev;
} else {
pm8001_dbg(pm8001_ha, FAIL, "task null\n");
return;
}
if ((pm8001_dev && !(pm8001_dev->id & NCQ_READ_LOG_FLAG))
&& unlikely(!t || !t->lldd_task || !t->dev)) {
pm8001_dbg(pm8001_ha, FAIL, "task or dev null\n");
return;
}
ts = &t->task_status;
if (!ts) {
pm8001_dbg(pm8001_ha, FAIL, "ts null\n");
return;
}
if (status != IO_SUCCESS) {
pm8001_dbg(pm8001_ha, FAIL,
"IO failed device_id %u status 0x%x tag %d\n",
pm8001_dev->device_id, status, tag);
}
/* Print sas address of IO failed device */
if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
(status != IO_UNDERFLOW)) {
if (!((t->dev->parent) &&
(dev_is_expander(t->dev->parent->dev_type)))) {
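/*
 * Directly attached drive (no expander in between): reconstruct a
 * printable SAS address from the HBA SAS address by byte-swapping each
 * 32-bit half and adding the attached phy number plus 0x10 to the low
 * half.
 */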
for (i = 0, j = 4; i <= 3 && j <= 7; i++, j++)
sata_addr_low[i] = pm8001_ha->sas_addr[j];
for (i = 0, j = 0; i <= 3 && j <= 3; i++, j++)
sata_addr_hi[i] = pm8001_ha->sas_addr[j];
memcpy(&temp_sata_addr_low, sata_addr_low,
sizeof(sata_addr_low));
memcpy(&temp_sata_addr_hi, sata_addr_hi,
sizeof(sata_addr_hi));
temp_sata_addr_hi = (((temp_sata_addr_hi >> 24) & 0xff)
|((temp_sata_addr_hi << 8) &
0xff0000) |
((temp_sata_addr_hi >> 8)
& 0xff00) |
((temp_sata_addr_hi << 24) &
0xff000000));
temp_sata_addr_low = ((((temp_sata_addr_low >> 24)
& 0xff) |
((temp_sata_addr_low << 8)
& 0xff0000) |
((temp_sata_addr_low >> 8)
& 0xff00) |
((temp_sata_addr_low << 24)
& 0xff000000)) +
pm8001_dev->attached_phy +
0x10);
pm8001_dbg(pm8001_ha, FAIL,
"SAS Address of IO Failure Drive:%08x%08x\n",
temp_sata_addr_hi,
temp_sata_addr_low);
} else {
pm8001_dbg(pm8001_ha, FAIL,
"SAS Address of IO Failure Drive:%016llx\n",
SAS_ADDR(t->dev->sas_addr));
}
}
switch (status) {
case IO_SUCCESS:
pm8001_dbg(pm8001_ha, IO, "IO_SUCCESS\n");
if (param == 0) {
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_SAM_STAT_GOOD;
/* check if response is for SEND READ LOG */
if (pm8001_dev &&
(pm8001_dev->id & NCQ_READ_LOG_FLAG)) {
/* set new bit for abort_all */
pm8001_dev->id |= NCQ_ABORT_ALL_FLAG;
/* clear bit for read log */
pm8001_dev->id = pm8001_dev->id & 0x7FFFFFFF;
pm80xx_send_abort_all(pm8001_ha, pm8001_dev);
/* Free the tag */
pm8001_tag_free(pm8001_ha, tag);
sas_free_task(t);
return;
}
} else {
u8 len;
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_PROTO_RESPONSE;
ts->residual = param;
pm8001_dbg(pm8001_ha, IO,
"SAS_PROTO_RESPONSE len = %d\n",
param);
sata_resp = &psataPayload->sata_resp[0];
resp = (struct ata_task_resp *)ts->buf;
if (t->ata_task.dma_xfer == 0 &&
t->data_dir == DMA_FROM_DEVICE) {
len = sizeof(struct pio_setup_fis);
pm8001_dbg(pm8001_ha, IO,
"PIO read len = %d\n", len);
} else if (t->ata_task.use_ncq) {
len = sizeof(struct set_dev_bits_fis);
pm8001_dbg(pm8001_ha, IO, "FPDMA len = %d\n",
len);
} else {
len = sizeof(struct dev_to_host_fis);
pm8001_dbg(pm8001_ha, IO, "other len = %d\n",
len);
}
if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) {
resp->frame_len = len;
memcpy(&resp->ending_fis[0], sata_resp, len);
ts->buf_valid_size = sizeof(*resp);
} else
pm8001_dbg(pm8001_ha, IO,
"response too large\n");
}
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
case IO_ABORTED:
pm8001_dbg(pm8001_ha, IO, "IO_ABORTED IOMB Tag\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_ABORTED_TASK;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
/* the following cases are still TODO */
case IO_UNDERFLOW:
/* SATA Completion with error */
pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW param = %d\n", param);
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_UNDERRUN;
ts->residual = param;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
case IO_NO_DEVICE:
pm8001_dbg(pm8001_ha, IO, "IO_NO_DEVICE\n");
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_PHY_DOWN;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_BREAK:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_INTERRUPTED;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_PHY_NOT_READY:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
pm8001_dbg(pm8001_ha, IO,
"IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_EPROTO;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
pm8001_dbg(pm8001_ha, IO,
"IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_UNKNOWN;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_BREAK:
pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
if (!t->uldd_task) {
pm8001_handle_event(pm8001_ha,
pm8001_dev,
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
spin_unlock_irqrestore(&circularQ->oq_lock,
circularQ->lock_flags);
pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
spin_lock_irqsave(&circularQ->oq_lock,
circularQ->lock_flags);
return;
}
break;
case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
pm8001_dbg(pm8001_ha, IO,
"IO_OPEN_CNX_ERROR_BAD_DESTINATION\n");
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_BAD_DEST;
if (!t->uldd_task) {
pm8001_handle_event(pm8001_ha,
pm8001_dev,
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
spin_unlock_irqrestore(&circularQ->oq_lock,
circularQ->lock_flags);
pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
spin_lock_irqsave(&circularQ->oq_lock,
circularQ->lock_flags);
return;
}
break;
case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
pm8001_dbg(pm8001_ha, IO,
"IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_CONN_RATE;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
pm8001_dbg(pm8001_ha, IO,
"IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
if (!t->uldd_task) {
pm8001_handle_event(pm8001_ha,
pm8001_dev,
IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
spin_unlock_irqrestore(&circularQ->oq_lock,
circularQ->lock_flags);
pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
spin_lock_irqsave(&circularQ->oq_lock,
circularQ->lock_flags);
return;
}
break;
case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
pm8001_dbg(pm8001_ha, IO,
"IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_NAK_RECEIVED:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_NAK_R_ERR;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_ACK_NAK_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_NAK_R_ERR;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_DMA:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_DMA\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_ABORTED_TASK;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_SATA_LINK_TIMEOUT:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_SATA_LINK_TIMEOUT\n");
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_DEV_NO_RESPONSE;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_REJECTED_NCQ_MODE:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_REJECTED_NCQ_MODE\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_UNDERRUN;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_OPEN_RETRY_TIMEOUT:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_TO;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
case IO_PORT_IN_RESET:
pm8001_dbg(pm8001_ha, IO, "IO_PORT_IN_RESET\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
case IO_DS_NON_OPERATIONAL:
pm8001_dbg(pm8001_ha, IO, "IO_DS_NON_OPERATIONAL\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
if (!t->uldd_task) {
pm8001_handle_event(pm8001_ha, pm8001_dev,
IO_DS_NON_OPERATIONAL);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
spin_unlock_irqrestore(&circularQ->oq_lock,
circularQ->lock_flags);
pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
spin_lock_irqsave(&circularQ->oq_lock,
circularQ->lock_flags);
return;
}
break;
case IO_DS_IN_RECOVERY:
pm8001_dbg(pm8001_ha, IO, "IO_DS_IN_RECOVERY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
case IO_DS_IN_ERROR:
pm8001_dbg(pm8001_ha, IO, "IO_DS_IN_ERROR\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
if (!t->uldd_task) {
pm8001_handle_event(pm8001_ha, pm8001_dev,
IO_DS_IN_ERROR);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
spin_unlock_irqrestore(&circularQ->oq_lock,
circularQ->lock_flags);
pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
spin_lock_irqsave(&circularQ->oq_lock,
circularQ->lock_flags);
return;
}
break;
case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
pm8001_dbg(pm8001_ha, IO,
"IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
default:
pm8001_dbg(pm8001_ha, DEVIO,
"Unknown status device_id %u status 0x%x tag %d\n",
pm8001_dev->device_id, status, tag);
/* not an allowed case; return a failed status */
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
}
spin_lock_irqsave(&t->task_state_lock, flags);
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_dbg(pm8001_ha, FAIL,
"task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat);
if (t->slow_task)
complete(&t->slow_task->completion);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
spin_unlock_irqrestore(&circularQ->oq_lock,
circularQ->lock_flags);
pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
spin_lock_irqsave(&circularQ->oq_lock,
circularQ->lock_flags);
}
}
/* See the comments for mpi_ssp_completion */
static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha,
struct outbound_queue_table *circularQ, void *piomb)
{
struct sas_task *t;
struct task_status_struct *ts;
struct pm8001_ccb_info *ccb;
struct pm8001_device *pm8001_dev;
struct sata_event_resp *psataPayload =
(struct sata_event_resp *)(piomb + 4);
u32 event = le32_to_cpu(psataPayload->event);
u32 tag = le32_to_cpu(psataPayload->tag);
u32 port_id = le32_to_cpu(psataPayload->port_id);
u32 dev_id = le32_to_cpu(psataPayload->device_id);
unsigned long flags;
ccb = &pm8001_ha->ccb_info[tag];
if (ccb) {
t = ccb->task;
pm8001_dev = ccb->device;
} else {
pm8001_dbg(pm8001_ha, FAIL, "No CCB !!!. returning\n");
return;
}
if (event)
pm8001_dbg(pm8001_ha, FAIL, "SATA EVENT 0x%x\n", event);
/* Check if this is an NCQ error */
if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) {
/* find device using device id */
pm8001_dev = pm8001_find_dev(pm8001_ha, dev_id);
/* send read log extension */
if (pm8001_dev)
pm80xx_send_read_log(pm8001_ha, pm8001_dev);
return;
}
if (unlikely(!t || !t->lldd_task || !t->dev)) {
pm8001_dbg(pm8001_ha, FAIL, "task or dev null\n");
return;
}
ts = &t->task_status;
pm8001_dbg(pm8001_ha, IOERR, "port_id:0x%x, tag:0x%x, event:0x%x\n",
port_id, tag, event);
switch (event) {
case IO_OVERFLOW:
pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
ts->residual = 0;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_BREAK:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_INTERRUPTED;
break;
case IO_XFER_ERROR_PHY_NOT_READY:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
pm8001_dbg(pm8001_ha, IO,
"IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_EPROTO;
break;
case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
pm8001_dbg(pm8001_ha, IO,
"IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_UNKNOWN;
break;
case IO_OPEN_CNX_ERROR_BREAK:
pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
break;