Commit 26780d9e authored by Bradley Grove, committed by James Bottomley

[SCSI] esas2r: ATTO Technology ExpressSAS 6G SAS/SATA RAID Adapter Driver

This is a new driver for ATTO Technology's ExpressSAS series of hardware RAID
adapters.  It supports the following adapters:

    - ExpressSAS R60F
    - ExpressSAS R680
    - ExpressSAS R608
    - ExpressSAS R644

Signed-off-by: Bradley Grove <bgrove@attotech.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
parent 127be355
@@ -1547,6 +1547,13 @@ W: http://atmelwlandriver.sourceforge.net/
S: Maintained
F: drivers/net/wireless/atmel*
ATTO EXPRESSSAS SAS/SATA RAID SCSI DRIVER
M: Bradley Grove <linuxdrivers@attotech.com>
L: linux-scsi@vger.kernel.org
W: http://www.attotech.com
S: Supported
F: drivers/scsi/esas2r

AUDIT SUBSYSTEM
M: Al Viro <viro@zeniv.linux.org.uk>
M: Eric Paris <eparis@redhat.com>
...
@@ -601,6 +601,7 @@ config SCSI_ARCMSR
	  To compile this driver as a module, choose M here: the
	  module will be called arcmsr (modprobe arcmsr).
source "drivers/scsi/esas2r/Kconfig"
source "drivers/scsi/megaraid/Kconfig.megaraid" source "drivers/scsi/megaraid/Kconfig.megaraid"
source "drivers/scsi/mpt2sas/Kconfig" source "drivers/scsi/mpt2sas/Kconfig"
source "drivers/scsi/mpt3sas/Kconfig" source "drivers/scsi/mpt3sas/Kconfig"
...
@@ -141,6 +141,7 @@ obj-$(CONFIG_SCSI_CXGB3_ISCSI)	+= libiscsi.o libiscsi_tcp.o cxgbi/
obj-$(CONFIG_SCSI_CXGB4_ISCSI)	+= libiscsi.o libiscsi_tcp.o cxgbi/
obj-$(CONFIG_SCSI_BNX2_ISCSI)	+= libiscsi.o bnx2i/
obj-$(CONFIG_BE2ISCSI)		+= libiscsi.o be2iscsi/
obj-$(CONFIG_SCSI_ESAS2R) += esas2r/
obj-$(CONFIG_SCSI_PMCRAID)	+= pmcraid.o
obj-$(CONFIG_SCSI_VIRTIO)	+= virtio_scsi.o
obj-$(CONFIG_VMWARE_PVSCSI)	+= vmw_pvscsi.o
...
config SCSI_ESAS2R
tristate "ATTO Technology's ExpressSAS RAID adapter driver"
depends on PCI && SCSI
---help---
This driver supports the ATTO ExpressSAS R6xx SAS/SATA RAID controllers.
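	  To compile this driver as a module, choose M here: the
	  module will be called esas2r (modprobe esas2r), per the
	  Makefile below.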
obj-$(CONFIG_SCSI_ESAS2R) += esas2r.o
esas2r-objs := esas2r_log.o esas2r_disc.o esas2r_flash.o esas2r_init.o \
esas2r_int.o esas2r_io.o esas2r_ioctl.o esas2r_targdb.o \
esas2r_vda.o esas2r_main.o
/* linux/drivers/scsi/esas2r/atioctl.h
* ATTO IOCTL Handling
*
* Copyright (c) 2001-2013 ATTO Technology, Inc.
* (mailto:linuxdrivers@attotech.com)
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* NO WARRANTY
* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
* solely responsible for determining the appropriateness of using and
* distributing the Program and assumes all risks associated with its
* exercise of rights under this Agreement, including but not limited to
* the risks and costs of program errors, damage to or loss of data,
* programs or equipment, and unavailability or interruption of operations.
*
* DISCLAIMER OF LIABILITY
* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
#include "atvda.h"
#ifndef ATIOCTL_H
#define ATIOCTL_H
#define EXPRESS_IOCTL_SIGNATURE "Express"
#define EXPRESS_IOCTL_SIGNATURE_SIZE 8
/* structure definitions for IOCTLs */
struct __packed atto_express_ioctl_header {
u8 signature[EXPRESS_IOCTL_SIGNATURE_SIZE];
u8 return_code;
#define IOCTL_SUCCESS 0
#define IOCTL_ERR_INVCMD 101
#define IOCTL_INIT_FAILED 102
#define IOCTL_NOT_IMPLEMENTED 103
#define IOCTL_BAD_CHANNEL 104
#define IOCTL_TARGET_OVERRUN 105
#define IOCTL_TARGET_NOT_ENABLED 106
#define IOCTL_BAD_FLASH_IMGTYPE 107
#define IOCTL_OUT_OF_RESOURCES 108
#define IOCTL_GENERAL_ERROR 109
#define IOCTL_INVALID_PARAM 110
u8 channel;
u8 retries;
u8 pad[5];
};
/*
* NOTE - if channel == 0xFF, the request is
* handled on the adapter it came in on.
*/
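/*
 * Illustrative sketch (not part of the driver): filling in the common
 * header before issuing an express IOCTL.  The helper name is
 * hypothetical; the signature and channel semantics come from the
 * definitions above (0xFF routes the request to the receiving adapter).
 * Assumes <linux/string.h> for memset()/memcpy().
 */
static inline void example_init_express_hdr(
	struct atto_express_ioctl_header *hdr, u8 channel)
{
	memset(hdr, 0, sizeof(*hdr));
	memcpy(hdr->signature, EXPRESS_IOCTL_SIGNATURE,
	       EXPRESS_IOCTL_SIGNATURE_SIZE);
	hdr->channel = channel;
}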
#define MAX_NODE_NAMES 256
struct __packed atto_firmware_rw_request {
u8 function;
#define FUNC_FW_DOWNLOAD 0x09
#define FUNC_FW_UPLOAD 0x12
u8 img_type;
#define FW_IMG_FW 0x01
#define FW_IMG_BIOS 0x02
#define FW_IMG_NVR 0x03
#define FW_IMG_RAW 0x04
#define FW_IMG_FM_API 0x05
#define FW_IMG_FS_API 0x06
u8 pad[2];
u32 img_offset;
u32 img_size;
u8 image[0x80000];
};
struct __packed atto_param_rw_request {
u16 code;
char data_buffer[512];
};
#define MAX_CHANNEL 256
struct __packed atto_channel_list {
u32 num_channels;
u8 channel[MAX_CHANNEL];
};
struct __packed atto_channel_info {
u8 major_rev;
u8 minor_rev;
u8 IRQ;
u8 revision_id;
u8 pci_bus;
u8 pci_dev_func;
u8 core_rev;
u8 host_no;
u16 device_id;
u16 vendor_id;
u16 ven_dev_id;
u8 pad[3];
u32 hbaapi_rev;
};
/*
* CSMI control codes
* class independent
*/
#define CSMI_CC_GET_DRVR_INFO 1
#define CSMI_CC_GET_CNTLR_CFG 2
#define CSMI_CC_GET_CNTLR_STS 3
#define CSMI_CC_FW_DOWNLOAD 4
/* RAID class */
#define CSMI_CC_GET_RAID_INFO 10
#define CSMI_CC_GET_RAID_CFG 11
/* HBA class */
#define CSMI_CC_GET_PHY_INFO 20
#define CSMI_CC_SET_PHY_INFO 21
#define CSMI_CC_GET_LINK_ERRORS 22
#define CSMI_CC_SMP_PASSTHRU 23
#define CSMI_CC_SSP_PASSTHRU 24
#define CSMI_CC_STP_PASSTHRU 25
#define CSMI_CC_GET_SATA_SIG 26
#define CSMI_CC_GET_SCSI_ADDR 27
#define CSMI_CC_GET_DEV_ADDR 28
#define CSMI_CC_TASK_MGT 29
#define CSMI_CC_GET_CONN_INFO 30
/* PHY class */
#define CSMI_CC_PHY_CTRL 60
/*
* CSMI status codes
* class independent
*/
#define CSMI_STS_SUCCESS 0
#define CSMI_STS_FAILED 1
#define CSMI_STS_BAD_CTRL_CODE 2
#define CSMI_STS_INV_PARAM 3
#define CSMI_STS_WRITE_ATTEMPTED 4
/* RAID class */
#define CSMI_STS_INV_RAID_SET 1000
/* HBA class */
#define CSMI_STS_PHY_CHANGED CSMI_STS_SUCCESS
#define CSMI_STS_PHY_UNCHANGEABLE 2000
#define CSMI_STS_INV_LINK_RATE 2001
#define CSMI_STS_INV_PHY 2002
#define CSMI_STS_INV_PHY_FOR_PORT 2003
#define CSMI_STS_PHY_UNSELECTABLE 2004
#define CSMI_STS_SELECT_PHY_OR_PORT 2005
#define CSMI_STS_INV_PORT 2006
#define CSMI_STS_PORT_UNSELECTABLE 2007
#define CSMI_STS_CONNECTION_FAILED 2008
#define CSMI_STS_NO_SATA_DEV 2009
#define CSMI_STS_NO_SATA_SIGNATURE 2010
#define CSMI_STS_SCSI_EMULATION 2011
#define CSMI_STS_NOT_AN_END_DEV 2012
#define CSMI_STS_NO_SCSI_ADDR 2013
#define CSMI_STS_NO_DEV_ADDR 2014
/* CSMI class independent structures */
struct atto_csmi_get_driver_info {
char name[81];
char description[81];
u16 major_rev;
u16 minor_rev;
u16 build_rev;
u16 release_rev;
u16 csmi_major_rev;
u16 csmi_minor_rev;
#define CSMI_MAJOR_REV_0_81 0
#define CSMI_MINOR_REV_0_81 81
#define CSMI_MAJOR_REV CSMI_MAJOR_REV_0_81
#define CSMI_MINOR_REV CSMI_MINOR_REV_0_81
};
struct atto_csmi_get_pci_bus_addr {
u8 bus_num;
u8 device_num;
u8 function_num;
u8 reserved;
};
struct atto_csmi_get_cntlr_cfg {
u32 base_io_addr;
struct {
u32 base_memaddr_lo;
u32 base_memaddr_hi;
};
u32 board_id;
u16 slot_num;
#define CSMI_SLOT_NUM_UNKNOWN 0xFFFF
u8 cntlr_class;
#define CSMI_CNTLR_CLASS_HBA 5
u8 io_bus_type;
#define CSMI_BUS_TYPE_PCI 3
#define CSMI_BUS_TYPE_PCMCIA 4
union {
struct atto_csmi_get_pci_bus_addr pci_addr;
u8 reserved[32];
};
char serial_num[81];
u16 major_rev;
u16 minor_rev;
u16 build_rev;
u16 release_rev;
u16 bios_major_rev;
u16 bios_minor_rev;
u16 bios_build_rev;
u16 bios_release_rev;
u32 cntlr_flags;
#define CSMI_CNTLRF_SAS_HBA 0x00000001
#define CSMI_CNTLRF_SAS_RAID 0x00000002
#define CSMI_CNTLRF_SATA_HBA 0x00000004
#define CSMI_CNTLRF_SATA_RAID 0x00000008
#define CSMI_CNTLRF_FWD_SUPPORT 0x00010000
#define CSMI_CNTLRF_FWD_ONLINE 0x00020000
#define CSMI_CNTLRF_FWD_SRESET 0x00040000
#define CSMI_CNTLRF_FWD_HRESET 0x00080000
#define CSMI_CNTLRF_FWD_RROM 0x00100000
u16 rrom_major_rev;
u16 rrom_minor_rev;
u16 rrom_build_rev;
u16 rrom_release_rev;
u16 rrom_biosmajor_rev;
u16 rrom_biosminor_rev;
u16 rrom_biosbuild_rev;
u16 rrom_biosrelease_rev;
u8 reserved2[7];
};
struct atto_csmi_get_cntlr_sts {
u32 status;
#define CSMI_CNTLR_STS_GOOD 1
#define CSMI_CNTLR_STS_FAILED 2
#define CSMI_CNTLR_STS_OFFLINE 3
#define CSMI_CNTLR_STS_POWEROFF 4
u32 offline_reason;
#define CSMI_OFFLINE_NO_REASON 0
#define CSMI_OFFLINE_INITIALIZING 1
#define CSMI_OFFLINE_BUS_DEGRADED 2
#define CSMI_OFFLINE_BUS_FAILURE 3
u8 reserved[28];
};
struct atto_csmi_fw_download {
u32 buffer_len;
u32 download_flags;
#define CSMI_FWDF_VALIDATE 0x00000001
#define CSMI_FWDF_SOFT_RESET 0x00000002
#define CSMI_FWDF_HARD_RESET 0x00000004
u8 reserved[32];
u16 status;
#define CSMI_FWD_STS_SUCCESS 0
#define CSMI_FWD_STS_FAILED 1
#define CSMI_FWD_STS_USING_RROM 2
#define CSMI_FWD_STS_REJECT 3
#define CSMI_FWD_STS_DOWNREV 4
u16 severity;
#define CSMI_FWD_SEV_INFO 0
#define CSMI_FWD_SEV_WARNING 1
#define CSMI_FWD_SEV_ERROR 2
#define CSMI_FWD_SEV_FATAL 3
};
/* CSMI RAID class structures */
struct atto_csmi_get_raid_info {
u32 num_raid_sets;
u32 max_drivesper_set;
u8 reserved[92];
};
struct atto_csmi_raid_drives {
char model[40];
char firmware[8];
char serial_num[40];
u8 sas_addr[8];
u8 lun[8];
u8 drive_sts;
#define CSMI_DRV_STS_OK 0
#define CSMI_DRV_STS_REBUILDING 1
#define CSMI_DRV_STS_FAILED 2
#define CSMI_DRV_STS_DEGRADED 3
u8 drive_usage;
#define CSMI_DRV_USE_NOT_USED 0
#define CSMI_DRV_USE_MEMBER 1
#define CSMI_DRV_USE_SPARE 2
u8 reserved[30]; /* spec says 22 */
};
struct atto_csmi_get_raid_cfg {
u32 raid_set_index;
u32 capacity;
u32 stripe_size;
u8 raid_type;
u8 status;
u8 information;
u8 drive_cnt;
u8 reserved[20];
struct atto_csmi_raid_drives drives[1];
};
/* CSMI HBA class structures */
struct atto_csmi_phy_entity {
u8 ident_frame[0x1C];
u8 port_id;
u8 neg_link_rate;
u8 min_link_rate;
u8 max_link_rate;
u8 phy_change_cnt;
u8 auto_discover;
#define CSMI_DISC_NOT_SUPPORTED 0x00
#define CSMI_DISC_NOT_STARTED 0x01
#define CSMI_DISC_IN_PROGRESS 0x02
#define CSMI_DISC_COMPLETE 0x03
#define CSMI_DISC_ERROR 0x04
u8 reserved[2];
u8 attach_ident_frame[0x1C];
};
struct atto_csmi_get_phy_info {
u8 number_of_phys;
u8 reserved[3];
struct atto_csmi_phy_entity phy[32];
};
struct atto_csmi_set_phy_info {
u8 phy_id;
u8 neg_link_rate;
#define CSMI_NEG_RATE_NEGOTIATE 0x00
#define CSMI_NEG_RATE_PHY_DIS 0x01
u8 prog_minlink_rate;
u8 prog_maxlink_rate;
u8 signal_class;
#define CSMI_SIG_CLASS_UNKNOWN 0x00
#define CSMI_SIG_CLASS_DIRECT 0x01
#define CSMI_SIG_CLASS_SERVER 0x02
#define CSMI_SIG_CLASS_ENCLOSURE 0x03
u8 reserved[3];
};
struct atto_csmi_get_link_errors {
u8 phy_id;
u8 reset_cnts;
#define CSMI_RESET_CNTS_NO 0x00
#define CSMI_RESET_CNTS_YES 0x01
u8 reserved[2];
u32 inv_dw_cnt;
u32 disp_err_cnt;
u32 loss_ofdw_sync_cnt;
u32 phy_reseterr_cnt;
/*
* The following field has been added by ATTO for ease of
* implementation of additional statistics. Drivers must validate
* the length of the IOCTL payload prior to filling them in so CSMI
* compliant applications function correctly.
*/
u32 crc_err_cnt;
};
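/*
 * Illustrative sketch of the validation the comment above calls for:
 * only fill in crc_err_cnt when the caller's payload is large enough
 * to hold it.  payload_len is a hypothetical parameter; offsetof()
 * comes from <linux/stddef.h>.
 */
static inline int example_payload_has_crc_cnt(u32 payload_len)
{
	return payload_len >= offsetof(struct atto_csmi_get_link_errors,
				       crc_err_cnt) + sizeof(u32);
}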
struct atto_csmi_smp_passthru {
u8 phy_id;
u8 port_id;
u8 conn_rate;
u8 reserved;
u8 dest_sas_addr[8];
u32 req_len;
u8 smp_req[1020];
u8 conn_sts;
u8 reserved2[3];
u32 rsp_len;
u8 smp_rsp[1020];
};
struct atto_csmi_ssp_passthru_sts {
u8 conn_sts;
u8 reserved[3];
u8 data_present;
u8 status;
u16 rsp_length;
u8 rsp[256];
u32 data_bytes;
};
struct atto_csmi_ssp_passthru {
u8 phy_id;
u8 port_id;
u8 conn_rate;
u8 reserved;
u8 dest_sas_addr[8];
u8 lun[8];
u8 cdb_len;
u8 add_cdb_len;
u8 reserved2[2];
u8 cdb[16];
u32 flags;
#define CSMI_SSPF_DD_READ 0x00000001
#define CSMI_SSPF_DD_WRITE 0x00000002
#define CSMI_SSPF_DD_UNSPECIFIED 0x00000004
#define CSMI_SSPF_TA_SIMPLE 0x00000000
#define CSMI_SSPF_TA_HEAD_OF_Q 0x00000010
#define CSMI_SSPF_TA_ORDERED 0x00000020
#define CSMI_SSPF_TA_ACA 0x00000040
u8 add_cdb[24];
u32 data_len;
struct atto_csmi_ssp_passthru_sts sts;
};
struct atto_csmi_stp_passthru_sts {
u8 conn_sts;
u8 reserved[3];
u8 sts_fis[20];
u32 scr[16];
u32 data_bytes;
};
struct atto_csmi_stp_passthru {
u8 phy_id;
u8 port_id;
u8 conn_rate;
u8 reserved;
u8 dest_sas_addr[8];
u8 reserved2[4];
u8 command_fis[20];
u32 flags;
#define CSMI_STPF_DD_READ 0x00000001
#define CSMI_STPF_DD_WRITE 0x00000002
#define CSMI_STPF_DD_UNSPECIFIED 0x00000004
#define CSMI_STPF_PIO 0x00000010
#define CSMI_STPF_DMA 0x00000020
#define CSMI_STPF_PACKET 0x00000040
#define CSMI_STPF_DMA_QUEUED 0x00000080
#define CSMI_STPF_EXECUTE_DIAG 0x00000100
#define CSMI_STPF_RESET_DEVICE 0x00000200
u32 data_len;
struct atto_csmi_stp_passthru_sts sts;
};
struct atto_csmi_get_sata_sig {
u8 phy_id;
u8 reserved[3];
u8 reg_dth_fis[20];
};
struct atto_csmi_get_scsi_addr {
u8 sas_addr[8];
u8 sas_lun[8];
u8 host_index;
u8 path_id;
u8 target_id;
u8 lun;
};
struct atto_csmi_get_dev_addr {
u8 host_index;
u8 path_id;
u8 target_id;
u8 lun;
u8 sas_addr[8];
u8 sas_lun[8];
};
struct atto_csmi_task_mgmt {
u8 host_index;
u8 path_id;
u8 target_id;
u8 lun;
u32 flags;
#define CSMI_TMF_TASK_IU 0x00000001
#define CSMI_TMF_HARD_RST 0x00000002
#define CSMI_TMF_SUPPRESS_RSLT 0x00000004
u32 queue_tag;
u32 reserved;
u8 task_mgt_func;
u8 reserved2[7];
u32 information;
#define CSMI_TM_INFO_TEST 1
#define CSMI_TM_INFO_EXCEEDED 2
#define CSMI_TM_INFO_DEMAND 3
#define CSMI_TM_INFO_TRIGGER 4
struct atto_csmi_ssp_passthru_sts sts;
};
struct atto_csmi_get_conn_info {
u32 pinout;
#define CSMI_CON_UNKNOWN 0x00000001
#define CSMI_CON_SFF_8482 0x00000002
#define CSMI_CON_SFF_8470_LANE_1 0x00000100
#define CSMI_CON_SFF_8470_LANE_2 0x00000200
#define CSMI_CON_SFF_8470_LANE_3 0x00000400
#define CSMI_CON_SFF_8470_LANE_4 0x00000800
#define CSMI_CON_SFF_8484_LANE_1 0x00010000
#define CSMI_CON_SFF_8484_LANE_2 0x00020000
#define CSMI_CON_SFF_8484_LANE_3 0x00040000
#define CSMI_CON_SFF_8484_LANE_4 0x00080000
u8 connector[16];
u8 location;
#define CSMI_CON_INTERNAL 0x02
#define CSMI_CON_EXTERNAL 0x04
#define CSMI_CON_SWITCHABLE 0x08
#define CSMI_CON_AUTO 0x10
u8 reserved[15];
};
/* CSMI PHY class structures */
struct atto_csmi_character {
u8 type_flags;
#define CSMI_CTF_POS_DISP 0x01
#define CSMI_CTF_NEG_DISP 0x02
#define CSMI_CTF_CTRL_CHAR 0x04
u8 value;
};
struct atto_csmi_pc_ctrl {
u8 type;
#define CSMI_PC_TYPE_UNDEFINED 0x00
#define CSMI_PC_TYPE_SATA 0x01
#define CSMI_PC_TYPE_SAS 0x02
u8 rate;
u8 reserved[6];
u32 vendor_unique[8];
u32 tx_flags;
#define CSMI_PC_TXF_PREEMP_DIS 0x00000001
signed char tx_amplitude;
signed char tx_preemphasis;
signed char tx_slew_rate;
signed char tx_reserved[13];
u8 tx_vendor_unique[64];
u32 rx_flags;
#define CSMI_PC_RXF_EQ_DIS 0x00000001
signed char rx_threshold;
signed char rx_equalization_gain;
signed char rx_reserved[14];
u8 rx_vendor_unique[64];
u32 pattern_flags;
#define CSMI_PC_PATF_FIXED 0x00000001
#define CSMI_PC_PATF_DIS_SCR 0x00000002
#define CSMI_PC_PATF_DIS_ALIGN 0x00000004
#define CSMI_PC_PATF_DIS_SSC 0x00000008
u8 fixed_pattern;
#define CSMI_PC_FP_CJPAT 0x00000001
#define CSMI_PC_FP_ALIGN 0x00000002
u8 user_pattern_len;
u8 pattern_reserved[6];
struct atto_csmi_character user_pattern_buffer[16];
};
struct atto_csmi_phy_ctrl {
u32 function;
#define CSMI_PC_FUNC_GET_SETUP 0x00000100
u8 phy_id;
u16 len_of_cntl;
u8 num_of_cntls;
u8 reserved[4];
u32 link_flags;
#define CSMI_PHY_ACTIVATE_CTRL 0x00000001
#define CSMI_PHY_UPD_SPINUP_RATE 0x00000002
#define CSMI_PHY_AUTO_COMWAKE 0x00000004
u8 spinup_rate;
u8 link_reserved[7];
u32 vendor_unique[8];
struct atto_csmi_pc_ctrl control[1];
};
union atto_ioctl_csmi {
struct atto_csmi_get_driver_info drvr_info;
struct atto_csmi_get_cntlr_cfg cntlr_cfg;
struct atto_csmi_get_cntlr_sts cntlr_sts;
struct atto_csmi_fw_download fw_dwnld;
struct atto_csmi_get_raid_info raid_info;
struct atto_csmi_get_raid_cfg raid_cfg;
struct atto_csmi_get_phy_info get_phy_info;
struct atto_csmi_set_phy_info set_phy_info;
struct atto_csmi_get_link_errors link_errs;
struct atto_csmi_smp_passthru smp_pass_thru;
struct atto_csmi_ssp_passthru ssp_pass_thru;
struct atto_csmi_stp_passthru stp_pass_thru;
struct atto_csmi_task_mgmt tsk_mgt;
struct atto_csmi_get_sata_sig sata_sig;
struct atto_csmi_get_scsi_addr scsi_addr;
struct atto_csmi_get_dev_addr dev_addr;
struct atto_csmi_get_conn_info conn_info[32];
struct atto_csmi_phy_ctrl phy_ctrl;
};
struct atto_csmi {
u32 control_code;
u32 status;
union atto_ioctl_csmi data;
};
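/*
 * Illustrative sketch: preparing a class-independent CSMI request.
 * How the structure is transported to the adapter is omitted here;
 * the control and status codes follow the tables above.
 */
static inline void example_prepare_csmi_drvr_info(struct atto_csmi *req)
{
	memset(req, 0, sizeof(*req));
	req->control_code = CSMI_CC_GET_DRVR_INFO;
	/* after completion, callers should verify
	 * req->status == CSMI_STS_SUCCESS before reading
	 * req->data.drvr_info */
}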
struct atto_module_info {
void *adapter;
void *pci_dev;
void *scsi_host;
unsigned short host_no;
union {
struct {
u64 node_name;
u64 port_name;
};
u64 sas_addr;
};
};
#define ATTO_FUNC_GET_ADAP_INFO 0x00
#define ATTO_VER_GET_ADAP_INFO0 0
#define ATTO_VER_GET_ADAP_INFO ATTO_VER_GET_ADAP_INFO0
struct __packed atto_hba_get_adapter_info {
struct {
u16 vendor_id;
u16 device_id;
u16 ss_vendor_id;
u16 ss_device_id;
u8 class_code[3];
u8 rev_id;
u8 bus_num;
u8 dev_num;
u8 func_num;
u8 link_width_max;
u8 link_width_curr;
#define ATTO_GAI_PCILW_UNKNOWN 0x00
u8 link_speed_max;
u8 link_speed_curr;
#define ATTO_GAI_PCILS_UNKNOWN 0x00
#define ATTO_GAI_PCILS_GEN1 0x01
#define ATTO_GAI_PCILS_GEN2 0x02
#define ATTO_GAI_PCILS_GEN3 0x03
u8 interrupt_mode;
#define ATTO_GAI_PCIIM_UNKNOWN 0x00
#define ATTO_GAI_PCIIM_LEGACY 0x01
#define ATTO_GAI_PCIIM_MSI 0x02
#define ATTO_GAI_PCIIM_MSIX 0x03
u8 msi_vector_cnt;
u8 reserved[19];
} pci;
u8 adap_type;
#define ATTO_GAI_AT_EPCIU320 0x00
#define ATTO_GAI_AT_ESASRAID 0x01
#define ATTO_GAI_AT_ESASRAID2 0x02
#define ATTO_GAI_AT_ESASHBA 0x03
#define ATTO_GAI_AT_ESASHBA2 0x04
#define ATTO_GAI_AT_CELERITY 0x05
#define ATTO_GAI_AT_CELERITY8 0x06
#define ATTO_GAI_AT_FASTFRAME 0x07
#define ATTO_GAI_AT_ESASHBA3 0x08
#define ATTO_GAI_AT_CELERITY16 0x09
#define ATTO_GAI_AT_TLSASHBA 0x0A
#define ATTO_GAI_AT_ESASHBA4 0x0B
u8 adap_flags;
#define ATTO_GAI_AF_DEGRADED 0x01
#define ATTO_GAI_AF_SPT_SUPP 0x02
#define ATTO_GAI_AF_DEVADDR_SUPP 0x04
#define ATTO_GAI_AF_PHYCTRL_SUPP 0x08
#define ATTO_GAI_AF_TEST_SUPP 0x10
#define ATTO_GAI_AF_DIAG_SUPP 0x20
#define ATTO_GAI_AF_VIRT_SES 0x40
#define ATTO_GAI_AF_CONN_CTRL 0x80
u8 num_ports;
u8 num_phys;
u8 drvr_rev_major;
u8 drvr_rev_minor;
u8 drvr_revsub_minor;
u8 drvr_rev_build;
char drvr_rev_ascii[16];
char drvr_name[32];
char firmware_rev[16];
char flash_rev[16];
char model_name_short[16];
char model_name[32];
u32 num_targets;
u32 num_targsper_bus;
u32 num_lunsper_targ;
u8 num_busses;
u8 num_connectors;
u8 adap_flags2;
#define ATTO_GAI_AF2_FCOE_SUPP 0x01
#define ATTO_GAI_AF2_NIC_SUPP 0x02
#define ATTO_GAI_AF2_LOCATE_SUPP 0x04
#define ATTO_GAI_AF2_ADAP_CTRL_SUPP 0x08
#define ATTO_GAI_AF2_DEV_INFO_SUPP 0x10
#define ATTO_GAI_AF2_NPIV_SUPP 0x20
#define ATTO_GAI_AF2_MP_SUPP 0x40
u8 num_temp_sensors;
u32 num_targets_backend;
u32 tunnel_flags;
#define ATTO_GAI_TF_MEM_RW 0x00000001
#define ATTO_GAI_TF_TRACE 0x00000002
#define ATTO_GAI_TF_SCSI_PASS_THRU 0x00000004
#define ATTO_GAI_TF_GET_DEV_ADDR 0x00000008
#define ATTO_GAI_TF_PHY_CTRL 0x00000010
#define ATTO_GAI_TF_CONN_CTRL 0x00000020
#define ATTO_GAI_TF_GET_DEV_INFO 0x00000040
u8 reserved3[0x138];
};
#define ATTO_FUNC_GET_ADAP_ADDR 0x01
#define ATTO_VER_GET_ADAP_ADDR0 0
#define ATTO_VER_GET_ADAP_ADDR ATTO_VER_GET_ADAP_ADDR0
struct __packed atto_hba_get_adapter_address {
u8 addr_type;
#define ATTO_GAA_AT_PORT 0x00
#define ATTO_GAA_AT_NODE 0x01
#define ATTO_GAA_AT_CURR_MAC 0x02
#define ATTO_GAA_AT_PERM_MAC 0x03
#define ATTO_GAA_AT_VNIC 0x04
u8 port_id;
u16 addr_len;
u8 address[256];
};
#define ATTO_FUNC_MEM_RW 0x02
#define ATTO_VER_MEM_RW0 0
#define ATTO_VER_MEM_RW ATTO_VER_MEM_RW0
struct __packed atto_hba_memory_read_write {
u8 mem_func;
u8 mem_type;
union {
u8 pci_index;
u8 i2c_dev;
};
u8 i2c_status;
u32 length;
u64 address;
u8 reserved[48];
};
#define ATTO_FUNC_TRACE 0x03
#define ATTO_VER_TRACE0 0
#define ATTO_VER_TRACE1 1
#define ATTO_VER_TRACE ATTO_VER_TRACE1
struct __packed atto_hba_trace {
u8 trace_func;
#define ATTO_TRC_TF_GET_INFO 0x00
#define ATTO_TRC_TF_ENABLE 0x01
#define ATTO_TRC_TF_DISABLE 0x02
#define ATTO_TRC_TF_SET_MASK 0x03
#define ATTO_TRC_TF_UPLOAD 0x04
#define ATTO_TRC_TF_RESET 0x05
u8 trace_type;
#define ATTO_TRC_TT_DRIVER 0x00
#define ATTO_TRC_TT_FWCOREDUMP 0x01
u8 reserved[2];
u32 current_offset;
u32 total_length;
u32 trace_mask;
u8 reserved2[48];
};
#define ATTO_FUNC_SCSI_PASS_THRU 0x04
#define ATTO_VER_SCSI_PASS_THRU0 0
#define ATTO_VER_SCSI_PASS_THRU ATTO_VER_SCSI_PASS_THRU0
struct __packed atto_hba_scsi_pass_thru {
u8 cdb[32];
u8 cdb_length;
u8 req_status;
#define ATTO_SPT_RS_SUCCESS 0x00
#define ATTO_SPT_RS_FAILED 0x01
#define ATTO_SPT_RS_OVERRUN 0x02
#define ATTO_SPT_RS_UNDERRUN 0x03
#define ATTO_SPT_RS_NO_DEVICE 0x04
#define ATTO_SPT_RS_NO_LUN 0x05
#define ATTO_SPT_RS_TIMEOUT 0x06
#define ATTO_SPT_RS_BUS_RESET 0x07
#define ATTO_SPT_RS_ABORTED 0x08
#define ATTO_SPT_RS_BUSY 0x09
#define ATTO_SPT_RS_DEGRADED 0x0A
u8 scsi_status;
u8 sense_length;
u32 flags;
#define ATTO_SPTF_DATA_IN 0x00000001
#define ATTO_SPTF_DATA_OUT 0x00000002
#define ATTO_SPTF_SIMPLE_Q 0x00000004
#define ATTO_SPTF_HEAD_OF_Q 0x00000008
#define ATTO_SPTF_ORDERED_Q 0x00000010
u32 timeout;
u32 target_id;
u8 lun[8];
u32 residual_length;
u8 sense_data[0xFC];
u8 reserved[0x28];
};
#define ATTO_FUNC_GET_DEV_ADDR 0x05
#define ATTO_VER_GET_DEV_ADDR0 0
#define ATTO_VER_GET_DEV_ADDR ATTO_VER_GET_DEV_ADDR0
struct __packed atto_hba_get_device_address {
u8 addr_type;
#define ATTO_GDA_AT_PORT 0x00
#define ATTO_GDA_AT_NODE 0x01
#define ATTO_GDA_AT_MAC 0x02
#define ATTO_GDA_AT_PORTID 0x03
#define ATTO_GDA_AT_UNIQUE 0x04
u8 reserved;
u16 addr_len;
u32 target_id;
u8 address[256];
};
/* The following functions are supported by firmware but do not have any
* associated driver structures
*/
#define ATTO_FUNC_PHY_CTRL 0x06
#define ATTO_FUNC_CONN_CTRL 0x0C
#define ATTO_FUNC_ADAP_CTRL 0x0E
#define ATTO_VER_ADAP_CTRL0 0
#define ATTO_VER_ADAP_CTRL ATTO_VER_ADAP_CTRL0
struct __packed atto_hba_adap_ctrl {
u8 adap_func;
#define ATTO_AC_AF_HARD_RST 0x00
#define ATTO_AC_AF_GET_STATE 0x01
#define ATTO_AC_AF_GET_TEMP 0x02
u8 adap_state;
#define ATTO_AC_AS_UNKNOWN 0x00
#define ATTO_AC_AS_OK 0x01
#define ATTO_AC_AS_RST_SCHED 0x02
#define ATTO_AC_AS_RST_IN_PROG 0x03
#define ATTO_AC_AS_RST_DISC 0x04
#define ATTO_AC_AS_DEGRADED 0x05
#define ATTO_AC_AS_DISABLED 0x06
#define ATTO_AC_AS_TEMP 0x07
u8 reserved[2];
union {
struct {
u8 temp_sensor;
u8 temp_state;
#define ATTO_AC_TS_UNSUPP 0x00
#define ATTO_AC_TS_UNKNOWN 0x01
#define ATTO_AC_TS_INIT_FAILED 0x02
#define ATTO_AC_TS_NORMAL 0x03
#define ATTO_AC_TS_OUT_OF_RANGE 0x04
#define ATTO_AC_TS_FAULT 0x05
signed short temp_value;
signed short temp_lower_lim;
signed short temp_upper_lim;
char temp_desc[32];
u8 reserved2[20];
};
};
};
#define ATTO_FUNC_GET_DEV_INFO 0x0F
#define ATTO_VER_GET_DEV_INFO0 0
#define ATTO_VER_GET_DEV_INFO ATTO_VER_GET_DEV_INFO0
struct __packed atto_hba_sas_device_info {
#define ATTO_SDI_MAX_PHYS_WIDE_PORT 16
u8 phy_id[ATTO_SDI_MAX_PHYS_WIDE_PORT]; /* IDs of parent exp/adapt */
#define ATTO_SDI_PHY_ID_INV ATTO_SAS_PHY_ID_INV
u32 exp_target_id;
u32 sas_port_mask;
u8 sas_level;
#define ATTO_SDI_SAS_LVL_INV 0xFF
u8 slot_num;
#define ATTO_SDI_SLOT_NUM_INV ATTO_SLOT_NUM_INV
u8 dev_type;
#define ATTO_SDI_DT_END_DEVICE 0
#define ATTO_SDI_DT_EXPANDER 1
#define ATTO_SDI_DT_PORT_MULT 2
u8 ini_flags;
u8 tgt_flags;
u8 link_rate; /* SMP_RATE_XXX */
u8 loc_flags;
#define ATTO_SDI_LF_DIRECT 0x01
#define ATTO_SDI_LF_EXPANDER 0x02
#define ATTO_SDI_LF_PORT_MULT 0x04
u8 pm_port;
u8 reserved[0x60];
};
union atto_hba_device_info {
struct atto_hba_sas_device_info sas_dev_info;
};
struct __packed atto_hba_get_device_info {
u32 target_id;
u8 info_type;
#define ATTO_GDI_IT_UNKNOWN 0x00
#define ATTO_GDI_IT_SAS 0x01
#define ATTO_GDI_IT_FC 0x02
#define ATTO_GDI_IT_FCOE 0x03
u8 reserved[11];
union atto_hba_device_info dev_info;
};
struct atto_ioctl {
u8 version;
u8 function; /* ATTO_FUNC_XXX */
u8 status;
#define ATTO_STS_SUCCESS 0x00
#define ATTO_STS_FAILED 0x01
#define ATTO_STS_INV_VERSION 0x02
#define ATTO_STS_OUT_OF_RSRC 0x03
#define ATTO_STS_INV_FUNC 0x04
#define ATTO_STS_UNSUPPORTED 0x05
#define ATTO_STS_INV_ADAPTER 0x06
#define ATTO_STS_INV_DRVR_VER 0x07
#define ATTO_STS_INV_PARAM 0x08
#define ATTO_STS_TIMEOUT 0x09
#define ATTO_STS_NOT_APPL 0x0A
#define ATTO_STS_DEGRADED 0x0B
u8 flags;
#define HBAF_TUNNEL 0x01
u32 data_length;
u8 reserved2[56];
union {
u8 byte[1];
struct atto_hba_get_adapter_info get_adap_info;
struct atto_hba_get_adapter_address get_adap_addr;
struct atto_hba_scsi_pass_thru scsi_pass_thru;
struct atto_hba_get_device_address get_dev_addr;
struct atto_hba_adap_ctrl adap_ctrl;
struct atto_hba_get_device_info get_dev_info;
struct atto_hba_trace trace;
} data;
};
struct __packed atto_ioctl_vda_scsi_cmd {
#define ATTO_VDA_SCSI_VER0 0
#define ATTO_VDA_SCSI_VER ATTO_VDA_SCSI_VER0
u8 cdb[16];
u32 flags;
u32 data_length;
u32 residual_length;
u16 target_id;
u8 sense_len;
u8 scsi_stat;
u8 reserved[8];
u8 sense_data[80];
};
struct __packed atto_ioctl_vda_flash_cmd {
#define ATTO_VDA_FLASH_VER0 0
#define ATTO_VDA_FLASH_VER ATTO_VDA_FLASH_VER0
u32 flash_addr;
u32 data_length;
u8 sub_func;
u8 reserved[15];
union {
struct {
u32 flash_size;
u32 page_size;
u8 prod_info[32];
} info;
struct {
char file_name[16]; /* 8.3 fname, NULL term, wc=* */
u32 file_size;
} file;
} data;
};
struct __packed atto_ioctl_vda_diag_cmd {
#define ATTO_VDA_DIAG_VER0 0
#define ATTO_VDA_DIAG_VER ATTO_VDA_DIAG_VER0
u64 local_addr;
u32 data_length;
u8 sub_func;
u8 flags;
u8 reserved[3];
};
struct __packed atto_ioctl_vda_cli_cmd {
#define ATTO_VDA_CLI_VER0 0
#define ATTO_VDA_CLI_VER ATTO_VDA_CLI_VER0
u32 cmd_rsp_len;
};
struct __packed atto_ioctl_vda_smp_cmd {
#define ATTO_VDA_SMP_VER0 0
#define ATTO_VDA_SMP_VER ATTO_VDA_SMP_VER0
u64 dest;
u32 cmd_rsp_len;
};
struct __packed atto_ioctl_vda_cfg_cmd {
#define ATTO_VDA_CFG_VER0 0
#define ATTO_VDA_CFG_VER ATTO_VDA_CFG_VER0
u32 data_length;
u8 cfg_func;
u8 reserved[11];
union {
u8 bytes[112];
struct atto_vda_cfg_init init;
} data;
};
struct __packed atto_ioctl_vda_mgt_cmd {
#define ATTO_VDA_MGT_VER0 0
#define ATTO_VDA_MGT_VER ATTO_VDA_MGT_VER0
u8 mgt_func;
u8 scan_generation;
u16 dev_index;
u32 data_length;
u8 reserved[8];
union {
u8 bytes[112];
struct atto_vda_devinfo dev_info;
struct atto_vda_grp_info grp_info;
struct atto_vdapart_info part_info;
struct atto_vda_dh_info dh_info;
struct atto_vda_metrics_info metrics_info;
struct atto_vda_schedule_info sched_info;
struct atto_vda_n_vcache_info nvcache_info;
struct atto_vda_buzzer_info buzzer_info;
struct atto_vda_adapter_info adapter_info;
struct atto_vda_temp_info temp_info;
struct atto_vda_fan_info fan_info;
} data;
};
struct __packed atto_ioctl_vda_gsv_cmd {
#define ATTO_VDA_GSV_VER0 0
#define ATTO_VDA_GSV_VER ATTO_VDA_GSV_VER0
u8 rsp_len;
u8 reserved[7];
u8 version_info[1];
#define ATTO_VDA_VER_UNSUPPORTED 0xFF
};
struct __packed atto_ioctl_vda {
u8 version;
u8 function; /* VDA_FUNC_XXXX */
u8 status; /* ATTO_STS_XXX */
u8 vda_status; /* RS_XXX (if status == ATTO_STS_SUCCESS) */
u32 data_length;
u8 reserved[8];
union {
struct atto_ioctl_vda_scsi_cmd scsi;
struct atto_ioctl_vda_flash_cmd flash;
struct atto_ioctl_vda_diag_cmd diag;
struct atto_ioctl_vda_cli_cmd cli;
struct atto_ioctl_vda_smp_cmd smp;
struct atto_ioctl_vda_cfg_cmd cfg;
struct atto_ioctl_vda_mgt_cmd mgt;
struct atto_ioctl_vda_gsv_cmd gsv;
u8 cmd_info[256];
} cmd;
union {
u8 data[1];
struct atto_vda_devinfo2 dev_info2;
} data;
};
struct __packed atto_ioctl_smp {
u8 version;
#define ATTO_SMP_VERSION0 0
#define ATTO_SMP_VERSION1 1
#define ATTO_SMP_VERSION2 2
#define ATTO_SMP_VERSION ATTO_SMP_VERSION2
u8 function;
#define ATTO_SMP_FUNC_DISC_SMP 0x00
#define ATTO_SMP_FUNC_DISC_TARG 0x01
#define ATTO_SMP_FUNC_SEND_CMD 0x02
#define ATTO_SMP_FUNC_DISC_TARG_DIRECT 0x03
#define ATTO_SMP_FUNC_SEND_CMD_DIRECT 0x04
#define ATTO_SMP_FUNC_DISC_SMP_DIRECT 0x05
u8 status; /* ATTO_STS_XXX */
u8 smp_status; /* if status == ATTO_STS_SUCCESS */
#define ATTO_SMP_STS_SUCCESS 0x00
#define ATTO_SMP_STS_FAILURE 0x01
#define ATTO_SMP_STS_RESCAN 0x02
#define ATTO_SMP_STS_NOT_FOUND 0x03
u16 target_id;
u8 phy_id;
u8 dev_index;
u64 smp_sas_addr;
u64 targ_sas_addr;
u32 req_length;
u32 rsp_length;
u8 flags;
#define ATTO_SMPF_ROOT_EXP 0x01 /* expander direct attached */
u8 reserved[31];
union {
u8 byte[1];
u32 dword[1];
} data;
};
struct __packed atto_express_ioctl {
struct atto_express_ioctl_header header;
union {
struct atto_firmware_rw_request fwrw;
struct atto_param_rw_request prw;
struct atto_channel_list chanlist;
struct atto_channel_info chaninfo;
struct atto_ioctl ioctl_hba;
struct atto_module_info modinfo;
struct atto_ioctl_vda ioctl_vda;
struct atto_ioctl_smp ioctl_smp;
struct atto_csmi csmi;
} data;
};
/* The struct associated with the code is listed after the definition */
#define EXPRESS_IOCTL_MIN 0x4500
#define EXPRESS_IOCTL_RW_FIRMWARE 0x4500 /* FIRMWARERW */
#define EXPRESS_IOCTL_READ_PARAMS 0x4501 /* PARAMRW */
#define EXPRESS_IOCTL_WRITE_PARAMS 0x4502 /* PARAMRW */
#define EXPRESS_IOCTL_FC_API 0x4503 /* internal */
#define EXPRESS_IOCTL_GET_CHANNELS 0x4504 /* CHANNELLIST */
#define EXPRESS_IOCTL_CHAN_INFO 0x4505 /* CHANNELINFO */
#define EXPRESS_IOCTL_DEFAULT_PARAMS 0x4506 /* PARAMRW */
#define EXPRESS_ADDR_MEMORY 0x4507 /* MEMADDR */
#define EXPRESS_RW_MEMORY 0x4508 /* MEMRW */
#define EXPRESS_TSDK_DUMP 0x4509 /* TSDKDUMP */
#define EXPRESS_IOCTL_SMP 0x450A /* IOCTL_SMP */
#define EXPRESS_CSMI 0x450B /* CSMI */
#define EXPRESS_IOCTL_HBA 0x450C /* IOCTL_HBA */
#define EXPRESS_IOCTL_VDA 0x450D /* IOCTL_VDA */
#define EXPRESS_IOCTL_GET_ID 0x450E /* GET_ID */
#define EXPRESS_IOCTL_GET_MOD_INFO 0x450F /* MODULE_INFO */
#define EXPRESS_IOCTL_MAX 0x450F
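/*
 * Illustrative userspace sketch (compiled out): assuming the driver
 * exposes these codes through an ioctl()-capable file descriptor, a
 * channel query might look like this.  Where the fd comes from is
 * outside this header's scope; requires <sys/ioctl.h> and <string.h>
 * in userspace.
 */
#ifdef EXPRESS_IOCTL_EXAMPLE	/* sketch only, never defined */
static int example_get_channels(int fd, struct atto_express_ioctl *ei)
{
	memset(ei, 0, sizeof(*ei));
	memcpy(ei->header.signature, EXPRESS_IOCTL_SIGNATURE,
	       EXPRESS_IOCTL_SIGNATURE_SIZE);
	if (ioctl(fd, EXPRESS_IOCTL_GET_CHANNELS, ei) < 0)
		return -1;
	if (ei->header.return_code != IOCTL_SUCCESS)
		return -1;
	return (int)ei->data.chanlist.num_channels;
}
#endif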
#endif
/* linux/drivers/scsi/esas2r/atvda.h
* ATTO VDA interface definitions
*
* Copyright (c) 2001-2013 ATTO Technology, Inc.
* (mailto:linuxdrivers@attotech.com)
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* NO WARRANTY
* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
* solely responsible for determining the appropriateness of using and
* distributing the Program and assumes all risks associated with its
* exercise of rights under this Agreement, including but not limited to
* the risks and costs of program errors, damage to or loss of data,
* programs or equipment, and unavailability or interruption of operations.
*
* DISCLAIMER OF LIABILITY
* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
#ifndef ATVDA_H
#define ATVDA_H
struct __packed atto_dev_addr {
u64 dev_port;
u64 hba_port;
u8 lun;
u8 flags;
#define VDA_DEVADDRF_SATA 0x01
#define VDA_DEVADDRF_SSD 0x02
u8 link_speed; /* VDALINKSPEED_xxx */
u8 pad[1];
};
/* dev_addr2 was added for 64-bit alignment */
struct __packed atto_dev_addr2 {
u64 dev_port;
u64 hba_port;
u8 lun;
u8 flags;
u8 link_speed;
u8 pad[5];
};
struct __packed atto_vda_sge {
u32 length;
u64 address;
};
/* VDA request function codes */
#define VDA_FUNC_SCSI 0x00
#define VDA_FUNC_FLASH 0x01
#define VDA_FUNC_DIAG 0x02
#define VDA_FUNC_AE 0x03
#define VDA_FUNC_CLI 0x04
#define VDA_FUNC_IOCTL 0x05
#define VDA_FUNC_CFG 0x06
#define VDA_FUNC_MGT 0x07
#define VDA_FUNC_GSV 0x08
/* VDA request status values. for host driver considerations, values for
* SCSI requests start at zero. other requests may use these values as well. */
#define RS_SUCCESS 0x00 /*! successful completion */
#define RS_INV_FUNC 0x01 /*! invalid command function */
#define RS_BUSY 0x02 /*! insufficient resources */
#define RS_SEL 0x03 /*! no target at target_id */
#define RS_NO_LUN 0x04 /*! invalid LUN */
#define RS_TIMEOUT 0x05 /*! request timeout */
#define RS_OVERRUN 0x06 /*! data overrun */
#define RS_UNDERRUN 0x07 /*! data underrun */
#define RS_SCSI_ERROR 0x08 /*! SCSI error occurred */
#define RS_ABORTED 0x0A /*! command aborted */
#define RS_RESID_MISM 0x0B /*! residual length incorrect */
#define RS_TM_FAILED 0x0C /*! task management failed */
#define RS_RESET 0x0D /*! aborted due to bus reset */
#define RS_ERR_DMA_SG 0x0E /*! error reading SG list */
#define RS_ERR_DMA_DATA 0x0F /*! error transferring data */
#define RS_UNSUPPORTED 0x10 /*! unsupported request */
#define RS_SEL2 0x70 /*! internally generated RS_SEL */
#define RS_VDA_BASE 0x80 /*! base of VDA-specific errors */
#define RS_MGT_BASE 0x80 /*! base of VDA management errors */
#define RS_SCAN_FAIL (RS_MGT_BASE + 0x00)
#define RS_DEV_INVALID (RS_MGT_BASE + 0x01)
#define RS_DEV_ASSIGNED (RS_MGT_BASE + 0x02)
#define RS_DEV_REMOVE (RS_MGT_BASE + 0x03)
#define RS_DEV_LOST (RS_MGT_BASE + 0x04)
#define RS_SCAN_GEN (RS_MGT_BASE + 0x05)
#define RS_GRP_INVALID (RS_MGT_BASE + 0x08)
#define RS_GRP_EXISTS (RS_MGT_BASE + 0x09)
#define RS_GRP_LIMIT (RS_MGT_BASE + 0x0A)
#define RS_GRP_INTLV (RS_MGT_BASE + 0x0B)
#define RS_GRP_SPAN (RS_MGT_BASE + 0x0C)
#define RS_GRP_TYPE (RS_MGT_BASE + 0x0D)
#define RS_GRP_MEMBERS (RS_MGT_BASE + 0x0E)
#define RS_GRP_COMMIT (RS_MGT_BASE + 0x0F)
#define RS_GRP_REBUILD (RS_MGT_BASE + 0x10)
#define RS_GRP_REBUILD_TYPE (RS_MGT_BASE + 0x11)
#define RS_GRP_BLOCK_SIZE (RS_MGT_BASE + 0x12)
#define RS_CFG_SAVE (RS_MGT_BASE + 0x14)
#define RS_PART_LAST (RS_MGT_BASE + 0x18)
#define RS_ELEM_INVALID (RS_MGT_BASE + 0x19)
#define RS_PART_MAPPED (RS_MGT_BASE + 0x1A)
#define RS_PART_TARGET (RS_MGT_BASE + 0x1B)
#define RS_PART_LUN (RS_MGT_BASE + 0x1C)
#define RS_PART_DUP (RS_MGT_BASE + 0x1D)
#define RS_PART_NOMAP (RS_MGT_BASE + 0x1E)
#define RS_PART_MAX (RS_MGT_BASE + 0x1F)
#define RS_PART_CAP (RS_MGT_BASE + 0x20)
#define RS_PART_STATE (RS_MGT_BASE + 0x21)
#define RS_TEST_IN_PROG (RS_MGT_BASE + 0x22)
#define RS_METRICS_ERROR (RS_MGT_BASE + 0x23)
#define RS_HS_ERROR (RS_MGT_BASE + 0x24)
#define RS_NO_METRICS_TEST (RS_MGT_BASE + 0x25)
#define RS_BAD_PARAM (RS_MGT_BASE + 0x26)
#define RS_GRP_MEMBER_SIZE (RS_MGT_BASE + 0x27)
#define RS_FLS_BASE 0xB0 /*! base of VDA errors */
#define RS_FLS_ERR_AREA (RS_FLS_BASE + 0x00)
#define RS_FLS_ERR_BUSY (RS_FLS_BASE + 0x01)
#define RS_FLS_ERR_RANGE (RS_FLS_BASE + 0x02)
#define RS_FLS_ERR_BEGIN (RS_FLS_BASE + 0x03)
#define RS_FLS_ERR_CHECK (RS_FLS_BASE + 0x04)
#define RS_FLS_ERR_FAIL (RS_FLS_BASE + 0x05)
#define RS_FLS_ERR_RSRC (RS_FLS_BASE + 0x06)
#define RS_FLS_ERR_NOFILE (RS_FLS_BASE + 0x07)
#define RS_FLS_ERR_FSIZE (RS_FLS_BASE + 0x08)
#define RS_CFG_BASE 0xC0 /*! base of VDA configuration errors */
#define RS_CFG_ERR_BUSY (RS_CFG_BASE + 0)
#define RS_CFG_ERR_SGE (RS_CFG_BASE + 1)
#define RS_CFG_ERR_DATE (RS_CFG_BASE + 2)
#define RS_CFG_ERR_TIME (RS_CFG_BASE + 3)
#define RS_DEGRADED 0xFB /*! degraded mode */
#define RS_CLI_INTERNAL 0xFC /*! VDA CLI internal error */
#define RS_VDA_INTERNAL 0xFD /*! catch-all */
#define RS_PENDING 0xFE /*! pending, not started */
#define RS_STARTED 0xFF /*! started */
/* flash request subfunctions. these are used in both the IOCTL and the
* driver-firmware interface (VDA_FUNC_FLASH). */
#define VDA_FLASH_BEGINW 0x00
#define VDA_FLASH_READ 0x01
#define VDA_FLASH_WRITE 0x02
#define VDA_FLASH_COMMIT 0x03
#define VDA_FLASH_CANCEL 0x04
#define VDA_FLASH_INFO 0x05
#define VDA_FLASH_FREAD 0x06
#define VDA_FLASH_FWRITE 0x07
#define VDA_FLASH_FINFO 0x08
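/*
 * Illustrative sketch (compiled out): the subfunction names above
 * imply a begin/write/commit flow for flash updates.  example_issue()
 * is a hypothetical stand-in for building and submitting a
 * VDA_FUNC_FLASH request with the given subfunction.
 */
#ifdef VDA_FLASH_EXAMPLE	/* sketch only, never defined */
	example_issue(VDA_FLASH_BEGINW);	/* open a write session */
	while (more_chunks)
		example_issue(VDA_FLASH_WRITE);	/* transfer image data */
	example_issue(VDA_FLASH_COMMIT);	/* or VDA_FLASH_CANCEL */
#endif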
/* IOCTL request subfunctions. these identify the payload type for
* VDA_FUNC_IOCTL.
*/
#define VDA_IOCTL_HBA 0x00
#define VDA_IOCTL_CSMI 0x01
#define VDA_IOCTL_SMP 0x02
struct __packed atto_vda_devinfo {
struct atto_dev_addr dev_addr;
u8 vendor_id[8];
u8 product_id[16];
u8 revision[4];
u64 capacity;
u32 block_size;
u8 dev_type;
union {
u8 dev_status;
#define VDADEVSTAT_INVALID 0x00
#define VDADEVSTAT_CORRUPT VDADEVSTAT_INVALID
#define VDADEVSTAT_ASSIGNED 0x01
#define VDADEVSTAT_SPARE 0x02
#define VDADEVSTAT_UNAVAIL 0x03
#define VDADEVSTAT_PT_MAINT 0x04
#define VDADEVSTAT_LCLSPARE 0x05
#define VDADEVSTAT_UNUSEABLE 0x06
#define VDADEVSTAT_AVAIL 0xFF
u8 op_ctrl;
#define VDA_DEV_OP_CTRL_START 0x01
#define VDA_DEV_OP_CTRL_HALT 0x02
#define VDA_DEV_OP_CTRL_RESUME 0x03
#define VDA_DEV_OP_CTRL_CANCEL 0x04
};
u8 member_state;
#define VDAMBRSTATE_ONLINE 0x00
#define VDAMBRSTATE_DEGRADED 0x01
#define VDAMBRSTATE_UNAVAIL 0x02
#define VDAMBRSTATE_FAULTED 0x03
#define VDAMBRSTATE_MISREAD 0x04
#define VDAMBRSTATE_INCOMPAT 0x05
u8 operation;
#define VDAOP_NONE 0x00
#define VDAOP_REBUILD 0x01
#define VDAOP_ERASE 0x02
#define VDAOP_PATTERN 0x03
#define VDAOP_CONVERSION 0x04
#define VDAOP_FULL_INIT 0x05
#define VDAOP_QUICK_INIT 0x06
#define VDAOP_SECT_SCAN 0x07
#define VDAOP_SECT_SCAN_PARITY 0x08
#define VDAOP_SECT_SCAN_PARITY_FIX 0x09
#define VDAOP_RECOV_REBUILD 0x0A
u8 op_status;
#define VDAOPSTAT_OK 0x00
#define VDAOPSTAT_FAULTED 0x01
#define VDAOPSTAT_HALTED 0x02
#define VDAOPSTAT_INT 0x03
u8 progress; /* 0 - 100% */
u16 ses_dev_index;
#define VDASESDI_INVALID 0xFFFF
u8 serial_no[32];
union {
u16 target_id;
#define VDATGTID_INVALID 0xFFFF
u16 features_mask;
};
u16 lun;
u16 features;
#define VDADEVFEAT_ENC_SERV 0x0001
#define VDADEVFEAT_IDENT 0x0002
#define VDADEVFEAT_DH_SUPP 0x0004
#define VDADEVFEAT_PHYS_ID 0x0008
u8 ses_element_id;
u8 link_speed;
#define VDALINKSPEED_UNKNOWN 0x00
#define VDALINKSPEED_1GB 0x01
#define VDALINKSPEED_1_5GB 0x02
#define VDALINKSPEED_2GB 0x03
#define VDALINKSPEED_3GB 0x04
#define VDALINKSPEED_4GB 0x05
#define VDALINKSPEED_6GB 0x06
#define VDALINKSPEED_8GB 0x07
u16 phys_target_id;
u8 reserved[2];
};
/*! struct atto_vda_devinfo2 is a replacement for atto_vda_devinfo.  It
* extends beyond the 0x70 bytes allowed in atto_vda_mgmt_req; therefore,
* the entire structure is DMAed between the firmware and host buffer and
* the data will always be in little endian format.  See the sketch after
* the structure for host-side access.
*/
struct __packed atto_vda_devinfo2 {
struct atto_dev_addr dev_addr;
u8 vendor_id[8];
u8 product_id[16];
u8 revision[4];
u64 capacity;
u32 block_size;
u8 dev_type;
u8 dev_status;
u8 member_state;
u8 operation;
u8 op_status;
u8 progress;
u16 ses_dev_index;
u8 serial_no[32];
union {
u16 target_id;
u16 features_mask;
};
u16 lun;
u16 features;
u8 ses_element_id;
u8 link_speed;
u16 phys_target_id;
u8 reserved[2];
/* This is where fields specific to struct atto_vda_devinfo2 begin. Note
* that the structure version started at one so applications that unionize this
* structure with atto_vda_dev_info can differentiate them if desired.
*/
u8 version;
#define VDADEVINFO_VERSION0 0x00
#define VDADEVINFO_VERSION1 0x01
#define VDADEVINFO_VERSION2 0x02
#define VDADEVINFO_VERSION3 0x03
#define VDADEVINFO_VERSION VDADEVINFO_VERSION3
u8 reserved2[3];
/* sector scanning fields */
u32 ss_curr_errors;
u64 ss_curr_scanned;
u32 ss_curr_recvrd;
u32 ss_scan_length;
u32 ss_total_errors;
u32 ss_total_recvrd;
u32 ss_num_scans;
/* grp_name was added in version 2 of this structure. */
char grp_name[15];
u8 reserved3[4];
/* dev_addr_list was added in version 3 of this structure. */
u8 num_dev_addr;
struct atto_dev_addr2 dev_addr_list[8];
};
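/*
 * Illustrative sketch: host-side access to atto_vda_devinfo2.  The
 * buffer is always little endian (see the comment preceding the
 * structure) and grp_name only exists from version 2, so both are
 * checked.  le64_to_cpu() is the usual kernel helper; the __force
 * cast is needed because the field is declared as a plain integer
 * rather than a __le type.
 */
static inline const char *
example_devinfo2_grp_name(const struct atto_vda_devinfo2 *d, u64 *capacity)
{
	*capacity = le64_to_cpu((__force __le64)d->capacity);
	return d->version >= VDADEVINFO_VERSION2 ? d->grp_name : NULL;
}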
struct __packed atto_vda_grp_info {
u8 grp_index;
#define VDA_MAX_RAID_GROUPS 32
char grp_name[15];
u64 capacity;
u32 block_size;
u32 interleave;
u8 type;
#define VDA_GRP_TYPE_RAID0 0
#define VDA_GRP_TYPE_RAID1 1
#define VDA_GRP_TYPE_RAID4 4
#define VDA_GRP_TYPE_RAID5 5
#define VDA_GRP_TYPE_RAID6 6
#define VDA_GRP_TYPE_RAID10 10
#define VDA_GRP_TYPE_RAID40 40
#define VDA_GRP_TYPE_RAID50 50
#define VDA_GRP_TYPE_RAID60 60
#define VDA_GRP_TYPE_DVRAID_HS 252
#define VDA_GRP_TYPE_DVRAID_NOHS 253
#define VDA_GRP_TYPE_JBOD 254
#define VDA_GRP_TYPE_SPARE 255
union {
u8 status;
#define VDA_GRP_STAT_INVALID 0x00
#define VDA_GRP_STAT_NEW 0x01
#define VDA_GRP_STAT_WAITING 0x02
#define VDA_GRP_STAT_ONLINE 0x03
#define VDA_GRP_STAT_DEGRADED 0x04
#define VDA_GRP_STAT_OFFLINE 0x05
#define VDA_GRP_STAT_DELETED 0x06
#define VDA_GRP_STAT_RECOV_BASIC 0x07
#define VDA_GRP_STAT_RECOV_EXTREME 0x08
u8 op_ctrl;
#define VDA_GRP_OP_CTRL_START 0x01
#define VDA_GRP_OP_CTRL_HALT 0x02
#define VDA_GRP_OP_CTRL_RESUME 0x03
#define VDA_GRP_OP_CTRL_CANCEL 0x04
};
u8 rebuild_state;
#define VDA_RBLD_NONE 0x00
#define VDA_RBLD_REBUILD 0x01
#define VDA_RBLD_ERASE 0x02
#define VDA_RBLD_PATTERN 0x03
#define VDA_RBLD_CONV 0x04
#define VDA_RBLD_FULL_INIT 0x05
#define VDA_RBLD_QUICK_INIT 0x06
#define VDA_RBLD_SECT_SCAN 0x07
#define VDA_RBLD_SECT_SCAN_PARITY 0x08
#define VDA_RBLD_SECT_SCAN_PARITY_FIX 0x09
#define VDA_RBLD_RECOV_REBUILD 0x0A
#define VDA_RBLD_RECOV_BASIC 0x0B
#define VDA_RBLD_RECOV_EXTREME 0x0C
u8 span_depth;
u8 progress;
u8 mirror_width;
u8 stripe_width;
u8 member_cnt;
union {
u16 members[32];
#define VDA_MEMBER_MISSING 0xFFFF
#define VDA_MEMBER_NEW 0xFFFE
u16 features_mask;
};
u16 features;
#define VDA_GRP_FEAT_HOTSWAP 0x0001
#define VDA_GRP_FEAT_SPDRD_MASK 0x0006
#define VDA_GRP_FEAT_SPDRD_DIS 0x0000
#define VDA_GRP_FEAT_SPDRD_ENB 0x0002
#define VDA_GRP_FEAT_SPDRD_AUTO 0x0004
#define VDA_GRP_FEAT_IDENT 0x0008
#define VDA_GRP_FEAT_RBLDPRI_MASK 0x0030
#define VDA_GRP_FEAT_RBLDPRI_LOW 0x0010
#define VDA_GRP_FEAT_RBLDPRI_SAME 0x0020
#define VDA_GRP_FEAT_RBLDPRI_HIGH 0x0030
#define VDA_GRP_FEAT_WRITE_CACHE 0x0040
#define VDA_GRP_FEAT_RBLD_RESUME 0x0080
#define VDA_GRP_FEAT_SECT_RESUME 0x0100
#define VDA_GRP_FEAT_INIT_RESUME 0x0200
#define VDA_GRP_FEAT_SSD 0x0400
#define VDA_GRP_FEAT_BOOT_DEV 0x0800
/*
* For backward compatibility, a prefetch value of zero means the
* setting is ignored/unsupported.  The firmware-supported values 0-6
* are therefore incremented to 1-7 on the wire; see the sketch after
* this structure.
*/
u8 prefetch;
u8 op_status;
#define VDAGRPOPSTAT_MASK 0x0F
#define VDAGRPOPSTAT_INVALID 0x00
#define VDAGRPOPSTAT_OK 0x01
#define VDAGRPOPSTAT_FAULTED 0x02
#define VDAGRPOPSTAT_HALTED 0x03
#define VDAGRPOPSTAT_INT 0x04
#define VDAGRPOPPROC_MASK 0xF0
#define VDAGRPOPPROC_STARTABLE 0x10
#define VDAGRPOPPROC_CANCELABLE 0x20
#define VDAGRPOPPROC_RESUMABLE 0x40
#define VDAGRPOPPROC_HALTABLE 0x80
u8 over_provision;
u8 reserved[3];
};
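/*
 * Illustrative sketch of the prefetch encoding described inside
 * struct atto_vda_grp_info above (hypothetical helper names):
 * firmware values 0-6 travel as 1-7, with 0 reserved to mean
 * ignored/unsupported.
 */
static inline u8 example_prefetch_encode(u8 fw_value)
{
	return fw_value + 1;		/* 0-6 -> 1-7 */
}

static inline u8 example_prefetch_decode(u8 wire_value)
{
	return wire_value - 1;		/* only valid when wire_value != 0 */
}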
struct __packed atto_vdapart_info {
u8 part_no;
#define VDA_MAX_PARTITIONS 128
char grp_name[15];
u64 part_size;
u64 start_lba;
u32 block_size;
u16 target_id;
u8 LUN;
char serial_no[41];
u8 features;
#define VDAPI_FEAT_WRITE_CACHE 0x01
u8 reserved[7];
};
struct __packed atto_vda_dh_info {
u8 req_type;
#define VDADH_RQTYPE_CACHE 0x01
#define VDADH_RQTYPE_FETCH 0x02
#define VDADH_RQTYPE_SET_STAT 0x03
#define VDADH_RQTYPE_GET_STAT 0x04
u8 req_qual;
#define VDADH_RQQUAL_SMART 0x01
#define VDADH_RQQUAL_MEDDEF 0x02
#define VDADH_RQQUAL_INFOEXC 0x04
u8 num_smart_attribs;
u8 status;
#define VDADH_STAT_DISABLE 0x00
#define VDADH_STAT_ENABLE 0x01
u32 med_defect_cnt;
u32 info_exc_cnt;
u8 smart_status;
#define VDADH_SMARTSTAT_OK 0x00
#define VDADH_SMARTSTAT_ERR 0x01
u8 reserved[35];
struct atto_vda_sge sge[1];
};
struct __packed atto_vda_dh_smart {
u8 attrib_id;
u8 current_val;
u8 worst;
u8 threshold;
u8 raw_data[6];
u8 raw_attrib_status;
#define VDADHSM_RAWSTAT_PREFAIL_WARRANTY 0x01
#define VDADHSM_RAWSTAT_ONLINE_COLLECTION 0x02
#define VDADHSM_RAWSTAT_PERFORMANCE_ATTR 0x04
#define VDADHSM_RAWSTAT_ERROR_RATE_ATTR 0x08
#define VDADHSM_RAWSTAT_EVENT_COUNT_ATTR 0x10
#define VDADHSM_RAWSTAT_SELF_PRESERVING_ATTR 0x20
u8 calc_attrib_status;
#define VDADHSM_CALCSTAT_UNKNOWN 0x00
#define VDADHSM_CALCSTAT_GOOD 0x01
#define VDADHSM_CALCSTAT_PREFAIL 0x02
#define VDADHSM_CALCSTAT_OLDAGE 0x03
u8 reserved[4];
};
struct __packed atto_vda_metrics_info {
u8 data_version;
#define VDAMET_VERSION0 0x00
#define VDAMET_VERSION VDAMET_VERSION0
u8 metrics_action;
#define VDAMET_METACT_NONE 0x00
#define VDAMET_METACT_START 0x01
#define VDAMET_METACT_STOP 0x02
#define VDAMET_METACT_RETRIEVE 0x03
#define VDAMET_METACT_CLEAR 0x04
u8 test_action;
#define VDAMET_TSTACT_NONE 0x00
#define VDAMET_TSTACT_STRT_INIT 0x01
#define VDAMET_TSTACT_STRT_READ 0x02
#define VDAMET_TSTACT_STRT_VERIFY 0x03
#define VDAMET_TSTACT_STRT_INIT_VERIFY 0x04
#define VDAMET_TSTACT_STOP 0x05
u8 num_dev_indexes;
#define VDAMET_ALL_DEVICES 0xFF
u16 dev_indexes[32];
u8 reserved[12];
struct atto_vda_sge sge[1];
};
struct __packed atto_vda_metrics_data {
u16 dev_index;
u16 length;
#define VDAMD_LEN_LAST 0x8000
#define VDAMD_LEN_MASK 0x0FFF
u32 flags;
#define VDAMDF_RUN 0x00000007
#define VDAMDF_RUN_READ 0x00000001
#define VDAMDF_RUN_WRITE 0x00000002
#define VDAMDF_RUN_ALL 0x00000004
#define VDAMDF_READ 0x00000010
#define VDAMDF_WRITE 0x00000020
#define VDAMDF_ALL 0x00000040
#define VDAMDF_DRIVETEST 0x40000000
#define VDAMDF_NEW 0x80000000
u64 total_read_data;
u64 total_write_data;
u64 total_read_io;
u64 total_write_io;
u64 read_start_time;
u64 read_stop_time;
u64 write_start_time;
u64 write_stop_time;
u64 read_maxio_time;
u64 wpvdadmetricsdatarite_maxio_time;
u64 read_totalio_time;
u64 write_totalio_time;
u64 read_total_errs;
u64 write_total_errs;
u64 read_recvrd_errs;
u64 write_recvrd_errs;
u64 miscompares;
};
struct __packed atto_vda_schedule_info {
u8 schedule_type;
#define VDASI_SCHTYPE_ONETIME 0x01
#define VDASI_SCHTYPE_DAILY 0x02
#define VDASI_SCHTYPE_WEEKLY 0x03
u8 operation;
#define VDASI_OP_NONE 0x00
#define VDASI_OP_CREATE 0x01
#define VDASI_OP_CANCEL 0x02
u8 hour;
u8 minute;
u8 day;
#define VDASI_DAY_NONE 0x00
u8 progress;
#define VDASI_PROG_NONE 0xFF
u8 event_type;
#define VDASI_EVTTYPE_SECT_SCAN 0x01
#define VDASI_EVTTYPE_SECT_SCAN_PARITY 0x02
#define VDASI_EVTTYPE_SECT_SCAN_PARITY_FIX 0x03
u8 recurrences;
#define VDASI_RECUR_FOREVER 0x00
u32 id;
#define VDASI_ID_NONE 0x00
char grp_name[15];
u8 reserved[85];
};
struct __packed atto_vda_n_vcache_info {
u8 super_cap_status;
#define VDANVCI_SUPERCAP_NOT_PRESENT 0x00
#define VDANVCI_SUPERCAP_FULLY_CHARGED 0x01
#define VDANVCI_SUPERCAP_NOT_CHARGED 0x02
u8 nvcache_module_status;
#define VDANVCI_NVCACHEMODULE_NOT_PRESENT 0x00
#define VDANVCI_NVCACHEMODULE_PRESENT 0x01
u8 protection_mode;
#define VDANVCI_PROTMODE_HI_PROTECT 0x00
#define VDANVCI_PROTMODE_HI_PERFORM 0x01
u8 reserved[109];
};
struct __packed atto_vda_buzzer_info {
u8 status;
#define VDABUZZI_BUZZER_OFF 0x00
#define VDABUZZI_BUZZER_ON 0x01
#define VDABUZZI_BUZZER_LAST 0x02
u8 reserved[3];
u32 duration;
#define VDABUZZI_DURATION_INDEFINITE 0xffffffff
u8 reserved2[104];
};
struct __packed atto_vda_adapter_info {
u8 version;
#define VDAADAPINFO_VERSION0 0x00
#define VDAADAPINFO_VERSION VDAADAPINFO_VERSION0
u8 reserved;
signed short utc_offset;
u32 utc_time;
u32 features;
#define VDA_ADAP_FEAT_IDENT 0x0001
#define VDA_ADAP_FEAT_BUZZ_ERR 0x0002
#define VDA_ADAP_FEAT_UTC_TIME 0x0004
u32 valid_features;
char active_config[33];
u8 temp_count;
u8 fan_count;
u8 reserved3[61];
};
struct __packed atto_vda_temp_info {
u8 temp_index;
u8 max_op_temp;
u8 min_op_temp;
u8 op_temp_warn;
u8 temperature;
u8 type;
#define VDA_TEMP_TYPE_CPU 1
u8 reserved[106];
};
struct __packed atto_vda_fan_info {
u8 fan_index;
u8 status;
#define VDA_FAN_STAT_UNKNOWN 0
#define VDA_FAN_STAT_NORMAL 1
#define VDA_FAN_STAT_FAIL 2
u16 crit_pvdafaninfothreshold;
u16 warn_threshold;
u16 speed;
u8 reserved[104];
};
/* VDA management commands */
#define VDAMGT_DEV_SCAN 0x00
#define VDAMGT_DEV_INFO 0x01
#define VDAMGT_DEV_CLEAN 0x02
#define VDAMGT_DEV_IDENTIFY 0x03
#define VDAMGT_DEV_IDENTSTOP 0x04
#define VDAMGT_DEV_PT_INFO 0x05
#define VDAMGT_DEV_FEATURES 0x06
#define VDAMGT_DEV_PT_FEATURES 0x07
#define VDAMGT_DEV_HEALTH_REQ 0x08
#define VDAMGT_DEV_METRICS 0x09
#define VDAMGT_DEV_INFO2 0x0A
#define VDAMGT_DEV_OPERATION 0x0B
#define VDAMGT_DEV_INFO2_BYADDR 0x0C
#define VDAMGT_GRP_INFO 0x10
#define VDAMGT_GRP_CREATE 0x11
#define VDAMGT_GRP_DELETE 0x12
#define VDAMGT_ADD_STORAGE 0x13
#define VDAMGT_MEMBER_ADD 0x14
#define VDAMGT_GRP_COMMIT 0x15
#define VDAMGT_GRP_REBUILD 0x16
#define VDAMGT_GRP_COMMIT_INIT 0x17
#define VDAMGT_QUICK_RAID 0x18
#define VDAMGT_GRP_FEATURES 0x19
#define VDAMGT_GRP_COMMIT_INIT_AUTOMAP 0x1A
#define VDAMGT_QUICK_RAID_INIT_AUTOMAP 0x1B
#define VDAMGT_GRP_OPERATION 0x1C
#define VDAMGT_CFG_SAVE 0x20
#define VDAMGT_LAST_ERROR 0x21
#define VDAMGT_ADAP_INFO 0x22
#define VDAMGT_ADAP_FEATURES 0x23
#define VDAMGT_TEMP_INFO 0x24
#define VDAMGT_FAN_INFO 0x25
#define VDAMGT_PART_INFO 0x30
#define VDAMGT_PART_MAP 0x31
#define VDAMGT_PART_UNMAP 0x32
#define VDAMGT_PART_AUTOMAP 0x33
#define VDAMGT_PART_SPLIT 0x34
#define VDAMGT_PART_MERGE 0x35
#define VDAMGT_SPARE_LIST 0x40
#define VDAMGT_SPARE_ADD 0x41
#define VDAMGT_SPARE_REMOVE 0x42
#define VDAMGT_LOCAL_SPARE_ADD 0x43
#define VDAMGT_SCHEDULE_EVENT 0x50
#define VDAMGT_SCHEDULE_INFO 0x51
#define VDAMGT_NVCACHE_INFO 0x60
#define VDAMGT_NVCACHE_SET 0x61
#define VDAMGT_BUZZER_INFO 0x70
#define VDAMGT_BUZZER_SET 0x71
struct __packed atto_vda_ae_hdr {
u8 bylength;
u8 byflags;
#define VDAAE_HDRF_EVENT_ACK 0x01
u8 byversion;
#define VDAAE_HDR_VER_0 0
u8 bytype;
#define VDAAE_HDR_TYPE_RAID 1
#define VDAAE_HDR_TYPE_LU 2
#define VDAAE_HDR_TYPE_DISK 3
#define VDAAE_HDR_TYPE_RESET 4
#define VDAAE_HDR_TYPE_LOG_INFO 5
#define VDAAE_HDR_TYPE_LOG_WARN 6
#define VDAAE_HDR_TYPE_LOG_CRIT 7
#define VDAAE_HDR_TYPE_LOG_FAIL 8
#define VDAAE_HDR_TYPE_NVC 9
#define VDAAE_HDR_TYPE_TLG_INFO 10
#define VDAAE_HDR_TYPE_TLG_WARN 11
#define VDAAE_HDR_TYPE_TLG_CRIT 12
#define VDAAE_HDR_TYPE_PWRMGT 13
#define VDAAE_HDR_TYPE_MUTE 14
#define VDAAE_HDR_TYPE_DEV 15
};
struct __packed atto_vda_ae_raid {
struct atto_vda_ae_hdr hdr;
u32 dwflags;
#define VDAAE_GROUP_STATE 0x00000001
#define VDAAE_RBLD_STATE 0x00000002
#define VDAAE_RBLD_PROG 0x00000004
#define VDAAE_MEMBER_CHG 0x00000008
#define VDAAE_PART_CHG 0x00000010
#define VDAAE_MEM_STATE_CHG 0x00000020
u8 bygroup_state;
#define VDAAE_RAID_INVALID 0
#define VDAAE_RAID_NEW 1
#define VDAAE_RAID_WAITING 2
#define VDAAE_RAID_ONLINE 3
#define VDAAE_RAID_DEGRADED 4
#define VDAAE_RAID_OFFLINE 5
#define VDAAE_RAID_DELETED 6
#define VDAAE_RAID_BASIC 7
#define VDAAE_RAID_EXTREME 8
#define VDAAE_RAID_UNKNOWN 9
u8 byrebuild_state;
#define VDAAE_RBLD_NONE 0
#define VDAAE_RBLD_REBUILD 1
#define VDAAE_RBLD_ERASE 2
#define VDAAE_RBLD_PATTERN 3
#define VDAAE_RBLD_CONV 4
#define VDAAE_RBLD_FULL_INIT 5
#define VDAAE_RBLD_QUICK_INIT 6
#define VDAAE_RBLD_SECT_SCAN 7
#define VDAAE_RBLD_SECT_SCAN_PARITY 8
#define VDAAE_RBLD_SECT_SCAN_PARITY_FIX 9
#define VDAAE_RBLD_RECOV_REBUILD 10
#define VDAAE_RBLD_UNKNOWN 11
u8 byrebuild_progress;
u8 op_status;
#define VDAAE_GRPOPSTAT_MASK 0x0F
#define VDAAE_GRPOPSTAT_INVALID 0x00
#define VDAAE_GRPOPSTAT_OK 0x01
#define VDAAE_GRPOPSTAT_FAULTED 0x02
#define VDAAE_GRPOPSTAT_HALTED 0x03
#define VDAAE_GRPOPSTAT_INT 0x04
#define VDAAE_GRPOPPROC_MASK 0xF0
#define VDAAE_GRPOPPROC_STARTABLE 0x10
#define VDAAE_GRPOPPROC_CANCELABLE 0x20
#define VDAAE_GRPOPPROC_RESUMABLE 0x40
#define VDAAE_GRPOPPROC_HALTABLE 0x80
char acname[15];
u8 byreserved;
u8 byreserved2[0x80 - 0x1C];
};
struct __packed atto_vda_ae_lu_tgt_lun {
u16 wtarget_id;
u8 bylun;
u8 byreserved;
};
struct __packed atto_vda_ae_lu_tgt_lun_raid {
u16 wtarget_id;
u8 bylun;
u8 byreserved;
u32 dwinterleave;
u32 dwblock_size;
};
struct __packed atto_vda_ae_lu {
struct atto_vda_ae_hdr hdr;
u32 dwevent;
#define VDAAE_LU_DISC 0x00000001
#define VDAAE_LU_LOST 0x00000002
#define VDAAE_LU_STATE 0x00000004
#define VDAAE_LU_PASSTHROUGH 0x10000000
#define VDAAE_LU_PHYS_ID 0x20000000
u8 bystate;
#define VDAAE_LU_UNDEFINED 0
#define VDAAE_LU_NOT_PRESENT 1
#define VDAAE_LU_OFFLINE 2
#define VDAAE_LU_ONLINE 3
#define VDAAE_LU_DEGRADED 4
#define VDAAE_LU_FACTORY_DISABLED 5
#define VDAAE_LU_DELETED 6
#define VDAAE_LU_BUSSCAN 7
#define VDAAE_LU_UNKNOWN 8
u8 byreserved;
u16 wphys_target_id;
union {
struct atto_vda_ae_lu_tgt_lun tgtlun;
struct atto_vda_ae_lu_tgt_lun_raid tgtlun_raid;
} id;
};
struct __packed atto_vda_ae_disk {
struct atto_vda_ae_hdr hdr;
};
#define VDAAE_LOG_STRSZ 64
struct __packed atto_vda_ae_log {
struct atto_vda_ae_hdr hdr;
char aclog_ascii[VDAAE_LOG_STRSZ];
};
#define VDAAE_TLG_STRSZ 56
struct __packed atto_vda_ae_timestamp_log {
struct atto_vda_ae_hdr hdr;
u32 dwtimestamp;
char aclog_ascii[VDAAE_TLG_STRSZ];
};
struct __packed atto_vda_ae_nvc {
struct atto_vda_ae_hdr hdr;
};
struct __packed atto_vda_ae_dev {
struct atto_vda_ae_hdr hdr;
struct atto_dev_addr devaddr;
};
union atto_vda_ae {
struct atto_vda_ae_hdr hdr;
struct atto_vda_ae_disk disk;
struct atto_vda_ae_lu lu;
struct atto_vda_ae_raid raid;
struct atto_vda_ae_log log;
struct atto_vda_ae_timestamp_log tslog;
struct atto_vda_ae_nvc nvcache;
struct atto_vda_ae_dev dev;
};
struct __packed atto_vda_date_and_time {
u8 flags;
#define VDA_DT_DAY_MASK 0x07
#define VDA_DT_DAY_NONE 0x00
#define VDA_DT_DAY_SUN 0x01
#define VDA_DT_DAY_MON 0x02
#define VDA_DT_DAY_TUE 0x03
#define VDA_DT_DAY_WED 0x04
#define VDA_DT_DAY_THU 0x05
#define VDA_DT_DAY_FRI 0x06
#define VDA_DT_DAY_SAT 0x07
#define VDA_DT_PM 0x40
#define VDA_DT_MILITARY 0x80
u8 seconds;
u8 minutes;
u8 hours;
u8 day;
u8 month;
u16 year;
};
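/*
 * Illustrative sketch: encoding "Wednesday, 14:30, 24-hour clock"
 * with the flag bits above.
 */
static inline void example_fill_dt(struct atto_vda_date_and_time *dt)
{
	memset(dt, 0, sizeof(*dt));
	dt->flags = VDA_DT_DAY_WED | VDA_DT_MILITARY;
	dt->hours = 14;
	dt->minutes = 30;
}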
#define SGE_LEN_LIMIT 0x003FFFFF /*! mask of segment length */
#define SGE_LEN_MAX 0x003FF000 /*! maximum segment length */
#define SGE_LAST 0x01000000 /*! last entry */
#define SGE_ADDR_64 0x04000000 /*! 64-bit addressing flag */
#define SGE_CHAIN 0x80000000 /*! chain descriptor flag */
#define SGE_CHAIN_LEN 0x0000FFFF /*! mask of length in chain entries */
#define SGE_CHAIN_SZ 0x00FF0000 /*! mask of size of chained buffer */
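/*
 * Illustrative sketch: these masks suggest that the control flags
 * share the 32-bit SGE length word (an assumption here, flagged as
 * such).  A final 64-bit-addressed entry might then be built like
 * this:
 */
static inline void example_fill_last_sge(struct atto_vda_sge *sge,
					 u64 addr, u32 len)
{
	sge->address = addr;
	sge->length = (len & SGE_LEN_LIMIT) | SGE_LAST | SGE_ADDR_64;
}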
struct __packed atto_vda_cfg_init {
struct atto_vda_date_and_time date_time;
u32 sgl_page_size;
u32 vda_version;
u32 fw_version;
u32 fw_build;
u32 fw_release;
u32 epoch_time;
u32 ioctl_tunnel;
#define VDA_ITF_MEM_RW 0x00000001
#define VDA_ITF_TRACE 0x00000002
#define VDA_ITF_SCSI_PASS_THRU 0x00000004
#define VDA_ITF_GET_DEV_ADDR 0x00000008
#define VDA_ITF_PHY_CTRL 0x00000010
#define VDA_ITF_CONN_CTRL 0x00000020
#define VDA_ITF_GET_DEV_INFO 0x00000040
u32 num_targets_backend;
u8 reserved[0x48];
};
/* configuration commands */
#define VDA_CFG_INIT 0x00
#define VDA_CFG_GET_INIT 0x01
#define VDA_CFG_GET_INIT2 0x02
/*! physical region descriptor (PRD) aka scatter/gather entry */
struct __packed atto_physical_region_description {
u64 address;
u32 ctl_len;
#define PRD_LEN_LIMIT 0x003FFFFF
#define PRD_LEN_MAX 0x003FF000
#define PRD_NXT_PRD_CNT 0x0000007F
#define PRD_CHAIN 0x01000000
#define PRD_DATA 0x00000000
#define PRD_INT_SEL 0xF0000000
#define PRD_INT_SEL_F0 0x00000000
#define PRD_INT_SEL_F1 0x40000000
#define PRD_INT_SEL_F2 0x80000000
#define PRD_INT_SEL_F3 0xc0000000
#define PRD_INT_SEL_SRAM 0x10000000
#define PRD_INT_SEL_PBSR 0x20000000
};
/* Request types. NOTE that ALL requests have the same layout for the first
* few bytes.
*/
struct __packed atto_vda_req_header {
u32 length;
u8 function;
u8 variable1;
u8 chain_offset;
u8 sg_list_offset;
u32 handle;
};
#define FCP_CDB_SIZE 16
struct __packed atto_vda_scsi_req {
u32 length;
u8 function; /* VDA_FUNC_SCSI */
u8 sense_len;
u8 chain_offset;
u8 sg_list_offset;
u32 handle;
u32 flags;
#define FCP_CMND_LUN_MASK 0x000000FF
#define FCP_CMND_TA_MASK 0x00000700
#define FCP_CMND_TA_SIMPL_Q 0x00000000
#define FCP_CMND_TA_HEAD_Q 0x00000100
#define FCP_CMND_TA_ORDRD_Q 0x00000200
#define FCP_CMND_TA_ACA 0x00000400
#define FCP_CMND_PRI_MASK 0x00007800
#define FCP_CMND_TM_MASK 0x00FF0000
#define FCP_CMND_ATS 0x00020000
#define FCP_CMND_CTS 0x00040000
#define FCP_CMND_LRS 0x00100000
#define FCP_CMND_TRS 0x00200000
#define FCP_CMND_CLA 0x00400000
#define FCP_CMND_TRM 0x00800000
#define FCP_CMND_DATA_DIR 0x03000000
#define FCP_CMND_WRD 0x01000000
#define FCP_CMND_RDD 0x02000000
u8 cdb[FCP_CDB_SIZE];
union {
struct __packed {
u64 ppsense_buf;
u16 target_id;
u8 iblk_cnt_prd;
u8 reserved;
};
struct atto_physical_region_description sense_buff_prd;
};
union {
struct atto_vda_sge sge[1];
u32 abort_handle;
u32 dwords[245];
struct atto_physical_region_description prd[1];
} u;
};
struct __packed atto_vda_flash_req {
u32 length;
u8 function; /* VDA_FUNC_FLASH */
u8 sub_func;
u8 chain_offset;
u8 sg_list_offset;
u32 handle;
u32 flash_addr;
u8 checksum;
u8 rsvd[3];
union {
struct {
char file_name[16]; /* 8.3 file name, NUL-terminated; '*' = wildcard */
struct atto_vda_sge sge[1];
} file;
struct atto_vda_sge sge[1];
struct atto_physical_region_description prde[2];
} data;
};
struct __packed atto_vda_diag_req {
u32 length;
u8 function; /* VDA_FUNC_DIAG */
u8 sub_func;
#define VDA_DIAG_STATUS 0x00
#define VDA_DIAG_RESET 0x01
#define VDA_DIAG_PAUSE 0x02
#define VDA_DIAG_RESUME 0x03
#define VDA_DIAG_READ 0x04
#define VDA_DIAG_WRITE 0x05
u8 chain_offset;
u8 sg_list_offset;
u32 handle;
u32 rsvd;
u64 local_addr;
struct atto_vda_sge sge[1];
};
struct __packed atto_vda_ae_req {
u32 length;
u8 function; /* VDA_FUNC_AE */
u8 reserved1;
u8 chain_offset;
u8 sg_list_offset;
u32 handle;
union {
struct atto_vda_sge sge[1];
struct atto_physical_region_description prde[1];
};
};
struct __packed atto_vda_cli_req {
u32 length;
u8 function; /* VDA_FUNC_CLI */
u8 reserved1;
u8 chain_offset;
u8 sg_list_offset;
u32 handle;
u32 cmd_rsp_len;
struct atto_vda_sge sge[1];
};
struct __packed atto_vda_ioctl_req {
u32 length;
u8 function; /* VDA_FUNC_IOCTL */
u8 sub_func;
u8 chain_offset;
u8 sg_list_offset;
u32 handle;
union {
struct atto_vda_sge reserved_sge;
struct atto_physical_region_description reserved_prde;
};
union {
struct {
u32 ctrl_code;
u16 target_id;
u8 lun;
u8 reserved;
} csmi;
};
union {
struct atto_vda_sge sge[1];
struct atto_physical_region_description prde[1];
};
};
struct __packed atto_vda_cfg_req {
u32 length;
u8 function; /* VDA_FUNC_CFG */
u8 sub_func;
u8 rsvd1;
u8 sg_list_offset;
u32 handle;
union {
u8 bytes[116];
struct atto_vda_cfg_init init;
struct atto_vda_sge sge;
struct atto_physical_region_description prde;
} data;
};
struct __packed atto_vda_mgmt_req {
u32 length;
u8 function; /* VDA_FUNC_MGT */
u8 mgt_func;
u8 chain_offset;
u8 sg_list_offset;
u32 handle;
u8 scan_generation;
u8 payld_sglst_offset;
u16 dev_index;
u32 payld_length;
u32 pad;
union {
struct atto_vda_sge sge[2];
struct atto_physical_region_description prde[2];
};
struct atto_vda_sge payld_sge[1];
};
union atto_vda_req {
struct atto_vda_scsi_req scsi;
struct atto_vda_flash_req flash;
struct atto_vda_diag_req diag;
struct atto_vda_ae_req ae;
struct atto_vda_cli_req cli;
struct atto_vda_ioctl_req ioctl;
struct atto_vda_cfg_req cfg;
struct atto_vda_mgmt_req mgt;
u8 bytes[1024];
};
/* Outbound response structures */
struct __packed atto_vda_scsi_rsp {
u8 scsi_stat;
u8 sense_len;
u8 rsvd[2];
u32 residual_length;
};
struct __packed atto_vda_flash_rsp {
u32 file_size;
};
struct __packed atto_vda_ae_rsp {
u32 length;
};
struct __packed atto_vda_cli_rsp {
u32 cmd_rsp_len;
};
struct __packed atto_vda_ioctl_rsp {
union {
struct {
u32 csmi_status;
u16 target_id;
u8 lun;
u8 reserved;
} csmi;
};
};
struct __packed atto_vda_cfg_rsp {
u16 vda_version;
u16 fw_release;
u32 fw_build;
};
struct __packed atto_vda_mgmt_rsp {
u32 length;
u16 dev_index;
u8 scan_generation;
};
union atto_vda_func_rsp {
struct atto_vda_scsi_rsp scsi_rsp;
struct atto_vda_flash_rsp flash_rsp;
struct atto_vda_ae_rsp ae_rsp;
struct atto_vda_cli_rsp cli_rsp;
struct atto_vda_ioctl_rsp ioctl_rsp;
struct atto_vda_cfg_rsp cfg_rsp;
struct atto_vda_mgmt_rsp mgt_rsp;
u32 dwords[2];
};
struct __packed atto_vda_ob_rsp {
u32 handle;
u8 req_stat;
u8 rsvd[3];
union atto_vda_func_rsp
func_rsp;
};
struct __packed atto_vda_ae_data {
u8 event_data[256];
};
struct __packed atto_vda_mgmt_data {
union {
u8 bytes[112];
struct atto_vda_devinfo dev_info;
struct atto_vda_grp_info grp_info;
struct atto_vdapart_info part_info;
struct atto_vda_dh_info dev_health_info;
struct atto_vda_metrics_info metrics_info;
struct atto_vda_schedule_info sched_info;
struct atto_vda_n_vcache_info nvcache_info;
struct atto_vda_buzzer_info buzzer_info;
} data;
};
union atto_vda_rsp_data {
struct atto_vda_ae_data ae_data;
struct atto_vda_mgmt_data mgt_data;
u8 sense_data[252];
#define SENSE_DATA_SZ 252
u8 bytes[256];
};
#endif
/*
* linux/drivers/scsi/esas2r/esas2r.h
* For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
*
* Copyright (c) 2001-2013 ATTO Technology, Inc.
* (mailto:linuxdrivers@attotech.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* NO WARRANTY
* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
* solely responsible for determining the appropriateness of using and
* distributing the Program and assumes all risks associated with its
* exercise of rights under this Agreement, including but not limited to
* the risks and costs of program errors, damage to or loss of data,
* programs or equipment, and unavailability or interruption of operations.
*
* DISCLAIMER OF LIABILITY
* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
* USA.
*/
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include "esas2r_log.h"
#include "atioctl.h"
#include "atvda.h"
#ifndef ESAS2R_H
#define ESAS2R_H
/* Global Variables */
extern struct esas2r_adapter *esas2r_adapters[];
extern u8 *esas2r_buffered_ioctl;
extern dma_addr_t esas2r_buffered_ioctl_addr;
extern u32 esas2r_buffered_ioctl_size;
extern struct pci_dev *esas2r_buffered_ioctl_pcid;
#define SGL_PG_SZ_MIN 64
#define SGL_PG_SZ_MAX 1024
extern int sgl_page_size;
#define NUM_SGL_MIN 8
#define NUM_SGL_MAX 2048
extern int num_sg_lists;
#define NUM_REQ_MIN 4
#define NUM_REQ_MAX 256
extern int num_requests;
#define NUM_AE_MIN 2
#define NUM_AE_MAX 8
extern int num_ae_requests;
extern int cmd_per_lun;
extern int can_queue;
extern int esas2r_max_sectors;
extern int sg_tablesize;
extern int interrupt_mode;
extern int num_io_requests;
/* Macro definitions */
#define ESAS2R_MAX_ID 255
#define MAX_ADAPTERS 32
#define ESAS2R_DRVR_NAME "esas2r"
#define ESAS2R_LONGNAME "ATTO ExpressSAS 6GB RAID Adapter"
#define ESAS2R_MAX_DEVICES 32
#define ATTONODE_NAME "ATTONode"
#define ESAS2R_MAJOR_REV 1
#define ESAS2R_MINOR_REV 00
#define ESAS2R_VERSION_STR DEFINED_NUM_TO_STR(ESAS2R_MAJOR_REV) "." \
DEFINED_NUM_TO_STR(ESAS2R_MINOR_REV)
#define ESAS2R_COPYRIGHT_YEARS "2001-2013"
#define ESAS2R_DEFAULT_SGL_PAGE_SIZE 384
#define ESAS2R_DEFAULT_CMD_PER_LUN 64
#define ESAS2R_DEFAULT_NUM_SG_LISTS 1024
#define DEFINED_NUM_TO_STR(num) NUM_TO_STR(num)
#define NUM_TO_STR(num) #num
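/*
 * the two-level expansion stringizes the *value* of the argument, so
 * ESAS2R_VERSION_STR becomes "1" "." "00", i.e. "1.00".
 */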
#define ESAS2R_SGL_ALIGN 16
#define ESAS2R_LIST_ALIGN 16
#define ESAS2R_LIST_EXTRA ESAS2R_NUM_EXTRA
#define ESAS2R_DATA_BUF_LEN 256
#define ESAS2R_DEFAULT_TMO 5000
#define ESAS2R_DISC_BUF_LEN 512
#define ESAS2R_FWCOREDUMP_SZ 0x80000
#define ESAS2R_NUM_PHYS 8
#define ESAS2R_TARG_ID_INV 0xFFFF
#define ESAS2R_INT_STS_MASK MU_INTSTAT_MASK
#define ESAS2R_INT_ENB_MASK MU_INTSTAT_MASK
#define ESAS2R_INT_DIS_MASK 0
#define ESAS2R_MAX_TARGETS 256
#define ESAS2R_KOBJ_NAME_LEN 20
/* u16 (WORD) component macros */
#define LOBYTE(w) ((u8)(u16)(w))
#define HIBYTE(w) ((u8)(((u16)(w)) >> 8))
#define MAKEWORD(lo, hi) ((u16)((u8)(lo) | ((u16)(u8)(hi) << 8)))
/* u32 (DWORD) component macros */
#define LOWORD(d) ((u16)(u32)(d))
#define HIWORD(d) ((u16)(((u32)(d)) >> 16))
#define MAKEDWORD(lo, hi) ((u32)((u16)(lo) | ((u32)(u16)(hi) << 16)))
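/* e.g. MAKEWORD(0x34, 0x12) == 0x1234 and MAKEDWORD(0x5678, 0x1234) == 0x12345678 */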
/* macro to get the lowest nonzero bit of a value */
#define LOBIT(x) ((x) & (0 - (x)))
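/* e.g. LOBIT(0x0018) == 0x0008 */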
/* These functions are provided to access the chip's control registers.
* The register is specified by its byte offset from the register base
* for the adapter.
*/
#define esas2r_read_register_dword(a, reg) \
readl((void __iomem *)(a)->regs + (reg) + MW_REG_OFFSET_HWREG)
#define esas2r_write_register_dword(a, reg, data) \
writel(data, (void __iomem *)(a)->regs + (reg) + MW_REG_OFFSET_HWREG)
#define esas2r_flush_register_dword(a, r) esas2r_read_register_dword(a, r)
/* This function is provided to access the chip's data window. The
* register is specified by its byte offset from the window base
* for the adapter.
*/
#define esas2r_read_data_byte(a, reg) \
readb((void __iomem *)(a)->data_window + (reg))
/* ATTO vendor and device Ids */
#define ATTO_VENDOR_ID 0x117C
#define ATTO_DID_INTEL_IOP348 0x002C
#define ATTO_DID_MV_88RC9580 0x0049
#define ATTO_DID_MV_88RC9580TS 0x0066
#define ATTO_DID_MV_88RC9580TSE 0x0067
#define ATTO_DID_MV_88RC9580TL 0x0068
/* ATTO subsystem device Ids */
#define ATTO_SSDID_TBT 0x4000
#define ATTO_TSSC_3808 0x4066
#define ATTO_TSSC_3808E 0x4067
#define ATTO_TLSH_1068 0x4068
#define ATTO_ESAS_R680 0x0049
#define ATTO_ESAS_R608 0x004A
#define ATTO_ESAS_R60F 0x004B
#define ATTO_ESAS_R6F0 0x004C
#define ATTO_ESAS_R644 0x004D
#define ATTO_ESAS_R648 0x004E
/*
* flash definitions & structures
* define the code types
*/
#define FBT_CPYR 0xAA00
#define FBT_SETUP 0xAA02
#define FBT_FLASH_VER 0xAA04
/* offsets to various locations in flash */
#define FLS_OFFSET_BOOT (u32)(0x00700000)
#define FLS_OFFSET_NVR (u32)(0x007C0000)
#define FLS_OFFSET_CPYR FLS_OFFSET_NVR
#define FLS_LENGTH_BOOT (FLS_OFFSET_CPYR - FLS_OFFSET_BOOT)
#define FLS_BLOCK_SIZE (u32)(0x00020000)
#define FI_NVR_2KB 0x0800
#define FI_NVR_8KB 0x2000
#define FM_BUF_SZ 0x800
/*
* marvell frey (88R9580) register definitions
* chip revision identifiers
*/
#define MVR_FREY_B2 0xB2
/*
* memory window definitions. window 0 is the data window with definitions
* of MW_DATA_XXX. window 1 is the register window with definitions of
* MW_REG_XXX.
*/
#define MW_REG_WINDOW_SIZE (u32)(0x00040000)
#define MW_REG_OFFSET_HWREG (u32)(0x00000000)
#define MW_REG_OFFSET_PCI (u32)(0x00008000)
#define MW_REG_PCI_HWREG_DELTA (MW_REG_OFFSET_PCI - MW_REG_OFFSET_HWREG)
#define MW_DATA_WINDOW_SIZE (u32)(0x00020000)
#define MW_DATA_ADDR_SER_FLASH (u32)(0xEC000000)
#define MW_DATA_ADDR_SRAM (u32)(0xF4000000)
#define MW_DATA_ADDR_PAR_FLASH (u32)(0xFC000000)
/*
* the following registers are for the communication
* list interface (AKA message unit (MU))
*/
#define MU_IN_LIST_ADDR_LO (u32)(0x00004000)
#define MU_IN_LIST_ADDR_HI (u32)(0x00004004)
#define MU_IN_LIST_WRITE (u32)(0x00004018)
#define MU_ILW_TOGGLE (u32)(0x00004000)
#define MU_IN_LIST_READ (u32)(0x0000401C)
#define MU_ILR_TOGGLE (u32)(0x00004000)
#define MU_ILIC_LIST (u32)(0x0000000F)
#define MU_ILIC_LIST_F0 (u32)(0x00000000)
#define MU_ILIC_DEST (u32)(0x00000F00)
#define MU_ILIC_DEST_DDR (u32)(0x00000200)
#define MU_IN_LIST_IFC_CONFIG (u32)(0x00004028)
#define MU_IN_LIST_CONFIG (u32)(0x0000402C)
#define MU_ILC_ENABLE (u32)(0x00000001)
#define MU_ILC_ENTRY_MASK (u32)(0x000000F0)
#define MU_ILC_ENTRY_4_DW (u32)(0x00000020)
#define MU_ILC_DYNAMIC_SRC (u32)(0x00008000)
#define MU_ILC_NUMBER_MASK (u32)(0x7FFF0000)
#define MU_ILC_NUMBER_SHIFT 16
#define MU_OUT_LIST_ADDR_LO (u32)(0x00004050)
#define MU_OUT_LIST_ADDR_HI (u32)(0x00004054)
#define MU_OUT_LIST_COPY_PTR_LO (u32)(0x00004058)
#define MU_OUT_LIST_COPY_PTR_HI (u32)(0x0000405C)
#define MU_OUT_LIST_WRITE (u32)(0x00004068)
#define MU_OLW_TOGGLE (u32)(0x00004000)
#define MU_OUT_LIST_COPY (u32)(0x0000406C)
#define MU_OLC_TOGGLE (u32)(0x00004000)
#define MU_OLC_WRT_PTR (u32)(0x00003FFF)
#define MU_OUT_LIST_IFC_CONFIG (u32)(0x00004078)
#define MU_OLIC_LIST (u32)(0x0000000F)
#define MU_OLIC_LIST_F0 (u32)(0x00000000)
#define MU_OLIC_SOURCE (u32)(0x00000F00)
#define MU_OLIC_SOURCE_DDR (u32)(0x00000200)
#define MU_OUT_LIST_CONFIG (u32)(0x0000407C)
#define MU_OLC_ENABLE (u32)(0x00000001)
#define MU_OLC_ENTRY_MASK (u32)(0x000000F0)
#define MU_OLC_ENTRY_4_DW (u32)(0x00000020)
#define MU_OLC_NUMBER_MASK (u32)(0x7FFF0000)
#define MU_OLC_NUMBER_SHIFT 16
#define MU_OUT_LIST_INT_STAT (u32)(0x00004088)
#define MU_OLIS_INT (u32)(0x00000001)
#define MU_OUT_LIST_INT_MASK (u32)(0x0000408C)
#define MU_OLIS_MASK (u32)(0x00000001)
/*
 * the maximum size of the communication lists is two greater than the
 * maximum number of VDA requests; the extra entries prevent queue
 * overflow.
 */
#define ESAS2R_MAX_NUM_REQS 256
#define ESAS2R_NUM_EXTRA 2
#define ESAS2R_MAX_COMM_LIST_SIZE (ESAS2R_MAX_NUM_REQS + ESAS2R_NUM_EXTRA)
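/* i.e. at most 256 + 2 = 258 communication list entries */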
/*
* the following registers are for the CPU interface
*/
#define MU_CTL_STATUS_IN (u32)(0x00010108)
#define MU_CTL_IN_FULL_RST (u32)(0x00000020)
#define MU_CTL_STATUS_IN_B2 (u32)(0x00010130)
#define MU_CTL_IN_FULL_RST2 (u32)(0x80000000)
#define MU_DOORBELL_IN (u32)(0x00010460)
#define DRBL_RESET_BUS (u32)(0x00000002)
#define DRBL_PAUSE_AE (u32)(0x00000004)
#define DRBL_RESUME_AE (u32)(0x00000008)
#define DRBL_MSG_IFC_DOWN (u32)(0x00000010)
#define DRBL_FLASH_REQ (u32)(0x00000020)
#define DRBL_FLASH_DONE (u32)(0x00000040)
#define DRBL_FORCE_INT (u32)(0x00000080)
#define DRBL_MSG_IFC_INIT (u32)(0x00000100)
#define DRBL_POWER_DOWN (u32)(0x00000200)
#define DRBL_DRV_VER_1 (u32)(0x00010000)
#define DRBL_DRV_VER DRBL_DRV_VER_1
#define MU_DOORBELL_IN_ENB (u32)(0x00010464)
#define MU_DOORBELL_OUT (u32)(0x00010480)
#define DRBL_PANIC_REASON_MASK (u32)(0x00F00000)
#define DRBL_UNUSED_HANDLER (u32)(0x00100000)
#define DRBL_UNDEF_INSTR (u32)(0x00200000)
#define DRBL_PREFETCH_ABORT (u32)(0x00300000)
#define DRBL_DATA_ABORT (u32)(0x00400000)
#define DRBL_JUMP_TO_ZERO (u32)(0x00500000)
#define DRBL_FW_RESET (u32)(0x00080000)
#define DRBL_FW_VER_MSK (u32)(0x00070000)
#define DRBL_FW_VER_0 (u32)(0x00000000)
#define DRBL_FW_VER_1 (u32)(0x00010000)
#define DRBL_FW_VER DRBL_FW_VER_1
#define MU_DOORBELL_OUT_ENB (u32)(0x00010484)
#define DRBL_ENB_MASK (u32)(0x00F803FF)
#define MU_INT_STATUS_OUT (u32)(0x00010200)
#define MU_INTSTAT_POST_OUT (u32)(0x00000010)
#define MU_INTSTAT_DRBL_IN (u32)(0x00000100)
#define MU_INTSTAT_DRBL (u32)(0x00001000)
#define MU_INTSTAT_MASK (u32)(0x00001010)
#define MU_INT_MASK_OUT (u32)(0x0001020C)
/* PCI express registers accessed via window 1 */
#define MVR_PCI_WIN1_REMAP (u32)(0x00008438)
#define MVRPW1R_ENABLE (u32)(0x00000001)
/* structures */
/* inbound list dynamic source entry */
struct esas2r_inbound_list_source_entry {
u64 address;
u32 length;
#define HWILSE_INTERFACE_F0 0x00000000
u32 reserved;
};
/* PCI data structure in expansion ROM images */
struct __packed esas2r_boot_header {
char signature[4];
u16 vendor_id;
u16 device_id;
u16 VPD;
u16 struct_length;
u8 struct_revision;
u8 class_code[3];
u16 image_length;
u16 code_revision;
u8 code_type;
#define CODE_TYPE_PC 0
#define CODE_TYPE_OPEN 1
#define CODE_TYPE_EFI 3
u8 indicator;
#define INDICATOR_LAST 0x80
u8 reserved[2];
};
struct __packed esas2r_boot_image {
u16 signature;
u8 reserved[22];
u16 header_offset;
u16 pnp_offset;
};
struct __packed esas2r_pc_image {
u16 signature;
u8 length;
u8 entry_point[3];
u8 checksum;
u16 image_end;
u16 min_size;
u8 rom_flags;
u8 reserved[12];
u16 header_offset;
u16 pnp_offset;
struct esas2r_boot_header boot_image;
};
struct __packed esas2r_efi_image {
u16 signature;
u16 length;
u32 efi_signature;
#define EFI_ROM_SIG 0x00000EF1
u16 image_type;
#define EFI_IMAGE_APP 10
#define EFI_IMAGE_BSD 11
#define EFI_IMAGE_RTD 12
u16 machine_type;
#define EFI_MACHINE_IA32 0x014c
#define EFI_MACHINE_IA64 0x0200
#define EFI_MACHINE_X64 0x8664
#define EFI_MACHINE_EBC 0x0EBC
u16 compression;
#define EFI_UNCOMPRESSED 0x0000
#define EFI_COMPRESSED 0x0001
u8 reserved[8];
u16 efi_offset;
u16 header_offset;
u16 reserved2;
struct esas2r_boot_header boot_image;
};
struct esas2r_adapter;
struct esas2r_sg_context;
struct esas2r_request;
typedef void (*RQCALLBK) (struct esas2r_adapter *a,
struct esas2r_request *rq);
typedef bool (*RQBUILDSGL) (struct esas2r_adapter *a,
struct esas2r_sg_context *sgc);
struct esas2r_component_header {
u8 img_type;
#define CH_IT_FW 0x00
#define CH_IT_NVR 0x01
#define CH_IT_BIOS 0x02
#define CH_IT_MAC 0x03
#define CH_IT_CFG 0x04
#define CH_IT_EFI 0x05
u8 status;
#define CH_STAT_PENDING 0xff
#define CH_STAT_FAILED 0x00
#define CH_STAT_SUCCESS 0x01
#define CH_STAT_RETRY 0x02
#define CH_STAT_INVALID 0x03
u8 pad[2];
u32 version;
u32 length;
u32 image_offset;
};
#define FI_REL_VER_SZ 16
struct esas2r_flash_img_v0 {
u8 fi_version;
#define FI_VERSION_0 00
u8 status;
u8 adap_typ;
u8 action;
u32 length;
u16 checksum;
u16 driver_error;
u16 flags;
u16 num_comps;
#define FI_NUM_COMPS_V0 5
u8 rel_version[FI_REL_VER_SZ];
struct esas2r_component_header cmp_hdr[FI_NUM_COMPS_V0];
u8 scratch_buf[FM_BUF_SZ];
};
struct esas2r_flash_img {
u8 fi_version;
#define FI_VERSION_1 01
u8 status;
#define FI_STAT_SUCCESS 0x00
#define FI_STAT_FAILED 0x01
#define FI_STAT_REBOOT 0x02
#define FI_STAT_ADAPTYP 0x03
#define FI_STAT_INVALID 0x04
#define FI_STAT_CHKSUM 0x05
#define FI_STAT_LENGTH 0x06
#define FI_STAT_UNKNOWN 0x07
#define FI_STAT_IMG_VER 0x08
#define FI_STAT_BUSY 0x09
#define FI_STAT_DUAL 0x0A
#define FI_STAT_MISSING 0x0B
#define FI_STAT_UNSUPP 0x0C
#define FI_STAT_ERASE 0x0D
#define FI_STAT_FLASH 0x0E
#define FI_STAT_DEGRADED 0x0F
u8 adap_typ;
#define FI_AT_UNKNWN 0xFF
#define FI_AT_SUN_LAKE 0x0B
#define FI_AT_MV_9580 0x0F
u8 action;
#define FI_ACT_DOWN 0x00
#define FI_ACT_UP 0x01
#define FI_ACT_UPSZ 0x02
#define FI_ACT_MAX 0x02
#define FI_ACT_DOWN1 0x80
u32 length;
u16 checksum;
u16 driver_error;
u16 flags;
#define FI_FLG_NVR_DEF 0x0001
u16 num_comps;
#define FI_NUM_COMPS_V1 6
u8 rel_version[FI_REL_VER_SZ];
struct esas2r_component_header cmp_hdr[FI_NUM_COMPS_V1];
u8 scratch_buf[FM_BUF_SZ];
};
/* definitions for flash script (FS) commands */
struct esas2r_ioctlfs_command {
u8 command;
#define ESAS2R_FS_CMD_ERASE 0
#define ESAS2R_FS_CMD_READ 1
#define ESAS2R_FS_CMD_BEGINW 2
#define ESAS2R_FS_CMD_WRITE 3
#define ESAS2R_FS_CMD_COMMIT 4
#define ESAS2R_FS_CMD_CANCEL 5
u8 checksum;
u8 reserved[2];
u32 flash_addr;
u32 length;
u32 image_offset;
};
struct esas2r_ioctl_fs {
u8 version;
#define ESAS2R_FS_VER 0
u8 status;
u8 driver_error;
u8 adap_type;
#define ESAS2R_FS_AT_ESASRAID2 3
#define ESAS2R_FS_AT_TSSASRAID2 4
#define ESAS2R_FS_AT_TSSASRAID2E 5
#define ESAS2R_FS_AT_TLSASHBA 6
u8 driver_ver;
u8 reserved[11];
struct esas2r_ioctlfs_command command;
u8 data[1];
};
struct esas2r_sas_nvram {
u8 signature[4];
u8 version;
#define SASNVR_VERSION_0 0x00
#define SASNVR_VERSION SASNVR_VERSION_0
u8 checksum;
#define SASNVR_CKSUM_SEED 0x5A
u8 max_lun_for_target;
u8 pci_latency;
#define SASNVR_PCILAT_DIS 0x00
#define SASNVR_PCILAT_MIN 0x10
#define SASNVR_PCILAT_MAX 0xF8
u8 options1;
#define SASNVR1_BOOT_DRVR 0x01
#define SASNVR1_BOOT_SCAN 0x02
#define SASNVR1_DIS_PCI_MWI 0x04
#define SASNVR1_FORCE_ORD_Q 0x08
#define SASNVR1_CACHELINE_0 0x10
#define SASNVR1_DIS_DEVSORT 0x20
#define SASNVR1_PWR_MGT_EN 0x40
#define SASNVR1_WIDEPORT 0x80
u8 options2;
#define SASNVR2_SINGLE_BUS 0x01
#define SASNVR2_SLOT_BIND 0x02
#define SASNVR2_EXP_PROG 0x04
#define SASNVR2_CMDTHR_LUN 0x08
#define SASNVR2_HEARTBEAT 0x10
#define SASNVR2_INT_CONNECT 0x20
#define SASNVR2_SW_MUX_CTRL 0x40
#define SASNVR2_DISABLE_NCQ 0x80
u8 int_coalescing;
#define SASNVR_COAL_DIS 0x00
#define SASNVR_COAL_LOW 0x01
#define SASNVR_COAL_MED 0x02
#define SASNVR_COAL_HI 0x03
u8 cmd_throttle;
#define SASNVR_CMDTHR_NONE 0x00
u8 dev_wait_time;
u8 dev_wait_count;
u8 spin_up_delay;
#define SASNVR_SPINUP_MAX 0x14
u8 ssp_align_rate;
u8 sas_addr[8];
u8 phy_speed[16];
#define SASNVR_SPEED_AUTO 0x00
#define SASNVR_SPEED_1_5GB 0x01
#define SASNVR_SPEED_3GB 0x02
#define SASNVR_SPEED_6GB 0x03
#define SASNVR_SPEED_12GB 0x04
u8 phy_mux[16];
#define SASNVR_MUX_DISABLED 0x00
#define SASNVR_MUX_1_5GB 0x01
#define SASNVR_MUX_3GB 0x02
#define SASNVR_MUX_6GB 0x03
u8 phy_flags[16];
#define SASNVR_PHF_DISABLED 0x01
#define SASNVR_PHF_RD_ONLY 0x02
u8 sort_type;
#define SASNVR_SORT_SAS_ADDR 0x00
#define SASNVR_SORT_H308_CONN 0x01
#define SASNVR_SORT_PHY_ID 0x02
#define SASNVR_SORT_SLOT_ID 0x03
u8 dpm_reqcmd_lmt;
u8 dpm_stndby_time;
u8 dpm_active_time;
u8 phy_target_id[16];
#define SASNVR_PTI_DISABLED 0xFF
u8 virt_ses_mode;
#define SASNVR_VSMH_DISABLED 0x00
u8 read_write_mode;
#define SASNVR_RWM_DEFAULT 0x00
u8 link_down_to;
u8 reserved[0xA1];
};
typedef u32 (*PGETPHYSADDR) (struct esas2r_sg_context *sgc, u64 *addr);
struct esas2r_sg_context {
struct esas2r_adapter *adapter;
struct esas2r_request *first_req;
u32 length;
u8 *cur_offset;
PGETPHYSADDR get_phys_addr;
union {
struct {
struct atto_vda_sge *curr;
struct atto_vda_sge *last;
struct atto_vda_sge *limit;
struct atto_vda_sge *chain;
} a64;
struct {
struct atto_physical_region_description *curr;
struct atto_physical_region_description *chain;
u32 sgl_max_cnt;
u32 sge_cnt;
} prd;
} sge;
struct scatterlist *cur_sgel;
u8 *exp_offset;
int num_sgel;
int sgel_count;
};
struct esas2r_target {
u8 flags;
#define TF_PASS_THRU 0x01
#define TF_USED 0x02
u8 new_target_state;
u8 target_state;
u8 buffered_target_state;
#define TS_NOT_PRESENT 0x00
#define TS_PRESENT 0x05
#define TS_LUN_CHANGE 0x06
#define TS_INVALID 0xFF
u32 block_size;
u32 inter_block;
u32 inter_byte;
u16 virt_targ_id;
u16 phys_targ_id;
u8 identifier_len;
u64 sas_addr;
u8 identifier[60];
struct atto_vda_ae_lu lu_event;
};
struct esas2r_request {
struct list_head comp_list;
struct list_head req_list;
union atto_vda_req *vrq;
struct esas2r_mem_desc *vrq_md;
union {
void *data_buf;
union atto_vda_rsp_data *vda_rsp_data;
};
u8 *sense_buf;
struct list_head sg_table_head;
struct esas2r_mem_desc *sg_table;
u32 timeout;
#define RQ_TIMEOUT_S1 0xFFFFFFFF
#define RQ_TIMEOUT_S2 0xFFFFFFFE
#define RQ_MAX_TIMEOUT 0xFFFFFFFD
u16 target_id;
u8 req_type;
#define RT_INI_REQ 0x01
#define RT_DISC_REQ 0x02
u8 sense_len;
union atto_vda_func_rsp func_rsp;
RQCALLBK comp_cb;
RQCALLBK interrupt_cb;
void *interrupt_cx;
u8 flags;
#define RF_1ST_IBLK_BASE 0x04
#define RF_FAILURE_OK 0x08
u8 req_stat;
u16 vda_req_sz;
#define RQ_SIZE_DEFAULT 0
u64 lba;
RQCALLBK aux_req_cb;
void *aux_req_cx;
u32 blk_len;
u32 max_blk_len;
union {
struct scsi_cmnd *cmd;
u8 *task_management_status_ptr;
};
};
struct esas2r_flash_context {
struct esas2r_flash_img *fi;
RQCALLBK interrupt_cb;
u8 *sgc_offset;
u8 *scratch;
u32 fi_hdr_len;
u8 task;
#define FMTSK_ERASE_BOOT 0
#define FMTSK_WRTBIOS 1
#define FMTSK_READBIOS 2
#define FMTSK_WRTMAC 3
#define FMTSK_READMAC 4
#define FMTSK_WRTEFI 5
#define FMTSK_READEFI 6
#define FMTSK_WRTCFG 7
#define FMTSK_READCFG 8
u8 func;
u16 num_comps;
u32 cmp_len;
u32 flsh_addr;
u32 curr_len;
u8 comp_typ;
struct esas2r_sg_context sgc;
};
struct esas2r_disc_context {
u8 disc_evt;
#define DCDE_DEV_CHANGE 0x01
#define DCDE_DEV_SCAN 0x02
u8 state;
#define DCS_DEV_RMV 0x00
#define DCS_DEV_ADD 0x01
#define DCS_BLOCK_DEV_SCAN 0x02
#define DCS_RAID_GRP_INFO 0x03
#define DCS_PART_INFO 0x04
#define DCS_PT_DEV_INFO 0x05
#define DCS_PT_DEV_ADDR 0x06
#define DCS_DISC_DONE 0xFF
u16 flags;
#define DCF_DEV_CHANGE 0x0001
#define DCF_DEV_SCAN 0x0002
#define DCF_POLLED 0x8000
u32 interleave;
u32 block_size;
u16 dev_ix;
u8 part_num;
u8 raid_grp_ix;
char raid_grp_name[16];
struct esas2r_target *curr_targ;
u16 curr_virt_id;
u16 curr_phys_id;
u8 scan_gen;
u8 dev_addr_type;
u64 sas_addr;
};
struct esas2r_mem_desc {
struct list_head next_desc;
void *virt_addr;
u64 phys_addr;
void *pad;
void *esas2r_data;
u32 esas2r_param;
u32 size;
};
enum fw_event_type {
fw_event_null,
fw_event_lun_change,
fw_event_present,
fw_event_not_present,
fw_event_vda_ae
};
struct esas2r_vda_ae {
u32 signature;
#define ESAS2R_VDA_EVENT_SIG 0x4154544F
u8 bus_number;
u8 devfn;
u8 pad[2];
union atto_vda_ae vda_ae;
};
struct esas2r_fw_event_work {
struct list_head list;
struct delayed_work work;
struct esas2r_adapter *a;
enum fw_event_type type;
u8 data[sizeof(struct esas2r_vda_ae)];
};
enum state {
FW_INVALID_ST,
FW_STATUS_ST,
FW_COMMAND_ST
};
struct esas2r_firmware {
enum state state;
struct esas2r_flash_img header;
u8 *data;
u64 phys;
int orig_len;
void *header_buff;
u64 header_buff_phys;
};
struct esas2r_adapter {
struct esas2r_target targetdb[ESAS2R_MAX_TARGETS];
struct esas2r_target *targetdb_end;
unsigned char *regs;
unsigned char *data_window;
u32 volatile flags;
#define AF_PORT_CHANGE (u32)(0x00000001)
#define AF_CHPRST_NEEDED (u32)(0x00000004)
#define AF_CHPRST_PENDING (u32)(0x00000008)
#define AF_CHPRST_DETECTED (u32)(0x00000010)
#define AF_BUSRST_NEEDED (u32)(0x00000020)
#define AF_BUSRST_PENDING (u32)(0x00000040)
#define AF_BUSRST_DETECTED (u32)(0x00000080)
#define AF_DISABLED (u32)(0x00000100)
#define AF_FLASH_LOCK (u32)(0x00000200)
#define AF_OS_RESET (u32)(0x00002000)
#define AF_FLASHING (u32)(0x00004000)
#define AF_POWER_MGT (u32)(0x00008000)
#define AF_NVR_VALID (u32)(0x00010000)
#define AF_DEGRADED_MODE (u32)(0x00020000)
#define AF_DISC_PENDING (u32)(0x00040000)
#define AF_TASKLET_SCHEDULED (u32)(0x00080000)
#define AF_HEARTBEAT (u32)(0x00200000)
#define AF_HEARTBEAT_ENB (u32)(0x00400000)
#define AF_NOT_PRESENT (u32)(0x00800000)
#define AF_CHPRST_STARTED (u32)(0x01000000)
#define AF_FIRST_INIT (u32)(0x02000000)
#define AF_POWER_DOWN (u32)(0x04000000)
#define AF_DISC_IN_PROG (u32)(0x08000000)
#define AF_COMM_LIST_TOGGLE (u32)(0x10000000)
#define AF_LEGACY_SGE_MODE (u32)(0x20000000)
#define AF_DISC_POLLED (u32)(0x40000000)
u32 volatile flags2;
#define AF2_SERIAL_FLASH (u32)(0x00000001)
#define AF2_DEV_SCAN (u32)(0x00000002)
#define AF2_DEV_CNT_OK (u32)(0x00000004)
#define AF2_COREDUMP_AVAIL (u32)(0x00000008)
#define AF2_COREDUMP_SAVED (u32)(0x00000010)
#define AF2_VDA_POWER_DOWN (u32)(0x00000100)
#define AF2_THUNDERLINK (u32)(0x00000200)
#define AF2_THUNDERBOLT (u32)(0x00000400)
#define AF2_INIT_DONE (u32)(0x00000800)
#define AF2_INT_PENDING (u32)(0x00001000)
#define AF2_TIMER_TICK (u32)(0x00002000)
#define AF2_IRQ_CLAIMED (u32)(0x00004000)
#define AF2_MSI_ENABLED (u32)(0x00008000)
atomic_t disable_cnt;
atomic_t dis_ints_cnt;
u32 int_stat;
u32 int_mask;
u32 volatile *outbound_copy;
struct list_head avail_request;
spinlock_t request_lock;
spinlock_t sg_list_lock;
spinlock_t queue_lock;
spinlock_t mem_lock;
struct list_head free_sg_list_head;
struct esas2r_mem_desc *sg_list_mds;
struct list_head active_list;
struct list_head defer_list;
struct esas2r_request **req_table;
union {
u16 prev_dev_cnt;
u32 heartbeat_time;
#define ESAS2R_HEARTBEAT_TIME (3000)
};
u32 chip_uptime;
#define ESAS2R_CHP_UPTIME_MAX (60000)
#define ESAS2R_CHP_UPTIME_CNT (20000)
u64 uncached_phys;
u8 *uncached;
struct esas2r_sas_nvram *nvram;
struct esas2r_request general_req;
u8 init_msg;
#define ESAS2R_INIT_MSG_START 1
#define ESAS2R_INIT_MSG_INIT 2
#define ESAS2R_INIT_MSG_GET_INIT 3
#define ESAS2R_INIT_MSG_REINIT 4
u16 cmd_ref_no;
u32 fw_version;
u32 fw_build;
u32 chip_init_time;
#define ESAS2R_CHPRST_TIME (180000)
#define ESAS2R_CHPRST_WAIT_TIME (2000)
u32 last_tick_time;
u32 window_base;
RQBUILDSGL build_sgl;
struct esas2r_request *first_ae_req;
u32 list_size;
u32 last_write;
u32 last_read;
u16 max_vdareq_size;
u16 disc_wait_cnt;
struct esas2r_mem_desc inbound_list_md;
struct esas2r_mem_desc outbound_list_md;
struct esas2r_disc_context disc_ctx;
u8 *disc_buffer;
u32 disc_start_time;
u32 disc_wait_time;
u32 flash_ver;
char flash_rev[16];
char fw_rev[16];
char image_type[16];
struct esas2r_flash_context flash_context;
u32 num_targets_backend;
u32 ioctl_tunnel;
struct tasklet_struct tasklet;
struct pci_dev *pcid;
struct Scsi_Host *host;
unsigned int index;
char name[32];
struct timer_list timer;
struct esas2r_firmware firmware;
wait_queue_head_t nvram_waiter;
int nvram_command_done;
wait_queue_head_t fm_api_waiter;
int fm_api_command_done;
wait_queue_head_t vda_waiter;
int vda_command_done;
u8 *vda_buffer;
u64 ppvda_buffer;
#define VDA_BUFFER_HEADER_SZ (offsetof(struct atto_ioctl_vda, data))
#define VDA_MAX_BUFFER_SIZE (0x40000 + VDA_BUFFER_HEADER_SZ)
wait_queue_head_t fs_api_waiter;
int fs_api_command_done;
u64 ppfs_api_buffer;
u8 *fs_api_buffer;
u32 fs_api_buffer_size;
wait_queue_head_t buffered_ioctl_waiter;
int buffered_ioctl_done;
int uncached_size;
struct workqueue_struct *fw_event_q;
struct list_head fw_event_list;
spinlock_t fw_event_lock;
u8 fw_events_off; /* if '1', then ignore events */
char fw_event_q_name[ESAS2R_KOBJ_NAME_LEN];
/*
 * intr_mode stores the interrupt mode currently being used by this
 * adapter. it is based on the interrupt_mode module parameter, but
 * may fall back to another mode if the requested one cannot be used.
 */
int intr_mode;
#define INTR_MODE_LEGACY 0
#define INTR_MODE_MSI 1
#define INTR_MODE_MSIX 2
struct esas2r_sg_context fm_api_sgc;
u8 *save_offset;
struct list_head vrq_mds_head;
struct esas2r_mem_desc *vrq_mds;
int num_vrqs;
struct semaphore fm_api_semaphore;
struct semaphore fs_api_semaphore;
struct semaphore nvram_semaphore;
struct atto_ioctl *local_atto_ioctl;
u8 fw_coredump_buff[ESAS2R_FWCOREDUMP_SZ];
unsigned int sysfs_fw_created:1;
unsigned int sysfs_fs_created:1;
unsigned int sysfs_vda_created:1;
unsigned int sysfs_hw_created:1;
unsigned int sysfs_live_nvram_created:1;
unsigned int sysfs_default_nvram_created:1;
};
/*
* Function Declarations
* SCSI functions
*/
int esas2r_release(struct Scsi_Host *);
const char *esas2r_info(struct Scsi_Host *);
int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq,
struct esas2r_sas_nvram *data);
int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg);
int esas2r_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
u8 handle_hba_ioctl(struct esas2r_adapter *a,
struct atto_ioctl *ioctl_hba);
int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd);
int esas2r_show_info(struct seq_file *m, struct Scsi_Host *sh);
int esas2r_slave_alloc(struct scsi_device *dev);
int esas2r_slave_configure(struct scsi_device *dev);
void esas2r_slave_destroy(struct scsi_device *dev);
int esas2r_change_queue_depth(struct scsi_device *dev, int depth, int reason);
int esas2r_change_queue_type(struct scsi_device *dev, int type);
long esas2r_proc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);
/* SCSI error handler (eh) functions */
int esas2r_eh_abort(struct scsi_cmnd *cmd);
int esas2r_device_reset(struct scsi_cmnd *cmd);
int esas2r_host_reset(struct scsi_cmnd *cmd);
int esas2r_bus_reset(struct scsi_cmnd *cmd);
int esas2r_target_reset(struct scsi_cmnd *cmd);
/* Internal functions */
int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
int index);
int esas2r_cleanup(struct Scsi_Host *host);
int esas2r_read_fw(struct esas2r_adapter *a, char *buf, long off, int count);
int esas2r_write_fw(struct esas2r_adapter *a, const char *buf, long off,
int count);
int esas2r_read_vda(struct esas2r_adapter *a, char *buf, long off, int count);
int esas2r_write_vda(struct esas2r_adapter *a, const char *buf, long off,
int count);
int esas2r_read_fs(struct esas2r_adapter *a, char *buf, long off, int count);
int esas2r_write_fs(struct esas2r_adapter *a, const char *buf, long off,
int count);
void esas2r_adapter_tasklet(unsigned long context);
irqreturn_t esas2r_interrupt(int irq, void *dev_id);
irqreturn_t esas2r_msi_interrupt(int irq, void *dev_id);
void esas2r_kickoff_timer(struct esas2r_adapter *a);
int esas2r_suspend(struct pci_dev *pcid, pm_message_t state);
int esas2r_resume(struct pci_dev *pcid);
void esas2r_fw_event_off(struct esas2r_adapter *a);
void esas2r_fw_event_on(struct esas2r_adapter *a);
bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq,
struct esas2r_sas_nvram *nvram);
void esas2r_nvram_get_defaults(struct esas2r_adapter *a,
struct esas2r_sas_nvram *nvram);
void esas2r_complete_request_cb(struct esas2r_adapter *a,
struct esas2r_request *rq);
void esas2r_reset_detected(struct esas2r_adapter *a);
void esas2r_target_state_changed(struct esas2r_adapter *ha, u16 targ_id,
u8 state);
int esas2r_req_status_to_error(u8 req_stat);
void esas2r_kill_adapter(int i);
void esas2r_free_request(struct esas2r_adapter *a, struct esas2r_request *rq);
struct esas2r_request *esas2r_alloc_request(struct esas2r_adapter *a);
u32 esas2r_get_uncached_size(struct esas2r_adapter *a);
bool esas2r_init_adapter_struct(struct esas2r_adapter *a,
void **uncached_area);
bool esas2r_check_adapter(struct esas2r_adapter *a);
bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll);
void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq);
bool esas2r_send_task_mgmt(struct esas2r_adapter *a,
struct esas2r_request *rqaux, u8 task_mgt_func);
void esas2r_do_tasklet_tasks(struct esas2r_adapter *a);
void esas2r_adapter_interrupt(struct esas2r_adapter *a);
void esas2r_do_deferred_processes(struct esas2r_adapter *a);
void esas2r_reset_bus(struct esas2r_adapter *a);
void esas2r_reset_adapter(struct esas2r_adapter *a);
void esas2r_timer_tick(struct esas2r_adapter *a);
const char *esas2r_get_model_name(struct esas2r_adapter *a);
const char *esas2r_get_model_name_short(struct esas2r_adapter *a);
u32 esas2r_stall_execution(struct esas2r_adapter *a, u32 start_time,
u32 *delay);
void esas2r_build_flash_req(struct esas2r_adapter *a,
struct esas2r_request *rq,
u8 sub_func,
u8 cksum,
u32 addr,
u32 length);
void esas2r_build_mgt_req(struct esas2r_adapter *a,
struct esas2r_request *rq,
u8 sub_func,
u8 scan_gen,
u16 dev_index,
u32 length,
void *data);
void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq);
void esas2r_build_cli_req(struct esas2r_adapter *a,
struct esas2r_request *rq,
u32 length,
u32 cmd_rsp_len);
void esas2r_build_ioctl_req(struct esas2r_adapter *a,
struct esas2r_request *rq,
u32 length,
u8 sub_func);
void esas2r_build_cfg_req(struct esas2r_adapter *a,
struct esas2r_request *rq,
u8 sub_func,
u32 length,
void *data);
void esas2r_power_down(struct esas2r_adapter *a);
bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll);
void esas2r_wait_request(struct esas2r_adapter *a, struct esas2r_request *rq);
u32 esas2r_map_data_window(struct esas2r_adapter *a, u32 addr_lo);
bool esas2r_process_fs_ioctl(struct esas2r_adapter *a,
struct esas2r_ioctl_fs *fs,
struct esas2r_request *rq,
struct esas2r_sg_context *sgc);
bool esas2r_read_flash_block(struct esas2r_adapter *a, void *to, u32 from,
u32 size);
bool esas2r_read_mem_block(struct esas2r_adapter *a, void *to, u32 from,
u32 size);
bool esas2r_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi,
struct esas2r_request *rq, struct esas2r_sg_context *sgc);
void esas2r_force_interrupt(struct esas2r_adapter *a);
void esas2r_local_start_request(struct esas2r_adapter *a,
struct esas2r_request *rq);
void esas2r_process_adapter_reset(struct esas2r_adapter *a);
void esas2r_complete_request(struct esas2r_adapter *a,
struct esas2r_request *rq);
void esas2r_dummy_complete(struct esas2r_adapter *a,
struct esas2r_request *rq);
void esas2r_ae_complete(struct esas2r_adapter *a, struct esas2r_request *rq);
void esas2r_start_vda_request(struct esas2r_adapter *a,
struct esas2r_request *rq);
bool esas2r_read_flash_rev(struct esas2r_adapter *a);
bool esas2r_read_image_type(struct esas2r_adapter *a);
bool esas2r_nvram_read_direct(struct esas2r_adapter *a);
bool esas2r_nvram_validate(struct esas2r_adapter *a);
void esas2r_nvram_set_defaults(struct esas2r_adapter *a);
bool esas2r_print_flash_rev(struct esas2r_adapter *a);
void esas2r_send_reset_ae(struct esas2r_adapter *a, bool pwr_mgt);
bool esas2r_init_msgs(struct esas2r_adapter *a);
bool esas2r_is_adapter_present(struct esas2r_adapter *a);
void esas2r_nuxi_mgt_data(u8 function, void *data);
void esas2r_nuxi_cfg_data(u8 function, void *data);
void esas2r_nuxi_ae_data(union atto_vda_ae *ae);
void esas2r_reset_chip(struct esas2r_adapter *a);
void esas2r_log_request_failure(struct esas2r_adapter *a,
struct esas2r_request *rq);
void esas2r_polled_interrupt(struct esas2r_adapter *a);
bool esas2r_ioreq_aborted(struct esas2r_adapter *a, struct esas2r_request *rq,
u8 status);
bool esas2r_build_sg_list_sge(struct esas2r_adapter *a,
struct esas2r_sg_context *sgc);
bool esas2r_build_sg_list_prd(struct esas2r_adapter *a,
struct esas2r_sg_context *sgc);
void esas2r_targ_db_initialize(struct esas2r_adapter *a);
void esas2r_targ_db_remove_all(struct esas2r_adapter *a, bool notify);
void esas2r_targ_db_report_changes(struct esas2r_adapter *a);
struct esas2r_target *esas2r_targ_db_add_raid(struct esas2r_adapter *a,
struct esas2r_disc_context *dc);
struct esas2r_target *esas2r_targ_db_add_pthru(struct esas2r_adapter *a,
struct esas2r_disc_context *dc,
u8 *ident,
u8 ident_len);
void esas2r_targ_db_remove(struct esas2r_adapter *a, struct esas2r_target *t);
struct esas2r_target *esas2r_targ_db_find_by_sas_addr(struct esas2r_adapter *a,
u64 *sas_addr);
struct esas2r_target *esas2r_targ_db_find_by_ident(struct esas2r_adapter *a,
void *identifier,
u8 ident_len);
u16 esas2r_targ_db_find_next_present(struct esas2r_adapter *a, u16 target_id);
struct esas2r_target *esas2r_targ_db_find_by_virt_id(struct esas2r_adapter *a,
u16 virt_id);
u16 esas2r_targ_db_get_tgt_cnt(struct esas2r_adapter *a);
void esas2r_disc_initialize(struct esas2r_adapter *a);
void esas2r_disc_start_waiting(struct esas2r_adapter *a);
void esas2r_disc_check_for_work(struct esas2r_adapter *a);
void esas2r_disc_check_complete(struct esas2r_adapter *a);
void esas2r_disc_queue_event(struct esas2r_adapter *a, u8 disc_evt);
bool esas2r_disc_start_port(struct esas2r_adapter *a);
void esas2r_disc_local_start_request(struct esas2r_adapter *a,
struct esas2r_request *rq);
bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str);
bool esas2r_process_vda_ioctl(struct esas2r_adapter *a,
struct atto_ioctl_vda *vi,
struct esas2r_request *rq,
struct esas2r_sg_context *sgc);
void esas2r_queue_fw_event(struct esas2r_adapter *a,
enum fw_event_type type,
void *data,
int data_sz);
/* Inline functions */
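/*
 * note: these helpers operate on a single flag bit; "bits" must have
 * exactly one bit set, since ilog2() reduces it to a single bit index.
 */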
static inline u32 esas2r_lock_set_flags(volatile u32 *flags, u32 bits)
{
return test_and_set_bit(ilog2(bits), (volatile unsigned long *)flags);
}
static inline u32 esas2r_lock_clear_flags(volatile u32 *flags, u32 bits)
{
return test_and_clear_bit(ilog2(bits),
(volatile unsigned long *)flags);
}
/* Allocate a chip scatter/gather list entry; returns NULL if none are free */
static inline struct esas2r_mem_desc *esas2r_alloc_sgl(struct esas2r_adapter *a)
{
unsigned long flags;
struct list_head *sgl;
struct esas2r_mem_desc *result = NULL;
spin_lock_irqsave(&a->sg_list_lock, flags);
if (likely(!list_empty(&a->free_sg_list_head))) {
sgl = a->free_sg_list_head.next;
result = list_entry(sgl, struct esas2r_mem_desc, next_desc);
list_del_init(sgl);
}
spin_unlock_irqrestore(&a->sg_list_lock, flags);
return result;
}
/* Initialize a scatter/gather context */
static inline void esas2r_sgc_init(struct esas2r_sg_context *sgc,
struct esas2r_adapter *a,
struct esas2r_request *rq,
struct atto_vda_sge *first)
{
sgc->adapter = a;
sgc->first_req = rq;
/*
* set the limit pointer such that an SGE pointer above this value
* would be the first one to overflow the SGL.
*/
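/* (sizeof(union atto_vda_req) is 1024 bytes, so inline SGEs must fit
 * within the first 1024 / 8 = 128 bytes of the request)
 */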
sgc->sge.a64.limit = (struct atto_vda_sge *)((u8 *)rq->vrq
+ (sizeof(union atto_vda_req) / 8)
- sizeof(struct atto_vda_sge));
if (first) {
sgc->sge.a64.last = sgc->sge.a64.curr = first;
rq->vrq->scsi.sg_list_offset =
(u8)((u8 *)first - (u8 *)rq->vrq);
} else {
sgc->sge.a64.last = sgc->sge.a64.curr = &rq->vrq->scsi.u.sge[0];
rq->vrq->scsi.sg_list_offset =
(u8)offsetof(struct atto_vda_scsi_req, u.sge);
}
sgc->sge.a64.chain = NULL;
}
static inline void esas2r_rq_init_request(struct esas2r_request *rq,
struct esas2r_adapter *a)
{
union atto_vda_req *vrq = rq->vrq;
u32 handle;
INIT_LIST_HEAD(&rq->sg_table_head);
rq->data_buf = (void *)(vrq + 1);
rq->interrupt_cb = NULL;
rq->comp_cb = esas2r_complete_request_cb;
rq->flags = 0;
rq->timeout = 0;
rq->req_stat = RS_PENDING;
rq->req_type = RT_INI_REQ;
/* clear the outbound response */
rq->func_rsp.dwords[0] = 0;
rq->func_rsp.dwords[1] = 0;
/*
 * clear the size of the VDA request. esas2r_build_sg_list() only
 * allows the size of the request to grow, and some management
 * requests go through it twice, setting a smaller request size the
 * second time. if the size is never modified, we set it to the size
 * of the entire VDA request.
 */
rq->vda_req_sz = RQ_SIZE_DEFAULT;
/* req_table entry should be NULL at this point - if not, halt */
if (a->req_table[LOWORD(vrq->scsi.handle)])
esas2r_bugon();
/* fill in the table for this handle so we can get back to the
* request.
*/
a->req_table[LOWORD(vrq->scsi.handle)] = rq;
/*
* add a reference number to the handle to make it unique (until it
* wraps of course) while preserving the upper word
*/
handle = be32_to_cpu(vrq->scsi.handle) & 0xFFFF0000;
vrq->scsi.handle = cpu_to_be32(handle + a->cmd_ref_no++);
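/* e.g. an upper word of 0x0003 and a cmd_ref_no of 5 yield 0x00030005 */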
/*
* the following formats a SCSI request. the caller can override as
* necessary. clear_vda_request can be called to clear the VDA
* request for another type of request.
*/
vrq->scsi.function = VDA_FUNC_SCSI;
vrq->scsi.sense_len = SENSE_DATA_SZ;
/* clear out sg_list_offset and chain_offset */
vrq->scsi.sg_list_offset = 0;
vrq->scsi.chain_offset = 0;
vrq->scsi.flags = 0;
vrq->scsi.reserved = 0;
/* set the sense buffer to be the data payload buffer */
vrq->scsi.ppsense_buf
= cpu_to_le64(rq->vrq_md->phys_addr +
sizeof(union atto_vda_req));
}
static inline void esas2r_rq_free_sg_lists(struct esas2r_request *rq,
struct esas2r_adapter *a)
{
unsigned long flags;
if (list_empty(&rq->sg_table_head))
return;
spin_lock_irqsave(&a->sg_list_lock, flags);
list_splice_tail_init(&rq->sg_table_head, &a->free_sg_list_head);
spin_unlock_irqrestore(&a->sg_list_lock, flags);
}
static inline void esas2r_rq_destroy_request(struct esas2r_request *rq,
struct esas2r_adapter *a)
{
esas2r_rq_free_sg_lists(rq, a);
a->req_table[LOWORD(rq->vrq->scsi.handle)] = NULL;
rq->data_buf = NULL;
}
static inline bool esas2r_is_tasklet_pending(struct esas2r_adapter *a)
{
return (a->flags & (AF_BUSRST_NEEDED | AF_BUSRST_DETECTED
| AF_CHPRST_NEEDED | AF_CHPRST_DETECTED
| AF_PORT_CHANGE)) != 0;
}
/*
* Build the scatter/gather list for an I/O request according to the
* specifications placed in the esas2r_sg_context. The caller must initialize
* struct esas2r_sg_context prior to the initial call by calling
* esas2r_sgc_init()
*/
static inline bool esas2r_build_sg_list(struct esas2r_adapter *a,
struct esas2r_request *rq,
struct esas2r_sg_context *sgc)
{
if (unlikely(le32_to_cpu(rq->vrq->scsi.length) == 0))
return true;
return (*a->build_sgl)(a, sgc);
}
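/*
 * Illustrative sketch of the init/build sequence (hypothetical caller;
 * real callers also set sgc.cur_offset and sgc.get_phys_addr before
 * building):
 *
 *	struct esas2r_sg_context sgc;
 *
 *	esas2r_sgc_init(&sgc, a, rq, NULL);
 *	sgc.length = le32_to_cpu(rq->vrq->scsi.length);
 *	if (!esas2r_build_sg_list(a, rq, &sgc)) {
 *		(could not allocate an SGL - fail the request)
 *	}
 */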
static inline void esas2r_disable_chip_interrupts(struct esas2r_adapter *a)
{
if (atomic_inc_return(&a->dis_ints_cnt) == 1)
esas2r_write_register_dword(a, MU_INT_MASK_OUT,
ESAS2R_INT_DIS_MASK);
}
static inline void esas2r_enable_chip_interrupts(struct esas2r_adapter *a)
{
if (atomic_dec_return(&a->dis_ints_cnt) == 0)
esas2r_write_register_dword(a, MU_INT_MASK_OUT,
ESAS2R_INT_ENB_MASK);
}
/* Schedule a TASKLET to perform non-interrupt tasks that may require delays
* or long completion times.
*/
static inline void esas2r_schedule_tasklet(struct esas2r_adapter *a)
{
/* make sure we don't schedule twice */
if (!(esas2r_lock_set_flags(&a->flags, AF_TASKLET_SCHEDULED) &
ilog2(AF_TASKLET_SCHEDULED)))
tasklet_hi_schedule(&a->tasklet);
}
static inline void esas2r_enable_heartbeat(struct esas2r_adapter *a)
{
if (!(a->flags & (AF_DEGRADED_MODE | AF_CHPRST_PENDING))
&& (a->nvram->options2 & SASNVR2_HEARTBEAT))
esas2r_lock_set_flags(&a->flags, AF_HEARTBEAT_ENB);
else
esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT_ENB);
}
static inline void esas2r_disable_heartbeat(struct esas2r_adapter *a)
{
esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT_ENB);
esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT);
}
/* Set the initial state for resetting the adapter on the next pass
 * through esas2r_do_deferred_processes().
 */
static inline void esas2r_local_reset_adapter(struct esas2r_adapter *a)
{
esas2r_disable_heartbeat(a);
esas2r_lock_set_flags(&a->flags, AF_CHPRST_NEEDED);
esas2r_lock_set_flags(&a->flags, AF_CHPRST_PENDING);
esas2r_lock_set_flags(&a->flags, AF_DISC_PENDING);
}
/* See if an interrupt is pending on the adapter. */
static inline bool esas2r_adapter_interrupt_pending(struct esas2r_adapter *a)
{
u32 intstat;
if (a->int_mask == 0)
return false;
intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);
if ((intstat & a->int_mask) == 0)
return false;
esas2r_disable_chip_interrupts(a);
a->int_stat = intstat;
a->int_mask = 0;
return true;
}
static inline u16 esas2r_targ_get_id(struct esas2r_target *t,
struct esas2r_adapter *a)
{
return (u16)(uintptr_t)(t - a->targetdb);
}
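/* e.g. t == &a->targetdb[7] yields a target id of 7 */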
/* Build and start an asynchronous event request */
static inline void esas2r_start_ae_request(struct esas2r_adapter *a,
struct esas2r_request *rq)
{
unsigned long flags;
esas2r_build_ae_req(a, rq);
spin_lock_irqsave(&a->queue_lock, flags);
esas2r_start_vda_request(a, rq);
spin_unlock_irqrestore(&a->queue_lock, flags);
}
static inline void esas2r_comp_list_drain(struct esas2r_adapter *a,
struct list_head *comp_list)
{
struct esas2r_request *rq;
struct list_head *element, *next;
list_for_each_safe(element, next, comp_list) {
rq = list_entry(element, struct esas2r_request, comp_list);
list_del_init(element);
esas2r_complete_request(a, rq);
}
}
/* sysfs handlers */
extern struct bin_attribute bin_attr_fw;
extern struct bin_attribute bin_attr_fs;
extern struct bin_attribute bin_attr_vda;
extern struct bin_attribute bin_attr_hw;
extern struct bin_attribute bin_attr_live_nvram;
extern struct bin_attribute bin_attr_default_nvram;
#endif /* ESAS2R_H */
/*
* linux/drivers/scsi/esas2r/esas2r_disc.c
* esas2r device discovery routines
*
* Copyright (c) 2001-2013 ATTO Technology, Inc.
* (mailto:linuxdrivers@attotech.com)
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* NO WARRANTY
* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
* solely responsible for determining the appropriateness of using and
* distributing the Program and assumes all risks associated with its
* exercise of rights under this Agreement, including but not limited to
* the risks and costs of program errors, damage to or loss of data,
* programs or equipment, and unavailability or interruption of operations.
*
* DISCLAIMER OF LIABILITY
* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
#include "esas2r.h"
/* Miscellaneous internal discovery routines */
static void esas2r_disc_abort(struct esas2r_adapter *a,
struct esas2r_request *rq);
static bool esas2r_disc_continue(struct esas2r_adapter *a,
struct esas2r_request *rq);
static void esas2r_disc_fix_curr_requests(struct esas2r_adapter *a);
static u32 esas2r_disc_get_phys_addr(struct esas2r_sg_context *sgc, u64 *addr);
static bool esas2r_disc_start_request(struct esas2r_adapter *a,
struct esas2r_request *rq);
/* Internal discovery routines that process the states */
static bool esas2r_disc_block_dev_scan(struct esas2r_adapter *a,
struct esas2r_request *rq);
static void esas2r_disc_block_dev_scan_cb(struct esas2r_adapter *a,
struct esas2r_request *rq);
static bool esas2r_disc_dev_add(struct esas2r_adapter *a,
struct esas2r_request *rq);
static bool esas2r_disc_dev_remove(struct esas2r_adapter *a,
struct esas2r_request *rq);
static bool esas2r_disc_part_info(struct esas2r_adapter *a,
struct esas2r_request *rq);
static void esas2r_disc_part_info_cb(struct esas2r_adapter *a,
struct esas2r_request *rq);
static bool esas2r_disc_passthru_dev_info(struct esas2r_adapter *a,
struct esas2r_request *rq);
static void esas2r_disc_passthru_dev_info_cb(struct esas2r_adapter *a,
struct esas2r_request *rq);
static bool esas2r_disc_passthru_dev_addr(struct esas2r_adapter *a,
struct esas2r_request *rq);
static void esas2r_disc_passthru_dev_addr_cb(struct esas2r_adapter *a,
struct esas2r_request *rq);
static bool esas2r_disc_raid_grp_info(struct esas2r_adapter *a,
struct esas2r_request *rq);
static void esas2r_disc_raid_grp_info_cb(struct esas2r_adapter *a,
struct esas2r_request *rq);
void esas2r_disc_initialize(struct esas2r_adapter *a)
{
struct esas2r_sas_nvram *nvr = a->nvram;
esas2r_trace_enter();
esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG);
esas2r_lock_clear_flags(&a->flags2, AF2_DEV_SCAN);
esas2r_lock_clear_flags(&a->flags2, AF2_DEV_CNT_OK);
a->disc_start_time = jiffies_to_msecs(jiffies);
a->disc_wait_time = nvr->dev_wait_time * 1000;
a->disc_wait_cnt = nvr->dev_wait_count;
if (a->disc_wait_cnt > ESAS2R_MAX_TARGETS)
a->disc_wait_cnt = ESAS2R_MAX_TARGETS;
/*
 * If we are doing chip reset or power management processing, always
 * wait for devices. Use the NVRAM device count if it is greater than
 * the number of previously discovered devices.
 */
esas2r_hdebug("starting discovery...");
a->general_req.interrupt_cx = NULL;
if (a->flags & (AF_CHPRST_DETECTED | AF_POWER_MGT)) {
if (a->prev_dev_cnt == 0) {
/* Don't bother waiting if there is nothing to wait
* for.
*/
a->disc_wait_time = 0;
} else {
/*
* Set the device wait count to what was previously
* found. We don't care if the user only configured
* a time because we know the exact count to wait for.
* There is no need to honor the user's wishes to
* always wait the full time.
*/
a->disc_wait_cnt = a->prev_dev_cnt;
/*
* bump the minimum wait time to 15 seconds since the
* default is 3 (system boot or the boot driver usually
* buys us more time).
*/
if (a->disc_wait_time < 15000)
a->disc_wait_time = 15000;
}
}
esas2r_trace("disc wait count: %d", a->disc_wait_cnt);
esas2r_trace("disc wait time: %d", a->disc_wait_time);
if (a->disc_wait_time == 0)
esas2r_disc_check_complete(a);
esas2r_trace_exit();
}
void esas2r_disc_start_waiting(struct esas2r_adapter *a)
{
unsigned long flags;
spin_lock_irqsave(&a->mem_lock, flags);
if (a->disc_ctx.disc_evt)
esas2r_disc_start_port(a);
spin_unlock_irqrestore(&a->mem_lock, flags);
}
void esas2r_disc_check_for_work(struct esas2r_adapter *a)
{
struct esas2r_request *rq = &a->general_req;
/* service any pending interrupts first */
esas2r_polled_interrupt(a);
/*
* now, interrupt processing may have queued up a discovery event. go
* see if we have one to start. we couldn't start it in the ISR since
* polled discovery would cause a deadlock.
*/
esas2r_disc_start_waiting(a);
if (rq->interrupt_cx == NULL)
return;
if (rq->req_stat == RS_STARTED
&& rq->timeout <= RQ_MAX_TIMEOUT) {
/* wait for the current discovery request to complete. */
esas2r_wait_request(a, rq);
if (rq->req_stat == RS_TIMEOUT) {
esas2r_disc_abort(a, rq);
esas2r_local_reset_adapter(a);
return;
}
}
if (rq->req_stat == RS_PENDING
|| rq->req_stat == RS_STARTED)
return;
esas2r_disc_continue(a, rq);
}
void esas2r_disc_check_complete(struct esas2r_adapter *a)
{
unsigned long flags;
esas2r_trace_enter();
/* check to see if we should be waiting for devices */
if (a->disc_wait_time) {
u32 currtime = jiffies_to_msecs(jiffies);
u32 time = currtime - a->disc_start_time;
/*
* Wait until the device wait time is exhausted or the device
* wait count is satisfied.
*/
if (time < a->disc_wait_time
&& (esas2r_targ_db_get_tgt_cnt(a) < a->disc_wait_cnt
|| a->disc_wait_cnt == 0)) {
/* After three seconds of waiting, schedule a scan. */
if (time >= 3000
&& !(esas2r_lock_set_flags(&a->flags2,
AF2_DEV_SCAN) &
ilog2(AF2_DEV_SCAN))) {
spin_lock_irqsave(&a->mem_lock, flags);
esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
spin_unlock_irqrestore(&a->mem_lock, flags);
}
esas2r_trace_exit();
return;
}
/*
* We are done waiting...we think. Adjust the wait time to
* consume events after the count is met.
*/
if (!(esas2r_lock_set_flags(&a->flags2, AF2_DEV_CNT_OK)
& ilog2(AF2_DEV_CNT_OK)))
a->disc_wait_time = time + 3000;
/* If we haven't done a full scan yet, do it now. */
if (!(esas2r_lock_set_flags(&a->flags2,
AF2_DEV_SCAN) &
ilog2(AF2_DEV_SCAN))) {
spin_lock_irqsave(&a->mem_lock, flags);
esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
spin_unlock_irqrestore(&a->mem_lock, flags);
esas2r_trace_exit();
return;
}
/*
* Now, if there is still time left to consume events, continue
* waiting.
*/
if (time < a->disc_wait_time) {
esas2r_trace_exit();
return;
}
} else {
if (!(esas2r_lock_set_flags(&a->flags2,
AF2_DEV_SCAN) &
ilog2(AF2_DEV_SCAN))) {
spin_lock_irqsave(&a->mem_lock, flags);
esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
spin_unlock_irqrestore(&a->mem_lock, flags);
}
}
/* We want to stop waiting for devices. */
a->disc_wait_time = 0;
if ((a->flags & AF_DISC_POLLED)
&& (a->flags & AF_DISC_IN_PROG)) {
/*
* Polled discovery is still pending so continue the active
* discovery until it is done. At that point, we will stop
* polled discovery and transition to interrupt driven
* discovery.
*/
} else {
/*
 * Done waiting for devices. Note that we get here immediately
 * after deferred waiting completes because that is interrupt
 * driven; i.e., there is no transition.
 */
esas2r_disc_fix_curr_requests(a);
esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING);
/*
* We have deferred target state changes until now because we
* don't want to report any removals (due to the first arrival)
* until the device wait time expires.
*/
esas2r_lock_set_flags(&a->flags, AF_PORT_CHANGE);
}
esas2r_trace_exit();
}
void esas2r_disc_queue_event(struct esas2r_adapter *a, u8 disc_evt)
{
struct esas2r_disc_context *dc = &a->disc_ctx;
esas2r_trace_enter();
esas2r_trace("disc_event: %d", disc_evt);
/* Initialize the discovery context */
dc->disc_evt |= disc_evt;
/*
 * Don't start discovery before or during polled discovery. If we
 * did, we could deadlock if we are already in the ISR.
 */
if (!(a->flags & (AF_CHPRST_PENDING | AF_DISC_POLLED)))
esas2r_disc_start_port(a);
esas2r_trace_exit();
}
bool esas2r_disc_start_port(struct esas2r_adapter *a)
{
struct esas2r_request *rq = &a->general_req;
struct esas2r_disc_context *dc = &a->disc_ctx;
bool ret;
esas2r_trace_enter();
if (a->flags & AF_DISC_IN_PROG) {
esas2r_trace_exit();
return false;
}
/* If there is a discovery waiting, process it. */
if (dc->disc_evt) {
if ((a->flags & AF_DISC_POLLED)
&& a->disc_wait_time == 0) {
/*
* We are doing polled discovery, but we no longer want
* to wait for devices. Stop polled discovery and
* transition to interrupt driven discovery.
*/
esas2r_trace_exit();
return false;
}
} else {
/* Discovery is complete. */
esas2r_hdebug("disc done");
esas2r_lock_set_flags(&a->flags, AF_PORT_CHANGE);
esas2r_trace_exit();
return false;
}
/* Handle the discovery context */
esas2r_trace("disc_evt: %d", dc->disc_evt);
esas2r_lock_set_flags(&a->flags, AF_DISC_IN_PROG);
dc->flags = 0;
if (a->flags & AF_DISC_POLLED)
dc->flags |= DCF_POLLED;
rq->interrupt_cx = dc;
rq->req_stat = RS_SUCCESS;
/* Decode the event code */
if (dc->disc_evt & DCDE_DEV_SCAN) {
dc->disc_evt &= ~DCDE_DEV_SCAN;
dc->flags |= DCF_DEV_SCAN;
dc->state = DCS_BLOCK_DEV_SCAN;
} else if (dc->disc_evt & DCDE_DEV_CHANGE) {
dc->disc_evt &= ~DCDE_DEV_CHANGE;
dc->flags |= DCF_DEV_CHANGE;
dc->state = DCS_DEV_RMV;
}
/* Continue interrupt driven discovery */
if (!(a->flags & AF_DISC_POLLED))
ret = esas2r_disc_continue(a, rq);
else
ret = true;
esas2r_trace_exit();
return ret;
}
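/*
 * Run the discovery state machine.  Each state handler returns true if
 * it started a firmware request; otherwise we advance until the state
 * machine reaches DCS_DISC_DONE, then start the next discovery, if any.
 */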
static bool esas2r_disc_continue(struct esas2r_adapter *a,
struct esas2r_request *rq)
{
struct esas2r_disc_context *dc =
(struct esas2r_disc_context *)rq->interrupt_cx;
bool rslt;
/* Device discovery/removal */
while (dc->flags & (DCF_DEV_CHANGE | DCF_DEV_SCAN)) {
rslt = false;
switch (dc->state) {
case DCS_DEV_RMV:
rslt = esas2r_disc_dev_remove(a, rq);
break;
case DCS_DEV_ADD:
rslt = esas2r_disc_dev_add(a, rq);
break;
case DCS_BLOCK_DEV_SCAN:
rslt = esas2r_disc_block_dev_scan(a, rq);
break;
case DCS_RAID_GRP_INFO:
rslt = esas2r_disc_raid_grp_info(a, rq);
break;
case DCS_PART_INFO:
rslt = esas2r_disc_part_info(a, rq);
break;
case DCS_PT_DEV_INFO:
rslt = esas2r_disc_passthru_dev_info(a, rq);
break;
case DCS_PT_DEV_ADDR:
rslt = esas2r_disc_passthru_dev_addr(a, rq);
break;
case DCS_DISC_DONE:
dc->flags &= ~(DCF_DEV_CHANGE | DCF_DEV_SCAN);
break;
default:
esas2r_bugon();
dc->state = DCS_DISC_DONE;
break;
}
if (rslt)
return true;
}
/* Discovery is done...for now. */
rq->interrupt_cx = NULL;
if (!(a->flags & AF_DISC_PENDING))
esas2r_disc_fix_curr_requests(a);
esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG);
/* Start the next discovery. */
return esas2r_disc_start_port(a);
}
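/*
 * Submit a discovery request to the firmware.  If a chip reset or flash
 * operation is pending, queue the request on the defer list instead.
 */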
static bool esas2r_disc_start_request(struct esas2r_adapter *a,
struct esas2r_request *rq)
{
unsigned long flags;
/* Set the timeout to a minimum value. */
if (rq->timeout < ESAS2R_DEFAULT_TMO)
rq->timeout = ESAS2R_DEFAULT_TMO;
/*
* Override the request type to distinguish discovery requests. If we
* end up deferring the request, esas2r_disc_local_start_request()
* will be called to restart it.
*/
rq->req_type = RT_DISC_REQ;
spin_lock_irqsave(&a->queue_lock, flags);
if (!(a->flags & (AF_CHPRST_PENDING | AF_FLASHING)))
esas2r_disc_local_start_request(a, rq);
else
list_add_tail(&rq->req_list, &a->defer_list);
spin_unlock_irqrestore(&a->queue_lock, flags);
return true;
}
void esas2r_disc_local_start_request(struct esas2r_adapter *a,
struct esas2r_request *rq)
{
esas2r_trace_enter();
list_add_tail(&rq->req_list, &a->active_list);
esas2r_start_vda_request(a, rq);
esas2r_trace_exit();
return;
}
static void esas2r_disc_abort(struct esas2r_adapter *a,
struct esas2r_request *rq)
{
struct esas2r_disc_context *dc =
(struct esas2r_disc_context *)rq->interrupt_cx;
esas2r_trace_enter();
/* abort the current discovery */
dc->state = DCS_DISC_DONE;
esas2r_trace_exit();
}
static bool esas2r_disc_block_dev_scan(struct esas2r_adapter *a,
struct esas2r_request *rq)
{
struct esas2r_disc_context *dc =
(struct esas2r_disc_context *)rq->interrupt_cx;
bool rslt;
esas2r_trace_enter();
esas2r_rq_init_request(rq, a);
esas2r_build_mgt_req(a,
rq,
VDAMGT_DEV_SCAN,
0,
0,
0,
NULL);
rq->comp_cb = esas2r_disc_block_dev_scan_cb;
rq->timeout = 30000;
rq->interrupt_cx = dc;
rslt = esas2r_disc_start_request(a, rq);
esas2r_trace_exit();
return rslt;
}
static void esas2r_disc_block_dev_scan_cb(struct esas2r_adapter *a,
struct esas2r_request *rq)
{
struct esas2r_disc_context *dc =
(struct esas2r_disc_context *)rq->interrupt_cx;
unsigned long flags;
esas2r_trace_enter();
spin_lock_irqsave(&a->mem_lock, flags);
if (rq->req_stat == RS_SUCCESS)
dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
dc->state = DCS_RAID_GRP_INFO;
dc->raid_grp_ix = 0;
esas2r_rq_destroy_request(rq, a);
/* continue discovery if it's interrupt driven */
if (!(dc->flags & DCF_POLLED))
esas2r_disc_continue(a, rq);
spin_unlock_irqrestore(&a->mem_lock, flags);
esas2r_trace_exit();
}
static bool esas2r_disc_raid_grp_info(struct esas2r_adapter *a,
struct esas2r_request *rq)
{
struct esas2r_disc_context *dc =
(struct esas2r_disc_context *)rq->interrupt_cx;
bool rslt;
struct atto_vda_grp_info *grpinfo;
esas2r_trace_enter();
esas2r_trace("raid_group_idx: %d", dc->raid_grp_ix);
if (dc->raid_grp_ix >= VDA_MAX_RAID_GROUPS) {
dc->state = DCS_DISC_DONE;
esas2r_trace_exit();
return false;
}
esas2r_rq_init_request(rq, a);
grpinfo = &rq->vda_rsp_data->mgt_data.data.grp_info;
memset(grpinfo, 0, sizeof(struct atto_vda_grp_info));
esas2r_build_mgt_req(a,
rq,
VDAMGT_GRP_INFO,
dc->scan_gen,
0,
sizeof(struct atto_vda_grp_info),
NULL);
grpinfo->grp_index = dc->raid_grp_ix;
rq->comp_cb = esas2r_disc_raid_grp_info_cb;
rq->interrupt_cx = dc;
rslt = esas2r_disc_start_request(a, rq);
esas2r_trace_exit();
return rslt;
}
static void esas2r_disc_raid_grp_info_cb(struct esas2r_adapter *a,
struct esas2r_request *rq)
{
struct esas2r_disc_context *dc =
(struct esas2r_disc_context *)rq->interrupt_cx;
unsigned long flags;
struct atto_vda_grp_info *grpinfo;
esas2r_trace_enter();
spin_lock_irqsave(&a->mem_lock, flags);
if (rq->req_stat == RS_SCAN_GEN) {
dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
dc->raid_grp_ix = 0;
goto done;
}
if (rq->req_stat == RS_SUCCESS) {
grpinfo = &rq->vda_rsp_data->mgt_data.data.grp_info;
if (grpinfo->status != VDA_GRP_STAT_ONLINE
&& grpinfo->status != VDA_GRP_STAT_DEGRADED) {
/* go to the next group. */
dc->raid_grp_ix++;
} else {
memcpy(&dc->raid_grp_name[0],
&grpinfo->grp_name[0],
sizeof(grpinfo->grp_name));
dc->interleave = le32_to_cpu(grpinfo->interleave);
dc->block_size = le32_to_cpu(grpinfo->block_size);
dc->state = DCS_PART_INFO;
dc->part_num = 0;
}
} else {
if (!(rq->req_stat == RS_GRP_INVALID)) {
esas2r_log(ESAS2R_LOG_WARN,
"A request for RAID group info failed - "
"returned with %x",
rq->req_stat);
}
dc->dev_ix = 0;
dc->state = DCS_PT_DEV_INFO;
}
done:
esas2r_rq_destroy_request(rq, a);
/* continue discovery if it's interrupt driven */
if (!(dc->flags & DCF_POLLED))
esas2r_disc_continue(a, rq);
spin_unlock_irqrestore(&a->mem_lock, flags);
esas2r_trace_exit();
}
static bool esas2r_disc_part_info(struct esas2r_adapter *a,
struct esas2r_request *rq)
{
struct esas2r_disc_context *dc =
(struct esas2r_disc_context *)rq->interrupt_cx;
bool rslt;
struct atto_vdapart_info *partinfo;
esas2r_trace_enter();
esas2r_trace("part_num: %d", dc->part_num);
if (dc->part_num >= VDA_MAX_PARTITIONS) {
dc->state = DCS_RAID_GRP_INFO;
dc->raid_grp_ix++;
esas2r_trace_exit();
return false;
}
esas2r_rq_init_request(rq, a);
partinfo = &rq->vda_rsp_data->mgt_data.data.part_info;
memset(partinfo, 0, sizeof(struct atto_vdapart_info));
esas2r_build_mgt_req(a,
rq,
VDAMGT_PART_INFO,
dc->scan_gen,
0,
sizeof(struct atto_vdapart_info),
NULL);
partinfo->part_no = dc->part_num;
memcpy(&partinfo->grp_name[0],
&dc->raid_grp_name[0],
sizeof(partinfo->grp_name));
rq->comp_cb = esas2r_disc_part_info_cb;
rq->interrupt_cx = dc;
rslt = esas2r_disc_start_request(a, rq);
esas2r_trace_exit();
return rslt;
}
static void esas2r_disc_part_info_cb(struct esas2r_adapter *a,
struct esas2r_request *rq)
{
struct esas2r_disc_context *dc =
(struct esas2r_disc_context *)rq->interrupt_cx;
unsigned long flags;
struct atto_vdapart_info *partinfo;
esas2r_trace_enter();
spin_lock_irqsave(&a->mem_lock, flags);
if (rq->req_stat == RS_SCAN_GEN) {
dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
dc->raid_grp_ix = 0;
dc->state = DCS_RAID_GRP_INFO;
} else if (rq->req_stat == RS_SUCCESS) {
partinfo = &rq->vda_rsp_data->mgt_data.data.part_info;
dc->part_num = partinfo->part_no;
dc->curr_virt_id = le16_to_cpu(partinfo->target_id);
esas2r_targ_db_add_raid(a, dc);
dc->part_num++;
} else {
if (!(rq->req_stat == RS_PART_LAST)) {
esas2r_log(ESAS2R_LOG_WARN,
"A request for RAID group partition info "
"failed - status:%d", rq->req_stat);
}
dc->state = DCS_RAID_GRP_INFO;
dc->raid_grp_ix++;
}
esas2r_rq_destroy_request(rq, a);
/* continue discovery if it's interrupt driven */
if (!(dc->flags & DCF_POLLED))
esas2r_disc_continue(a, rq);
spin_unlock_irqrestore(&a->mem_lock, flags);
esas2r_trace_exit();
}
static bool esas2r_disc_passthru_dev_info(struct esas2r_adapter *a,
struct esas2r_request *rq)
{
struct esas2r_disc_context *dc =
(struct esas2r_disc_context *)rq->interrupt_cx;
bool rslt;
struct atto_vda_devinfo *devinfo;
esas2r_trace_enter();
esas2r_trace("dev_ix: %d", dc->dev_ix);
esas2r_rq_init_request(rq, a);
devinfo = &rq->vda_rsp_data->mgt_data.data.dev_info;
memset(devinfo, 0, sizeof(struct atto_vda_devinfo));
esas2r_build_mgt_req(a,
rq,
VDAMGT_DEV_PT_INFO,
dc->scan_gen,
dc->dev_ix,
sizeof(struct atto_vda_devinfo),
NULL);
rq->comp_cb = esas2r_disc_passthru_dev_info_cb;
rq->interrupt_cx = dc;
rslt = esas2r_disc_start_request(a, rq);
esas2r_trace_exit();
return rslt;
}
static void esas2r_disc_passthru_dev_info_cb(struct esas2r_adapter *a,
struct esas2r_request *rq)
{
struct esas2r_disc_context *dc =
(struct esas2r_disc_context *)rq->interrupt_cx;
unsigned long flags;
struct atto_vda_devinfo *devinfo;
esas2r_trace_enter();
spin_lock_irqsave(&a->mem_lock, flags);
if (rq->req_stat == RS_SCAN_GEN) {
dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
dc->dev_ix = 0;
dc->state = DCS_PT_DEV_INFO;
} else if (rq->req_stat == RS_SUCCESS) {
devinfo = &rq->vda_rsp_data->mgt_data.data.dev_info;
dc->dev_ix = le16_to_cpu(rq->func_rsp.mgt_rsp.dev_index);
dc->curr_virt_id = le16_to_cpu(devinfo->target_id);
if (le16_to_cpu(devinfo->features) & VDADEVFEAT_PHYS_ID) {
dc->curr_phys_id =
le16_to_cpu(devinfo->phys_target_id);
dc->dev_addr_type = ATTO_GDA_AT_PORT;
dc->state = DCS_PT_DEV_ADDR;
esas2r_trace("curr_virt_id: %d", dc->curr_virt_id);
esas2r_trace("curr_phys_id: %d", dc->curr_phys_id);
} else {
dc->dev_ix++;
}
} else {
if (!(rq->req_stat == RS_DEV_INVALID)) {
esas2r_log(ESAS2R_LOG_WARN,
"A request for device information failed - "
"status:%d", rq->req_stat);
}
dc->state = DCS_DISC_DONE;
}
esas2r_rq_destroy_request(rq, a);
/* continue discovery if it's interrupt driven */
if (!(dc->flags & DCF_POLLED))
esas2r_disc_continue(a, rq);
spin_unlock_irqrestore(&a->mem_lock, flags);
esas2r_trace_exit();
}
static bool esas2r_disc_passthru_dev_addr(struct esas2r_adapter *a,
struct esas2r_request *rq)
{
struct esas2r_disc_context *dc =
(struct esas2r_disc_context *)rq->interrupt_cx;
bool rslt;
struct atto_ioctl *hi;
struct esas2r_sg_context sgc;
esas2r_trace_enter();
esas2r_rq_init_request(rq, a);
/* format the request. */
sgc.cur_offset = NULL;
sgc.get_phys_addr = (PGETPHYSADDR)esas2r_disc_get_phys_addr;
sgc.length = offsetof(struct atto_ioctl, data)
+ sizeof(struct atto_hba_get_device_address);
esas2r_sgc_init(&sgc, a, rq, rq->vrq->ioctl.sge);
esas2r_build_ioctl_req(a, rq, sgc.length, VDA_IOCTL_HBA);
if (!esas2r_build_sg_list(a, rq, &sgc)) {
esas2r_rq_destroy_request(rq, a);
esas2r_trace_exit();
return false;
}
rq->comp_cb = esas2r_disc_passthru_dev_addr_cb;
rq->interrupt_cx = dc;
/* format the IOCTL data. */
hi = (struct atto_ioctl *)a->disc_buffer;
memset(a->disc_buffer, 0, ESAS2R_DISC_BUF_LEN);
hi->version = ATTO_VER_GET_DEV_ADDR0;
hi->function = ATTO_FUNC_GET_DEV_ADDR;
hi->flags = HBAF_TUNNEL;
hi->data.get_dev_addr.target_id = le32_to_cpu(dc->curr_phys_id);
hi->data.get_dev_addr.addr_type = dc->dev_addr_type;
/* start it up. */
rslt = esas2r_disc_start_request(a, rq);
esas2r_trace_exit();
return rslt;
}
static void esas2r_disc_passthru_dev_addr_cb(struct esas2r_adapter *a,
struct esas2r_request *rq)
{
struct esas2r_disc_context *dc =
(struct esas2r_disc_context *)rq->interrupt_cx;
struct esas2r_target *t = NULL;
unsigned long flags;
struct atto_ioctl *hi;
u16 addrlen;
esas2r_trace_enter();
spin_lock_irqsave(&a->mem_lock, flags);
hi = (struct atto_ioctl *)a->disc_buffer;
if (rq->req_stat == RS_SUCCESS
&& hi->status == ATTO_STS_SUCCESS) {
addrlen = le16_to_cpu(hi->data.get_dev_addr.addr_len);
if (dc->dev_addr_type == ATTO_GDA_AT_PORT) {
if (addrlen == sizeof(u64))
memcpy(&dc->sas_addr,
&hi->data.get_dev_addr.address[0],
addrlen);
else
memset(&dc->sas_addr, 0, sizeof(dc->sas_addr));
/* Get the unique identifier. */
dc->dev_addr_type = ATTO_GDA_AT_UNIQUE;
goto next_dev_addr;
} else {
/* Add the pass through target. */
if (HIBYTE(addrlen) == 0) {
				t = esas2r_targ_db_add_pthru(
					a, dc,
					&hi->data.get_dev_addr.address[0],
					(u8)hi->data.get_dev_addr.addr_len);
if (t)
memcpy(&t->sas_addr, &dc->sas_addr,
sizeof(t->sas_addr));
} else {
/* getting the back end data failed */
esas2r_log(ESAS2R_LOG_WARN,
"an error occurred retrieving the "
"back end data (%s:%d)",
__func__,
__LINE__);
}
}
} else {
/* getting the back end data failed */
esas2r_log(ESAS2R_LOG_WARN,
"an error occurred retrieving the back end data - "
"rq->req_stat:%d hi->status:%d",
rq->req_stat, hi->status);
}
/* proceed to the next device. */
if (dc->flags & DCF_DEV_SCAN) {
dc->dev_ix++;
dc->state = DCS_PT_DEV_INFO;
} else if (dc->flags & DCF_DEV_CHANGE) {
dc->curr_targ++;
dc->state = DCS_DEV_ADD;
} else {
esas2r_bugon();
}
next_dev_addr:
esas2r_rq_destroy_request(rq, a);
/* continue discovery if it's interrupt driven */
if (!(dc->flags & DCF_POLLED))
esas2r_disc_continue(a, rq);
spin_unlock_irqrestore(&a->mem_lock, flags);
esas2r_trace_exit();
}
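/*
 * S/G address callback for discovery requests.  All discovery transfers
 * use the adapter's uncached discovery buffer, so hand back its physical
 * address and the full transfer length.
 */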
static u32 esas2r_disc_get_phys_addr(struct esas2r_sg_context *sgc, u64 *addr)
{
struct esas2r_adapter *a = sgc->adapter;
if (sgc->length > ESAS2R_DISC_BUF_LEN)
esas2r_bugon();
*addr = a->uncached_phys
+ (u64)((u8 *)a->disc_buffer - a->uncached);
return sgc->length;
}
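/*
 * Remove every target whose new state is TS_NOT_PRESENT from the target
 * database, then move on to processing arrivals.
 */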
static bool esas2r_disc_dev_remove(struct esas2r_adapter *a,
struct esas2r_request *rq)
{
struct esas2r_disc_context *dc =
(struct esas2r_disc_context *)rq->interrupt_cx;
struct esas2r_target *t;
struct esas2r_target *t2;
esas2r_trace_enter();
/* process removals. */
for (t = a->targetdb; t < a->targetdb_end; t++) {
if (t->new_target_state != TS_NOT_PRESENT)
continue;
t->new_target_state = TS_INVALID;
/* remove the right target! */
t2 =
esas2r_targ_db_find_by_virt_id(a,
esas2r_targ_get_id(t,
a));
if (t2)
esas2r_targ_db_remove(a, t2);
}
/* removals complete. process arrivals. */
dc->state = DCS_DEV_ADD;
dc->curr_targ = a->targetdb;
esas2r_trace_exit();
return false;
}
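/*
 * Process the arrival of the current target, using the logical unit
 * event stashed in the target to decide whether it is a RAID partition
 * or a pass through device, then advance to the next target when no
 * further lookup is needed.
 */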
static bool esas2r_disc_dev_add(struct esas2r_adapter *a,
struct esas2r_request *rq)
{
struct esas2r_disc_context *dc =
(struct esas2r_disc_context *)rq->interrupt_cx;
struct esas2r_target *t = dc->curr_targ;
if (t >= a->targetdb_end) {
/* done processing state changes. */
dc->state = DCS_DISC_DONE;
} else if (t->new_target_state == TS_PRESENT) {
struct atto_vda_ae_lu *luevt = &t->lu_event;
esas2r_trace_enter();
/* clear this now in case more events come in. */
t->new_target_state = TS_INVALID;
/* setup the discovery context for adding this device. */
dc->curr_virt_id = esas2r_targ_get_id(t, a);
if ((luevt->hdr.bylength >= offsetof(struct atto_vda_ae_lu, id)
+ sizeof(struct atto_vda_ae_lu_tgt_lun_raid))
&& !(luevt->dwevent & VDAAE_LU_PASSTHROUGH)) {
dc->block_size = luevt->id.tgtlun_raid.dwblock_size;
dc->interleave = luevt->id.tgtlun_raid.dwinterleave;
} else {
dc->block_size = 0;
dc->interleave = 0;
}
/* determine the device type being added. */
if (luevt->dwevent & VDAAE_LU_PASSTHROUGH) {
if (luevt->dwevent & VDAAE_LU_PHYS_ID) {
dc->state = DCS_PT_DEV_ADDR;
dc->dev_addr_type = ATTO_GDA_AT_PORT;
dc->curr_phys_id = luevt->wphys_target_id;
} else {
esas2r_log(ESAS2R_LOG_WARN,
"luevt->dwevent does not have the "
"VDAAE_LU_PHYS_ID bit set (%s:%d)",
__func__, __LINE__);
}
} else {
dc->raid_grp_name[0] = 0;
esas2r_targ_db_add_raid(a, dc);
}
esas2r_trace("curr_virt_id: %d", dc->curr_virt_id);
esas2r_trace("curr_phys_id: %d", dc->curr_phys_id);
esas2r_trace("dwevent: %d", luevt->dwevent);
esas2r_trace_exit();
}
if (dc->state == DCS_DEV_ADD) {
/* go to the next device. */
dc->curr_targ++;
}
return false;
}
/*
 * When discovery is done, find all requests on the defer queue and
* test if they need to be modified. If a target is no longer present
* then complete the request with RS_SEL. Otherwise, update the
* target_id since after a hibernate it can be a different value.
* VDA does not make passthrough target IDs persistent.
*/
static void esas2r_disc_fix_curr_requests(struct esas2r_adapter *a)
{
unsigned long flags;
struct esas2r_target *t;
struct esas2r_request *rq;
struct list_head *element;
/* update virt_targ_id in any outstanding esas2r_requests */
spin_lock_irqsave(&a->queue_lock, flags);
list_for_each(element, &a->defer_list) {
rq = list_entry(element, struct esas2r_request, req_list);
if (rq->vrq->scsi.function == VDA_FUNC_SCSI) {
t = a->targetdb + rq->target_id;
if (t->target_state == TS_PRESENT)
rq->vrq->scsi.target_id = le16_to_cpu(
t->virt_targ_id);
else
rq->req_stat = RS_SEL;
}
}
spin_unlock_irqrestore(&a->queue_lock, flags);
}
/*
* linux/drivers/scsi/esas2r/esas2r_flash.c
* For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
*
* Copyright (c) 2001-2013 ATTO Technology, Inc.
* (mailto:linuxdrivers@attotech.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* NO WARRANTY
* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
* solely responsible for determining the appropriateness of using and
* distributing the Program and assumes all risks associated with its
* exercise of rights under this Agreement, including but not limited to
* the risks and costs of program errors, damage to or loss of data,
* programs or equipment, and unavailability or interruption of operations.
*
* DISCLAIMER OF LIABILITY
* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
* USA.
*/
#include "esas2r.h"
/* local macro defs */
#define esas2r_nvramcalc_cksum(n) \
(esas2r_calc_byte_cksum((u8 *)(n), sizeof(struct esas2r_sas_nvram), \
SASNVR_CKSUM_SEED))
#define esas2r_nvramcalc_xor_cksum(n) \
(esas2r_calc_byte_xor_cksum((u8 *)(n), \
sizeof(struct esas2r_sas_nvram), 0))
#define ESAS2R_FS_DRVR_VER 2
static struct esas2r_sas_nvram default_sas_nvram = {
{ 'E', 'S', 'A', 'S' }, /* signature */
SASNVR_VERSION, /* version */
0, /* checksum */
31, /* max_lun_for_target */
SASNVR_PCILAT_MAX, /* pci_latency */
SASNVR1_BOOT_DRVR, /* options1 */
SASNVR2_HEARTBEAT | SASNVR2_SINGLE_BUS /* options2 */
| SASNVR2_SW_MUX_CTRL,
SASNVR_COAL_DIS, /* int_coalescing */
SASNVR_CMDTHR_NONE, /* cmd_throttle */
3, /* dev_wait_time */
1, /* dev_wait_count */
0, /* spin_up_delay */
0, /* ssp_align_rate */
{ 0x50, 0x01, 0x08, 0x60, /* sas_addr */
0x00, 0x00, 0x00, 0x00 },
{ SASNVR_SPEED_AUTO }, /* phy_speed */
{ SASNVR_MUX_DISABLED }, /* SAS multiplexing */
{ 0 }, /* phy_flags */
SASNVR_SORT_SAS_ADDR, /* sort_type */
3, /* dpm_reqcmd_lmt */
3, /* dpm_stndby_time */
0, /* dpm_active_time */
{ 0 }, /* phy_target_id */
SASNVR_VSMH_DISABLED, /* virt_ses_mode */
SASNVR_RWM_DEFAULT, /* read_write_mode */
0, /* link down timeout */
{ 0 } /* reserved */
};
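/*
 * Map FS IOCTL command codes to VDA flash sub-functions.  Index 0 is
 * 0xFF to mark it as unsupported.
 */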
static u8 cmd_to_fls_func[] = {
0xFF,
VDA_FLASH_READ,
VDA_FLASH_BEGINW,
VDA_FLASH_WRITE,
VDA_FLASH_COMMIT,
VDA_FLASH_CANCEL
};
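/*
 * XOR checksum: XOR leading bytes until the address is dword aligned,
 * then XOR a dword at a time, then the trailing bytes, and finally fold
 * the four bytes of the accumulator into a single byte.
 */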
static u8 esas2r_calc_byte_xor_cksum(u8 *addr, u32 len, u8 seed)
{
u32 cksum = seed;
u8 *p = (u8 *)&cksum;
while (len) {
if (((uintptr_t)addr & 3) == 0)
break;
cksum = cksum ^ *addr;
addr++;
len--;
}
while (len >= sizeof(u32)) {
cksum = cksum ^ *(u32 *)addr;
addr += 4;
len -= 4;
}
while (len--) {
cksum = cksum ^ *addr;
addr++;
}
return p[0] ^ p[1] ^ p[2] ^ p[3];
}
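/* Simple additive byte checksum over the buffer, seeded with 'seed'. */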
static u8 esas2r_calc_byte_cksum(void *addr, u32 len, u8 seed)
{
u8 *p = (u8 *)addr;
u8 cksum = seed;
while (len--)
cksum = cksum + p[len];
return cksum;
}
/* Interrupt callback to process FM API write requests. */
static void esas2r_fmapi_callback(struct esas2r_adapter *a,
struct esas2r_request *rq)
{
struct atto_vda_flash_req *vrq = &rq->vrq->flash;
struct esas2r_flash_context *fc =
(struct esas2r_flash_context *)rq->interrupt_cx;
if (rq->req_stat == RS_SUCCESS) {
/* Last request was successful. See what to do now. */
switch (vrq->sub_func) {
case VDA_FLASH_BEGINW:
if (fc->sgc.cur_offset == NULL)
goto commit;
vrq->sub_func = VDA_FLASH_WRITE;
rq->req_stat = RS_PENDING;
break;
case VDA_FLASH_WRITE:
commit:
vrq->sub_func = VDA_FLASH_COMMIT;
rq->req_stat = RS_PENDING;
rq->interrupt_cb = fc->interrupt_cb;
break;
default:
break;
}
}
if (rq->req_stat != RS_PENDING)
/*
		 * All done.  Call the real callback to complete the FM API
* request. We should only get here if a BEGINW or WRITE
* operation failed.
*/
(*fc->interrupt_cb)(a, rq);
}
/*
* Build a flash request based on the flash context. The request status
* is filled in on an error.
*/
static void build_flash_msg(struct esas2r_adapter *a,
struct esas2r_request *rq)
{
struct esas2r_flash_context *fc =
(struct esas2r_flash_context *)rq->interrupt_cx;
struct esas2r_sg_context *sgc = &fc->sgc;
u8 cksum = 0;
/* calculate the checksum */
if (fc->func == VDA_FLASH_BEGINW) {
if (sgc->cur_offset)
cksum = esas2r_calc_byte_xor_cksum(sgc->cur_offset,
sgc->length,
0);
rq->interrupt_cb = esas2r_fmapi_callback;
} else {
rq->interrupt_cb = fc->interrupt_cb;
}
esas2r_build_flash_req(a,
rq,
fc->func,
cksum,
fc->flsh_addr,
sgc->length);
esas2r_rq_free_sg_lists(rq, a);
/*
	 * Remember the length we asked for.  We have to keep track of
* the current amount done so we know how much to compare when
* doing the verification phase.
*/
fc->curr_len = fc->sgc.length;
if (sgc->cur_offset) {
/* setup the S/G context to build the S/G table */
esas2r_sgc_init(sgc, a, rq, &rq->vrq->flash.data.sge[0]);
if (!esas2r_build_sg_list(a, rq, sgc)) {
rq->req_stat = RS_BUSY;
return;
}
} else {
fc->sgc.length = 0;
}
/* update the flsh_addr to the next one to write to */
fc->flsh_addr += fc->curr_len;
}
/* determine the method to process the flash request */
static bool load_image(struct esas2r_adapter *a, struct esas2r_request *rq)
{
/*
	 * Assume we have more to do.  If we return with the status set to
* RS_PENDING, FM API tasks will continue.
*/
rq->req_stat = RS_PENDING;
if (a->flags & AF_DEGRADED_MODE)
		/* not supported for now */;
else
build_flash_msg(a, rq);
return rq->req_stat == RS_PENDING;
}
/* boot image fixer uppers called before downloading the image. */
static void fix_bios(struct esas2r_adapter *a, struct esas2r_flash_img *fi)
{
struct esas2r_component_header *ch = &fi->cmp_hdr[CH_IT_BIOS];
struct esas2r_pc_image *pi;
struct esas2r_boot_header *bh;
pi = (struct esas2r_pc_image *)((u8 *)fi + ch->image_offset);
bh =
(struct esas2r_boot_header *)((u8 *)pi +
le16_to_cpu(pi->header_offset));
bh->device_id = cpu_to_le16(a->pcid->device);
/* Recalculate the checksum in the PNP header if there */
if (pi->pnp_offset) {
u8 *pnp_header_bytes =
((u8 *)pi + le16_to_cpu(pi->pnp_offset));
/* Identifier - dword that starts at byte 10 */
*((u32 *)&pnp_header_bytes[10]) =
cpu_to_le32(MAKEDWORD(a->pcid->subsystem_vendor,
a->pcid->subsystem_device));
/* Checksum - byte 9 */
pnp_header_bytes[9] -= esas2r_calc_byte_cksum(pnp_header_bytes,
32, 0);
}
/* Recalculate the checksum needed by the PC */
pi->checksum = pi->checksum -
esas2r_calc_byte_cksum((u8 *)pi, ch->length, 0);
}
static void fix_efi(struct esas2r_adapter *a, struct esas2r_flash_img *fi)
{
struct esas2r_component_header *ch = &fi->cmp_hdr[CH_IT_EFI];
u32 len = ch->length;
u32 offset = ch->image_offset;
struct esas2r_efi_image *ei;
struct esas2r_boot_header *bh;
while (len) {
u32 thislen;
ei = (struct esas2r_efi_image *)((u8 *)fi + offset);
bh = (struct esas2r_boot_header *)((u8 *)ei +
le16_to_cpu(
ei->header_offset));
bh->device_id = cpu_to_le16(a->pcid->device);
thislen = (u32)le16_to_cpu(bh->image_length) * 512;
if (thislen > len)
break;
len -= thislen;
offset += thislen;
}
}
/* Complete a FM API request with the specified status. */
static bool complete_fmapi_req(struct esas2r_adapter *a,
struct esas2r_request *rq, u8 fi_stat)
{
struct esas2r_flash_context *fc =
(struct esas2r_flash_context *)rq->interrupt_cx;
struct esas2r_flash_img *fi = fc->fi;
fi->status = fi_stat;
fi->driver_error = rq->req_stat;
rq->interrupt_cb = NULL;
rq->req_stat = RS_SUCCESS;
if (fi_stat != FI_STAT_IMG_VER)
memset(fc->scratch, 0, FM_BUF_SZ);
esas2r_enable_heartbeat(a);
esas2r_lock_clear_flags(&a->flags, AF_FLASH_LOCK);
return false;
}
/* Process each phase of the flash download process. */
static void fw_download_proc(struct esas2r_adapter *a,
struct esas2r_request *rq)
{
struct esas2r_flash_context *fc =
(struct esas2r_flash_context *)rq->interrupt_cx;
struct esas2r_flash_img *fi = fc->fi;
struct esas2r_component_header *ch;
u32 len;
u8 *p, *q;
/* If the previous operation failed, just return. */
if (rq->req_stat != RS_SUCCESS)
goto error;
/*
* If an upload just completed and the compare length is non-zero,
	 * then we just read back part of the image we just wrote.  Verify the
* section and continue reading until the entire image is verified.
*/
if (fc->func == VDA_FLASH_READ
&& fc->cmp_len) {
ch = &fi->cmp_hdr[fc->comp_typ];
p = fc->scratch;
q = (u8 *)fi /* start of the whole gob */
+ ch->image_offset /* start of the current image */
+ ch->length /* end of the current image */
- fc->cmp_len; /* where we are now */
/*
* NOTE - curr_len is the exact count of bytes for the read
		 * even when the end is read and it's not a full buffer.
*/
for (len = fc->curr_len; len; len--)
if (*p++ != *q++)
goto error;
fc->cmp_len -= fc->curr_len; /* # left to compare */
/* Update fc and determine the length for the next upload */
if (fc->cmp_len > FM_BUF_SZ)
fc->sgc.length = FM_BUF_SZ;
else
fc->sgc.length = fc->cmp_len;
fc->sgc.cur_offset = fc->sgc_offset +
((u8 *)fc->scratch - (u8 *)fi);
}
/*
* This code uses a 'while' statement since the next component may
* have a length = zero. This can happen since some components are
* not required. At the end of this 'while' we set up the length
* for the next request and therefore sgc.length can be = 0.
*/
while (fc->sgc.length == 0) {
ch = &fi->cmp_hdr[fc->comp_typ];
switch (fc->task) {
case FMTSK_ERASE_BOOT:
/* the BIOS image is written next */
ch = &fi->cmp_hdr[CH_IT_BIOS];
if (ch->length == 0)
goto no_bios;
fc->task = FMTSK_WRTBIOS;
fc->func = VDA_FLASH_BEGINW;
fc->comp_typ = CH_IT_BIOS;
fc->flsh_addr = FLS_OFFSET_BOOT;
fc->sgc.length = ch->length;
fc->sgc.cur_offset = fc->sgc_offset +
ch->image_offset;
break;
case FMTSK_WRTBIOS:
/*
* The BIOS image has been written - read it and
* verify it
*/
fc->task = FMTSK_READBIOS;
fc->func = VDA_FLASH_READ;
fc->flsh_addr = FLS_OFFSET_BOOT;
fc->cmp_len = ch->length;
fc->sgc.length = FM_BUF_SZ;
fc->sgc.cur_offset = fc->sgc_offset
+ ((u8 *)fc->scratch -
(u8 *)fi);
break;
case FMTSK_READBIOS:
no_bios:
/*
* Mark the component header status for the image
* completed
*/
ch->status = CH_STAT_SUCCESS;
/* The MAC image is written next */
ch = &fi->cmp_hdr[CH_IT_MAC];
if (ch->length == 0)
goto no_mac;
fc->task = FMTSK_WRTMAC;
fc->func = VDA_FLASH_BEGINW;
fc->comp_typ = CH_IT_MAC;
fc->flsh_addr = FLS_OFFSET_BOOT
+ fi->cmp_hdr[CH_IT_BIOS].length;
fc->sgc.length = ch->length;
fc->sgc.cur_offset = fc->sgc_offset +
ch->image_offset;
break;
case FMTSK_WRTMAC:
/* The MAC image has been written - read and verify */
fc->task = FMTSK_READMAC;
fc->func = VDA_FLASH_READ;
fc->flsh_addr -= ch->length;
fc->cmp_len = ch->length;
fc->sgc.length = FM_BUF_SZ;
fc->sgc.cur_offset = fc->sgc_offset
+ ((u8 *)fc->scratch -
(u8 *)fi);
break;
case FMTSK_READMAC:
no_mac:
/*
* Mark the component header status for the image
* completed
*/
ch->status = CH_STAT_SUCCESS;
/* The EFI image is written next */
ch = &fi->cmp_hdr[CH_IT_EFI];
if (ch->length == 0)
goto no_efi;
fc->task = FMTSK_WRTEFI;
fc->func = VDA_FLASH_BEGINW;
fc->comp_typ = CH_IT_EFI;
fc->flsh_addr = FLS_OFFSET_BOOT
+ fi->cmp_hdr[CH_IT_BIOS].length
+ fi->cmp_hdr[CH_IT_MAC].length;
fc->sgc.length = ch->length;
fc->sgc.cur_offset = fc->sgc_offset +
ch->image_offset;
break;
case FMTSK_WRTEFI:
/* The EFI image has been written - read and verify */
fc->task = FMTSK_READEFI;
fc->func = VDA_FLASH_READ;
fc->flsh_addr -= ch->length;
fc->cmp_len = ch->length;
fc->sgc.length = FM_BUF_SZ;
fc->sgc.cur_offset = fc->sgc_offset
+ ((u8 *)fc->scratch -
(u8 *)fi);
break;
case FMTSK_READEFI:
no_efi:
/*
* Mark the component header status for the image
* completed
*/
ch->status = CH_STAT_SUCCESS;
/* The CFG image is written next */
ch = &fi->cmp_hdr[CH_IT_CFG];
if (ch->length == 0)
goto no_cfg;
fc->task = FMTSK_WRTCFG;
fc->func = VDA_FLASH_BEGINW;
fc->comp_typ = CH_IT_CFG;
fc->flsh_addr = FLS_OFFSET_CPYR - ch->length;
fc->sgc.length = ch->length;
fc->sgc.cur_offset = fc->sgc_offset +
ch->image_offset;
break;
case FMTSK_WRTCFG:
/* The CFG image has been written - read and verify */
fc->task = FMTSK_READCFG;
fc->func = VDA_FLASH_READ;
fc->flsh_addr = FLS_OFFSET_CPYR - ch->length;
fc->cmp_len = ch->length;
fc->sgc.length = FM_BUF_SZ;
fc->sgc.cur_offset = fc->sgc_offset
+ ((u8 *)fc->scratch -
(u8 *)fi);
break;
case FMTSK_READCFG:
no_cfg:
/*
* Mark the component header status for the image
* completed
*/
ch->status = CH_STAT_SUCCESS;
/*
* The download is complete. If in degraded mode,
* attempt a chip reset.
*/
if (a->flags & AF_DEGRADED_MODE)
esas2r_local_reset_adapter(a);
a->flash_ver = fi->cmp_hdr[CH_IT_BIOS].version;
esas2r_print_flash_rev(a);
/* Update the type of boot image on the card */
memcpy(a->image_type, fi->rel_version,
sizeof(fi->rel_version));
complete_fmapi_req(a, rq, FI_STAT_SUCCESS);
return;
}
/* If verifying, don't try reading more than what's there */
if (fc->func == VDA_FLASH_READ
&& fc->sgc.length > fc->cmp_len)
fc->sgc.length = fc->cmp_len;
}
/* Build the request to perform the next action */
if (!load_image(a, rq)) {
error:
if (fc->comp_typ < fi->num_comps) {
ch = &fi->cmp_hdr[fc->comp_typ];
ch->status = CH_STAT_FAILED;
}
complete_fmapi_req(a, rq, FI_STAT_FAILED);
}
}
/* Determine the flash image adap_typ for this adapter */
static u8 get_fi_adap_type(struct esas2r_adapter *a)
{
u8 type;
/* use the device ID to get the correct adap_typ for this HBA */
switch (a->pcid->device) {
case ATTO_DID_INTEL_IOP348:
type = FI_AT_SUN_LAKE;
break;
case ATTO_DID_MV_88RC9580:
case ATTO_DID_MV_88RC9580TS:
case ATTO_DID_MV_88RC9580TSE:
case ATTO_DID_MV_88RC9580TL:
type = FI_AT_MV_9580;
break;
default:
type = FI_AT_UNKNWN;
break;
}
return type;
}
/*
 * Return the combined size of the config + copyright + flash_ver images,
 * or 0 on failure.
 */
static u32 chk_cfg(u8 *cfg, u32 length, u32 *flash_ver)
{
u16 *pw = (u16 *)cfg - 1;
u32 sz = 0;
u32 len = length;
if (len == 0)
len = FM_BUF_SZ;
if (flash_ver)
*flash_ver = 0;
while (true) {
u16 type;
u16 size;
type = le16_to_cpu(*pw--);
size = le16_to_cpu(*pw--);
if (type != FBT_CPYR
&& type != FBT_SETUP
&& type != FBT_FLASH_VER)
break;
if (type == FBT_FLASH_VER
&& flash_ver)
*flash_ver = le32_to_cpu(*(u32 *)(pw - 1));
sz += size + (2 * sizeof(u16));
pw -= size / sizeof(u16);
if (sz > len - (2 * sizeof(u16)))
break;
}
/* See if we are comparing the size to the specified length */
if (length && sz != length)
return 0;
return sz;
}
/* Verify that the boot image is valid */
static u8 chk_boot(u8 *boot_img, u32 length)
{
struct esas2r_boot_image *bi = (struct esas2r_boot_image *)boot_img;
u16 hdroffset = le16_to_cpu(bi->header_offset);
struct esas2r_boot_header *bh;
	if (bi->signature != cpu_to_le16(0xaa55)
|| (long)hdroffset >
(long)(65536L - sizeof(struct esas2r_boot_header))
|| (hdroffset & 3)
|| (hdroffset < sizeof(struct esas2r_boot_image))
|| ((u32)hdroffset + sizeof(struct esas2r_boot_header) > length))
return 0xff;
bh = (struct esas2r_boot_header *)((char *)bi + hdroffset);
if (bh->signature[0] != 'P'
|| bh->signature[1] != 'C'
|| bh->signature[2] != 'I'
|| bh->signature[3] != 'R'
|| le16_to_cpu(bh->struct_length) <
(u16)sizeof(struct esas2r_boot_header)
|| bh->class_code[2] != 0x01
|| bh->class_code[1] != 0x04
|| bh->class_code[0] != 0x00
|| (bh->code_type != CODE_TYPE_PC
&& bh->code_type != CODE_TYPE_OPEN
&& bh->code_type != CODE_TYPE_EFI))
return 0xff;
return bh->code_type;
}
/* Sum all the 16-bit words of the image, excluding the header. */
static u16 calc_fi_checksum(struct esas2r_flash_context *fc)
{
struct esas2r_flash_img *fi = fc->fi;
u16 cksum;
u32 len;
u16 *pw;
for (len = (fi->length - fc->fi_hdr_len) / 2,
pw = (u16 *)((u8 *)fi + fc->fi_hdr_len),
cksum = 0;
len;
len--, pw++)
cksum = cksum + le16_to_cpu(*pw);
return cksum;
}
/*
 * Verify the flash image structure.  The following verifications are
 * performed:
 *  1)  verify the fi_version is correct.
 *  2)  verify the checksum of the entire image.
 *  3)  validate the adap_typ, action and length fields.
 *  4)  validate each component header; check the img_type and
 *      length fields.
 *  5)  validate each component image; validate signatures and
 *      local checksums.
 */
static bool verify_fi(struct esas2r_adapter *a,
struct esas2r_flash_context *fc)
{
struct esas2r_flash_img *fi = fc->fi;
u8 type;
bool imgerr;
u16 i;
u32 len;
struct esas2r_component_header *ch;
	/* Verify the length - length must be even since we do a word checksum */
len = fi->length;
if ((len & 1)
|| len < fc->fi_hdr_len) {
fi->status = FI_STAT_LENGTH;
return false;
}
/* Get adapter type and verify type in flash image */
type = get_fi_adap_type(a);
if ((type == FI_AT_UNKNWN) || (fi->adap_typ != type)) {
fi->status = FI_STAT_ADAPTYP;
return false;
}
/*
	 * Loop through each component and verify the img_type and length
	 * fields.  Keep a running count of the sizes so we can verify the
	 * total size against the sum of the component sizes.
*/
imgerr = false;
for (i = 0, len = 0, ch = fi->cmp_hdr;
i < fi->num_comps;
i++, ch++) {
bool cmperr = false;
/*
* Verify that the component header has the same index as the
* image type. The headers must be ordered correctly
*/
if (i != ch->img_type) {
imgerr = true;
ch->status = CH_STAT_INVALID;
continue;
}
switch (ch->img_type) {
case CH_IT_BIOS:
type = CODE_TYPE_PC;
break;
case CH_IT_MAC:
type = CODE_TYPE_OPEN;
break;
case CH_IT_EFI:
type = CODE_TYPE_EFI;
break;
}
switch (ch->img_type) {
case CH_IT_FW:
case CH_IT_NVR:
break;
case CH_IT_BIOS:
case CH_IT_MAC:
case CH_IT_EFI:
if (ch->length & 0x1ff)
cmperr = true;
/* Test if component image is present */
if (ch->length == 0)
break;
/* Image is present - verify the image */
if (chk_boot((u8 *)fi + ch->image_offset, ch->length)
!= type)
cmperr = true;
break;
case CH_IT_CFG:
/* Test if component image is present */
if (ch->length == 0) {
cmperr = true;
break;
}
/* Image is present - verify the image */
if (!chk_cfg((u8 *)fi + ch->image_offset + ch->length,
ch->length, NULL))
cmperr = true;
break;
default:
fi->status = FI_STAT_UNKNOWN;
return false;
}
if (cmperr) {
imgerr = true;
ch->status = CH_STAT_INVALID;
} else {
ch->status = CH_STAT_PENDING;
len += ch->length;
}
}
if (imgerr) {
fi->status = FI_STAT_MISSING;
return false;
}
/* Compare fi->length to the sum of ch->length fields */
if (len != fi->length - fc->fi_hdr_len) {
fi->status = FI_STAT_LENGTH;
return false;
}
/* Compute the checksum - it should come out zero */
if (fi->checksum != calc_fi_checksum(fc)) {
fi->status = FI_STAT_CHKSUM;
return false;
}
return true;
}
/* Fill in the FS IOCTL response data from a completed request. */
static void esas2r_complete_fs_ioctl(struct esas2r_adapter *a,
struct esas2r_request *rq)
{
struct esas2r_ioctl_fs *fs =
(struct esas2r_ioctl_fs *)rq->interrupt_cx;
if (rq->vrq->flash.sub_func == VDA_FLASH_COMMIT)
esas2r_enable_heartbeat(a);
fs->driver_error = rq->req_stat;
if (fs->driver_error == RS_SUCCESS)
fs->status = ATTO_STS_SUCCESS;
else
fs->status = ATTO_STS_FAILED;
}
/* Prepare an FS IOCTL request to be sent to the firmware. */
bool esas2r_process_fs_ioctl(struct esas2r_adapter *a,
struct esas2r_ioctl_fs *fs,
struct esas2r_request *rq,
struct esas2r_sg_context *sgc)
{
u8 cmdcnt = (u8)ARRAY_SIZE(cmd_to_fls_func);
struct esas2r_ioctlfs_command *fsc = &fs->command;
u8 func = 0;
u32 datalen;
fs->status = ATTO_STS_FAILED;
fs->driver_error = RS_PENDING;
if (fs->version > ESAS2R_FS_VER) {
fs->status = ATTO_STS_INV_VERSION;
return false;
}
	/* Validate the command index before using it to look up the function. */
	if (fsc->command >= cmdcnt) {
		fs->status = ATTO_STS_INV_FUNC;
		return false;
	}
	func = cmd_to_fls_func[fsc->command];
	if (func == 0xFF) {
		fs->status = ATTO_STS_INV_FUNC;
		return false;
	}
if (fsc->command != ESAS2R_FS_CMD_CANCEL) {
if ((a->pcid->device != ATTO_DID_MV_88RC9580
|| fs->adap_type != ESAS2R_FS_AT_ESASRAID2)
&& (a->pcid->device != ATTO_DID_MV_88RC9580TS
|| fs->adap_type != ESAS2R_FS_AT_TSSASRAID2)
&& (a->pcid->device != ATTO_DID_MV_88RC9580TSE
|| fs->adap_type != ESAS2R_FS_AT_TSSASRAID2E)
&& (a->pcid->device != ATTO_DID_MV_88RC9580TL
|| fs->adap_type != ESAS2R_FS_AT_TLSASHBA)) {
fs->status = ATTO_STS_INV_ADAPTER;
return false;
}
if (fs->driver_ver > ESAS2R_FS_DRVR_VER) {
fs->status = ATTO_STS_INV_DRVR_VER;
return false;
}
}
if (a->flags & AF_DEGRADED_MODE) {
fs->status = ATTO_STS_DEGRADED;
return false;
}
rq->interrupt_cb = esas2r_complete_fs_ioctl;
rq->interrupt_cx = fs;
datalen = le32_to_cpu(fsc->length);
esas2r_build_flash_req(a,
rq,
func,
fsc->checksum,
le32_to_cpu(fsc->flash_addr),
datalen);
if (func == VDA_FLASH_WRITE
|| func == VDA_FLASH_READ) {
if (datalen == 0) {
fs->status = ATTO_STS_INV_FUNC;
return false;
}
esas2r_sgc_init(sgc, a, rq, rq->vrq->flash.data.sge);
sgc->length = datalen;
if (!esas2r_build_sg_list(a, rq, sgc)) {
fs->status = ATTO_STS_OUT_OF_RSRC;
return false;
}
}
if (func == VDA_FLASH_COMMIT)
esas2r_disable_heartbeat(a);
esas2r_start_request(a, rq);
return true;
}
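/*
 * Acquire or release flash access by handshaking with the firmware
 * through the doorbell registers: post the function code and poll for
 * the matching doorbell response, giving up after the timeout.
 */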
static bool esas2r_flash_access(struct esas2r_adapter *a, u32 function)
{
u32 starttime;
u32 timeout;
u32 intstat;
u32 doorbell;
/* Disable chip interrupts awhile */
if (function == DRBL_FLASH_REQ)
esas2r_disable_chip_interrupts(a);
/* Issue the request to the firmware */
esas2r_write_register_dword(a, MU_DOORBELL_IN, function);
/* Now wait for the firmware to process it */
starttime = jiffies_to_msecs(jiffies);
timeout = a->flags &
(AF_CHPRST_PENDING | AF_DISC_PENDING) ? 40000 : 5000;
while (true) {
intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);
if (intstat & MU_INTSTAT_DRBL) {
/* Got a doorbell interrupt. Check for the function */
doorbell =
esas2r_read_register_dword(a, MU_DOORBELL_OUT);
esas2r_write_register_dword(a, MU_DOORBELL_OUT,
doorbell);
if (doorbell & function)
break;
}
schedule_timeout_interruptible(msecs_to_jiffies(100));
if ((jiffies_to_msecs(jiffies) - starttime) > timeout) {
/*
			 * Timeout.  If we were requesting flash access,
* indicate we are done so the firmware knows we gave
* up. If this was a REQ, we also need to re-enable
* chip interrupts.
*/
if (function == DRBL_FLASH_REQ) {
esas2r_hdebug("flash access timeout");
esas2r_write_register_dword(a, MU_DOORBELL_IN,
DRBL_FLASH_DONE);
esas2r_enable_chip_interrupts(a);
} else {
esas2r_hdebug("flash release timeout");
}
return false;
}
}
/* if we're done, re-enable chip interrupts */
if (function == DRBL_FLASH_DONE)
esas2r_enable_chip_interrupts(a);
return true;
}
#define WINDOW_SIZE ((signed int)MW_DATA_WINDOW_SIZE)
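/*
 * Read a block of flash through the memory data window, remapping the
 * window as the read crosses window boundaries.
 */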
bool esas2r_read_flash_block(struct esas2r_adapter *a,
void *to,
u32 from,
u32 size)
{
u8 *end = (u8 *)to;
/* Try to acquire access to the flash */
if (!esas2r_flash_access(a, DRBL_FLASH_REQ))
return false;
while (size) {
u32 len;
u32 offset;
u32 iatvr;
if (a->flags2 & AF2_SERIAL_FLASH)
iatvr = MW_DATA_ADDR_SER_FLASH + (from & -WINDOW_SIZE);
else
iatvr = MW_DATA_ADDR_PAR_FLASH + (from & -WINDOW_SIZE);
esas2r_map_data_window(a, iatvr);
offset = from & (WINDOW_SIZE - 1);
len = size;
if (len > WINDOW_SIZE - offset)
len = WINDOW_SIZE - offset;
from += len;
size -= len;
while (len--) {
*end++ = esas2r_read_data_byte(a, offset);
offset++;
}
}
/* Release flash access */
esas2r_flash_access(a, DRBL_FLASH_DONE);
return true;
}
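/*
 * Read the flash version/date stamp by scanning backward from the
 * copyright block for an FBT_FLASH_VER entry.
 */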
bool esas2r_read_flash_rev(struct esas2r_adapter *a)
{
u8 bytes[256];
u16 *pw;
u16 *pwstart;
u16 type;
u16 size;
u32 sz;
sz = sizeof(bytes);
pw = (u16 *)(bytes + sz);
pwstart = (u16 *)bytes + 2;
if (!esas2r_read_flash_block(a, bytes, FLS_OFFSET_CPYR - sz, sz))
goto invalid_rev;
while (pw >= pwstart) {
pw--;
type = le16_to_cpu(*pw);
pw--;
size = le16_to_cpu(*pw);
pw -= size / 2;
if (type == FBT_CPYR
|| type == FBT_SETUP
|| pw < pwstart)
continue;
if (type == FBT_FLASH_VER)
a->flash_ver = le32_to_cpu(*(u32 *)pw);
break;
}
invalid_rev:
return esas2r_print_flash_rev(a);
}
bool esas2r_print_flash_rev(struct esas2r_adapter *a)
{
u16 year = LOWORD(a->flash_ver);
u8 day = LOBYTE(HIWORD(a->flash_ver));
u8 month = HIBYTE(HIWORD(a->flash_ver));
if (day == 0
|| month == 0
|| day > 31
|| month > 12
|| year < 2006
|| year > 9999) {
strcpy(a->flash_rev, "not found");
a->flash_ver = 0;
return false;
}
sprintf(a->flash_rev, "%02d/%02d/%04d", month, day, year);
esas2r_hdebug("flash version: %s", a->flash_rev);
return true;
}
/*
 * Find the type of boot image currently in the flash.  The chip only has
 * a 64 KB PCI-e expansion ROM, so only one image can be flashed at a time.
 */
bool esas2r_read_image_type(struct esas2r_adapter *a)
{
u8 bytes[256];
struct esas2r_boot_image *bi;
struct esas2r_boot_header *bh;
u32 sz;
u32 len;
u32 offset;
/* Start at the base of the boot images and look for a valid image */
sz = sizeof(bytes);
len = FLS_LENGTH_BOOT;
offset = 0;
while (true) {
if (!esas2r_read_flash_block(a, bytes, FLS_OFFSET_BOOT +
offset,
sz))
goto invalid_rev;
bi = (struct esas2r_boot_image *)bytes;
bh = (struct esas2r_boot_header *)((u8 *)bi +
le16_to_cpu(
bi->header_offset));
if (bi->signature != cpu_to_le16(0xAA55))
goto invalid_rev;
if (bh->code_type == CODE_TYPE_PC) {
strcpy(a->image_type, "BIOS");
return true;
} else if (bh->code_type == CODE_TYPE_EFI) {
struct esas2r_efi_image *ei;
/*
* So we have an EFI image. There are several types
* so see which architecture we have.
*/
ei = (struct esas2r_efi_image *)bytes;
switch (le16_to_cpu(ei->machine_type)) {
case EFI_MACHINE_IA32:
strcpy(a->image_type, "EFI 32-bit");
return true;
case EFI_MACHINE_IA64:
strcpy(a->image_type, "EFI itanium");
return true;
case EFI_MACHINE_X64:
strcpy(a->image_type, "EFI 64-bit");
return true;
case EFI_MACHINE_EBC:
strcpy(a->image_type, "EFI EBC");
return true;
default:
goto invalid_rev;
}
} else {
u32 thislen;
/* jump to the next image */
thislen = (u32)le16_to_cpu(bh->image_length) * 512;
if (thislen == 0
|| thislen + offset > len
|| bh->indicator == INDICATOR_LAST)
break;
offset += thislen;
}
}
invalid_rev:
strcpy(a->image_type, "no boot images");
return false;
}
/*
* Read and validate current NVRAM parameters by accessing
 * physical NVRAM directly.  If the currently stored parameters are
* invalid, use the defaults.
*/
bool esas2r_nvram_read_direct(struct esas2r_adapter *a)
{
bool result;
if (down_interruptible(&a->nvram_semaphore))
return false;
if (!esas2r_read_flash_block(a, a->nvram, FLS_OFFSET_NVR,
sizeof(struct esas2r_sas_nvram))) {
esas2r_hdebug("NVRAM read failed, using defaults");
return false;
}
result = esas2r_nvram_validate(a);
up(&a->nvram_semaphore);
return result;
}
/* Interrupt callback to process NVRAM completions. */
static void esas2r_nvram_callback(struct esas2r_adapter *a,
struct esas2r_request *rq)
{
struct atto_vda_flash_req *vrq = &rq->vrq->flash;
if (rq->req_stat == RS_SUCCESS) {
	/* Last request was successful.  See what to do now. */
switch (vrq->sub_func) {
case VDA_FLASH_BEGINW:
vrq->sub_func = VDA_FLASH_WRITE;
rq->req_stat = RS_PENDING;
break;
case VDA_FLASH_WRITE:
vrq->sub_func = VDA_FLASH_COMMIT;
rq->req_stat = RS_PENDING;
break;
case VDA_FLASH_READ:
esas2r_nvram_validate(a);
break;
case VDA_FLASH_COMMIT:
default:
break;
}
}
if (rq->req_stat != RS_PENDING) {
/* update the NVRAM state */
if (rq->req_stat == RS_SUCCESS)
esas2r_lock_set_flags(&a->flags, AF_NVR_VALID);
else
esas2r_lock_clear_flags(&a->flags, AF_NVR_VALID);
esas2r_enable_heartbeat(a);
up(&a->nvram_semaphore);
}
}
/*
* Write the contents of nvram to the adapter's physical NVRAM.
* The cached copy of the NVRAM is also updated.
*/
bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq,
struct esas2r_sas_nvram *nvram)
{
struct esas2r_sas_nvram *n = nvram;
u8 sas_address_bytes[8];
u32 *sas_address_dwords = (u32 *)&sas_address_bytes[0];
struct atto_vda_flash_req *vrq = &rq->vrq->flash;
if (a->flags & AF_DEGRADED_MODE)
return false;
if (down_interruptible(&a->nvram_semaphore))
return false;
if (n == NULL)
n = a->nvram;
/* check the validity of the settings */
if (n->version > SASNVR_VERSION) {
up(&a->nvram_semaphore);
return false;
}
memcpy(&sas_address_bytes[0], n->sas_addr, 8);
if (sas_address_bytes[0] != 0x50
|| sas_address_bytes[1] != 0x01
|| sas_address_bytes[2] != 0x08
|| (sas_address_bytes[3] & 0xF0) != 0x60
|| ((sas_address_bytes[3] & 0x0F) | sas_address_dwords[1]) == 0) {
up(&a->nvram_semaphore);
return false;
}
if (n->spin_up_delay > SASNVR_SPINUP_MAX)
n->spin_up_delay = SASNVR_SPINUP_MAX;
n->version = SASNVR_VERSION;
n->checksum = n->checksum - esas2r_nvramcalc_cksum(n);
memcpy(a->nvram, n, sizeof(struct esas2r_sas_nvram));
/* write the NVRAM */
n = a->nvram;
esas2r_disable_heartbeat(a);
esas2r_build_flash_req(a,
rq,
VDA_FLASH_BEGINW,
esas2r_nvramcalc_xor_cksum(n),
FLS_OFFSET_NVR,
sizeof(struct esas2r_sas_nvram));
if (a->flags & AF_LEGACY_SGE_MODE) {
vrq->data.sge[0].length =
cpu_to_le32(SGE_LAST |
sizeof(struct esas2r_sas_nvram));
vrq->data.sge[0].address = cpu_to_le64(
a->uncached_phys + (u64)((u8 *)n - a->uncached));
} else {
vrq->data.prde[0].ctl_len =
cpu_to_le32(sizeof(struct esas2r_sas_nvram));
vrq->data.prde[0].address = cpu_to_le64(
a->uncached_phys
+ (u64)((u8 *)n - a->uncached));
}
rq->interrupt_cb = esas2r_nvram_callback;
esas2r_start_request(a, rq);
return true;
}
/* Validate the cached NVRAM.  If the NVRAM is invalid, load the defaults. */
bool esas2r_nvram_validate(struct esas2r_adapter *a)
{
struct esas2r_sas_nvram *n = a->nvram;
bool rslt = false;
if (n->signature[0] != 'E'
|| n->signature[1] != 'S'
|| n->signature[2] != 'A'
|| n->signature[3] != 'S') {
esas2r_hdebug("invalid NVRAM signature");
} else if (esas2r_nvramcalc_cksum(n)) {
esas2r_hdebug("invalid NVRAM checksum");
} else if (n->version > SASNVR_VERSION) {
esas2r_hdebug("invalid NVRAM version");
} else {
esas2r_lock_set_flags(&a->flags, AF_NVR_VALID);
rslt = true;
}
if (rslt == false) {
esas2r_hdebug("using defaults");
esas2r_nvram_set_defaults(a);
}
return rslt;
}
/*
 * Set the cached NVRAM to defaults.  Note that this function sets the default
* NVRAM when it has been determined that the physical NVRAM is invalid.
* In this case, the SAS address is fabricated.
*/
void esas2r_nvram_set_defaults(struct esas2r_adapter *a)
{
struct esas2r_sas_nvram *n = a->nvram;
u32 time = jiffies_to_msecs(jiffies);
esas2r_lock_clear_flags(&a->flags, AF_NVR_VALID);
memcpy(n, &default_sas_nvram, sizeof(struct esas2r_sas_nvram));
n->sas_addr[3] |= 0x0F;
n->sas_addr[4] = HIBYTE(LOWORD(time));
n->sas_addr[5] = LOBYTE(LOWORD(time));
n->sas_addr[6] = a->pcid->bus->number;
n->sas_addr[7] = a->pcid->devfn;
}
void esas2r_nvram_get_defaults(struct esas2r_adapter *a,
struct esas2r_sas_nvram *nvram)
{
u8 sas_addr[8];
/*
	 * In case we are copying the defaults into the adapter, copy the SAS
* address out first.
*/
memcpy(&sas_addr[0], a->nvram->sas_addr, 8);
memcpy(nvram, &default_sas_nvram, sizeof(struct esas2r_sas_nvram));
memcpy(&nvram->sas_addr[0], &sas_addr[0], 8);
}
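/*
 * Entry point for FM API (flash management) requests.  Take the flash
 * lock, set up the flash context, then dispatch the requested action:
 * a component download kicks off the firmware download state machine,
 * and an upload-sizes query is answered directly.
 */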
bool esas2r_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi,
struct esas2r_request *rq, struct esas2r_sg_context *sgc)
{
struct esas2r_flash_context *fc = &a->flash_context;
u8 j;
struct esas2r_component_header *ch;
if (esas2r_lock_set_flags(&a->flags, AF_FLASH_LOCK) & AF_FLASH_LOCK) {
/* flag was already set */
fi->status = FI_STAT_BUSY;
return false;
}
memcpy(&fc->sgc, sgc, sizeof(struct esas2r_sg_context));
sgc = &fc->sgc;
fc->fi = fi;
fc->sgc_offset = sgc->cur_offset;
rq->req_stat = RS_SUCCESS;
rq->interrupt_cx = fc;
switch (fi->fi_version) {
case FI_VERSION_1:
fc->scratch = ((struct esas2r_flash_img *)fi)->scratch_buf;
fc->num_comps = FI_NUM_COMPS_V1;
fc->fi_hdr_len = sizeof(struct esas2r_flash_img);
break;
default:
return complete_fmapi_req(a, rq, FI_STAT_IMG_VER);
}
if (a->flags & AF_DEGRADED_MODE)
return complete_fmapi_req(a, rq, FI_STAT_DEGRADED);
switch (fi->action) {
case FI_ACT_DOWN: /* Download the components */
/* Verify the format of the flash image */
if (!verify_fi(a, fc))
return complete_fmapi_req(a, rq, fi->status);
/* Adjust the BIOS fields that are dependent on the HBA */
ch = &fi->cmp_hdr[CH_IT_BIOS];
if (ch->length)
fix_bios(a, fi);
/* Adjust the EFI fields that are dependent on the HBA */
ch = &fi->cmp_hdr[CH_IT_EFI];
if (ch->length)
fix_efi(a, fi);
/*
* Since the image was just modified, compute the checksum on
* the modified image. First update the CRC for the composite
* expansion ROM image.
*/
fi->checksum = calc_fi_checksum(fc);
/* Disable the heartbeat */
esas2r_disable_heartbeat(a);
/* Now start up the download sequence */
fc->task = FMTSK_ERASE_BOOT;
fc->func = VDA_FLASH_BEGINW;
fc->comp_typ = CH_IT_CFG;
fc->flsh_addr = FLS_OFFSET_BOOT;
fc->sgc.length = FLS_LENGTH_BOOT;
fc->sgc.cur_offset = NULL;
/* Setup the callback address */
fc->interrupt_cb = fw_download_proc;
break;
case FI_ACT_UPSZ: /* Get upload sizes */
fi->adap_typ = get_fi_adap_type(a);
fi->flags = 0;
fi->num_comps = fc->num_comps;
fi->length = fc->fi_hdr_len;
/* Report the type of boot image in the rel_version string */
memcpy(fi->rel_version, a->image_type,
sizeof(fi->rel_version));
/* Build the component headers */
for (j = 0, ch = fi->cmp_hdr;
j < fi->num_comps;
j++, ch++) {
ch->img_type = j;
ch->status = CH_STAT_PENDING;
ch->length = 0;
ch->version = 0xffffffff;
ch->image_offset = 0;
ch->pad[0] = 0;
ch->pad[1] = 0;
}
if (a->flash_ver != 0) {
fi->cmp_hdr[CH_IT_BIOS].version =
fi->cmp_hdr[CH_IT_MAC].version =
fi->cmp_hdr[CH_IT_EFI].version =
fi->cmp_hdr[CH_IT_CFG].version
= a->flash_ver;
fi->cmp_hdr[CH_IT_BIOS].status =
fi->cmp_hdr[CH_IT_MAC].status =
fi->cmp_hdr[CH_IT_EFI].status =
fi->cmp_hdr[CH_IT_CFG].status =
CH_STAT_SUCCESS;
return complete_fmapi_req(a, rq, FI_STAT_SUCCESS);
}
/* fall through */
case FI_ACT_UP: /* Upload the components */
default:
return complete_fmapi_req(a, rq, FI_STAT_INVALID);
}
/*
* If we make it here, fc has been setup to do the first task. Call
* load_image to format the request, start it, and get out. The
* interrupt code will call the callback when the first message is
* complete.
*/
if (!load_image(a, rq))
return complete_fmapi_req(a, rq, FI_STAT_FAILED);
esas2r_start_request(a, rq);
return true;
}
/*
* linux/drivers/scsi/esas2r/esas2r_init.c
* For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
*
* Copyright (c) 2001-2013 ATTO Technology, Inc.
 * (mailto:linuxdrivers@attotech.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* NO WARRANTY
* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
* solely responsible for determining the appropriateness of using and
* distributing the Program and assumes all risks associated with its
* exercise of rights under this Agreement, including but not limited to
* the risks and costs of program errors, damage to or loss of data,
* programs or equipment, and unavailability or interruption of operations.
*
* DISCLAIMER OF LIABILITY
* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
* USA.
*/
#include "esas2r.h"
static bool esas2r_initmem_alloc(struct esas2r_adapter *a,
struct esas2r_mem_desc *mem_desc,
u32 align)
{
mem_desc->esas2r_param = mem_desc->size + align;
mem_desc->virt_addr = NULL;
mem_desc->phys_addr = 0;
	mem_desc->esas2r_data =
		dma_alloc_coherent(&a->pcid->dev,
				   (size_t)mem_desc->esas2r_param,
				   (dma_addr_t *)&mem_desc->phys_addr,
				   GFP_KERNEL);
if (mem_desc->esas2r_data == NULL) {
esas2r_log(ESAS2R_LOG_CRIT,
"failed to allocate %lu bytes of consistent memory!",
			   (unsigned long)mem_desc->esas2r_param);
return false;
}
mem_desc->virt_addr = PTR_ALIGN(mem_desc->esas2r_data, align);
mem_desc->phys_addr = ALIGN(mem_desc->phys_addr, align);
memset(mem_desc->virt_addr, 0, mem_desc->size);
return true;
}
static void esas2r_initmem_free(struct esas2r_adapter *a,
struct esas2r_mem_desc *mem_desc)
{
if (mem_desc->virt_addr == NULL)
return;
/*
* Careful! phys_addr and virt_addr may have been adjusted from the
* original allocation in order to return the desired alignment. That
* means we have to use the original address (in esas2r_data) and size
* (esas2r_param) and calculate the original physical address based on
* the difference between the requested and actual allocation size.
*/
if (mem_desc->phys_addr) {
int unalign = ((u8 *)mem_desc->virt_addr) -
((u8 *)mem_desc->esas2r_data);
dma_free_coherent(&a->pcid->dev,
(size_t)mem_desc->esas2r_param,
mem_desc->esas2r_data,
(dma_addr_t)(mem_desc->phys_addr - unalign));
} else {
kfree(mem_desc->esas2r_data);
}
mem_desc->virt_addr = NULL;
}
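/*
 * Allocate the DMA-able memory backing one VDA request and link its
 * descriptor into the adapter's list.
 */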
static bool alloc_vda_req(struct esas2r_adapter *a,
struct esas2r_request *rq)
{
struct esas2r_mem_desc *memdesc = kzalloc(
sizeof(struct esas2r_mem_desc), GFP_KERNEL);
if (memdesc == NULL) {
esas2r_hdebug("could not alloc mem for vda request memdesc\n");
return false;
}
memdesc->size = sizeof(union atto_vda_req) +
ESAS2R_DATA_BUF_LEN;
if (!esas2r_initmem_alloc(a, memdesc, 256)) {
esas2r_hdebug("could not alloc mem for vda request\n");
kfree(memdesc);
return false;
}
a->num_vrqs++;
list_add(&memdesc->next_desc, &a->vrq_mds_head);
rq->vrq_md = memdesc;
rq->vrq = (union atto_vda_req *)memdesc->virt_addr;
rq->vrq->scsi.handle = a->num_vrqs;
return true;
}
static void esas2r_unmap_regions(struct esas2r_adapter *a)
{
if (a->regs)
iounmap((void __iomem *)a->regs);
a->regs = NULL;
pci_release_region(a->pcid, 2);
if (a->data_window)
iounmap((void __iomem *)a->data_window);
a->data_window = NULL;
pci_release_region(a->pcid, 0);
}
static int esas2r_map_regions(struct esas2r_adapter *a)
{
int error;
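/*
 * BAR 2 holds the message unit register window and BAR 0 the data
 * window (presumably used for indirect access to chip memory).
 */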
a->regs = NULL;
a->data_window = NULL;
error = pci_request_region(a->pcid, 2, a->name);
if (error != 0) {
esas2r_log(ESAS2R_LOG_CRIT,
"pci_request_region(2) failed, error %d",
error);
return error;
}
a->regs = (void __force *)ioremap(pci_resource_start(a->pcid, 2),
pci_resource_len(a->pcid, 2));
if (a->regs == NULL) {
esas2r_log(ESAS2R_LOG_CRIT,
"ioremap failed for regs mem region\n");
pci_release_region(a->pcid, 2);
return -EFAULT;
}
error = pci_request_region(a->pcid, 0, a->name);
if (error != 0) {
esas2r_log(ESAS2R_LOG_CRIT,
"pci_request_region(2) failed, error %d",
error);
esas2r_unmap_regions(a);
return error;
}
a->data_window = (void __force *)ioremap(pci_resource_start(a->pcid,
0),
pci_resource_len(a->pcid, 0));
if (a->data_window == NULL) {
esas2r_log(ESAS2R_LOG_CRIT,
"ioremap failed for data_window mem region\n");
esas2r_unmap_regions(a);
return -EFAULT;
}
return 0;
}
static void esas2r_setup_interrupts(struct esas2r_adapter *a, int intr_mode)
{
int i;
/* Set up interrupt mode based on the requested value */
switch (intr_mode) {
case INTR_MODE_LEGACY:
use_legacy_interrupts:
a->intr_mode = INTR_MODE_LEGACY;
break;
case INTR_MODE_MSI:
i = pci_enable_msi(a->pcid);
if (i != 0) {
esas2r_log(ESAS2R_LOG_WARN,
"failed to enable MSI for adapter %d, "
"falling back to legacy interrupts "
"(err=%d)", a->index,
i);
goto use_legacy_interrupts;
}
a->intr_mode = INTR_MODE_MSI;
esas2r_lock_set_flags(&a->flags2, AF2_MSI_ENABLED);
break;
default:
esas2r_log(ESAS2R_LOG_WARN,
"unknown interrupt_mode %d requested, "
"falling back to legacy interrupts",
intr_mode);
goto use_legacy_interrupts;
}
}
static void esas2r_claim_interrupts(struct esas2r_adapter *a)
{
unsigned long flags = IRQF_DISABLED;
if (a->intr_mode == INTR_MODE_LEGACY)
flags |= IRQF_SHARED;
esas2r_log(ESAS2R_LOG_INFO,
"esas2r_claim_interrupts irq=%d (%p, %s, %x)",
a->pcid->irq, a, a->name, flags);
if (request_irq(a->pcid->irq,
(a->intr_mode == INTR_MODE_LEGACY) ?
esas2r_interrupt : esas2r_msi_interrupt,
flags,
a->name,
a)) {
esas2r_log(ESAS2R_LOG_CRIT, "unable to request IRQ %02X",
a->pcid->irq);
return;
}
esas2r_lock_set_flags(&a->flags2, AF2_IRQ_CLAIMED);
esas2r_log(ESAS2R_LOG_INFO,
"claimed IRQ %d flags: 0x%lx",
a->pcid->irq, flags);
}
int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
int index)
{
struct esas2r_adapter *a;
u64 bus_addr = 0;
int i;
void *next_uncached;
struct esas2r_request *first_request, *last_request;
if (index >= MAX_ADAPTERS) {
esas2r_log(ESAS2R_LOG_CRIT,
"tried to init invalid adapter index %u!",
index);
return 0;
}
if (esas2r_adapters[index]) {
esas2r_log(ESAS2R_LOG_CRIT,
"tried to init existing adapter index %u!",
index);
return 0;
}
a = (struct esas2r_adapter *)host->hostdata;
memset(a, 0, sizeof(struct esas2r_adapter));
a->pcid = pcid;
a->host = host;
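/*
 * Pick the DMA mask: prefer 64-bit addressing when the platform
 * actually requires more than 32 bits, otherwise fall back to 32-bit.
 */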
if (sizeof(dma_addr_t) > 4) {
const uint64_t required_mask =
dma_get_required_mask(&pcid->dev);
if (required_mask > DMA_BIT_MASK(32)
&& !pci_set_dma_mask(pcid, DMA_BIT_MASK(64))
&& !pci_set_consistent_dma_mask(pcid,
DMA_BIT_MASK(64))) {
esas2r_log_dev(ESAS2R_LOG_INFO,
&(a->pcid->dev),
"64-bit PCI addressing enabled\n");
} else if (!pci_set_dma_mask(pcid, DMA_BIT_MASK(32))
&& !pci_set_consistent_dma_mask(pcid,
DMA_BIT_MASK(32))) {
esas2r_log_dev(ESAS2R_LOG_INFO,
&(a->pcid->dev),
"32-bit PCI addressing enabled\n");
} else {
esas2r_log(ESAS2R_LOG_CRIT,
"failed to set DMA mask");
esas2r_kill_adapter(index);
return 0;
}
} else {
if (!pci_set_dma_mask(pcid, DMA_BIT_MASK(32))
&& !pci_set_consistent_dma_mask(pcid,
DMA_BIT_MASK(32))) {
esas2r_log_dev(ESAS2R_LOG_INFO,
&(a->pcid->dev),
"32-bit PCI addressing enabled\n");
} else {
esas2r_log(ESAS2R_LOG_CRIT,
"failed to set DMA mask");
esas2r_kill_adapter(index);
return 0;
}
}
esas2r_adapters[index] = a;
sprintf(a->name, ESAS2R_DRVR_NAME "_%02d", index);
esas2r_debug("new adapter %p, name %s", a, a->name);
spin_lock_init(&a->request_lock);
spin_lock_init(&a->fw_event_lock);
sema_init(&a->fm_api_semaphore, 1);
sema_init(&a->fs_api_semaphore, 1);
sema_init(&a->nvram_semaphore, 1);
esas2r_fw_event_off(a);
snprintf(a->fw_event_q_name, ESAS2R_KOBJ_NAME_LEN, "esas2r/%d",
a->index);
a->fw_event_q = create_singlethread_workqueue(a->fw_event_q_name);
init_waitqueue_head(&a->buffered_ioctl_waiter);
init_waitqueue_head(&a->nvram_waiter);
init_waitqueue_head(&a->fm_api_waiter);
init_waitqueue_head(&a->fs_api_waiter);
init_waitqueue_head(&a->vda_waiter);
INIT_LIST_HEAD(&a->general_req.req_list);
INIT_LIST_HEAD(&a->active_list);
INIT_LIST_HEAD(&a->defer_list);
INIT_LIST_HEAD(&a->free_sg_list_head);
INIT_LIST_HEAD(&a->avail_request);
INIT_LIST_HEAD(&a->vrq_mds_head);
INIT_LIST_HEAD(&a->fw_event_list);
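/*
 * The request pool lives in the Scsi_Host private data, immediately
 * after the adapter structure itself.
 */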
first_request = (struct esas2r_request *)((u8 *)(a + 1));
for (last_request = first_request, i = 1; i < num_requests;
last_request++, i++) {
INIT_LIST_HEAD(&last_request->req_list);
list_add_tail(&last_request->comp_list, &a->avail_request);
if (!alloc_vda_req(a, last_request)) {
esas2r_log(ESAS2R_LOG_CRIT,
"failed to allocate a VDA request!");
esas2r_kill_adapter(index);
return 0;
}
}
esas2r_debug("requests: %p to %p (%d, %d)", first_request,
last_request,
sizeof(*first_request),
num_requests);
if (esas2r_map_regions(a) != 0) {
esas2r_log(ESAS2R_LOG_CRIT, "could not map PCI regions!");
esas2r_kill_adapter(index);
return 0;
}
a->index = index;
/* interrupts will be disabled until we are done with init */
atomic_inc(&a->dis_ints_cnt);
atomic_inc(&a->disable_cnt);
a->flags |= AF_CHPRST_PENDING
| AF_DISC_PENDING
| AF_FIRST_INIT
| AF_LEGACY_SGE_MODE;
a->init_msg = ESAS2R_INIT_MSG_START;
a->max_vdareq_size = 128;
a->build_sgl = esas2r_build_sg_list_sge;
esas2r_setup_interrupts(a, interrupt_mode);
a->uncached_size = esas2r_get_uncached_size(a);
a->uncached = dma_alloc_coherent(&pcid->dev,
(size_t)a->uncached_size,
(dma_addr_t *)&bus_addr,
GFP_KERNEL);
if (a->uncached == NULL) {
esas2r_log(ESAS2R_LOG_CRIT,
"failed to allocate %d bytes of consistent memory!",
a->uncached_size);
esas2r_kill_adapter(index);
return 0;
}
a->uncached_phys = bus_addr;
esas2r_debug("%d bytes uncached memory allocated @ %p (%x:%x)",
a->uncached_size,
a->uncached,
upper_32_bits(bus_addr),
lower_32_bits(bus_addr));
memset(a->uncached, 0, a->uncached_size);
next_uncached = a->uncached;
if (!esas2r_init_adapter_struct(a,
&next_uncached)) {
esas2r_log(ESAS2R_LOG_CRIT,
"failed to initialize adapter structure (2)!");
esas2r_kill_adapter(index);
return 0;
}
tasklet_init(&a->tasklet,
esas2r_adapter_tasklet,
(unsigned long)a);
/*
* Disable chip interrupts to prevent spurious interrupts
* until we claim the IRQ.
*/
esas2r_disable_chip_interrupts(a);
esas2r_check_adapter(a);
if (!esas2r_init_adapter_hw(a, true))
esas2r_log(ESAS2R_LOG_CRIT, "failed to initialize hardware!");
else
esas2r_debug("esas2r_init_adapter ok");
esas2r_claim_interrupts(a);
if (a->flags2 & AF2_IRQ_CLAIMED)
esas2r_enable_chip_interrupts(a);
esas2r_lock_set_flags(&a->flags2, AF2_INIT_DONE);
if (!(a->flags & AF_DEGRADED_MODE))
esas2r_kickoff_timer(a);
esas2r_debug("esas2r_init_adapter done for %p (%d)",
a, a->disable_cnt);
return 1;
}
static void esas2r_adapter_power_down(struct esas2r_adapter *a,
int power_management)
{
struct esas2r_mem_desc *memdesc, *next;
if ((a->flags2 & AF2_INIT_DONE)
&& (!(a->flags & AF_DEGRADED_MODE))) {
if (!power_management) {
del_timer_sync(&a->timer);
tasklet_kill(&a->tasklet);
}
esas2r_power_down(a);
/*
* There are versions of firmware that do not handle the sync
* cache command correctly. Stall here to ensure that the
* cache is lazily flushed.
*/
mdelay(500);
esas2r_debug("chip halted");
}
/* Remove sysfs binary files */
if (a->sysfs_fw_created) {
sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fw);
a->sysfs_fw_created = 0;
}
if (a->sysfs_fs_created) {
sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fs);
a->sysfs_fs_created = 0;
}
if (a->sysfs_vda_created) {
sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_vda);
a->sysfs_vda_created = 0;
}
if (a->sysfs_hw_created) {
sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_hw);
a->sysfs_hw_created = 0;
}
if (a->sysfs_live_nvram_created) {
sysfs_remove_bin_file(&a->host->shost_dev.kobj,
&bin_attr_live_nvram);
a->sysfs_live_nvram_created = 0;
}
if (a->sysfs_default_nvram_created) {
sysfs_remove_bin_file(&a->host->shost_dev.kobj,
&bin_attr_default_nvram);
a->sysfs_default_nvram_created = 0;
}
/* Clean up interrupts */
if (a->flags2 & AF2_IRQ_CLAIMED) {
esas2r_log_dev(ESAS2R_LOG_INFO,
&(a->pcid->dev),
"free_irq(%d) called", a->pcid->irq);
free_irq(a->pcid->irq, a);
esas2r_debug("IRQ released");
esas2r_lock_clear_flags(&a->flags2, AF2_IRQ_CLAIMED);
}
if (a->flags2 & AF2_MSI_ENABLED) {
pci_disable_msi(a->pcid);
esas2r_lock_clear_flags(&a->flags2, AF2_MSI_ENABLED);
esas2r_debug("MSI disabled");
}
if (a->inbound_list_md.virt_addr)
esas2r_initmem_free(a, &a->inbound_list_md);
if (a->outbound_list_md.virt_addr)
esas2r_initmem_free(a, &a->outbound_list_md);
list_for_each_entry_safe(memdesc, next, &a->free_sg_list_head,
next_desc) {
esas2r_initmem_free(a, memdesc);
}
/* Following frees everything allocated via alloc_vda_req */
list_for_each_entry_safe(memdesc, next, &a->vrq_mds_head, next_desc) {
esas2r_initmem_free(a, memdesc);
list_del(&memdesc->next_desc);
kfree(memdesc);
}
kfree(a->first_ae_req);
a->first_ae_req = NULL;
kfree(a->sg_list_mds);
a->sg_list_mds = NULL;
kfree(a->req_table);
a->req_table = NULL;
if (a->regs) {
esas2r_unmap_regions(a);
a->regs = NULL;
a->data_window = NULL;
esas2r_debug("regions unmapped");
}
}
/* Release/free allocated resources for the specified adapter. */
void esas2r_kill_adapter(int i)
{
struct esas2r_adapter *a = esas2r_adapters[i];
if (a) {
unsigned long flags;
struct workqueue_struct *wq;
esas2r_debug("killing adapter %p [%d] ", a, i);
esas2r_fw_event_off(a);
esas2r_adapter_power_down(a, 0);
if (esas2r_buffered_ioctl &&
(a->pcid == esas2r_buffered_ioctl_pcid)) {
dma_free_coherent(&a->pcid->dev,
(size_t)esas2r_buffered_ioctl_size,
esas2r_buffered_ioctl,
esas2r_buffered_ioctl_addr);
esas2r_buffered_ioctl = NULL;
}
if (a->vda_buffer) {
dma_free_coherent(&a->pcid->dev,
(size_t)VDA_MAX_BUFFER_SIZE,
a->vda_buffer,
(dma_addr_t)a->ppvda_buffer);
a->vda_buffer = NULL;
}
if (a->fs_api_buffer) {
dma_free_coherent(&a->pcid->dev,
(size_t)a->fs_api_buffer_size,
a->fs_api_buffer,
(dma_addr_t)a->ppfs_api_buffer);
a->fs_api_buffer = NULL;
}
kfree(a->local_atto_ioctl);
a->local_atto_ioctl = NULL;
spin_lock_irqsave(&a->fw_event_lock, flags);
wq = a->fw_event_q;
a->fw_event_q = NULL;
spin_unlock_irqrestore(&a->fw_event_lock, flags);
if (wq)
destroy_workqueue(wq);
if (a->uncached) {
dma_free_coherent(&a->pcid->dev,
(size_t)a->uncached_size,
a->uncached,
(dma_addr_t)a->uncached_phys);
a->uncached = NULL;
esas2r_debug("uncached area freed");
}
esas2r_log_dev(ESAS2R_LOG_INFO,
&(a->pcid->dev),
"pci_disable_device() called. msix_enabled: %d "
"msi_enabled: %d irq: %d pin: %d",
a->pcid->msix_enabled,
a->pcid->msi_enabled,
a->pcid->irq,
a->pcid->pin);
esas2r_log_dev(ESAS2R_LOG_INFO,
&(a->pcid->dev),
"before pci_disable_device() enable_cnt: %d",
a->pcid->enable_cnt.counter);
pci_disable_device(a->pcid);
esas2r_log_dev(ESAS2R_LOG_INFO,
&(a->pcid->dev),
"after pci_disable_device() enable_cnt: %d",
a->pcid->enable_cnt.counter);
esas2r_log_dev(ESAS2R_LOG_INFO,
&(a->pcid->dev),
"pci_set_drv_data(%p, NULL) called",
a->pcid);
pci_set_drvdata(a->pcid, NULL);
esas2r_adapters[i] = NULL;
if (a->flags2 & AF2_INIT_DONE) {
esas2r_lock_clear_flags(&a->flags2,
AF2_INIT_DONE);
esas2r_lock_set_flags(&a->flags,
AF_DEGRADED_MODE);
esas2r_log_dev(ESAS2R_LOG_INFO,
&(a->host->shost_gendev),
"scsi_remove_host() called");
scsi_remove_host(a->host);
esas2r_log_dev(ESAS2R_LOG_INFO,
&(a->host->shost_gendev),
"scsi_host_put() called");
scsi_host_put(a->host);
}
}
}
int esas2r_cleanup(struct Scsi_Host *host)
{
struct esas2r_adapter *a;
int index;
if (host == NULL) {
int i;
esas2r_debug("esas2r_cleanup everything");
for (i = 0; i < MAX_ADAPTERS; i++)
esas2r_kill_adapter(i);
return -1;
}
esas2r_debug("esas2r_cleanup called for host %p", host);
a = (struct esas2r_adapter *)host->hostdata;
index = a->index;
esas2r_kill_adapter(index);
return index;
}
int esas2r_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct Scsi_Host *host = pci_get_drvdata(pdev);
u32 device_state;
struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "suspending adapter()");
if (!a)
return -ENODEV;
esas2r_adapter_power_down(a, 1);
device_state = pci_choose_state(pdev, state);
esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
"pci_save_state() called");
pci_save_state(pdev);
esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
"pci_disable_device() called");
pci_disable_device(pdev);
esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
"pci_set_power_state() called");
pci_set_power_state(pdev, device_state);
esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "esas2r_suspend(): 0");
return 0;
}
int esas2r_resume(struct pci_dev *pdev)
{
struct Scsi_Host *host = pci_get_drvdata(pdev);
struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
int rez;
esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "resuming adapter()");
esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
"pci_set_power_state(PCI_D0) "
"called");
pci_set_power_state(pdev, PCI_D0);
esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
"pci_enable_wake(PCI_D0, 0) "
"called");
pci_enable_wake(pdev, PCI_D0, 0);
esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
"pci_restore_state() called");
pci_restore_state(pdev);
esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
"pci_enable_device() called");
rez = pci_enable_device(pdev);
pci_set_master(pdev);
if (!a) {
rez = -ENODEV;
goto error_exit;
}
if (esas2r_map_regions(a) != 0) {
esas2r_log(ESAS2R_LOG_CRIT, "could not re-map PCI regions!");
rez = -ENOMEM;
goto error_exit;
}
/* Set up interrupt mode */
esas2r_setup_interrupts(a, a->intr_mode);
/*
* Disable chip interrupts to prevent spurious interrupts until we
* claim the IRQ.
*/
esas2r_disable_chip_interrupts(a);
if (!esas2r_power_up(a, true)) {
esas2r_debug("yikes, esas2r_power_up failed");
rez = -ENOMEM;
goto error_exit;
}
esas2r_claim_interrupts(a);
if (a->flags2 & AF2_IRQ_CLAIMED) {
/*
* Now that system interrupt(s) are claimed, we can enable
* chip interrupts.
*/
esas2r_enable_chip_interrupts(a);
esas2r_kickoff_timer(a);
} else {
esas2r_debug("yikes, unable to claim IRQ");
esas2r_log(ESAS2R_LOG_CRIT, "could not re-claim IRQ!");
rez = -ENOMEM;
goto error_exit;
}
error_exit:
esas2r_log_dev(ESAS2R_LOG_CRIT, &(pdev->dev), "esas2r_resume(): %d",
rez);
return rez;
}
bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str)
{
esas2r_lock_set_flags(&a->flags, AF_DEGRADED_MODE);
esas2r_log(ESAS2R_LOG_CRIT,
"setting adapter to degraded mode: %s\n", error_str);
return false;
}
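/*
 * Compute the size of the single coherent "uncached" area: the NVRAM
 * image, discovery buffer, outbound list copy pointer, S/G list pages,
 * and the inbound/outbound communication lists, each padded for
 * alignment.
 */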
u32 esas2r_get_uncached_size(struct esas2r_adapter *a)
{
return sizeof(struct esas2r_sas_nvram)
+ ALIGN(ESAS2R_DISC_BUF_LEN, 8)
+ ALIGN(sizeof(u32), 8) /* outbound list copy pointer */
+ 8
+ (num_sg_lists * (u16)sgl_page_size)
+ ALIGN((num_requests + num_ae_requests + 1 +
ESAS2R_LIST_EXTRA) *
sizeof(struct esas2r_inbound_list_source_entry),
8)
+ ALIGN((num_requests + num_ae_requests + 1 +
ESAS2R_LIST_EXTRA) *
sizeof(struct atto_vda_ob_rsp), 8)
+ 256; /* VDA request and buffer align */
}
static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a)
{
int pcie_cap_reg;
pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP);
if (pcie_cap_reg) {
u16 devcontrol;
pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL,
&devcontrol);
if ((devcontrol & PCI_EXP_DEVCTL_READRQ) > 0x2000) {
esas2r_log(ESAS2R_LOG_INFO,
"max read request size > 512B");
devcontrol &= ~PCI_EXP_DEVCTL_READRQ;
devcontrol |= 0x2000;
pci_write_config_word(a->pcid,
pcie_cap_reg + PCI_EXP_DEVCTL,
devcontrol);
}
}
}
/*
* Determine the organization of the uncached data area and
* finish initializing the adapter structure
*/
bool esas2r_init_adapter_struct(struct esas2r_adapter *a,
void **uncached_area)
{
u32 i;
u8 *high;
struct esas2r_inbound_list_source_entry *element;
struct esas2r_request *rq;
struct esas2r_mem_desc *sgl;
spin_lock_init(&a->sg_list_lock);
spin_lock_init(&a->mem_lock);
spin_lock_init(&a->queue_lock);
a->targetdb_end = &a->targetdb[ESAS2R_MAX_TARGETS];
if (!alloc_vda_req(a, &a->general_req)) {
esas2r_hdebug(
"failed to allocate a VDA request for the general req!");
return false;
}
/* allocate requests for asynchronous events */
a->first_ae_req =
kzalloc(num_ae_requests * sizeof(struct esas2r_request),
GFP_KERNEL);
if (a->first_ae_req == NULL) {
esas2r_log(ESAS2R_LOG_CRIT,
"failed to allocate memory for asynchronous events");
return false;
}
/* allocate the S/G list memory descriptors */
a->sg_list_mds = kzalloc(
num_sg_lists * sizeof(struct esas2r_mem_desc), GFP_KERNEL);
if (a->sg_list_mds == NULL) {
esas2r_log(ESAS2R_LOG_CRIT,
"failed to allocate memory for s/g list descriptors");
return false;
}
/* allocate the request table */
a->req_table =
kzalloc((num_requests + num_ae_requests +
1) * sizeof(struct esas2r_request *), GFP_KERNEL);
if (a->req_table == NULL) {
esas2r_log(ESAS2R_LOG_CRIT,
"failed to allocate memory for the request table");
return false;
}
/* initialize PCI configuration space */
esas2r_init_pci_cfg_space(a);
/*
* the thunder_stream boards all have a serial flash part that has a
* different base address on the AHB bus.
*/
if ((a->pcid->subsystem_vendor == ATTO_VENDOR_ID)
&& (a->pcid->subsystem_device & ATTO_SSDID_TBT))
a->flags2 |= AF2_THUNDERBOLT;
if (a->flags2 & AF2_THUNDERBOLT)
a->flags2 |= AF2_SERIAL_FLASH;
if (a->pcid->subsystem_device == ATTO_TLSH_1068)
a->flags2 |= AF2_THUNDERLINK;
/* Uncached Area */
high = (u8 *)*uncached_area;
/* initialize the scatter/gather table pages */
for (i = 0, sgl = a->sg_list_mds; i < num_sg_lists; i++, sgl++) {
sgl->size = sgl_page_size;
list_add_tail(&sgl->next_desc, &a->free_sg_list_head);
if (!esas2r_initmem_alloc(a, sgl, ESAS2R_SGL_ALIGN)) {
/* Allow the driver to load if the minimum count is met. */
if (i < NUM_SGL_MIN)
return false;
break;
}
}
/* compute the size of the lists */
a->list_size = num_requests + ESAS2R_LIST_EXTRA;
/* allocate the inbound list */
a->inbound_list_md.size = a->list_size *
sizeof(struct esas2r_inbound_list_source_entry);
if (!esas2r_initmem_alloc(a, &a->inbound_list_md, ESAS2R_LIST_ALIGN)) {
esas2r_hdebug("failed to allocate IB list");
return false;
}
/* allocate the outbound list */
a->outbound_list_md.size = a->list_size *
sizeof(struct atto_vda_ob_rsp);
if (!esas2r_initmem_alloc(a, &a->outbound_list_md,
ESAS2R_LIST_ALIGN)) {
esas2r_hdebug("failed to allocate IB list");
return false;
}
/* allocate the NVRAM structure */
a->nvram = (struct esas2r_sas_nvram *)high;
high += sizeof(struct esas2r_sas_nvram);
/* allocate the discovery buffer */
a->disc_buffer = high;
high += ESAS2R_DISC_BUF_LEN;
high = PTR_ALIGN(high, 8);
/* allocate the outbound list copy pointer */
a->outbound_copy = (u32 volatile *)high;
high += sizeof(u32);
if (!(a->flags & AF_NVR_VALID))
esas2r_nvram_set_defaults(a);
/* update the caller's uncached memory area pointer */
*uncached_area = (void *)high;
/* initialize the allocated memory */
if (a->flags & AF_FIRST_INIT) {
memset(a->req_table, 0,
(num_requests + num_ae_requests +
1) * sizeof(struct esas2r_request *));
esas2r_targ_db_initialize(a);
/* prime parts of the inbound list */
element = (struct esas2r_inbound_list_source_entry *)
a->inbound_list_md.virt_addr;
for (i = 0; i < a->list_size; i++) {
element->address = 0;
element->reserved = 0;
element->length = cpu_to_le32(HWILSE_INTERFACE_F0 |
(sizeof(union atto_vda_req) / sizeof(u32)));
element++;
}
/* init the AE requests */
for (rq = a->first_ae_req, i = 0; i < num_ae_requests; rq++,
i++) {
INIT_LIST_HEAD(&rq->req_list);
if (!alloc_vda_req(a, rq)) {
esas2r_hdebug(
"failed to allocate a VDA request!");
return false;
}
esas2r_rq_init_request(rq, a);
/* override the completion function */
rq->comp_cb = esas2r_ae_complete;
}
}
return true;
}
/* This code will verify that the chip is operational. */
bool esas2r_check_adapter(struct esas2r_adapter *a)
{
u32 starttime;
u32 doorbell;
u64 ppaddr;
u32 dw;
/*
* if the chip reset detected flag is set, we can bypass a bunch of
* stuff.
*/
if (a->flags & AF_CHPRST_DETECTED)
goto skip_chip_reset;
/*
* BEFORE WE DO ANYTHING, disable the chip interrupts! the boot driver
* may have left them enabled or we may be recovering from a fault.
*/
esas2r_write_register_dword(a, MU_INT_MASK_OUT, ESAS2R_INT_DIS_MASK);
esas2r_flush_register_dword(a, MU_INT_MASK_OUT);
/*
* wait for the firmware to become ready by forcing an interrupt and
* waiting for a response.
*/
starttime = jiffies_to_msecs(jiffies);
while (true) {
esas2r_force_interrupt(a);
doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
if (doorbell == 0xFFFFFFFF) {
/*
* Give the firmware up to two seconds to enable
* register access after a reset.
*/
if ((jiffies_to_msecs(jiffies) - starttime) > 2000)
return esas2r_set_degraded_mode(a,
"unable to access registers");
} else if (doorbell & DRBL_FORCE_INT) {
u32 ver = (doorbell & DRBL_FW_VER_MSK);
/*
* This driver supports version 0 and version 1 of
* the API
*/
esas2r_write_register_dword(a, MU_DOORBELL_OUT,
doorbell);
if (ver == DRBL_FW_VER_0) {
esas2r_lock_set_flags(&a->flags,
AF_LEGACY_SGE_MODE);
a->max_vdareq_size = 128;
a->build_sgl = esas2r_build_sg_list_sge;
} else if (ver == DRBL_FW_VER_1) {
esas2r_lock_clear_flags(&a->flags,
AF_LEGACY_SGE_MODE);
a->max_vdareq_size = 1024;
a->build_sgl = esas2r_build_sg_list_prd;
} else {
return esas2r_set_degraded_mode(a,
"unknown firmware version");
}
break;
}
schedule_timeout_interruptible(msecs_to_jiffies(100));
if ((jiffies_to_msecs(jiffies) - starttime) > 180000) {
esas2r_hdebug("FW ready TMO");
esas2r_bugon();
return esas2r_set_degraded_mode(a,
"firmware start has timed out");
}
}
/* purge any asynchronous events since we will repost them later */
esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_DOWN);
starttime = jiffies_to_msecs(jiffies);
while (true) {
doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
if (doorbell & DRBL_MSG_IFC_DOWN) {
esas2r_write_register_dword(a, MU_DOORBELL_OUT,
doorbell);
break;
}
schedule_timeout_interruptible(msecs_to_jiffies(50));
if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
esas2r_hdebug("timeout waiting for interface down");
break;
}
}
skip_chip_reset:
/*
* first things first, before we go changing any of these registers
* disable the communication lists.
*/
dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG);
dw &= ~MU_ILC_ENABLE;
esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw);
dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG);
dw &= ~MU_OLC_ENABLE;
esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw);
/* configure the communication list addresses */
ppaddr = a->inbound_list_md.phys_addr;
esas2r_write_register_dword(a, MU_IN_LIST_ADDR_LO,
lower_32_bits(ppaddr));
esas2r_write_register_dword(a, MU_IN_LIST_ADDR_HI,
upper_32_bits(ppaddr));
ppaddr = a->outbound_list_md.phys_addr;
esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_LO,
lower_32_bits(ppaddr));
esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_HI,
upper_32_bits(ppaddr));
ppaddr = a->uncached_phys +
((u8 *)a->outbound_copy - a->uncached);
esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_LO,
lower_32_bits(ppaddr));
esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_HI,
upper_32_bits(ppaddr));
/* reset the read and write pointers */
*a->outbound_copy =
a->last_write =
a->last_read = a->list_size - 1;
esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE);
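/*
 * Prime all pointers to the last entry with the toggle bit set so the
 * lists start out empty; the toggle presumably flips each time a
 * pointer wraps so the firmware can tell old entries from new ones.
 */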
esas2r_write_register_dword(a, MU_IN_LIST_WRITE, MU_ILW_TOGGLE |
a->last_write);
esas2r_write_register_dword(a, MU_OUT_LIST_COPY, MU_OLC_TOGGLE |
a->last_write);
esas2r_write_register_dword(a, MU_IN_LIST_READ, MU_ILR_TOGGLE |
a->last_write);
esas2r_write_register_dword(a, MU_OUT_LIST_WRITE,
MU_OLW_TOGGLE | a->last_write);
/* configure the interface select fields */
dw = esas2r_read_register_dword(a, MU_IN_LIST_IFC_CONFIG);
dw &= ~(MU_ILIC_LIST | MU_ILIC_DEST);
esas2r_write_register_dword(a, MU_IN_LIST_IFC_CONFIG,
(dw | MU_ILIC_LIST_F0 | MU_ILIC_DEST_DDR));
dw = esas2r_read_register_dword(a, MU_OUT_LIST_IFC_CONFIG);
dw &= ~(MU_OLIC_LIST | MU_OLIC_SOURCE);
esas2r_write_register_dword(a, MU_OUT_LIST_IFC_CONFIG,
(dw | MU_OLIC_LIST_F0 |
MU_OLIC_SOURCE_DDR));
/* finish configuring the communication lists */
dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG);
dw &= ~(MU_ILC_ENTRY_MASK | MU_ILC_NUMBER_MASK);
dw |= MU_ILC_ENTRY_4_DW | MU_ILC_DYNAMIC_SRC
| (a->list_size << MU_ILC_NUMBER_SHIFT);
esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw);
dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG);
dw &= ~(MU_OLC_ENTRY_MASK | MU_OLC_NUMBER_MASK);
dw |= MU_OLC_ENTRY_4_DW | (a->list_size << MU_OLC_NUMBER_SHIFT);
esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw);
/*
* notify the firmware that we're done setting up the communication
* list registers. wait here until the firmware is done configuring
* its lists. it will signal that it is done by enabling the lists.
*/
esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_INIT);
starttime = jiffies_to_msecs(jiffies);
while (true) {
doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
if (doorbell & DRBL_MSG_IFC_INIT) {
esas2r_write_register_dword(a, MU_DOORBELL_OUT,
doorbell);
break;
}
schedule_timeout_interruptible(msecs_to_jiffies(100));
if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
esas2r_hdebug(
"timeout waiting for communication list init");
esas2r_bugon();
return esas2r_set_degraded_mode(a,
"timeout waiting for communication list init");
}
}
/*
* flag whether the firmware supports the power down doorbell. we
* determine this by reading the inbound doorbell enable mask.
*/
doorbell = esas2r_read_register_dword(a, MU_DOORBELL_IN_ENB);
if (doorbell & DRBL_POWER_DOWN)
esas2r_lock_set_flags(&a->flags2, AF2_VDA_POWER_DOWN);
else
esas2r_lock_clear_flags(&a->flags2, AF2_VDA_POWER_DOWN);
/*
* enable assertion of outbound queue and doorbell interrupts in the
* main interrupt cause register.
*/
esas2r_write_register_dword(a, MU_OUT_LIST_INT_MASK, MU_OLIS_MASK);
esas2r_write_register_dword(a, MU_DOORBELL_OUT_ENB, DRBL_ENB_MASK);
return true;
}
/* Process the initialization message just completed and format the next one. */
static bool esas2r_format_init_msg(struct esas2r_adapter *a,
struct esas2r_request *rq)
{
u32 msg = a->init_msg;
struct atto_vda_cfg_init *ci;
a->init_msg = 0;
switch (msg) {
case ESAS2R_INIT_MSG_START:
case ESAS2R_INIT_MSG_REINIT:
{
struct timeval now;
do_gettimeofday(&now);
esas2r_hdebug("CFG init");
esas2r_build_cfg_req(a,
rq,
VDA_CFG_INIT,
0,
NULL);
ci = (struct atto_vda_cfg_init *)&rq->vrq->cfg.data.init;
ci->sgl_page_size = sgl_page_size;
ci->epoch_time = now.tv_sec;
rq->flags |= RF_FAILURE_OK;
a->init_msg = ESAS2R_INIT_MSG_INIT;
break;
}
case ESAS2R_INIT_MSG_INIT:
if (rq->req_stat == RS_SUCCESS) {
u32 major;
u32 minor;
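/* pack the major.minor release into the bytes above the VDA version */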
a->fw_version = le16_to_cpu(
rq->func_rsp.cfg_rsp.vda_version);
a->fw_build = rq->func_rsp.cfg_rsp.fw_build;
major = LOBYTE(rq->func_rsp.cfg_rsp.fw_release);
minor = HIBYTE(rq->func_rsp.cfg_rsp.fw_release);
a->fw_version += (major << 16) + (minor << 24);
} else {
esas2r_hdebug("FAILED");
}
/*
 * the 2.71 and earlier releases of R6xx firmware did not correctly
 * fail unsupported config requests, so gate the GET_INIT2 request
 * on the firmware version.
 */
if ((a->flags2 & AF2_THUNDERBOLT)
|| (be32_to_cpu(a->fw_version) >
be32_to_cpu(0x47020052))) {
esas2r_hdebug("CFG get init");
esas2r_build_cfg_req(a,
rq,
VDA_CFG_GET_INIT2,
sizeof(struct atto_vda_cfg_init),
NULL);
rq->vrq->cfg.sg_list_offset = offsetof(
struct atto_vda_cfg_req,
data.sge);
rq->vrq->cfg.data.prde.ctl_len =
cpu_to_le32(sizeof(struct atto_vda_cfg_init));
rq->vrq->cfg.data.prde.address = cpu_to_le64(
rq->vrq_md->phys_addr +
sizeof(union atto_vda_req));
rq->flags |= RF_FAILURE_OK;
a->init_msg = ESAS2R_INIT_MSG_GET_INIT;
break;
}
case ESAS2R_INIT_MSG_GET_INIT:
if (msg == ESAS2R_INIT_MSG_GET_INIT) {
ci = (struct atto_vda_cfg_init *)rq->data_buf;
if (rq->req_stat == RS_SUCCESS) {
a->num_targets_backend =
le32_to_cpu(ci->num_targets_backend);
a->ioctl_tunnel =
le32_to_cpu(ci->ioctl_tunnel);
} else {
esas2r_hdebug("FAILED");
}
}
/* fall through */
default:
rq->req_stat = RS_SUCCESS;
return false;
}
return true;
}
/*
* Perform initialization messages via the request queue. Messages are
* performed with interrupts disabled.
*/
bool esas2r_init_msgs(struct esas2r_adapter *a)
{
bool success = true;
struct esas2r_request *rq = &a->general_req;
esas2r_rq_init_request(rq, a);
rq->comp_cb = esas2r_dummy_complete;
if (a->init_msg == 0)
a->init_msg = ESAS2R_INIT_MSG_REINIT;
while (a->init_msg) {
if (esas2r_format_init_msg(a, rq)) {
unsigned long flags;
while (true) {
spin_lock_irqsave(&a->queue_lock, flags);
esas2r_start_vda_request(a, rq);
spin_unlock_irqrestore(&a->queue_lock, flags);
esas2r_wait_request(a, rq);
if (rq->req_stat != RS_PENDING)
break;
}
}
if (rq->req_stat == RS_SUCCESS
|| ((rq->flags & RF_FAILURE_OK)
&& rq->req_stat != RS_TIMEOUT))
continue;
esas2r_log(ESAS2R_LOG_CRIT, "init message %x failed (%x, %x)",
a->init_msg, rq->req_stat, rq->flags);
a->init_msg = ESAS2R_INIT_MSG_START;
success = false;
break;
}
esas2r_rq_destroy_request(rq, a);
return success;
}
/* Initialize the adapter chip */
bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll)
{
bool rslt = false;
struct esas2r_request *rq;
u32 i;
if (a->flags & AF_DEGRADED_MODE)
goto exit;
if (!(a->flags & AF_NVR_VALID)) {
if (!esas2r_nvram_read_direct(a))
esas2r_log(ESAS2R_LOG_WARN,
"invalid/missing NVRAM parameters");
}
if (!esas2r_init_msgs(a)) {
esas2r_set_degraded_mode(a, "init messages failed");
goto exit;
}
/* The firmware is ready. */
esas2r_lock_clear_flags(&a->flags, AF_DEGRADED_MODE);
esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING);
/* Post all the async event requests */
for (i = 0, rq = a->first_ae_req; i < num_ae_requests; i++, rq++)
esas2r_start_ae_request(a, rq);
if (!a->flash_rev[0])
esas2r_read_flash_rev(a);
if (!a->image_type[0])
esas2r_read_image_type(a);
if (a->fw_version == 0)
a->fw_rev[0] = 0;
else
sprintf(a->fw_rev, "%1d.%02d",
(int)LOBYTE(HIWORD(a->fw_version)),
(int)HIBYTE(HIWORD(a->fw_version)));
esas2r_hdebug("firmware revision: %s", a->fw_rev);
if ((a->flags & AF_CHPRST_DETECTED)
&& (a->flags & AF_FIRST_INIT)) {
esas2r_enable_chip_interrupts(a);
return true;
}
/* initialize discovery */
esas2r_disc_initialize(a);
/*
* wait for the device wait time to expire here if requested. this is
* usually requested during initial driver load and possibly when
* resuming from a low power state. deferred device waiting will use
* interrupts. chip reset recovery always defers device waiting to
* avoid being in a TASKLET too long.
*/
if (init_poll) {
u32 currtime = a->disc_start_time;
u32 nexttick = 100;
u32 deltatime;
/*
* Block Tasklets from getting scheduled and indicate this is
* polled discovery.
*/
esas2r_lock_set_flags(&a->flags, AF_TASKLET_SCHEDULED);
esas2r_lock_set_flags(&a->flags, AF_DISC_POLLED);
/*
* Temporarily bring the disable count to zero to enable
* deferred processing. Note that the count is already zero
* after the first initialization.
*/
if (a->flags & AF_FIRST_INIT)
atomic_dec(&a->disable_cnt);
while (a->flags & AF_DISC_PENDING) {
schedule_timeout_interruptible(msecs_to_jiffies(100));
/*
* Determine the need for a timer tick based on the
* delta time between this and the last iteration of
* this loop. We don't use the absolute time because
* then we would have to worry about when nexttick
* wraps and currtime hasn't yet.
*/
deltatime = jiffies_to_msecs(jiffies) - currtime;
currtime += deltatime;
/*
* Process any waiting discovery as long as the chip is
* up. If a chip reset happens during initial polling,
* we have to make sure the timer tick processes the
* doorbell indicating the firmware is ready.
*/
if (!(a->flags & AF_CHPRST_PENDING))
esas2r_disc_check_for_work(a);
/* Simulate a timer tick. */
if (nexttick <= deltatime) {
/* Time for a timer tick */
nexttick += 100;
esas2r_timer_tick(a);
}
if (nexttick > deltatime)
nexttick -= deltatime;
/* Do any deferred processing */
if (esas2r_is_tasklet_pending(a))
esas2r_do_tasklet_tasks(a);
}
if (a->flags & AF_FIRST_INIT)
atomic_inc(&a->disable_cnt);
esas2r_lock_clear_flags(&a->flags, AF_DISC_POLLED);
esas2r_lock_clear_flags(&a->flags, AF_TASKLET_SCHEDULED);
}
esas2r_targ_db_report_changes(a);
/*
* For cases where (a) the initialization messages processing may
* handle an interrupt for a port event and a discovery is waiting, but
* we are not waiting for devices, or (b) the device wait time has been
* exhausted but there is still discovery pending, start any leftover
* discovery in interrupt driven mode.
*/
esas2r_disc_start_waiting(a);
/* Enable chip interrupts */
a->int_mask = ESAS2R_INT_STS_MASK;
esas2r_enable_chip_interrupts(a);
esas2r_enable_heartbeat(a);
rslt = true;
exit:
/*
* Regardless of whether initialization was successful, certain things
* need to get done before we exit.
*/
if ((a->flags & AF_CHPRST_DETECTED)
&& (a->flags & AF_FIRST_INIT)) {
/*
* Reinitialization was performed during the first
* initialization. Only clear the chip reset flag so the
* original device polling is not cancelled.
*/
if (!rslt)
esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING);
} else {
/* First initialization or a subsequent re-init is complete. */
if (!rslt) {
esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING);
esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING);
}
/* Enable deferred processing after the first initialization. */
if (a->flags & AF_FIRST_INIT) {
esas2r_lock_clear_flags(&a->flags, AF_FIRST_INIT);
if (atomic_dec_return(&a->disable_cnt) == 0)
esas2r_do_deferred_processes(a);
}
}
return rslt;
}
void esas2r_reset_adapter(struct esas2r_adapter *a)
{
esas2r_lock_set_flags(&a->flags, AF_OS_RESET);
esas2r_local_reset_adapter(a);
esas2r_schedule_tasklet(a);
}
void esas2r_reset_chip(struct esas2r_adapter *a)
{
if (!esas2r_is_adapter_present(a))
return;
/*
* Before we reset the chip, save off the VDA core dump. The VDA core
* dump is located in the upper 512KB of the onchip SRAM. Make sure
* to not overwrite a previous crash that was saved.
*/
if ((a->flags2 & AF2_COREDUMP_AVAIL)
&& !(a->flags2 & AF2_COREDUMP_SAVED)
&& a->fw_coredump_buff) {
esas2r_read_mem_block(a,
a->fw_coredump_buff,
MW_DATA_ADDR_SRAM + 0x80000,
ESAS2R_FWCOREDUMP_SZ);
esas2r_lock_set_flags(&a->flags2, AF2_COREDUMP_SAVED);
}
esas2r_lock_clear_flags(&a->flags2, AF2_COREDUMP_AVAIL);
/* Reset the chip */
if (a->pcid->revision == MVR_FREY_B2)
esas2r_write_register_dword(a, MU_CTL_STATUS_IN_B2,
MU_CTL_IN_FULL_RST2);
else
esas2r_write_register_dword(a, MU_CTL_STATUS_IN,
MU_CTL_IN_FULL_RST);
/* Stall a little while to let the reset condition clear */
mdelay(10);
}
static void esas2r_power_down_notify_firmware(struct esas2r_adapter *a)
{
u32 starttime;
u32 doorbell;
esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_POWER_DOWN);
starttime = jiffies_to_msecs(jiffies);
while (true) {
doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
if (doorbell & DRBL_POWER_DOWN) {
esas2r_write_register_dword(a, MU_DOORBELL_OUT,
doorbell);
break;
}
schedule_timeout_interruptible(msecs_to_jiffies(100));
if ((jiffies_to_msecs(jiffies) - starttime) > 30000) {
esas2r_hdebug("Timeout waiting for power down");
break;
}
}
}
/*
 * Power down the adapter: quiesce the firmware interface, suspend I/O
 * processing, and remove devices from the target database.
 */
void esas2r_power_down(struct esas2r_adapter *a)
{
esas2r_lock_set_flags(&a->flags, AF_POWER_MGT);
esas2r_lock_set_flags(&a->flags, AF_POWER_DOWN);
if (!(a->flags & AF_DEGRADED_MODE)) {
u32 starttime;
u32 doorbell;
/*
* We are currently running OK and will be reinitializing later.
* increment the disable count to coordinate with
* esas2r_init_adapter. We don't have to do this in degraded
* mode since we never enabled interrupts in the first place.
*/
esas2r_disable_chip_interrupts(a);
esas2r_disable_heartbeat(a);
/* wait for any VDA activity to clear before continuing */
esas2r_write_register_dword(a, MU_DOORBELL_IN,
DRBL_MSG_IFC_DOWN);
starttime = jiffies_to_msecs(jiffies);
while (true) {
doorbell =
esas2r_read_register_dword(a, MU_DOORBELL_OUT);
if (doorbell & DRBL_MSG_IFC_DOWN) {
esas2r_write_register_dword(a, MU_DOORBELL_OUT,
doorbell);
break;
}
schedule_timeout_interruptible(msecs_to_jiffies(100));
if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
esas2r_hdebug(
"timeout waiting for interface down");
break;
}
}
/*
 * For firmware versions that support it, tell the firmware the
 * driver is powering down.
 */
if (a->flags2 & AF2_VDA_POWER_DOWN)
esas2r_power_down_notify_firmware(a);
}
/* Suspend I/O processing. */
esas2r_lock_set_flags(&a->flags, AF_OS_RESET);
esas2r_lock_set_flags(&a->flags, AF_DISC_PENDING);
esas2r_lock_set_flags(&a->flags, AF_CHPRST_PENDING);
esas2r_process_adapter_reset(a);
/* Remove devices now that I/O is cleaned up. */
a->prev_dev_cnt = esas2r_targ_db_get_tgt_cnt(a);
esas2r_targ_db_remove_all(a, false);
}
/*
 * Power the adapter back up: restore PCI config space, reinitialize
 * the chip, and send the reset asynchronous event.
 */
bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll)
{
bool ret;
esas2r_lock_clear_flags(&a->flags, AF_POWER_DOWN);
esas2r_init_pci_cfg_space(a);
esas2r_lock_set_flags(&a->flags, AF_FIRST_INIT);
atomic_inc(&a->disable_cnt);
/* reinitialize the adapter */
ret = esas2r_check_adapter(a);
if (!esas2r_init_adapter_hw(a, init_poll))
ret = false;
/* send the reset asynchronous event */
esas2r_send_reset_ae(a, true);
/* clear this flag after initialization. */
esas2r_lock_clear_flags(&a->flags, AF_POWER_MGT);
return ret;
}
bool esas2r_is_adapter_present(struct esas2r_adapter *a)
{
if (a->flags & AF_NOT_PRESENT)
return false;
if (esas2r_read_register_dword(a, MU_DOORBELL_OUT) == 0xFFFFFFFF) {
esas2r_lock_set_flags(&a->flags, AF_NOT_PRESENT);
return false;
}
return true;
}
const char *esas2r_get_model_name(struct esas2r_adapter *a)
{
switch (a->pcid->subsystem_device) {
case ATTO_ESAS_R680:
return "ATTO ExpressSAS R680";
case ATTO_ESAS_R608:
return "ATTO ExpressSAS R608";
case ATTO_ESAS_R60F:
return "ATTO ExpressSAS R60F";
case ATTO_ESAS_R6F0:
return "ATTO ExpressSAS R6F0";
case ATTO_ESAS_R644:
return "ATTO ExpressSAS R644";
case ATTO_ESAS_R648:
return "ATTO ExpressSAS R648";
case ATTO_TSSC_3808:
return "ATTO ThunderStream SC 3808D";
case ATTO_TSSC_3808E:
return "ATTO ThunderStream SC 3808E";
case ATTO_TLSH_1068:
return "ATTO ThunderLink SH 1068";
}
return "ATTO SAS Controller";
}
const char *esas2r_get_model_name_short(struct esas2r_adapter *a)
{
switch (a->pcid->subsystem_device) {
case ATTO_ESAS_R680:
return "R680";
case ATTO_ESAS_R608:
return "R608";
case ATTO_ESAS_R60F:
return "R60F";
case ATTO_ESAS_R6F0:
return "R6F0";
case ATTO_ESAS_R644:
return "R644";
case ATTO_ESAS_R648:
return "R648";
case ATTO_TSSC_3808:
return "SC 3808D";
case ATTO_TSSC_3808E:
return "SC 3808E";
case ATTO_TLSH_1068:
return "SH 1068";
}
return "unknown";
}
/*
* linux/drivers/scsi/esas2r/esas2r_int.c
* esas2r interrupt handling
*
* Copyright (c) 2001-2013 ATTO Technology, Inc.
* (mailto:linuxdrivers@attotech.com)
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* NO WARRANTY
* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
* solely responsible for determining the appropriateness of using and
* distributing the Program and assumes all risks associated with its
* exercise of rights under this Agreement, including but not limited to
* the risks and costs of program errors, damage to or loss of data,
* programs or equipment, and unavailability or interruption of operations.
*
* DISCLAIMER OF LIABILITY
* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
#include "esas2r.h"
/* Local function prototypes */
static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell);
static void esas2r_get_outbound_responses(struct esas2r_adapter *a);
static void esas2r_process_bus_reset(struct esas2r_adapter *a);
/*
* Poll the adapter for interrupts and service them.
* This function handles both legacy interrupts and MSI.
*/
void esas2r_polled_interrupt(struct esas2r_adapter *a)
{
u32 intstat;
u32 doorbell;
esas2r_disable_chip_interrupts(a);
intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);
if (intstat & MU_INTSTAT_POST_OUT) {
/* clear the interrupt */
esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT,
MU_OLIS_INT);
esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT);
esas2r_get_outbound_responses(a);
}
if (intstat & MU_INTSTAT_DRBL) {
doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
if (doorbell != 0)
esas2r_doorbell_interrupt(a, doorbell);
}
esas2r_enable_chip_interrupts(a);
if (atomic_read(&a->disable_cnt) == 0)
esas2r_do_deferred_processes(a);
}
/*
* Legacy and MSI interrupt handlers. Note that the legacy interrupt handler
* schedules a TASKLET to process events, whereas the MSI handler just
* processes interrupt events directly.
*/
irqreturn_t esas2r_interrupt(int irq, void *dev_id)
{
struct esas2r_adapter *a = (struct esas2r_adapter *)dev_id;
if (!esas2r_adapter_interrupt_pending(a))
return IRQ_NONE;
esas2r_lock_set_flags(&a->flags2, AF2_INT_PENDING);
esas2r_schedule_tasklet(a);
return IRQ_HANDLED;
}
void esas2r_adapter_interrupt(struct esas2r_adapter *a)
{
u32 doorbell;
if (likely(a->int_stat & MU_INTSTAT_POST_OUT)) {
/* clear the interrupt */
esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT,
MU_OLIS_INT);
esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT);
esas2r_get_outbound_responses(a);
}
if (unlikely(a->int_stat & MU_INTSTAT_DRBL)) {
doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
if (doorbell != 0)
esas2r_doorbell_interrupt(a, doorbell);
}
a->int_mask = ESAS2R_INT_STS_MASK;
esas2r_enable_chip_interrupts(a);
if (likely(atomic_read(&a->disable_cnt) == 0))
esas2r_do_deferred_processes(a);
}
irqreturn_t esas2r_msi_interrupt(int irq, void *dev_id)
{
struct esas2r_adapter *a = (struct esas2r_adapter *)dev_id;
u32 intstat;
u32 doorbell;
intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);
if (likely(intstat & MU_INTSTAT_POST_OUT)) {
/* clear the interrupt */
esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT,
MU_OLIS_INT);
esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT);
esas2r_get_outbound_responses(a);
}
if (unlikely(intstat & MU_INTSTAT_DRBL)) {
doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
if (doorbell != 0)
esas2r_doorbell_interrupt(a, doorbell);
}
/*
* Work around a chip bug and force a new MSI to be sent if one is
* still pending.
*/
esas2r_disable_chip_interrupts(a);
esas2r_enable_chip_interrupts(a);
if (likely(atomic_read(&a->disable_cnt) == 0))
esas2r_do_deferred_processes(a);
esas2r_do_tasklet_tasks(a);
return IRQ_HANDLED;
}
static void esas2r_handle_outbound_rsp_err(struct esas2r_adapter *a,
struct esas2r_request *rq,
struct atto_vda_ob_rsp *rsp)
{
/*
* For I/O requests, only copy the response if an error
* occurred and setup a callback to do error processing.
*/
if (unlikely(rq->req_stat != RS_SUCCESS)) {
memcpy(&rq->func_rsp, &rsp->func_rsp, sizeof(rsp->func_rsp));
if (rq->req_stat == RS_ABORTED) {
if (rq->timeout > RQ_MAX_TIMEOUT)
rq->req_stat = RS_TIMEOUT;
} else if (rq->req_stat == RS_SCSI_ERROR) {
u8 scsistatus = rq->func_rsp.scsi_rsp.scsi_stat;
esas2r_trace("scsistatus: %x", scsistatus);
/* Any of these are a good result. */
if (scsistatus == SAM_STAT_GOOD || scsistatus ==
SAM_STAT_CONDITION_MET || scsistatus ==
SAM_STAT_INTERMEDIATE || scsistatus ==
SAM_STAT_INTERMEDIATE_CONDITION_MET) {
rq->req_stat = RS_SUCCESS;
rq->func_rsp.scsi_rsp.scsi_stat =
SAM_STAT_GOOD;
}
}
}
}
static void esas2r_get_outbound_responses(struct esas2r_adapter *a)
{
struct atto_vda_ob_rsp *rsp;
u32 rspput_ptr;
u32 rspget_ptr;
struct esas2r_request *rq;
u32 handle;
unsigned long flags;
LIST_HEAD(comp_list);
esas2r_trace_enter();
spin_lock_irqsave(&a->queue_lock, flags);
/* Get the outbound limit and pointers */
rspput_ptr = le32_to_cpu(*a->outbound_copy) & MU_OLC_WRT_PTR;
rspget_ptr = a->last_read;
esas2r_trace("rspput_ptr: %x, rspget_ptr: %x", rspput_ptr, rspget_ptr);
/* If we don't have anything to process, get out */
if (unlikely(rspget_ptr == rspput_ptr)) {
spin_unlock_irqrestore(&a->queue_lock, flags);
esas2r_trace_exit();
return;
}
/* Make sure the firmware is healthy */
if (unlikely(rspput_ptr >= a->list_size)) {
spin_unlock_irqrestore(&a->queue_lock, flags);
esas2r_bugon();
esas2r_local_reset_adapter(a);
esas2r_trace_exit();
return;
}
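/*
 * Walk the outbound list from just past the last entry we consumed up
 * to the firmware's put pointer, wrapping at the end of the list.
 */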
do {
rspget_ptr++;
if (rspget_ptr >= a->list_size)
rspget_ptr = 0;
rsp = (struct atto_vda_ob_rsp *)a->outbound_list_md.virt_addr
+ rspget_ptr;
handle = rsp->handle;
/* Verify the handle range */
if (unlikely(LOWORD(handle) == 0
|| LOWORD(handle) > num_requests +
num_ae_requests + 1)) {
esas2r_bugon();
continue;
}
/* Get the request for this handle */
rq = a->req_table[LOWORD(handle)];
if (unlikely(rq == NULL || rq->vrq->scsi.handle != handle)) {
esas2r_bugon();
continue;
}
list_del(&rq->req_list);
/* Get the completion status */
rq->req_stat = rsp->req_stat;
esas2r_trace("handle: %x", handle);
esas2r_trace("rq: %p", rq);
esas2r_trace("req_status: %x", rq->req_stat);
if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)) {
esas2r_handle_outbound_rsp_err(a, rq, rsp);
} else {
/*
* Copy the outbound completion struct for non-I/O
* requests.
*/
memcpy(&rq->func_rsp, &rsp->func_rsp,
sizeof(rsp->func_rsp));
}
/* Queue the request for completion. */
list_add_tail(&rq->comp_list, &comp_list);
} while (rspget_ptr != rspput_ptr);
a->last_read = rspget_ptr;
spin_unlock_irqrestore(&a->queue_lock, flags);
esas2r_comp_list_drain(a, &comp_list);
esas2r_trace_exit();
}
/*
 * Perform all deferred processes for the adapter. Deferred
 * processes can only be done while the adapter's interrupt
 * disable count is zero.
 */
void esas2r_do_deferred_processes(struct esas2r_adapter *a)
{
int startreqs = 2;
struct esas2r_request *rq;
unsigned long flags;
/*
* startreqs is used to control starting requests
* that are on the deferred queue
* = 0 - do not start any requests
* = 1 - can start discovery requests
* = 2 - can start any request
*/
if (a->flags & (AF_CHPRST_PENDING | AF_FLASHING))
startreqs = 0;
else if (a->flags & AF_DISC_PENDING)
startreqs = 1;
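/*
 * Bump the disable count so that completions triggered while we walk
 * the deferred queue cannot recursively re-enter this function.
 */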
atomic_inc(&a->disable_cnt);
/* Clear off the completed list to be processed later. */
if (esas2r_is_tasklet_pending(a)) {
esas2r_schedule_tasklet(a);
startreqs = 0;
}
/*
* If we can start requests then traverse the defer queue
* looking for requests to start or complete
*/
if (startreqs && !list_empty(&a->defer_list)) {
LIST_HEAD(comp_list);
struct list_head *element, *next;
spin_lock_irqsave(&a->queue_lock, flags);
list_for_each_safe(element, next, &a->defer_list) {
rq = list_entry(element, struct esas2r_request,
req_list);
if (rq->req_stat != RS_PENDING) {
list_del(element);
list_add_tail(&rq->comp_list, &comp_list);
}
/*
* Process discovery and OS requests separately. We
* can't hold up discovery requests when discovery is
* pending. In general, there may be different sets of
* conditions for starting different types of requests.
*/
else if (rq->req_type == RT_DISC_REQ) {
list_del(element);
esas2r_disc_local_start_request(a, rq);
} else if (startreqs == 2) {
list_del(element);
esas2r_local_start_request(a, rq);
/*
* Flashing could have been set by last local
* start
*/
if (a->flags & AF_FLASHING)
break;
}
}
spin_unlock_irqrestore(&a->queue_lock, flags);
esas2r_comp_list_drain(a, &comp_list);
}
atomic_dec(&a->disable_cnt);
}
/*
* Process an adapter reset (or one that is about to happen)
* by making sure all outstanding requests are completed that
* haven't been already.
*/
void esas2r_process_adapter_reset(struct esas2r_adapter *a)
{
struct esas2r_request *rq = &a->general_req;
unsigned long flags;
struct esas2r_disc_context *dc;
LIST_HEAD(comp_list);
struct list_head *element;
esas2r_trace_enter();
spin_lock_irqsave(&a->queue_lock, flags);
/* abort the active discovery, if any. */
if (rq->interrupt_cx) {
dc = (struct esas2r_disc_context *)rq->interrupt_cx;
dc->disc_evt = 0;
esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG);
}
/*
* just clear the interrupt callback for now. it will be dequeued if
* and when we find it on the active queue and we don't want the
* callback called. also set the dummy completion callback in case we
* were doing an I/O request.
*/
rq->interrupt_cx = NULL;
rq->interrupt_cb = NULL;
rq->comp_cb = esas2r_dummy_complete;
/* Reset the read and write pointers */
*a->outbound_copy =
a->last_write =
a->last_read = a->list_size - 1;
esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE);
/* Kill all the requests on the deferred list */
list_for_each(element, &a->defer_list) {
rq = list_entry(element, struct esas2r_request, req_list);
if (rq->req_stat == RS_STARTED)
if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
list_add_tail(&rq->comp_list, &comp_list);
}
spin_unlock_irqrestore(&a->queue_lock, flags);
esas2r_comp_list_drain(a, &comp_list);
esas2r_process_bus_reset(a);
esas2r_trace_exit();
}
static void esas2r_process_bus_reset(struct esas2r_adapter *a)
{
struct esas2r_request *rq;
struct list_head *element;
unsigned long flags;
LIST_HEAD(comp_list);
esas2r_trace_enter();
esas2r_hdebug("reset detected");
spin_lock_irqsave(&a->queue_lock, flags);
/* kill all the requests on the deferred queue */
list_for_each(element, &a->defer_list) {
rq = list_entry(element, struct esas2r_request, req_list);
if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
list_add_tail(&rq->comp_list, &comp_list);
}
spin_unlock_irqrestore(&a->queue_lock, flags);
esas2r_comp_list_drain(a, &comp_list);
if (atomic_read(&a->disable_cnt) == 0)
esas2r_do_deferred_processes(a);
esas2r_lock_clear_flags(&a->flags, AF_OS_RESET);
esas2r_trace_exit();
}
static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a)
{
esas2r_lock_clear_flags(&a->flags, AF_CHPRST_NEEDED);
esas2r_lock_clear_flags(&a->flags, AF_BUSRST_NEEDED);
esas2r_lock_clear_flags(&a->flags, AF_BUSRST_DETECTED);
esas2r_lock_clear_flags(&a->flags, AF_BUSRST_PENDING);
/*
* Make sure we don't attempt more than 3 resets
* when the uptime between resets does not exceed one
* minute. This will stop any situation where there is
* really something wrong with the hardware. The way
* this works is that we start with uptime ticks at 0.
* Each time we do a reset, we add 20 seconds worth to
* the count. Each time a timer tick occurs, as long
* as a chip reset is not pending, we decrement the
* tick count. If the uptime ticks ever gets to 60
* seconds worth, we disable the adapter from that
* point forward. Three strikes, you're out.
*/
if (!esas2r_is_adapter_present(a) || (a->chip_uptime >=
ESAS2R_CHP_UPTIME_MAX)) {
esas2r_hdebug("*** adapter disabled ***");
/*
* Ok, some kind of hard failure. Make sure we
* exit this loop with chip interrupts
* permanently disabled so we don't lock up the
* entire system. Also flag degraded mode to
* prevent the heartbeat from trying to recover.
*/
esas2r_lock_set_flags(&a->flags, AF_DEGRADED_MODE);
esas2r_lock_set_flags(&a->flags, AF_DISABLED);
esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING);
esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING);
esas2r_disable_chip_interrupts(a);
a->int_mask = 0;
esas2r_process_adapter_reset(a);
esas2r_log(ESAS2R_LOG_CRIT,
"Adapter disabled because of hardware failure");
} else {
u32 flags =
esas2r_lock_set_flags(&a->flags, AF_CHPRST_STARTED);
if (!(flags & AF_CHPRST_STARTED))
/*
* Only disable interrupts if this is
* the first reset attempt.
*/
esas2r_disable_chip_interrupts(a);
if ((a->flags & AF_POWER_MGT) && !(a->flags & AF_FIRST_INIT) &&
!(flags & AF_CHPRST_STARTED)) {
/*
* Don't reset the chip on the first
* deferred power up attempt.
*/
} else {
esas2r_hdebug("*** resetting chip ***");
esas2r_reset_chip(a);
}
/* Kick off the reinitialization */
a->chip_uptime += ESAS2R_CHP_UPTIME_CNT;
a->chip_init_time = jiffies_to_msecs(jiffies);
if (!(a->flags & AF_POWER_MGT)) {
esas2r_process_adapter_reset(a);
if (!(flags & AF_CHPRST_STARTED)) {
/* Remove devices now that I/O is cleaned up. */
a->prev_dev_cnt =
esas2r_targ_db_get_tgt_cnt(a);
esas2r_targ_db_remove_all(a, false);
}
}
a->int_mask = 0;
}
}
static void esas2r_handle_chip_rst_during_tasklet(struct esas2r_adapter *a)
{
while (a->flags & AF_CHPRST_DETECTED) {
/*
* Balance the enable in esas2r_init_adapter_hw.
* Esas2r_power_down already took care of it for power
* management.
*/
if (!(a->flags & AF_DEGRADED_MODE) && !(a->flags &
AF_POWER_MGT))
esas2r_disable_chip_interrupts(a);
/* Reinitialize the chip. */
esas2r_check_adapter(a);
esas2r_init_adapter_hw(a, false);
if (a->flags & AF_CHPRST_NEEDED)
break;
if (a->flags & AF_POWER_MGT) {
/* Recovery from power management. */
if (a->flags & AF_FIRST_INIT) {
/* Chip reset during normal power up */
esas2r_log(ESAS2R_LOG_CRIT,
"The firmware was reset during a normal power-up sequence");
} else {
/* Deferred power up complete. */
esas2r_lock_clear_flags(&a->flags,
AF_POWER_MGT);
esas2r_send_reset_ae(a, true);
}
} else {
/* Recovery from online chip reset. */
if (a->flags & AF_FIRST_INIT) {
/* Chip reset during driver load */
} else {
/* Chip reset after driver load */
esas2r_send_reset_ae(a, false);
}
esas2r_log(ESAS2R_LOG_CRIT,
"Recovering from a chip reset while the chip was online");
}
esas2r_lock_clear_flags(&a->flags, AF_CHPRST_STARTED);
esas2r_enable_chip_interrupts(a);
/*
* Clear this flag last! this indicates that the chip has been
* reset already during initialization.
*/
esas2r_lock_clear_flags(&a->flags, AF_CHPRST_DETECTED);
}
}
/* Perform deferred tasks when chip interrupts are disabled */
void esas2r_do_tasklet_tasks(struct esas2r_adapter *a)
{
if (a->flags & (AF_CHPRST_NEEDED | AF_CHPRST_DETECTED)) {
if (a->flags & AF_CHPRST_NEEDED)
esas2r_chip_rst_needed_during_tasklet(a);
esas2r_handle_chip_rst_during_tasklet(a);
}
if (a->flags & AF_BUSRST_NEEDED) {
esas2r_hdebug("hard resetting bus");
esas2r_lock_clear_flags(&a->flags, AF_BUSRST_NEEDED);
if (a->flags & AF_FLASHING)
esas2r_lock_set_flags(&a->flags, AF_BUSRST_DETECTED);
else
esas2r_write_register_dword(a, MU_DOORBELL_IN,
DRBL_RESET_BUS);
}
if (a->flags & AF_BUSRST_DETECTED) {
esas2r_process_bus_reset(a);
esas2r_log_dev(ESAS2R_LOG_WARN,
&(a->host->shost_gendev),
"scsi_report_bus_reset() called");
scsi_report_bus_reset(a->host, 0);
esas2r_lock_clear_flags(&a->flags, AF_BUSRST_DETECTED);
esas2r_lock_clear_flags(&a->flags, AF_BUSRST_PENDING);
esas2r_log(ESAS2R_LOG_WARN, "Bus reset complete");
}
if (a->flags & AF_PORT_CHANGE) {
esas2r_lock_clear_flags(&a->flags, AF_PORT_CHANGE);
esas2r_targ_db_report_changes(a);
}
if (atomic_read(&a->disable_cnt) == 0)
esas2r_do_deferred_processes(a);
}
static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell)
{
if (!(doorbell & DRBL_FORCE_INT)) {
esas2r_trace_enter();
esas2r_trace("doorbell: %x", doorbell);
}
/* First clear the doorbell bits */
esas2r_write_register_dword(a, MU_DOORBELL_OUT, doorbell);
if (doorbell & DRBL_RESET_BUS)
esas2r_lock_set_flags(&a->flags, AF_BUSRST_DETECTED);
if (doorbell & DRBL_FORCE_INT)
esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT);
if (doorbell & DRBL_PANIC_REASON_MASK) {
esas2r_hdebug("*** Firmware Panic ***");
esas2r_log(ESAS2R_LOG_CRIT, "The firmware has panicked");
}
if (doorbell & DRBL_FW_RESET) {
esas2r_lock_set_flags(&a->flags2, AF2_COREDUMP_AVAIL);
esas2r_local_reset_adapter(a);
}
if (!(doorbell & DRBL_FORCE_INT))
esas2r_trace_exit();
}
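/*
 * Ring the inbound doorbell with DRBL_FORCE_INT; the firmware echoes it
 * on the outbound doorbell, which is used both to probe firmware
 * readiness and to acknowledge the heartbeat.
 */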
void esas2r_force_interrupt(struct esas2r_adapter *a)
{
esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_FORCE_INT |
DRBL_DRV_VER);
}
static void esas2r_lun_event(struct esas2r_adapter *a, union atto_vda_ae *ae,
u16 target, u32 length)
{
struct esas2r_target *t = a->targetdb + target;
u32 cplen = length;
unsigned long flags;
if (cplen > sizeof(t->lu_event))
cplen = sizeof(t->lu_event);
esas2r_trace("ae->lu.dwevent: %x", ae->lu.dwevent);
esas2r_trace("ae->lu.bystate: %x", ae->lu.bystate);
spin_lock_irqsave(&a->mem_lock, flags);
t->new_target_state = TS_INVALID;
if (ae->lu.dwevent & VDAAE_LU_LOST) {
t->new_target_state = TS_NOT_PRESENT;
} else {
switch (ae->lu.bystate) {
case VDAAE_LU_NOT_PRESENT:
case VDAAE_LU_OFFLINE:
case VDAAE_LU_DELETED:
case VDAAE_LU_FACTORY_DISABLED:
t->new_target_state = TS_NOT_PRESENT;
break;
case VDAAE_LU_ONLINE:
case VDAAE_LU_DEGRADED:
t->new_target_state = TS_PRESENT;
break;
}
}
if (t->new_target_state != TS_INVALID) {
memcpy(&t->lu_event, &ae->lu, cplen);
esas2r_disc_queue_event(a, DCDE_DEV_CHANGE);
}
spin_unlock_irqrestore(&a->mem_lock, flags);
}
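/*
 * Process a completed async event (AE) request.  The response buffer may
 * contain several variable-length events packed back to back; each
 * event's hdr.bylength gives its size and the loop below walks the
 * buffer until it reaches 'last', validating every length on the way.
 */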
void esas2r_ae_complete(struct esas2r_adapter *a, struct esas2r_request *rq)
{
union atto_vda_ae *ae =
(union atto_vda_ae *)rq->vda_rsp_data->ae_data.event_data;
u32 length = le32_to_cpu(rq->func_rsp.ae_rsp.length);
union atto_vda_ae *last =
(union atto_vda_ae *)(rq->vda_rsp_data->ae_data.event_data
+ length);
esas2r_trace_enter();
esas2r_trace("length: %d", length);
if (length > sizeof(struct atto_vda_ae_data)
|| (length & 3) != 0
|| length == 0) {
esas2r_log(ESAS2R_LOG_WARN,
"The AE request response length (%p) is too long: %d",
rq, length);
esas2r_hdebug("aereq->length (0x%x) too long", length);
esas2r_bugon();
last = ae;
}
while (ae < last) {
u16 target;
esas2r_trace("ae: %p", ae);
esas2r_trace("ae->hdr: %p", &(ae->hdr));
length = ae->hdr.bylength;
if (length > (u32)((u8 *)last - (u8 *)ae)
|| (length & 3) != 0
|| length == 0) {
esas2r_log(ESAS2R_LOG_CRIT,
"the async event length is invalid (%p): %d",
ae, length);
esas2r_hdebug("ae->hdr.length (0x%x) invalid", length);
esas2r_bugon();
break;
}
esas2r_nuxi_ae_data(ae);
esas2r_queue_fw_event(a, fw_event_vda_ae, ae,
sizeof(union atto_vda_ae));
switch (ae->hdr.bytype) {
case VDAAE_HDR_TYPE_RAID:
if (ae->raid.dwflags & (VDAAE_GROUP_STATE
| VDAAE_RBLD_STATE
| VDAAE_MEMBER_CHG
| VDAAE_PART_CHG)) {
esas2r_log(ESAS2R_LOG_INFO,
"RAID event received - name:%s rebuild_state:%d group_state:%d",
ae->raid.acname,
ae->raid.byrebuild_state,
ae->raid.bygroup_state);
}
break;
case VDAAE_HDR_TYPE_LU:
esas2r_log(ESAS2R_LOG_INFO,
"LUN event received: event:%d target_id:%d LUN:%d state:%d",
ae->lu.dwevent,
ae->lu.id.tgtlun.wtarget_id,
ae->lu.id.tgtlun.bylun,
ae->lu.bystate);
target = ae->lu.id.tgtlun.wtarget_id;
if (target < ESAS2R_MAX_TARGETS)
esas2r_lun_event(a, ae, target, length);
break;
case VDAAE_HDR_TYPE_DISK:
esas2r_log(ESAS2R_LOG_INFO, "Disk event received");
break;
default:
/* Silently ignore the rest and let the apps deal with
* them.
*/
break;
}
ae = (union atto_vda_ae *)((u8 *)ae + length);
}
/* Now requeue it. */
esas2r_start_ae_request(a, rq);
esas2r_trace_exit();
}
/* Send an asynchronous event for a chip reset or power management. */
void esas2r_send_reset_ae(struct esas2r_adapter *a, bool pwr_mgt)
{
struct atto_vda_ae_hdr ae;
if (pwr_mgt)
ae.bytype = VDAAE_HDR_TYPE_PWRMGT;
else
ae.bytype = VDAAE_HDR_TYPE_RESET;
ae.byversion = VDAAE_HDR_VER_0;
ae.byflags = 0;
ae.bylength = (u8)sizeof(struct atto_vda_ae_hdr);
if (pwr_mgt)
esas2r_hdebug("*** sending power management AE ***");
else
esas2r_hdebug("*** sending reset AE ***");
esas2r_queue_fw_event(a, fw_event_vda_ae, &ae,
sizeof(union atto_vda_ae));
}
void esas2r_dummy_complete(struct esas2r_adapter *a, struct esas2r_request *rq)
{}
static void esas2r_check_req_rsp_sense(struct esas2r_adapter *a,
struct esas2r_request *rq)
{
u8 snslen, snslen2;
snslen = snslen2 = rq->func_rsp.scsi_rsp.sense_len;
if (snslen > rq->sense_len)
snslen = rq->sense_len;
if (snslen) {
if (rq->sense_buf)
memcpy(rq->sense_buf, rq->data_buf, snslen);
else
rq->sense_buf = (u8 *)rq->data_buf;
/* See about possible sense data */
if (snslen2 > 0x0c) {
u8 *s = (u8 *)rq->data_buf;
esas2r_trace_enter();
/* REPORTED LUNS DATA HAS CHANGED (ASC/ASCQ 3Fh/0Eh) */
if (s[0x0c] == 0x3f && s[0x0d] == 0x0E) {
esas2r_trace("rq->target_id: %d",
rq->target_id);
esas2r_target_state_changed(a, rq->target_id,
TS_LUN_CHANGE);
}
esas2r_trace("add_sense_key=%x", s[0x0c]);
esas2r_trace("add_sense_qual=%x", s[0x0d]);
esas2r_trace_exit();
}
}
rq->sense_len = snslen;
}
void esas2r_complete_request(struct esas2r_adapter *a,
struct esas2r_request *rq)
{
if (rq->vrq->scsi.function == VDA_FUNC_FLASH
&& rq->vrq->flash.sub_func == VDA_FLASH_COMMIT)
esas2r_lock_clear_flags(&a->flags, AF_FLASHING);
/* See if we setup a callback to do special processing */
if (rq->interrupt_cb) {
(*rq->interrupt_cb)(a, rq);
if (rq->req_stat == RS_PENDING) {
esas2r_start_request(a, rq);
return;
}
}
if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)
&& unlikely(rq->req_stat != RS_SUCCESS)) {
esas2r_check_req_rsp_sense(a, rq);
esas2r_log_request_failure(a, rq);
}
(*rq->comp_cb)(a, rq);
}
/*
* linux/drivers/scsi/esas2r/esas2r_io.c
* For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
*
* Copyright (c) 2001-2013 ATTO Technology, Inc.
* (mailto:linuxdrivers@attotech.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* NO WARRANTY
* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
* solely responsible for determining the appropriateness of using and
* distributing the Program and assumes all risks associated with its
* exercise of rights under this Agreement, including but not limited to
* the risks and costs of program errors, damage to or loss of data,
* programs or equipment, and unavailability or interruption of operations.
*
* DISCLAIMER OF LIABILITY
* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
* USA.
*/
#include "esas2r.h"
void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq)
{
struct esas2r_target *t = NULL;
struct esas2r_request *startrq = rq;
unsigned long flags;
if (unlikely(a->flags & (AF_DEGRADED_MODE | AF_POWER_DOWN))) {
if (rq->vrq->scsi.function == VDA_FUNC_SCSI)
rq->req_stat = RS_SEL2;
else
rq->req_stat = RS_DEGRADED;
} else if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)) {
t = a->targetdb + rq->target_id;
if (unlikely(t >= a->targetdb_end
|| !(t->flags & TF_USED))) {
rq->req_stat = RS_SEL;
} else {
/* copy in the target ID. */
rq->vrq->scsi.target_id = cpu_to_le16(t->virt_targ_id);
/*
* Test if we want to report RS_SEL for missing target.
* Note that if AF_DISC_PENDING is set then this will
* go on the defer queue.
*/
if (unlikely(t->target_state != TS_PRESENT
&& !(a->flags & AF_DISC_PENDING)))
rq->req_stat = RS_SEL;
}
}
if (unlikely(rq->req_stat != RS_PENDING)) {
esas2r_complete_request(a, rq);
return;
}
esas2r_trace("rq=%p", rq);
esas2r_trace("rq->vrq->scsi.handle=%x", rq->vrq->scsi.handle);
if (rq->vrq->scsi.function == VDA_FUNC_SCSI) {
esas2r_trace("rq->target_id=%d", rq->target_id);
esas2r_trace("rq->vrq->scsi.flags=%x", rq->vrq->scsi.flags);
}
spin_lock_irqsave(&a->queue_lock, flags);
if (likely(list_empty(&a->defer_list) &&
!(a->flags &
(AF_CHPRST_PENDING | AF_FLASHING | AF_DISC_PENDING))))
esas2r_local_start_request(a, startrq);
else
list_add_tail(&startrq->req_list, &a->defer_list);
spin_unlock_irqrestore(&a->queue_lock, flags);
}
/*
* Starts the specified request.  All requests have RS_PENDING set when this
* routine is called.  The caller is usually esas2r_start_request, but
* esas2r_do_deferred_processes will start requests that were deferred.
*
* The caller must ensure that requests can be started.
*
* esas2r_start_request will defer a request if there are already requests
* waiting or there is a chip reset pending.  Once the reset condition clears,
* esas2r_do_deferred_processes will call this function to start the request.
*
* When a request is started, it is placed on the active list and queued to
* the controller.
*/
void esas2r_local_start_request(struct esas2r_adapter *a,
struct esas2r_request *rq)
{
esas2r_trace_enter();
esas2r_trace("rq=%p", rq);
esas2r_trace("rq->vrq:%p", rq->vrq);
esas2r_trace("rq->vrq_md->phys_addr:%x", rq->vrq_md->phys_addr);
if (unlikely(rq->vrq->scsi.function == VDA_FUNC_FLASH
&& rq->vrq->flash.sub_func == VDA_FLASH_COMMIT))
esas2r_lock_set_flags(&a->flags, AF_FLASHING);
list_add_tail(&rq->req_list, &a->active_list);
esas2r_start_vda_request(a, rq);
esas2r_trace_exit();
}
void esas2r_start_vda_request(struct esas2r_adapter *a,
struct esas2r_request *rq)
{
struct esas2r_inbound_list_source_entry *element;
u32 dw;
rq->req_stat = RS_STARTED;
/*
* Calculate the inbound list entry location and the current state of
* toggle bit.
*/
a->last_write++;
if (a->last_write >= a->list_size) {
a->last_write = 0;
/* update the toggle bit */
if (a->flags & AF_COMM_LIST_TOGGLE)
esas2r_lock_clear_flags(&a->flags,
AF_COMM_LIST_TOGGLE);
else
esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE);
}
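/*
 * The inbound list is circular; the toggle bit flips on every wrap so
 * the firmware can distinguish a freshly wrapped write pointer from a
 * stale one.  (This is a minimal reading of the code; the exact
 * contract is defined by the firmware interface.)
 */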
element =
(struct esas2r_inbound_list_source_entry *)a->inbound_list_md.
virt_addr
+ a->last_write;
/* Set the VDA request size if it was never modified */
if (rq->vda_req_sz == RQ_SIZE_DEFAULT)
rq->vda_req_sz = (u16)(a->max_vdareq_size / sizeof(u32));
element->address = cpu_to_le64(rq->vrq_md->phys_addr);
element->length = cpu_to_le32(rq->vda_req_sz);
/* Update the write pointer */
dw = a->last_write;
if (a->flags & AF_COMM_LIST_TOGGLE)
dw |= MU_ILW_TOGGLE;
esas2r_trace("rq->vrq->scsi.handle:%x", rq->vrq->scsi.handle);
esas2r_trace("dw:%x", dw);
esas2r_trace("rq->vda_req_sz:%x", rq->vda_req_sz);
esas2r_write_register_dword(a, MU_IN_LIST_WRITE, dw);
}
/*
* Build the scatter/gather list for an I/O request according to the
* specifications placed in the s/g context. The caller must initialize
* context prior to the initial call by calling esas2r_sgc_init().
*/
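/*
 * SGEs here are 64-bit address/length pairs.  When the current SGL page
 * is exhausted, the last SGE written is copied to the head of a newly
 * allocated page and its old slot is rewritten as a chain entry
 * (SGE_CHAIN) pointing at the new page, letting the firmware follow the
 * list across non-contiguous pages.
 */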
bool esas2r_build_sg_list_sge(struct esas2r_adapter *a,
struct esas2r_sg_context *sgc)
{
struct esas2r_request *rq = sgc->first_req;
union atto_vda_req *vrq = rq->vrq;
while (sgc->length) {
u32 rem = 0;
u64 addr;
u32 len;
len = (*sgc->get_phys_addr)(sgc, &addr);
if (unlikely(len == 0))
return false;
/* if current length is more than what's left, stop there */
if (unlikely(len > sgc->length))
len = sgc->length;
another_entry:
/* limit to a round number less than the maximum length */
if (len > SGE_LEN_MAX) {
/*
* Save the remainder of the split. Whenever we limit
* an entry we come back around to build entries out
* of the leftover. We do this to prevent multiple
* calls to the get_phys_addr() function for an SGE
* that is too large.
*/
rem = len - SGE_LEN_MAX;
len = SGE_LEN_MAX;
}
/* See if we need to allocate a new SGL */
if (unlikely(sgc->sge.a64.curr > sgc->sge.a64.limit)) {
u8 sgelen;
struct esas2r_mem_desc *sgl;
/*
* If no SGLs are available, return failure.  The
* caller can call us later with the current context
* to pick up here.
*/
sgl = esas2r_alloc_sgl(a);
if (unlikely(sgl == NULL))
return false;
/* Calculate the length of the last SGE filled in */
sgelen = (u8)((u8 *)sgc->sge.a64.curr
- (u8 *)sgc->sge.a64.last);
/*
* Copy the last SGE filled in to the first entry of
* the new SGL to make room for the chain entry.
*/
memcpy(sgl->virt_addr, sgc->sge.a64.last, sgelen);
/* Figure out the new curr pointer in the new segment */
sgc->sge.a64.curr =
(struct atto_vda_sge *)((u8 *)sgl->virt_addr +
sgelen);
/* Set the limit pointer and build the chain entry */
sgc->sge.a64.limit =
(struct atto_vda_sge *)((u8 *)sgl->virt_addr
+ sgl_page_size
- sizeof(struct
atto_vda_sge));
sgc->sge.a64.last->length = cpu_to_le32(
SGE_CHAIN | SGE_ADDR_64);
sgc->sge.a64.last->address =
cpu_to_le64(sgl->phys_addr);
/*
* Now, if there was a previous chain entry, then
* update it to contain the length of this segment
* and size of this chain.  Otherwise this is the
* first SGL, so set the chain_offset in the request.
*/
if (sgc->sge.a64.chain) {
sgc->sge.a64.chain->length |=
cpu_to_le32(
((u8 *)(sgc->sge.a64.
last + 1)
- (u8 *)rq->sg_table->
virt_addr)
+ sizeof(struct atto_vda_sge) *
LOBIT(SGE_CHAIN_SZ));
} else {
vrq->scsi.chain_offset = (u8)
((u8 *)sgc->
sge.a64.last -
(u8 *)vrq);
/*
* This is the first SGL, so set the
* chain_offset and the VDA request size in
* the request.
*/
rq->vda_req_sz =
(vrq->scsi.chain_offset +
sizeof(struct atto_vda_sge) +
3)
/ sizeof(u32);
}
/*
* Remember this so when we get a new SGL filled in we
* can update the length of this chain entry.
*/
sgc->sge.a64.chain = sgc->sge.a64.last;
/* Now link the new SGL onto the primary request. */
list_add(&sgl->next_desc, &rq->sg_table_head);
}
/* Update last one filled in */
sgc->sge.a64.last = sgc->sge.a64.curr;
/* Build the new SGE and update the S/G context */
sgc->sge.a64.curr->length = cpu_to_le32(SGE_ADDR_64 | len);
/* The SGE address field is 64 bits wide; use cpu_to_le64 here. */
sgc->sge.a64.curr->address = cpu_to_le64(addr);
sgc->sge.a64.curr++;
sgc->cur_offset += len;
sgc->length -= len;
/*
* Check if we previously split an entry. If so we have to
* pick up where we left off.
*/
if (rem) {
addr += len;
len = rem;
rem = 0;
goto another_entry;
}
}
/* Mark the end of the SGL */
sgc->sge.a64.last->length |= cpu_to_le32(SGE_LAST);
/*
* If there was a previous chain entry, update the length to indicate
* the length of this last segment.
*/
if (sgc->sge.a64.chain) {
sgc->sge.a64.chain->length |= cpu_to_le32(
((u8 *)(sgc->sge.a64.curr) -
(u8 *)rq->sg_table->virt_addr));
} else {
u16 reqsize;
/*
* The entire VDA request was not used, so let's
* set the size of the VDA request to be DMA'd.
*/
reqsize =
((u16)((u8 *)sgc->sge.a64.last - (u8 *)vrq)
+ sizeof(struct atto_vda_sge) + 3) / sizeof(u32);
/*
* Only update the request size if it is bigger than what is
* already there. We can come in here twice for some management
* commands.
*/
if (reqsize > rq->vda_req_sz)
rq->vda_req_sz = reqsize;
}
return true;
}
/*
* Create PRD list for each I-block consumed by the command. This routine
* determines how much data is required from each I-block being consumed
* by the command. The first and last I-blocks can be partials and all of
* the I-blocks in between are for a full I-block of data.
*
* The interleave size is used to determine the number of bytes in the 1st
* I-block; the remaining I-blocks supply whatever remains.
*/
static bool esas2r_build_prd_iblk(struct esas2r_adapter *a,
struct esas2r_sg_context *sgc)
{
struct esas2r_request *rq = sgc->first_req;
u64 addr;
u32 len;
struct esas2r_mem_desc *sgl;
u32 numchain = 1;
u32 rem = 0;
while (sgc->length) {
/* Get the next address/length pair */
len = (*sgc->get_phys_addr)(sgc, &addr);
if (unlikely(len == 0))
return false;
/* If current length is more than what's left, stop there */
if (unlikely(len > sgc->length))
len = sgc->length;
another_entry:
/* Limit to a round number less than the maximum length */
if (len > PRD_LEN_MAX) {
/*
* Save the remainder of the split.  Whenever we limit
* an entry we come back around to build entries out
* of the leftover. We do this to prevent multiple
* calls to the get_phys_addr() function for an SGE
* that is too large.
*/
rem = len - PRD_LEN_MAX;
len = PRD_LEN_MAX;
}
/* See if we need to allocate a new SGL */
if (sgc->sge.prd.sge_cnt == 0) {
if (len == sgc->length) {
/*
* We only have 1 PRD entry left.
* It can be placed where the chain
* entry would have gone
*/
/* Build the simple SGE */
sgc->sge.prd.curr->ctl_len = cpu_to_le32(
PRD_DATA | len);
sgc->sge.prd.curr->address = cpu_to_le64(addr);
/* Adjust length related fields */
sgc->cur_offset += len;
sgc->length -= len;
/* We use the reserved chain entry for data */
numchain = 0;
break;
}
if (sgc->sge.prd.chain) {
/*
* Fill in the entry count of the current SGL in the
* previous chain entry; the current SGL may not be
* full.
*/
sgc->sge.prd.chain->ctl_len |= cpu_to_le32(
sgc->sge.prd.sgl_max_cnt);
}
/*
* If no SGLs are available, return failure.  The
* caller can call us later with the current context
* to pick up here.
*/
sgl = esas2r_alloc_sgl(a);
if (unlikely(sgl == NULL))
return false;
/*
* Link the new SGL onto the chain
* They are in reverse order
*/
list_add(&sgl->next_desc, &rq->sg_table_head);
/*
* An SGL was just filled in and we are starting
* a new SGL. Prime the chain of the ending SGL with
* info that points to the new SGL. The length gets
* filled in when the new SGL is filled or ended
*/
sgc->sge.prd.chain = sgc->sge.prd.curr;
sgc->sge.prd.chain->ctl_len = cpu_to_le32(PRD_CHAIN);
sgc->sge.prd.chain->address =
cpu_to_le64(sgl->phys_addr);
/*
* Start a new segment.
* Take one away and save for chain SGE
*/
sgc->sge.prd.curr =
(struct atto_physical_region_description *)sgl
->
virt_addr;
sgc->sge.prd.sge_cnt = sgc->sge.prd.sgl_max_cnt - 1;
}
sgc->sge.prd.sge_cnt--;
/* Build the simple SGE */
sgc->sge.prd.curr->ctl_len = cpu_to_le32(PRD_DATA | len);
sgc->sge.prd.curr->address = cpu_to_le64(addr);
/* Used another element. Point to the next one */
sgc->sge.prd.curr++;
/* Adjust length related fields */
sgc->cur_offset += len;
sgc->length -= len;
/*
* Check if we previously split an entry. If so we have to
* pick up where we left off.
*/
if (rem) {
addr += len;
len = rem;
rem = 0;
goto another_entry;
}
}
if (!list_empty(&rq->sg_table_head)) {
if (sgc->sge.prd.chain) {
sgc->sge.prd.chain->ctl_len |=
cpu_to_le32(sgc->sge.prd.sgl_max_cnt
- sgc->sge.prd.sge_cnt
- numchain);
}
}
return true;
}
bool esas2r_build_sg_list_prd(struct esas2r_adapter *a,
struct esas2r_sg_context *sgc)
{
struct esas2r_request *rq = sgc->first_req;
u32 len = sgc->length;
struct esas2r_target *t = a->targetdb + rq->target_id;
u8 is_i_o = 0;
u16 reqsize;
struct atto_physical_region_description *curr_iblk_chn;
u8 *cdb = (u8 *)&rq->vrq->scsi.cdb[0];
/*
* extract LBA from command so we can determine
* the I-Block boundary
*/
if (rq->vrq->scsi.function == VDA_FUNC_SCSI
&& t->target_state == TS_PRESENT
&& !(t->flags & TF_PASS_THRU)) {
u32 lbalo = 0;
switch (rq->vrq->scsi.cdb[0]) {
case READ_16:
case WRITE_16:
{
lbalo =
MAKEDWORD(MAKEWORD(cdb[9],
cdb[8]),
MAKEWORD(cdb[7],
cdb[6]));
is_i_o = 1;
break;
}
case READ_12:
case WRITE_12:
case READ_10:
case WRITE_10:
{
lbalo =
MAKEDWORD(MAKEWORD(cdb[5],
cdb[4]),
MAKEWORD(cdb[3],
cdb[2]));
is_i_o = 1;
break;
}
case READ_6:
case WRITE_6:
{
lbalo =
MAKEDWORD(MAKEWORD(cdb[3],
cdb[2]),
MAKEWORD(cdb[1] & 0x1F,
0));
is_i_o = 1;
break;
}
default:
break;
}
if (is_i_o) {
u32 startlba;
rq->vrq->scsi.iblk_cnt_prd = 0;
/* Determine size of 1st I-block PRD list */
startlba = t->inter_block - (lbalo & (t->inter_block -
1));
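/*
 * inter_block is assumed to be a power of two, so the mask yields the
 * offset within the current I-block; startlba is really a count of
 * blocks from lbalo up to the next I-block boundary, not an LBA.
 */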
sgc->length = startlba * t->block_size;
/* Check if the 1st I-block chain starts at the base of an I-block */
if ((lbalo & (t->inter_block - 1)) == 0)
rq->flags |= RF_1ST_IBLK_BASE;
if (sgc->length > len)
sgc->length = len;
} else {
sgc->length = len;
}
} else {
sgc->length = len;
}
/* get our starting chain address */
curr_iblk_chn =
(struct atto_physical_region_description *)sgc->sge.a64.curr;
sgc->sge.prd.sgl_max_cnt = sgl_page_size /
sizeof(struct
atto_physical_region_description);
/* create all of the I-block PRD lists */
while (len) {
sgc->sge.prd.sge_cnt = 0;
sgc->sge.prd.chain = NULL;
sgc->sge.prd.curr = curr_iblk_chn;
/* increment to next I-Block */
len -= sgc->length;
/* go build the next I-Block PRD list */
if (unlikely(!esas2r_build_prd_iblk(a, sgc)))
return false;
curr_iblk_chn++;
if (is_i_o) {
rq->vrq->scsi.iblk_cnt_prd++;
if (len > t->inter_byte)
sgc->length = t->inter_byte;
else
sgc->length = len;
}
}
/* figure out the size of the VDA request that was used */
reqsize = ((u16)((u8 *)curr_iblk_chn - (u8 *)rq->vrq))
/ sizeof(u32);
/*
* Only update the request size if it is bigger than what is
* already there.  We can come in here twice for some management
* commands.
*/
if (reqsize > rq->vda_req_sz)
rq->vda_req_sz = reqsize;
return true;
}
static void esas2r_handle_pending_reset(struct esas2r_adapter *a, u32 currtime)
{
u32 delta = currtime - a->chip_init_time;
if (delta <= ESAS2R_CHPRST_WAIT_TIME) {
/* Wait before accessing registers */
} else if (delta >= ESAS2R_CHPRST_TIME) {
/*
* The last reset failed so try again. Reset
* processing will give up after three tries.
*/
esas2r_local_reset_adapter(a);
} else {
/* We can now see if the firmware is ready */
u32 doorbell;
doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
if (doorbell == 0xFFFFFFFF || !(doorbell & DRBL_FORCE_INT)) {
esas2r_force_interrupt(a);
} else {
u32 ver = (doorbell & DRBL_FW_VER_MSK);
/* Driver supports API version 0 and 1 */
esas2r_write_register_dword(a, MU_DOORBELL_OUT,
doorbell);
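/*
 * Version 0 firmware uses the legacy SGE format built by
 * esas2r_build_sg_list_sge(); version 1 uses the PRD lists built by
 * esas2r_build_sg_list_prd().  max_vdareq_size is in bytes and is
 * converted to dwords in esas2r_start_vda_request().
 */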
if (ver == DRBL_FW_VER_0) {
esas2r_lock_set_flags(&a->flags,
AF_CHPRST_DETECTED);
esas2r_lock_set_flags(&a->flags,
AF_LEGACY_SGE_MODE);
a->max_vdareq_size = 128;
a->build_sgl = esas2r_build_sg_list_sge;
} else if (ver == DRBL_FW_VER_1) {
esas2r_lock_set_flags(&a->flags,
AF_CHPRST_DETECTED);
esas2r_lock_clear_flags(&a->flags,
AF_LEGACY_SGE_MODE);
a->max_vdareq_size = 1024;
a->build_sgl = esas2r_build_sg_list_prd;
} else {
esas2r_local_reset_adapter(a);
}
}
}
}
/* This function must be called once per timer tick */
void esas2r_timer_tick(struct esas2r_adapter *a)
{
u32 currtime = jiffies_to_msecs(jiffies);
u32 deltatime = currtime - a->last_tick_time;
a->last_tick_time = currtime;
/* count down the uptime */
if (a->chip_uptime
&& !(a->flags & (AF_CHPRST_PENDING | AF_DISC_PENDING))) {
if (deltatime >= a->chip_uptime)
a->chip_uptime = 0;
else
a->chip_uptime -= deltatime;
}
if (a->flags & AF_CHPRST_PENDING) {
if (!(a->flags & AF_CHPRST_NEEDED)
&& !(a->flags & AF_CHPRST_DETECTED))
esas2r_handle_pending_reset(a, currtime);
} else {
if (a->flags & AF_DISC_PENDING)
esas2r_disc_check_complete(a);
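/*
 * Heartbeat handshake: set AF_HEARTBEAT and force an interrupt; the
 * doorbell handler clears the flag when DRBL_FORCE_INT comes back.
 * If the flag is still set ESAS2R_HEARTBEAT_TIME msecs later, the
 * firmware is presumed hung and the adapter is reset.
 */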
if (a->flags & AF_HEARTBEAT_ENB) {
if (a->flags & AF_HEARTBEAT) {
if ((currtime - a->heartbeat_time) >=
ESAS2R_HEARTBEAT_TIME) {
esas2r_lock_clear_flags(&a->flags,
AF_HEARTBEAT);
esas2r_hdebug("heartbeat failed");
esas2r_log(ESAS2R_LOG_CRIT,
"heartbeat failed");
esas2r_bugon();
esas2r_local_reset_adapter(a);
}
} else {
esas2r_lock_set_flags(&a->flags, AF_HEARTBEAT);
a->heartbeat_time = currtime;
esas2r_force_interrupt(a);
}
}
}
if (atomic_read(&a->disable_cnt) == 0)
esas2r_do_deferred_processes(a);
}
/*
* Send the specified task management function to the target and LUN
* specified in rqaux.  In addition, immediately abort any commands that
* are queued but not sent to the device according to the rules specified
* by the task management function.
*/
bool esas2r_send_task_mgmt(struct esas2r_adapter *a,
struct esas2r_request *rqaux, u8 task_mgt_func)
{
u16 targetid = rqaux->target_id;
u8 lun = (u8)le32_to_cpu(rqaux->vrq->scsi.flags);
bool ret = false;
struct esas2r_request *rq;
struct list_head *next, *element;
unsigned long flags;
LIST_HEAD(comp_list);
esas2r_trace_enter();
esas2r_trace("rqaux:%p", rqaux);
esas2r_trace("task_mgt_func:%x", task_mgt_func);
spin_lock_irqsave(&a->queue_lock, flags);
/* search the defer queue looking for requests for the device */
list_for_each_safe(element, next, &a->defer_list) {
rq = list_entry(element, struct esas2r_request, req_list);
if (rq->vrq->scsi.function == VDA_FUNC_SCSI
&& rq->target_id == targetid
&& (((u8)le32_to_cpu(rq->vrq->scsi.flags)) == lun
|| task_mgt_func == 0x20)) { /* target reset */
/* Found a request affected by the task management */
if (rq->req_stat == RS_PENDING) {
/*
* The request is pending or waiting.  We can
* safely complete the request now.
*/
if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
list_add_tail(&rq->comp_list,
&comp_list);
}
}
}
/* Send the task management request to the firmware */
rqaux->sense_len = 0;
rqaux->vrq->scsi.length = 0;
rqaux->target_id = targetid;
rqaux->vrq->scsi.flags |= cpu_to_le32(lun);
memset(rqaux->vrq->scsi.cdb, 0, sizeof(rqaux->vrq->scsi.cdb));
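/*
 * Pack the task management function code into the FCP_CMND_TM_MASK
 * field of the flags word; LOBIT() presumably yields the lowest set
 * bit of the mask, so the multiply acts as a shift into position.
 */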
rqaux->vrq->scsi.flags |=
cpu_to_le32(task_mgt_func * LOBIT(FCP_CMND_TM_MASK));
if (a->flags & AF_FLASHING) {
/* Assume success.  If there are active requests, return busy. */
rqaux->req_stat = RS_SUCCESS;
list_for_each_safe(element, next, &a->active_list) {
rq = list_entry(element, struct esas2r_request,
req_list);
if (rq->vrq->scsi.function == VDA_FUNC_SCSI
&& rq->target_id == targetid
&& (((u8)le32_to_cpu(rq->vrq->scsi.flags)) == lun
|| task_mgt_func == 0x20)) /* target reset */
rqaux->req_stat = RS_BUSY;
}
ret = true;
}
spin_unlock_irqrestore(&a->queue_lock, flags);
if (!(a->flags & AF_FLASHING))
esas2r_start_request(a, rqaux);
esas2r_comp_list_drain(a, &comp_list);
if (atomic_read(&a->disable_cnt) == 0)
esas2r_do_deferred_processes(a);
esas2r_trace_exit();
return ret;
}
void esas2r_reset_bus(struct esas2r_adapter *a)
{
esas2r_log(ESAS2R_LOG_INFO, "performing a bus reset");
if (!(a->flags & AF_DEGRADED_MODE)
&& !(a->flags & (AF_CHPRST_PENDING | AF_DISC_PENDING))) {
esas2r_lock_set_flags(&a->flags, AF_BUSRST_NEEDED);
esas2r_lock_set_flags(&a->flags, AF_BUSRST_PENDING);
esas2r_lock_set_flags(&a->flags, AF_OS_RESET);
esas2r_schedule_tasklet(a);
}
}
bool esas2r_ioreq_aborted(struct esas2r_adapter *a, struct esas2r_request *rq,
u8 status)
{
esas2r_trace_enter();
esas2r_trace("rq:%p", rq);
list_del_init(&rq->req_list);
if (rq->timeout > RQ_MAX_TIMEOUT) {
/*
* The request timed out, but we could not abort it because a
* chip reset occurred. Return busy status.
*/
rq->req_stat = RS_BUSY;
esas2r_trace_exit();
return true;
}
rq->req_stat = status;
esas2r_trace_exit();
return true;
}
/*
* linux/drivers/scsi/esas2r/esas2r_ioctl.c
* For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
*
* Copyright (c) 2001-2013 ATTO Technology, Inc.
* (mailto:linuxdrivers@attotech.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* NO WARRANTY
* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
* solely responsible for determining the appropriateness of using and
* distributing the Program and assumes all risks associated with its
* exercise of rights under this Agreement, including but not limited to
* the risks and costs of program errors, damage to or loss of data,
* programs or equipment, and unavailability or interruption of operations.
*
* DISCLAIMER OF LIABILITY
* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
* USA.
*/
#include "esas2r.h"
/*
* Buffered ioctl handlers. A buffered ioctl is one which requires that we
* allocate a DMA-able memory area to communicate with the firmware. In
* order to prevent continually allocating and freeing consistent memory,
* we will allocate a global buffer the first time we need it and re-use
* it for subsequent ioctl calls that require it.
*/
u8 *esas2r_buffered_ioctl;
dma_addr_t esas2r_buffered_ioctl_addr;
u32 esas2r_buffered_ioctl_size;
struct pci_dev *esas2r_buffered_ioctl_pcid;
static DEFINE_SEMAPHORE(buffered_ioctl_semaphore);
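/*
 * The semaphore serializes every buffered ioctl, since the buffer, its
 * DMA address and its size above are shared globals.
 */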
typedef int (*BUFFERED_IOCTL_CALLBACK)(struct esas2r_adapter *,
struct esas2r_request *,
struct esas2r_sg_context *,
void *);
typedef void (*BUFFERED_IOCTL_DONE_CALLBACK)(struct esas2r_adapter *,
struct esas2r_request *, void *);
struct esas2r_buffered_ioctl {
struct esas2r_adapter *a;
void *ioctl;
u32 length;
u32 control_code;
u32 offset;
BUFFERED_IOCTL_CALLBACK
callback;
void *context;
BUFFERED_IOCTL_DONE_CALLBACK
done_callback;
void *done_context;
};
static void complete_fm_api_req(struct esas2r_adapter *a,
struct esas2r_request *rq)
{
a->fm_api_command_done = 1;
wake_up_interruptible(&a->fm_api_waiter);
}
/* Callbacks for building scatter/gather lists for FM API requests */
static u32 get_physaddr_fm_api(struct esas2r_sg_context *sgc, u64 *addr)
{
struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
int offset = sgc->cur_offset - a->save_offset;
(*addr) = a->firmware.phys + offset;
return a->firmware.orig_len - offset;
}
static u32 get_physaddr_fm_api_header(struct esas2r_sg_context *sgc, u64 *addr)
{
struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
int offset = sgc->cur_offset - a->save_offset;
(*addr) = a->firmware.header_buff_phys + offset;
return sizeof(struct esas2r_flash_img) - offset;
}
/* Handle EXPRESS_IOCTL_RW_FIRMWARE ioctl with img_type = FW_IMG_FM_API. */
static void do_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi)
{
struct esas2r_request *rq;
if (down_interruptible(&a->fm_api_semaphore)) {
fi->status = FI_STAT_BUSY;
return;
}
rq = esas2r_alloc_request(a);
if (rq == NULL) {
up(&a->fm_api_semaphore);
fi->status = FI_STAT_BUSY;
return;
}
if (fi == &a->firmware.header) {
a->firmware.header_buff = dma_alloc_coherent(&a->pcid->dev,
(size_t)sizeof(
struct
esas2r_flash_img),
(dma_addr_t *)&a->
firmware.
header_buff_phys,
GFP_KERNEL);
if (a->firmware.header_buff == NULL) {
esas2r_debug("failed to allocate header buffer!");
fi->status = FI_STAT_BUSY;
return;
}
memcpy(a->firmware.header_buff, fi,
sizeof(struct esas2r_flash_img));
a->save_offset = a->firmware.header_buff;
a->fm_api_sgc.get_phys_addr =
(PGETPHYSADDR)get_physaddr_fm_api_header;
} else {
a->save_offset = (u8 *)fi;
a->fm_api_sgc.get_phys_addr =
(PGETPHYSADDR)get_physaddr_fm_api;
}
rq->comp_cb = complete_fm_api_req;
a->fm_api_command_done = 0;
a->fm_api_sgc.cur_offset = a->save_offset;
if (!esas2r_fm_api(a, (struct esas2r_flash_img *)a->save_offset, rq,
&a->fm_api_sgc))
goto all_done;
/* Now wait around for it to complete. */
while (!a->fm_api_command_done)
wait_event_interruptible(a->fm_api_waiter,
a->fm_api_command_done);
all_done:
if (fi == &a->firmware.header) {
memcpy(fi, a->firmware.header_buff,
sizeof(struct esas2r_flash_img));
dma_free_coherent(&a->pcid->dev,
(size_t)sizeof(struct esas2r_flash_img),
a->firmware.header_buff,
(dma_addr_t)a->firmware.header_buff_phys);
}
up(&a->fm_api_semaphore);
esas2r_free_request(a, (struct esas2r_request *)rq);
return;
}
static void complete_nvr_req(struct esas2r_adapter *a,
struct esas2r_request *rq)
{
a->nvram_command_done = 1;
wake_up_interruptible(&a->nvram_waiter);
}
/* Callback for building scatter/gather lists for buffered ioctls */
static u32 get_physaddr_buffered_ioctl(struct esas2r_sg_context *sgc,
u64 *addr)
{
int offset = (u8 *)sgc->cur_offset - esas2r_buffered_ioctl;
(*addr) = esas2r_buffered_ioctl_addr + offset;
return esas2r_buffered_ioctl_size - offset;
}
static void complete_buffered_ioctl_req(struct esas2r_adapter *a,
struct esas2r_request *rq)
{
a->buffered_ioctl_done = 1;
wake_up_interruptible(&a->buffered_ioctl_waiter);
}
static u8 handle_buffered_ioctl(struct esas2r_buffered_ioctl *bi)
{
struct esas2r_adapter *a = bi->a;
struct esas2r_request *rq;
struct esas2r_sg_context sgc;
u8 result = IOCTL_SUCCESS;
if (down_interruptible(&buffered_ioctl_semaphore))
return IOCTL_OUT_OF_RESOURCES;
/* allocate a buffer or use the existing buffer. */
if (esas2r_buffered_ioctl) {
if (esas2r_buffered_ioctl_size < bi->length) {
/* free the too-small buffer and get a new one */
dma_free_coherent(&a->pcid->dev,
(size_t)esas2r_buffered_ioctl_size,
esas2r_buffered_ioctl,
esas2r_buffered_ioctl_addr);
goto allocate_buffer;
}
} else {
allocate_buffer:
esas2r_buffered_ioctl_size = bi->length;
esas2r_buffered_ioctl_pcid = a->pcid;
esas2r_buffered_ioctl = dma_alloc_coherent(&a->pcid->dev,
(size_t)
esas2r_buffered_ioctl_size,
&
esas2r_buffered_ioctl_addr,
GFP_KERNEL);
}
if (!esas2r_buffered_ioctl) {
esas2r_log(ESAS2R_LOG_CRIT,
"could not allocate %d bytes of consistent memory "
"for a buffered ioctl!",
bi->length);
esas2r_debug("buffered ioctl alloc failure");
result = IOCTL_OUT_OF_RESOURCES;
goto exit_cleanly;
}
memcpy(esas2r_buffered_ioctl, bi->ioctl, bi->length);
rq = esas2r_alloc_request(a);
if (rq == NULL) {
esas2r_log(ESAS2R_LOG_CRIT,
"could not allocate an internal request");
result = IOCTL_OUT_OF_RESOURCES;
esas2r_debug("buffered ioctl - no requests");
goto exit_cleanly;
}
a->buffered_ioctl_done = 0;
rq->comp_cb = complete_buffered_ioctl_req;
sgc.cur_offset = esas2r_buffered_ioctl + bi->offset;
sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_buffered_ioctl;
sgc.length = esas2r_buffered_ioctl_size;
if (!(*bi->callback)(a, rq, &sgc, bi->context)) {
/* completed immediately, no need to wait */
a->buffered_ioctl_done = 0;
goto free_and_exit_cleanly;
}
/* now wait around for it to complete. */
while (!a->buffered_ioctl_done)
wait_event_interruptible(a->buffered_ioctl_waiter,
a->buffered_ioctl_done);
free_and_exit_cleanly:
if (result == IOCTL_SUCCESS && bi->done_callback)
(*bi->done_callback)(a, rq, bi->done_context);
esas2r_free_request(a, rq);
exit_cleanly:
if (result == IOCTL_SUCCESS)
memcpy(bi->ioctl, esas2r_buffered_ioctl, bi->length);
up(&buffered_ioctl_semaphore);
return result;
}
/* SMP ioctl support */
static int smp_ioctl_callback(struct esas2r_adapter *a,
struct esas2r_request *rq,
struct esas2r_sg_context *sgc, void *context)
{
struct atto_ioctl_smp *si =
(struct atto_ioctl_smp *)esas2r_buffered_ioctl;
esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_SMP);
if (!esas2r_build_sg_list(a, rq, sgc)) {
si->status = ATTO_STS_OUT_OF_RSRC;
return false;
}
esas2r_start_request(a, rq);
return true;
}
static u8 handle_smp_ioctl(struct esas2r_adapter *a, struct atto_ioctl_smp *si)
{
struct esas2r_buffered_ioctl bi;
memset(&bi, 0, sizeof(bi));
bi.a = a;
bi.ioctl = si;
bi.length = sizeof(struct atto_ioctl_smp)
+ le32_to_cpu(si->req_length)
+ le32_to_cpu(si->rsp_length);
bi.offset = 0;
bi.callback = smp_ioctl_callback;
return handle_buffered_ioctl(&bi);
}
/* CSMI ioctl support */
static void esas2r_csmi_ioctl_tunnel_comp_cb(struct esas2r_adapter *a,
struct esas2r_request *rq)
{
rq->target_id = le16_to_cpu(rq->func_rsp.ioctl_rsp.csmi.target_id);
rq->vrq->scsi.flags |= cpu_to_le32(rq->func_rsp.ioctl_rsp.csmi.lun);
/* Now call the original completion callback. */
(*rq->aux_req_cb)(a, rq);
}
/* Tunnel a CSMI IOCTL to the back end driver for processing. */
static bool csmi_ioctl_tunnel(struct esas2r_adapter *a,
union atto_ioctl_csmi *ci,
struct esas2r_request *rq,
struct esas2r_sg_context *sgc,
u32 ctrl_code,
u16 target_id)
{
struct atto_vda_ioctl_req *ioctl = &rq->vrq->ioctl;
if (a->flags & AF_DEGRADED_MODE)
return false;
esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_CSMI);
ioctl->csmi.ctrl_code = cpu_to_le32(ctrl_code);
ioctl->csmi.target_id = cpu_to_le16(target_id);
ioctl->csmi.lun = (u8)le32_to_cpu(rq->vrq->scsi.flags);
/*
* Always usurp the completion callback since the interrupt callback
* mechanism may be used.
*/
rq->aux_req_cx = ci;
rq->aux_req_cb = rq->comp_cb;
rq->comp_cb = esas2r_csmi_ioctl_tunnel_comp_cb;
if (!esas2r_build_sg_list(a, rq, sgc))
return false;
esas2r_start_request(a, rq);
return true;
}
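/*
 * Only single-level LUN addressing is accepted: every byte of the SCSI
 * LUN except byte 1 (the LUN number itself) must be zero.
 */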
static bool check_lun(struct scsi_lun lun)
{
bool result;
result = ((lun.scsi_lun[7] == 0) &&
(lun.scsi_lun[6] == 0) &&
(lun.scsi_lun[5] == 0) &&
(lun.scsi_lun[4] == 0) &&
(lun.scsi_lun[3] == 0) &&
(lun.scsi_lun[2] == 0) &&
/* Byte 1 is intentionally skipped */
(lun.scsi_lun[0] == 0));
return result;
}
static int csmi_ioctl_callback(struct esas2r_adapter *a,
struct esas2r_request *rq,
struct esas2r_sg_context *sgc, void *context)
{
struct atto_csmi *ci = (struct atto_csmi *)context;
union atto_ioctl_csmi *ioctl_csmi =
(union atto_ioctl_csmi *)esas2r_buffered_ioctl;
u8 path = 0;
u8 tid = 0;
u8 lun = 0;
u32 sts = CSMI_STS_SUCCESS;
struct esas2r_target *t;
unsigned long flags;
if (ci->control_code == CSMI_CC_GET_DEV_ADDR) {
struct atto_csmi_get_dev_addr *gda = &ci->data.dev_addr;
path = gda->path_id;
tid = gda->target_id;
lun = gda->lun;
} else if (ci->control_code == CSMI_CC_TASK_MGT) {
struct atto_csmi_task_mgmt *tm = &ci->data.tsk_mgt;
path = tm->path_id;
tid = tm->target_id;
lun = tm->lun;
}
if (path > 0 || tid > ESAS2R_MAX_ID) {
rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32(
CSMI_STS_INV_PARAM);
return false;
}
rq->target_id = tid;
rq->vrq->scsi.flags |= cpu_to_le32(lun);
switch (ci->control_code) {
case CSMI_CC_GET_DRVR_INFO:
{
struct atto_csmi_get_driver_info *gdi = &ioctl_csmi->drvr_info;
strcpy(gdi->description, esas2r_get_model_name(a));
gdi->csmi_major_rev = CSMI_MAJOR_REV;
gdi->csmi_minor_rev = CSMI_MINOR_REV;
break;
}
case CSMI_CC_GET_CNTLR_CFG:
{
struct atto_csmi_get_cntlr_cfg *gcc = &ioctl_csmi->cntlr_cfg;
gcc->base_io_addr = 0;
pci_read_config_dword(a->pcid, PCI_BASE_ADDRESS_2,
&gcc->base_memaddr_lo);
pci_read_config_dword(a->pcid, PCI_BASE_ADDRESS_3,
&gcc->base_memaddr_hi);
gcc->board_id = MAKEDWORD(a->pcid->subsystem_device,
a->pcid->subsystem_vendor);
gcc->slot_num = CSMI_SLOT_NUM_UNKNOWN;
gcc->cntlr_class = CSMI_CNTLR_CLASS_HBA;
gcc->io_bus_type = CSMI_BUS_TYPE_PCI;
gcc->pci_addr.bus_num = a->pcid->bus->number;
gcc->pci_addr.device_num = PCI_SLOT(a->pcid->devfn);
gcc->pci_addr.function_num = PCI_FUNC(a->pcid->devfn);
memset(gcc->serial_num, 0, sizeof(gcc->serial_num));
gcc->major_rev = LOBYTE(LOWORD(a->fw_version));
gcc->minor_rev = HIBYTE(LOWORD(a->fw_version));
gcc->build_rev = LOBYTE(HIWORD(a->fw_version));
gcc->release_rev = HIBYTE(HIWORD(a->fw_version));
gcc->bios_major_rev = HIBYTE(HIWORD(a->flash_ver));
gcc->bios_minor_rev = LOBYTE(HIWORD(a->flash_ver));
gcc->bios_build_rev = LOWORD(a->flash_ver);
if (a->flags2 & AF2_THUNDERLINK)
gcc->cntlr_flags = CSMI_CNTLRF_SAS_HBA
| CSMI_CNTLRF_SATA_HBA;
else
gcc->cntlr_flags = CSMI_CNTLRF_SAS_RAID
| CSMI_CNTLRF_SATA_RAID;
gcc->rrom_major_rev = 0;
gcc->rrom_minor_rev = 0;
gcc->rrom_build_rev = 0;
gcc->rrom_release_rev = 0;
gcc->rrom_biosmajor_rev = 0;
gcc->rrom_biosminor_rev = 0;
gcc->rrom_biosbuild_rev = 0;
gcc->rrom_biosrelease_rev = 0;
break;
}
case CSMI_CC_GET_CNTLR_STS:
{
struct atto_csmi_get_cntlr_sts *gcs = &ioctl_csmi->cntlr_sts;
if (a->flags & AF_DEGRADED_MODE)
gcs->status = CSMI_CNTLR_STS_FAILED;
else
gcs->status = CSMI_CNTLR_STS_GOOD;
gcs->offline_reason = CSMI_OFFLINE_NO_REASON;
break;
}
case CSMI_CC_FW_DOWNLOAD:
case CSMI_CC_GET_RAID_INFO:
case CSMI_CC_GET_RAID_CFG:
sts = CSMI_STS_BAD_CTRL_CODE;
break;
case CSMI_CC_SMP_PASSTHRU:
case CSMI_CC_SSP_PASSTHRU:
case CSMI_CC_STP_PASSTHRU:
case CSMI_CC_GET_PHY_INFO:
case CSMI_CC_SET_PHY_INFO:
case CSMI_CC_GET_LINK_ERRORS:
case CSMI_CC_GET_SATA_SIG:
case CSMI_CC_GET_CONN_INFO:
case CSMI_CC_PHY_CTRL:
if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc,
ci->control_code,
ESAS2R_TARG_ID_INV)) {
sts = CSMI_STS_FAILED;
break;
}
return true;
case CSMI_CC_GET_SCSI_ADDR:
{
struct atto_csmi_get_scsi_addr *gsa = &ioctl_csmi->scsi_addr;
struct scsi_lun lun;
memcpy(&lun, gsa->sas_lun, sizeof(struct scsi_lun));
if (!check_lun(lun)) {
sts = CSMI_STS_NO_SCSI_ADDR;
break;
}
/* make sure the device is present */
spin_lock_irqsave(&a->mem_lock, flags);
t = esas2r_targ_db_find_by_sas_addr(a, (u64 *)gsa->sas_addr);
spin_unlock_irqrestore(&a->mem_lock, flags);
if (t == NULL) {
sts = CSMI_STS_NO_SCSI_ADDR;
break;
}
gsa->host_index = 0xFF;
gsa->lun = gsa->sas_lun[1];
rq->target_id = esas2r_targ_get_id(t, a);
break;
}
case CSMI_CC_GET_DEV_ADDR:
{
struct atto_csmi_get_dev_addr *gda = &ioctl_csmi->dev_addr;
/* make sure the target is present */
t = a->targetdb + rq->target_id;
if (t >= a->targetdb_end
|| t->target_state != TS_PRESENT
|| t->sas_addr == 0) {
sts = CSMI_STS_NO_DEV_ADDR;
break;
}
/* fill in the result */
*(u64 *)gda->sas_addr = t->sas_addr;
memset(gda->sas_lun, 0, sizeof(gda->sas_lun));
gda->sas_lun[1] = (u8)le32_to_cpu(rq->vrq->scsi.flags);
break;
}
case CSMI_CC_TASK_MGT:
/* make sure the target is present */
t = a->targetdb + rq->target_id;
if (t >= a->targetdb_end
|| t->target_state != TS_PRESENT
|| !(t->flags & TF_PASS_THRU)) {
sts = CSMI_STS_NO_DEV_ADDR;
break;
}
if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc,
ci->control_code,
t->phys_targ_id)) {
sts = CSMI_STS_FAILED;
break;
}
return true;
default:
sts = CSMI_STS_BAD_CTRL_CODE;
break;
}
rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32(sts);
return false;
}
static void csmi_ioctl_done_callback(struct esas2r_adapter *a,
struct esas2r_request *rq, void *context)
{
struct atto_csmi *ci = (struct atto_csmi *)context;
union atto_ioctl_csmi *ioctl_csmi =
(union atto_ioctl_csmi *)esas2r_buffered_ioctl;
switch (ci->control_code) {
case CSMI_CC_GET_DRVR_INFO:
{
struct atto_csmi_get_driver_info *gdi =
&ioctl_csmi->drvr_info;
strcpy(gdi->name, ESAS2R_VERSION_STR);
gdi->major_rev = ESAS2R_MAJOR_REV;
gdi->minor_rev = ESAS2R_MINOR_REV;
gdi->build_rev = 0;
gdi->release_rev = 0;
break;
}
case CSMI_CC_GET_SCSI_ADDR:
{
struct atto_csmi_get_scsi_addr *gsa = &ioctl_csmi->scsi_addr;
if (le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status) ==
CSMI_STS_SUCCESS) {
gsa->target_id = rq->target_id;
gsa->path_id = 0;
}
break;
}
}
ci->status = le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status);
}
static u8 handle_csmi_ioctl(struct esas2r_adapter *a, struct atto_csmi *ci)
{
struct esas2r_buffered_ioctl bi;
memset(&bi, 0, sizeof(bi));
bi.a = a;
bi.ioctl = &ci->data;
bi.length = sizeof(union atto_ioctl_csmi);
bi.offset = 0;
bi.callback = csmi_ioctl_callback;
bi.context = ci;
bi.done_callback = csmi_ioctl_done_callback;
bi.done_context = ci;
return handle_buffered_ioctl(&bi);
}
/* ATTO HBA ioctl support */
/* Tunnel an ATTO HBA IOCTL to the back end driver for processing. */
static bool hba_ioctl_tunnel(struct esas2r_adapter *a,
struct atto_ioctl *hi,
struct esas2r_request *rq,
struct esas2r_sg_context *sgc)
{
esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_HBA);
if (!esas2r_build_sg_list(a, rq, sgc)) {
hi->status = ATTO_STS_OUT_OF_RSRC;
return false;
}
esas2r_start_request(a, rq);
return true;
}
static void scsi_passthru_comp_cb(struct esas2r_adapter *a,
struct esas2r_request *rq)
{
struct atto_ioctl *hi = (struct atto_ioctl *)rq->aux_req_cx;
struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru;
u8 sts = ATTO_SPT_RS_FAILED;
spt->scsi_status = rq->func_rsp.scsi_rsp.scsi_stat;
spt->sense_length = rq->sense_len;
spt->residual_length =
le32_to_cpu(rq->func_rsp.scsi_rsp.residual_length);
switch (rq->req_stat) {
case RS_SUCCESS:
case RS_SCSI_ERROR:
sts = ATTO_SPT_RS_SUCCESS;
break;
case RS_UNDERRUN:
sts = ATTO_SPT_RS_UNDERRUN;
break;
case RS_OVERRUN:
sts = ATTO_SPT_RS_OVERRUN;
break;
case RS_SEL:
case RS_SEL2:
sts = ATTO_SPT_RS_NO_DEVICE;
break;
case RS_NO_LUN:
sts = ATTO_SPT_RS_NO_LUN;
break;
case RS_TIMEOUT:
sts = ATTO_SPT_RS_TIMEOUT;
break;
case RS_DEGRADED:
sts = ATTO_SPT_RS_DEGRADED;
break;
case RS_BUSY:
sts = ATTO_SPT_RS_BUSY;
break;
case RS_ABORTED:
sts = ATTO_SPT_RS_ABORTED;
break;
case RS_RESET:
sts = ATTO_SPT_RS_BUS_RESET;
break;
}
spt->req_status = sts;
/* Update the target ID to the next one present. */
spt->target_id =
esas2r_targ_db_find_next_present(a, (u16)spt->target_id);
/* Done, call the completion callback. */
(*rq->aux_req_cb)(a, rq);
}
static int hba_ioctl_callback(struct esas2r_adapter *a,
struct esas2r_request *rq,
struct esas2r_sg_context *sgc,
void *context)
{
struct atto_ioctl *hi = (struct atto_ioctl *)esas2r_buffered_ioctl;
hi->status = ATTO_STS_SUCCESS;
switch (hi->function) {
case ATTO_FUNC_GET_ADAP_INFO:
{
u8 *class_code = (u8 *)&a->pcid->class;
struct atto_hba_get_adapter_info *gai =
&hi->data.get_adap_info;
int pcie_cap_reg;
if (hi->flags & HBAF_TUNNEL) {
hi->status = ATTO_STS_UNSUPPORTED;
break;
}
if (hi->version > ATTO_VER_GET_ADAP_INFO0) {
hi->status = ATTO_STS_INV_VERSION;
hi->version = ATTO_VER_GET_ADAP_INFO0;
break;
}
memset(gai, 0, sizeof(*gai));
gai->pci.vendor_id = a->pcid->vendor;
gai->pci.device_id = a->pcid->device;
gai->pci.ss_vendor_id = a->pcid->subsystem_vendor;
gai->pci.ss_device_id = a->pcid->subsystem_device;
gai->pci.class_code[0] = class_code[0];
gai->pci.class_code[1] = class_code[1];
gai->pci.class_code[2] = class_code[2];
gai->pci.rev_id = a->pcid->revision;
gai->pci.bus_num = a->pcid->bus->number;
gai->pci.dev_num = PCI_SLOT(a->pcid->devfn);
gai->pci.func_num = PCI_FUNC(a->pcid->devfn);
pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP);
if (pcie_cap_reg) {
u16 stat;
u32 caps;
pci_read_config_word(a->pcid,
pcie_cap_reg + PCI_EXP_LNKSTA,
&stat);
pci_read_config_dword(a->pcid,
pcie_cap_reg + PCI_EXP_LNKCAP,
&caps);
gai->pci.link_speed_curr =
(u8)(stat & PCI_EXP_LNKSTA_CLS);
gai->pci.link_speed_max =
(u8)(caps & PCI_EXP_LNKCAP_SLS);
gai->pci.link_width_curr =
(u8)((stat & PCI_EXP_LNKSTA_NLW)
>> PCI_EXP_LNKSTA_NLW_SHIFT);
gai->pci.link_width_max =
(u8)((caps & PCI_EXP_LNKCAP_MLW)
>> 4);
}
gai->pci.msi_vector_cnt = 1;
if (a->pcid->msix_enabled)
gai->pci.interrupt_mode = ATTO_GAI_PCIIM_MSIX;
else if (a->pcid->msi_enabled)
gai->pci.interrupt_mode = ATTO_GAI_PCIIM_MSI;
else
gai->pci.interrupt_mode = ATTO_GAI_PCIIM_LEGACY;
gai->adap_type = ATTO_GAI_AT_ESASRAID2;
if (a->flags2 & AF2_THUNDERLINK)
gai->adap_type = ATTO_GAI_AT_TLSASHBA;
if (a->flags & AF_DEGRADED_MODE)
gai->adap_flags |= ATTO_GAI_AF_DEGRADED;
gai->adap_flags |= ATTO_GAI_AF_SPT_SUPP |
ATTO_GAI_AF_DEVADDR_SUPP;
if (a->pcid->subsystem_device == ATTO_ESAS_R60F
|| a->pcid->subsystem_device == ATTO_ESAS_R608
|| a->pcid->subsystem_device == ATTO_ESAS_R644
|| a->pcid->subsystem_device == ATTO_TSSC_3808E)
gai->adap_flags |= ATTO_GAI_AF_VIRT_SES;
gai->num_ports = ESAS2R_NUM_PHYS;
gai->num_phys = ESAS2R_NUM_PHYS;
strcpy(gai->firmware_rev, a->fw_rev);
strcpy(gai->flash_rev, a->flash_rev);
strcpy(gai->model_name_short, esas2r_get_model_name_short(a));
strcpy(gai->model_name, esas2r_get_model_name(a));
gai->num_targets = ESAS2R_MAX_TARGETS;
gai->num_busses = 1;
gai->num_targsper_bus = gai->num_targets;
gai->num_lunsper_targ = 256;
if (a->pcid->subsystem_device == ATTO_ESAS_R6F0
|| a->pcid->subsystem_device == ATTO_ESAS_R60F)
gai->num_connectors = 4;
else
gai->num_connectors = 2;
gai->adap_flags2 |= ATTO_GAI_AF2_ADAP_CTRL_SUPP;
gai->num_targets_backend = a->num_targets_backend;
gai->tunnel_flags = a->ioctl_tunnel
& (ATTO_GAI_TF_MEM_RW
| ATTO_GAI_TF_TRACE
| ATTO_GAI_TF_SCSI_PASS_THRU
| ATTO_GAI_TF_GET_DEV_ADDR
| ATTO_GAI_TF_PHY_CTRL
| ATTO_GAI_TF_CONN_CTRL
| ATTO_GAI_TF_GET_DEV_INFO);
break;
}
case ATTO_FUNC_GET_ADAP_ADDR:
{
struct atto_hba_get_adapter_address *gaa =
&hi->data.get_adap_addr;
if (hi->flags & HBAF_TUNNEL) {
hi->status = ATTO_STS_UNSUPPORTED;
break;
}
if (hi->version > ATTO_VER_GET_ADAP_ADDR0) {
hi->status = ATTO_STS_INV_VERSION;
hi->version = ATTO_VER_GET_ADAP_ADDR0;
} else if (gaa->addr_type == ATTO_GAA_AT_PORT
|| gaa->addr_type == ATTO_GAA_AT_NODE) {
if (gaa->addr_type == ATTO_GAA_AT_PORT
&& gaa->port_id >= ESAS2R_NUM_PHYS) {
hi->status = ATTO_STS_NOT_APPL;
} else {
memcpy((u64 *)gaa->address,
&a->nvram->sas_addr[0], sizeof(u64));
gaa->addr_len = sizeof(u64);
}
} else {
hi->status = ATTO_STS_INV_PARAM;
}
break;
}
case ATTO_FUNC_MEM_RW:
{
if (hi->flags & HBAF_TUNNEL) {
if (hba_ioctl_tunnel(a, hi, rq, sgc))
return true;
break;
}
hi->status = ATTO_STS_UNSUPPORTED;
break;
}
case ATTO_FUNC_TRACE:
{
struct atto_hba_trace *trc = &hi->data.trace;
if (hi->flags & HBAF_TUNNEL) {
if (hba_ioctl_tunnel(a, hi, rq, sgc))
return true;
break;
}
if (hi->version > ATTO_VER_TRACE1) {
hi->status = ATTO_STS_INV_VERSION;
hi->version = ATTO_VER_TRACE1;
break;
}
if (trc->trace_type == ATTO_TRC_TT_FWCOREDUMP
&& hi->version >= ATTO_VER_TRACE1) {
if (trc->trace_func == ATTO_TRC_TF_UPLOAD) {
u32 len = hi->data_length;
u32 offset = trc->current_offset;
u32 total_len = ESAS2R_FWCOREDUMP_SZ;
/* Size is zero if a core dump isn't present */
if (!(a->flags2 & AF2_COREDUMP_SAVED))
total_len = 0;
if (len > total_len)
len = total_len;
if (offset >= total_len
|| offset + len > total_len
|| len == 0) {
hi->status = ATTO_STS_INV_PARAM;
break;
}
memcpy(trc + 1,
a->fw_coredump_buff + offset,
len);
hi->data_length = len;
} else if (trc->trace_func == ATTO_TRC_TF_RESET) {
memset(a->fw_coredump_buff, 0,
ESAS2R_FWCOREDUMP_SZ);
esas2r_lock_clear_flags(&a->flags2,
AF2_COREDUMP_SAVED);
} else if (trc->trace_func != ATTO_TRC_TF_GET_INFO) {
hi->status = ATTO_STS_UNSUPPORTED;
break;
}
/* Always return all the info we can. */
trc->trace_mask = 0;
trc->current_offset = 0;
trc->total_length = ESAS2R_FWCOREDUMP_SZ;
/* Return zero length buffer if core dump not present */
if (!(a->flags2 & AF2_COREDUMP_SAVED))
trc->total_length = 0;
} else {
hi->status = ATTO_STS_UNSUPPORTED;
}
break;
}
case ATTO_FUNC_SCSI_PASS_THRU:
{
struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru;
struct scsi_lun lun;
memcpy(&lun, spt->lun, sizeof(struct scsi_lun));
if (hi->flags & HBAF_TUNNEL) {
if (hba_ioctl_tunnel(a, hi, rq, sgc))
return true;
break;
}
if (hi->version > ATTO_VER_SCSI_PASS_THRU0) {
hi->status = ATTO_STS_INV_VERSION;
hi->version = ATTO_VER_SCSI_PASS_THRU0;
break;
}
if (spt->target_id >= ESAS2R_MAX_TARGETS || !check_lun(lun)) {
hi->status = ATTO_STS_INV_PARAM;
break;
}
esas2r_sgc_init(sgc, a, rq, NULL);
sgc->length = hi->data_length;
sgc->cur_offset += offsetof(struct atto_ioctl, data.byte)
+ sizeof(struct atto_hba_scsi_pass_thru);
/* Finish request initialization */
rq->target_id = (u16)spt->target_id;
rq->vrq->scsi.flags |= cpu_to_le32(spt->lun[1]);
memcpy(rq->vrq->scsi.cdb, spt->cdb, 16);
rq->vrq->scsi.length = cpu_to_le32(hi->data_length);
rq->sense_len = spt->sense_length;
rq->sense_buf = (u8 *)spt->sense_data;
/* NOTE: we ignore spt->timeout */
/*
* always usurp the completion callback since the interrupt
* callback mechanism may be used.
*/
rq->aux_req_cx = hi;
rq->aux_req_cb = rq->comp_cb;
rq->comp_cb = scsi_passthru_comp_cb;
if (spt->flags & ATTO_SPTF_DATA_IN) {
rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_RDD);
} else if (spt->flags & ATTO_SPTF_DATA_OUT) {
rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_WRD);
} else {
if (sgc->length) {
hi->status = ATTO_STS_INV_PARAM;
break;
}
}
if (spt->flags & ATTO_SPTF_ORDERED_Q)
rq->vrq->scsi.flags |=
cpu_to_le32(FCP_CMND_TA_ORDRD_Q);
else if (spt->flags & ATTO_SPTF_HEAD_OF_Q)
rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_TA_HEAD_Q);
if (!esas2r_build_sg_list(a, rq, sgc)) {
hi->status = ATTO_STS_OUT_OF_RSRC;
break;
}
esas2r_start_request(a, rq);
return true;
}
case ATTO_FUNC_GET_DEV_ADDR:
{
struct atto_hba_get_device_address *gda =
&hi->data.get_dev_addr;
struct esas2r_target *t;
if (hi->flags & HBAF_TUNNEL) {
if (hba_ioctl_tunnel(a, hi, rq, sgc))
return true;
break;
}
if (hi->version > ATTO_VER_GET_DEV_ADDR0) {
hi->status = ATTO_STS_INV_VERSION;
hi->version = ATTO_VER_GET_DEV_ADDR0;
break;
}
if (gda->target_id >= ESAS2R_MAX_TARGETS) {
hi->status = ATTO_STS_INV_PARAM;
break;
}
t = a->targetdb + (u16)gda->target_id;
if (t->target_state != TS_PRESENT) {
hi->status = ATTO_STS_FAILED;
} else if (gda->addr_type == ATTO_GDA_AT_PORT) {
if (t->sas_addr == 0) {
hi->status = ATTO_STS_UNSUPPORTED;
} else {
*(u64 *)gda->address = t->sas_addr;
gda->addr_len = sizeof(u64);
}
} else if (gda->addr_type == ATTO_GDA_AT_NODE) {
hi->status = ATTO_STS_NOT_APPL;
} else {
hi->status = ATTO_STS_INV_PARAM;
}
/* update the target ID to the next one present. */
gda->target_id =
esas2r_targ_db_find_next_present(a,
(u16)gda->target_id);
break;
}
case ATTO_FUNC_PHY_CTRL:
case ATTO_FUNC_CONN_CTRL:
{
if (hba_ioctl_tunnel(a, hi, rq, sgc))
return true;
break;
}
case ATTO_FUNC_ADAP_CTRL:
{
struct atto_hba_adap_ctrl *ac = &hi->data.adap_ctrl;
if (hi->flags & HBAF_TUNNEL) {
hi->status = ATTO_STS_UNSUPPORTED;
break;
}
if (hi->version > ATTO_VER_ADAP_CTRL0) {
hi->status = ATTO_STS_INV_VERSION;
hi->version = ATTO_VER_ADAP_CTRL0;
break;
}
if (ac->adap_func == ATTO_AC_AF_HARD_RST) {
esas2r_reset_adapter(a);
} else if (ac->adap_func != ATTO_AC_AF_GET_STATE) {
hi->status = ATTO_STS_UNSUPPORTED;
break;
}
if (a->flags & AF_CHPRST_NEEDED)
ac->adap_state = ATTO_AC_AS_RST_SCHED;
else if (a->flags & AF_CHPRST_PENDING)
ac->adap_state = ATTO_AC_AS_RST_IN_PROG;
else if (a->flags & AF_DISC_PENDING)
ac->adap_state = ATTO_AC_AS_RST_DISC;
else if (a->flags & AF_DISABLED)
ac->adap_state = ATTO_AC_AS_DISABLED;
else if (a->flags & AF_DEGRADED_MODE)
ac->adap_state = ATTO_AC_AS_DEGRADED;
else
ac->adap_state = ATTO_AC_AS_OK;
break;
}
case ATTO_FUNC_GET_DEV_INFO:
{
struct atto_hba_get_device_info *gdi = &hi->data.get_dev_info;
struct esas2r_target *t;
if (hi->flags & HBAF_TUNNEL) {
if (hba_ioctl_tunnel(a, hi, rq, sgc))
return true;
break;
}
if (hi->version > ATTO_VER_GET_DEV_INFO0) {
hi->status = ATTO_STS_INV_VERSION;
hi->version = ATTO_VER_GET_DEV_INFO0;
break;
}
if (gdi->target_id >= ESAS2R_MAX_TARGETS) {
hi->status = ATTO_STS_INV_PARAM;
break;
}
t = a->targetdb + (u16)gdi->target_id;
/* update the target ID to the next one present. */
gdi->target_id =
esas2r_targ_db_find_next_present(a,
(u16)gdi->target_id);
if (t->target_state != TS_PRESENT) {
hi->status = ATTO_STS_FAILED;
break;
}
hi->status = ATTO_STS_UNSUPPORTED;
break;
}
default:
hi->status = ATTO_STS_INV_FUNC;
break;
}
return false;
}
static void hba_ioctl_done_callback(struct esas2r_adapter *a,
struct esas2r_request *rq, void *context)
{
struct atto_ioctl *ioctl_hba =
(struct atto_ioctl *)esas2r_buffered_ioctl;
esas2r_debug("hba_ioctl_done_callback %d", a->index);
if (ioctl_hba->function == ATTO_FUNC_GET_ADAP_INFO) {
struct atto_hba_get_adapter_info *gai =
&ioctl_hba->data.get_adap_info;
esas2r_debug("ATTO_FUNC_GET_ADAP_INFO");
gai->drvr_rev_major = ESAS2R_MAJOR_REV;
gai->drvr_rev_minor = ESAS2R_MINOR_REV;
strcpy(gai->drvr_rev_ascii, ESAS2R_VERSION_STR);
strcpy(gai->drvr_name, ESAS2R_DRVR_NAME);
gai->num_busses = 1;
gai->num_targsper_bus = ESAS2R_MAX_ID + 1;
gai->num_lunsper_targ = 1;
}
}
u8 handle_hba_ioctl(struct esas2r_adapter *a,
struct atto_ioctl *ioctl_hba)
{
struct esas2r_buffered_ioctl bi;
memset(&bi, 0, sizeof(bi));
bi.a = a;
bi.ioctl = ioctl_hba;
bi.length = sizeof(struct atto_ioctl) + ioctl_hba->data_length;
bi.callback = hba_ioctl_callback;
bi.context = NULL;
bi.done_callback = hba_ioctl_done_callback;
bi.done_context = NULL;
bi.offset = 0;
return handle_buffered_ioctl(&bi);
}
int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq,
struct esas2r_sas_nvram *data)
{
int result = 0;
a->nvram_command_done = 0;
rq->comp_cb = complete_nvr_req;
if (esas2r_nvram_write(a, rq, data)) {
/* now wait around for it to complete. */
while (!a->nvram_command_done)
wait_event_interruptible(a->nvram_waiter,
a->nvram_command_done);
/* done, check the status. */
if (rq->req_stat == RS_SUCCESS)
result = 1;
}
return result;
}
/* This function only cares about ATTO-specific ioctls (atto_express_ioctl) */
int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg)
{
struct atto_express_ioctl *ioctl = NULL;
struct esas2r_adapter *a;
struct esas2r_request *rq;
u16 code;
int err;
esas2r_log(ESAS2R_LOG_DEBG, "ioctl (%p, %x, %p)", hostdata, cmd, arg);
if ((arg == NULL)
|| (cmd < EXPRESS_IOCTL_MIN)
|| (cmd > EXPRESS_IOCTL_MAX))
return -ENOTSUPP;
if (!access_ok(VERIFY_WRITE, arg, sizeof(struct atto_express_ioctl))) {
esas2r_log(ESAS2R_LOG_WARN,
"ioctl_handler access_ok failed for cmd %d, "
"address %p", cmd,
arg);
return -EFAULT;
}
/* allocate a kernel memory buffer for the IOCTL data */
ioctl = kzalloc(sizeof(struct atto_express_ioctl), GFP_KERNEL);
if (ioctl == NULL) {
esas2r_log(ESAS2R_LOG_WARN,
"ioctl_handler kzalloc failed for %zu bytes",
sizeof(struct atto_express_ioctl));
return -ENOMEM;
}
err = __copy_from_user(ioctl, arg, sizeof(struct atto_express_ioctl));
if (err != 0) {
esas2r_log(ESAS2R_LOG_WARN,
"copy_from_user didn't copy everything (err %d, cmd %d)",
err,
cmd);
kfree(ioctl);
return -EFAULT;
}
/* verify the signature */
if (memcmp(ioctl->header.signature,
EXPRESS_IOCTL_SIGNATURE,
EXPRESS_IOCTL_SIGNATURE_SIZE) != 0) {
esas2r_log(ESAS2R_LOG_WARN, "invalid signature");
kfree(ioctl);
return -ENOTSUPP;
}
/* assume success */
ioctl->header.return_code = IOCTL_SUCCESS;
err = 0;
/*
* handle EXPRESS_IOCTL_GET_CHANNELS
* without paying attention to channel
*/
if (cmd == EXPRESS_IOCTL_GET_CHANNELS) {
int i = 0, k = 0;
ioctl->data.chanlist.num_channels = 0;
while (i < MAX_ADAPTERS) {
if (esas2r_adapters[i]) {
ioctl->data.chanlist.num_channels++;
ioctl->data.chanlist.channel[k] = i;
k++;
}
i++;
}
goto ioctl_done;
}
/* get the channel */
if (ioctl->header.channel == 0xFF) {
a = (struct esas2r_adapter *)hostdata;
} else {
/* validate the channel before indexing the adapter table */
a = (ioctl->header.channel < MAX_ADAPTERS) ?
esas2r_adapters[ioctl->header.channel] : NULL;
if (a == NULL) {
ioctl->header.return_code = IOCTL_BAD_CHANNEL;
esas2r_log(ESAS2R_LOG_WARN, "bad channel value");
kfree(ioctl);
return -ENOTSUPP;
}
}
switch (cmd) {
case EXPRESS_IOCTL_RW_FIRMWARE:
if (ioctl->data.fwrw.img_type == FW_IMG_FM_API) {
err = esas2r_write_fw(a,
(char *)ioctl->data.fwrw.image,
0,
sizeof(struct
atto_express_ioctl));
if (err >= 0) {
err = esas2r_read_fw(a,
(char *)ioctl->data.fwrw.
image,
0,
sizeof(struct
atto_express_ioctl));
}
} else if (ioctl->data.fwrw.img_type == FW_IMG_FS_API) {
err = esas2r_write_fs(a,
(char *)ioctl->data.fwrw.image,
0,
sizeof(struct
atto_express_ioctl));
if (err >= 0) {
err = esas2r_read_fs(a,
(char *)ioctl->data.fwrw.
image,
0,
sizeof(struct
atto_express_ioctl));
}
} else {
ioctl->header.return_code = IOCTL_BAD_FLASH_IMGTYPE;
}
break;
case EXPRESS_IOCTL_READ_PARAMS:
memcpy(ioctl->data.prw.data_buffer, a->nvram,
sizeof(struct esas2r_sas_nvram));
ioctl->data.prw.code = 1;
break;
case EXPRESS_IOCTL_WRITE_PARAMS:
rq = esas2r_alloc_request(a);
if (rq == NULL) {
up(&a->nvram_semaphore);
ioctl->data.prw.code = 0;
break;
}
code = esas2r_write_params(a, rq,
(struct esas2r_sas_nvram *)ioctl->data.prw.data_buffer);
ioctl->data.prw.code = code;
esas2r_free_request(a, rq);
break;
case EXPRESS_IOCTL_DEFAULT_PARAMS:
esas2r_nvram_get_defaults(a,
(struct esas2r_sas_nvram *)ioctl->data.prw.data_buffer);
ioctl->data.prw.code = 1;
break;
case EXPRESS_IOCTL_CHAN_INFO:
ioctl->data.chaninfo.major_rev = ESAS2R_MAJOR_REV;
ioctl->data.chaninfo.minor_rev = ESAS2R_MINOR_REV;
ioctl->data.chaninfo.IRQ = a->pcid->irq;
ioctl->data.chaninfo.device_id = a->pcid->device;
ioctl->data.chaninfo.vendor_id = a->pcid->vendor;
ioctl->data.chaninfo.ven_dev_id = a->pcid->subsystem_device;
ioctl->data.chaninfo.revision_id = a->pcid->revision;
ioctl->data.chaninfo.pci_bus = a->pcid->bus->number;
ioctl->data.chaninfo.pci_dev_func = a->pcid->devfn;
ioctl->data.chaninfo.core_rev = 0;
ioctl->data.chaninfo.host_no = a->host->host_no;
ioctl->data.chaninfo.hbaapi_rev = 0;
break;
case EXPRESS_IOCTL_SMP:
ioctl->header.return_code = handle_smp_ioctl(a,
&ioctl->data.
ioctl_smp);
break;
case EXPRESS_CSMI:
ioctl->header.return_code =
handle_csmi_ioctl(a, &ioctl->data.csmi);
break;
case EXPRESS_IOCTL_HBA:
ioctl->header.return_code = handle_hba_ioctl(a,
&ioctl->data.
ioctl_hba);
break;
case EXPRESS_IOCTL_VDA:
err = esas2r_write_vda(a,
(char *)&ioctl->data.ioctl_vda,
0,
sizeof(struct atto_ioctl_vda) +
ioctl->data.ioctl_vda.data_length);
if (err >= 0) {
err = esas2r_read_vda(a,
(char *)&ioctl->data.ioctl_vda,
0,
sizeof(struct atto_ioctl_vda) +
ioctl->data.ioctl_vda.data_length);
}
break;
case EXPRESS_IOCTL_GET_MOD_INFO:
ioctl->data.modinfo.adapter = a;
ioctl->data.modinfo.pci_dev = a->pcid;
ioctl->data.modinfo.scsi_host = a->host;
ioctl->data.modinfo.host_no = a->host->host_no;
break;
default:
esas2r_debug("esas2r_ioctl invalid cmd %p!", cmd);
ioctl->header.return_code = IOCTL_ERR_INVCMD;
}
ioctl_done:
if (err < 0) {
esas2r_log(ESAS2R_LOG_WARN, "err %d on ioctl cmd %d", err,
cmd);
switch (err) {
case -ENOMEM:
case -EBUSY:
ioctl->header.return_code = IOCTL_OUT_OF_RESOURCES;
break;
case -ENOSYS:
case -EINVAL:
ioctl->header.return_code = IOCTL_INVALID_PARAM;
break;
default:
ioctl->header.return_code = IOCTL_GENERAL_ERROR;
break;
}
}
/* Always copy the buffer back, if only to pick up the status */
err = __copy_to_user(arg, ioctl, sizeof(struct atto_express_ioctl));
if (err != 0) {
esas2r_log(ESAS2R_LOG_WARN,
"ioctl_handler copy_to_user didn't copy "
"everything (err %d, cmd %d)", err,
cmd);
kfree(ioctl);
return -EFAULT;
}
kfree(ioctl);
return 0;
}
int esas2r_ioctl(struct scsi_device *sd, int cmd, void __user *arg)
{
return esas2r_ioctl_handler(sd->host->hostdata, cmd, arg);
}
static void free_fw_buffers(struct esas2r_adapter *a)
{
if (a->firmware.data) {
dma_free_coherent(&a->pcid->dev,
(size_t)a->firmware.orig_len,
a->firmware.data,
(dma_addr_t)a->firmware.phys);
a->firmware.data = NULL;
}
}
static int allocate_fw_buffers(struct esas2r_adapter *a, u32 length)
{
free_fw_buffers(a);
a->firmware.orig_len = length;
a->firmware.data = (u8 *)dma_alloc_coherent(&a->pcid->dev,
(size_t)length,
(dma_addr_t *)&a->firmware.
phys,
GFP_KERNEL);
if (!a->firmware.data) {
esas2r_debug("buffer alloc failed!");
return 0;
}
return 1;
}
/* Handle a call to read firmware. */
int esas2r_read_fw(struct esas2r_adapter *a, char *buf, long off, int count)
{
esas2r_trace_enter();
/* if the cached header is a status, simply copy it over and return. */
if (a->firmware.state == FW_STATUS_ST) {
int size = min_t(int, count, sizeof(a->firmware.header));
esas2r_trace_exit();
memcpy(buf, &a->firmware.header, size);
esas2r_debug("esas2r_read_fw: STATUS size %d", size);
return size;
}
/*
 * if the cached header is a command, execute it when the read is at
 * offset 0; otherwise, copy out the buffered pieces.
 */
if (a->firmware.state == FW_COMMAND_ST) {
u32 length = a->firmware.header.length;
esas2r_trace_exit();
esas2r_debug("esas2r_read_fw: COMMAND length %d off %d",
length,
off);
if (off == 0) {
if (a->firmware.header.action == FI_ACT_UP) {
if (!allocate_fw_buffers(a, length))
return -ENOMEM;
/* copy header over */
memcpy(a->firmware.data,
&a->firmware.header,
sizeof(a->firmware.header));
do_fm_api(a,
(struct esas2r_flash_img *)a->firmware.data);
} else if (a->firmware.header.action == FI_ACT_UPSZ) {
int size =
min((int)count,
(int)sizeof(a->firmware.header));
do_fm_api(a, &a->firmware.header);
memcpy(buf, &a->firmware.header, size);
esas2r_debug("FI_ACT_UPSZ size %d", size);
return size;
} else {
esas2r_debug("invalid action %d",
a->firmware.header.action);
return -ENOSYS;
}
}
if (count + off > length)
count = length - off;
if (count < 0)
return 0;
if (!a->firmware.data) {
esas2r_debug(
"read: nonzero offset but no buffer available!");
return -ENOMEM;
}
esas2r_debug("esas2r_read_fw: off %d count %d length %d ", off,
count,
length);
memcpy(buf, &a->firmware.data[off], count);
/* when done, release the buffer */
if (length <= off + count) {
esas2r_debug("esas2r_read_fw: freeing buffer!");
free_fw_buffers(a);
}
return count;
}
esas2r_trace_exit();
esas2r_debug("esas2r_read_fw: invalid firmware state %d",
a->firmware.state);
return -EINVAL;
}
/* Handle a call to write firmware. */
int esas2r_write_fw(struct esas2r_adapter *a, const char *buf, long off,
int count)
{
u32 length;
if (off == 0) {
struct esas2r_flash_img *header =
(struct esas2r_flash_img *)buf;
/* assume version 0 flash image */
int min_size = sizeof(struct esas2r_flash_img_v0);
a->firmware.state = FW_INVALID_ST;
/* validate the version field first */
if (count < 4
|| header->fi_version > FI_VERSION_1) {
esas2r_debug(
"esas2r_write_fw: short header or invalid version");
return -EINVAL;
}
/* See if it's a version 1 flash image */
if (header->fi_version == FI_VERSION_1)
min_size = sizeof(struct esas2r_flash_img);
/* If this is the start, the header must be full and valid. */
if (count < min_size) {
esas2r_debug("esas2r_write_fw: short header, aborting");
return -EINVAL;
}
/* Make sure the size is reasonable. */
length = header->length;
if (length > 1024 * 1024) {
esas2r_debug(
"esas2r_write_fw: hosed, length %d fi_version %d",
length, header->fi_version);
return -EINVAL;
}
/*
 * If this is a write command, allocate memory because
 * we have to cache everything. Otherwise, just cache
 * the header, because the read op will do the command.
 */
if (header->action == FI_ACT_DOWN) {
if (!allocate_fw_buffers(a, length))
return -ENOMEM;
/*
* Store the command, so there is context on subsequent
* calls.
*/
memcpy(&a->firmware.header,
buf,
sizeof(*header));
} else if (header->action == FI_ACT_UP
|| header->action == FI_ACT_UPSZ) {
/* Save the command, result will be picked up on read */
memcpy(&a->firmware.header,
buf,
sizeof(*header));
a->firmware.state = FW_COMMAND_ST;
esas2r_debug(
"esas2r_write_fw: COMMAND, count %d, action %d ",
count, header->action);
/*
* Pretend we took the whole buffer,
* so we don't get bothered again.
*/
return count;
} else {
esas2r_debug("esas2r_write_fw: invalid action %d ",
a->firmware.header.action);
return -ENOSYS;
}
} else {
length = a->firmware.header.length;
}
/*
* We only get here on a download command, regardless of offset.
* the chunks written by the system need to be cached, and when
* the final one arrives, issue the fmapi command.
*/
if (off + count > length)
count = length - off;
if (count > 0) {
esas2r_debug("esas2r_write_fw: off %d count %d length %d", off,
count,
length);
/*
 * On a full upload, the system tries sending the whole buffer.
 * There's nothing to do with it, so just drop it here, before
 * trying to copy over into unallocated memory!
 */
if (a->firmware.header.action == FI_ACT_UP)
return count;
if (!a->firmware.data) {
esas2r_debug(
"write: nonzero offset but no buffer available!");
return -ENOMEM;
}
memcpy(&a->firmware.data[off], buf, count);
if (length == off + count) {
do_fm_api(a,
(struct esas2r_flash_img *)a->firmware.data);
/*
* Now copy the header result to be picked up by the
* next read
*/
memcpy(&a->firmware.header,
a->firmware.data,
sizeof(a->firmware.header));
a->firmware.state = FW_STATUS_ST;
esas2r_debug("write completed");
/*
* Since the system has the data buffered, the only way
* this can leak is if a root user writes a program
* that writes a shorter buffer than it claims, and the
* copyin fails.
*/
free_fw_buffers(a);
}
}
return count;
}
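/*
 * Summary of the flash state machine implemented by esas2r_read_fw()
 * and esas2r_write_fw() above (derived from the code, not a formal
 * interface spec):
 *
 *   FI_ACT_DOWN: write the header, then stream the image in chunks;
 *                when the last chunk lands, do_fm_api() runs and the
 *                cached header becomes a status (FW_STATUS_ST) that the
 *                next read returns.
 *   FI_ACT_UP:   write just the header (FW_COMMAND_ST); the first read
 *                at offset 0 allocates buffers, runs do_fm_api(), and
 *                subsequent reads copy the image back out in chunks.
 *   FI_ACT_UPSZ: like FI_ACT_UP, but only the updated header is
 *                returned to the caller.
 */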
/* Callback for the completion of a VDA request. */
static void vda_complete_req(struct esas2r_adapter *a,
struct esas2r_request *rq)
{
a->vda_command_done = 1;
wake_up_interruptible(&a->vda_waiter);
}
/* Scatter/gather callback for VDA requests */
static u32 get_physaddr_vda(struct esas2r_sg_context *sgc, u64 *addr)
{
struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
int offset = (u8 *)sgc->cur_offset - (u8 *)a->vda_buffer;
(*addr) = a->ppvda_buffer + offset;
return VDA_MAX_BUFFER_SIZE - offset;
}
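/*
 * Contract for PGETPHYSADDR callbacks such as get_physaddr_vda()
 * above: store the bus address corresponding to sgc->cur_offset in
 * *addr and return the number of contiguous bytes valid at that
 * address. The S/G list builder may consume fewer bytes than returned
 * and call again (see get_physaddr_from_sgc() in esas2r_main.c for the
 * general scatterlist case).
 */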
/* Handle a call to read a VDA command. */
int esas2r_read_vda(struct esas2r_adapter *a, char *buf, long off, int count)
{
if (!a->vda_buffer)
return -ENOMEM;
if (off == 0) {
struct esas2r_request *rq;
struct atto_ioctl_vda *vi =
(struct atto_ioctl_vda *)a->vda_buffer;
struct esas2r_sg_context sgc;
bool wait_for_completion;
/*
 * Presumably, someone has already written to the vda_buffer,
 * and now they are reading back the response, so now we
 * will actually issue the request to the chip and reply.
 */
/* allocate a request */
rq = esas2r_alloc_request(a);
if (rq == NULL) {
esas2r_debug("esas2r_read_vda: out of requestss");
return -EBUSY;
}
rq->comp_cb = vda_complete_req;
sgc.first_req = rq;
sgc.adapter = a;
sgc.cur_offset = a->vda_buffer + VDA_BUFFER_HEADER_SZ;
sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_vda;
a->vda_command_done = 0;
wait_for_completion =
esas2r_process_vda_ioctl(a, vi, rq, &sgc);
if (wait_for_completion) {
/* now wait around for it to complete. */
while (!a->vda_command_done)
wait_event_interruptible(a->vda_waiter,
a->vda_command_done);
}
esas2r_free_request(a, (struct esas2r_request *)rq);
}
if (off > VDA_MAX_BUFFER_SIZE)
return 0;
if (count + off > VDA_MAX_BUFFER_SIZE)
count = VDA_MAX_BUFFER_SIZE - off;
if (count < 0)
return 0;
memcpy(buf, a->vda_buffer + off, count);
return count;
}
/* Handle a call to write a VDA command. */
int esas2r_write_vda(struct esas2r_adapter *a, const char *buf, long off,
int count)
{
/*
* allocate memory for it, if not already done. once allocated,
* we will keep it around until the driver is unloaded.
*/
if (!a->vda_buffer) {
dma_addr_t dma_addr;
a->vda_buffer = (u8 *)dma_alloc_coherent(&a->pcid->dev,
(size_t)
VDA_MAX_BUFFER_SIZE,
&dma_addr,
GFP_KERNEL);
a->ppvda_buffer = dma_addr;
}
if (!a->vda_buffer)
return -ENOMEM;
if (off > VDA_MAX_BUFFER_SIZE)
return 0;
if (count + off > VDA_MAX_BUFFER_SIZE)
count = VDA_MAX_BUFFER_SIZE - off;
if (count < 1)
return 0;
memcpy(a->vda_buffer + off, buf, count);
return count;
}
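/*
 * Usage note: user space drives the VDA interface as a write/read pair
 * on the "vda" sysfs binary attribute -- write the atto_ioctl_vda
 * command at offset 0, then read from offset 0, which issues the
 * request and returns the response from the same persistent DMA
 * buffer.
 */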
/* Callback for the completion of an FS_API request.*/
static void fs_api_complete_req(struct esas2r_adapter *a,
struct esas2r_request *rq)
{
a->fs_api_command_done = 1;
wake_up_interruptible(&a->fs_api_waiter);
}
/* Scatter/gather callback for VDA requests */
static u32 get_physaddr_fs_api(struct esas2r_sg_context *sgc, u64 *addr)
{
struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
struct esas2r_ioctl_fs *fs =
(struct esas2r_ioctl_fs *)a->fs_api_buffer;
u32 offset = (u8 *)sgc->cur_offset - (u8 *)fs;
(*addr) = a->ppfs_api_buffer + offset;
return a->fs_api_buffer_size - offset;
}
/* Handle a call to read firmware via FS_API. */
int esas2r_read_fs(struct esas2r_adapter *a, char *buf, long off, int count)
{
if (!a->fs_api_buffer)
return -ENOMEM;
if (off == 0) {
struct esas2r_request *rq;
struct esas2r_sg_context sgc;
struct esas2r_ioctl_fs *fs =
(struct esas2r_ioctl_fs *)a->fs_api_buffer;
/* If another flash request is already in progress, return. */
if (down_interruptible(&a->fs_api_semaphore)) {
busy:
fs->status = ATTO_STS_OUT_OF_RSRC;
return -EBUSY;
}
/*
 * Presumably, someone has already written to the
 * fs_api_buffer, and now they are reading back the
 * response, so now we will actually issue the request to
 * the chip and reply. Allocate a request.
 */
rq = esas2r_alloc_request(a);
if (rq == NULL) {
esas2r_debug("esas2r_read_fs: out of requests");
up(&a->fs_api_semaphore);
goto busy;
}
rq->comp_cb = fs_api_complete_req;
/* Set up the SGCONTEXT to build the s/g table */
sgc.cur_offset = fs->data;
sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_fs_api;
a->fs_api_command_done = 0;
if (!esas2r_process_fs_ioctl(a, fs, rq, &sgc)) {
if (fs->status == ATTO_STS_OUT_OF_RSRC)
count = -EBUSY;
goto dont_wait;
}
/* Now wait around for it to complete. */
while (!a->fs_api_command_done)
wait_event_interruptible(a->fs_api_waiter,
a->fs_api_command_done);
dont_wait:
/* Free the request and keep going */
up(&a->fs_api_semaphore);
esas2r_free_request(a, (struct esas2r_request *)rq);
/* Pick up possible error code from above */
if (count < 0)
return count;
}
if (off > a->fs_api_buffer_size)
return 0;
if (count + off > a->fs_api_buffer_size)
count = a->fs_api_buffer_size - off;
if (count < 0)
return 0;
memcpy(buf, a->fs_api_buffer + off, count);
return count;
}
/* Handle a call to write firmware via FS_API. */
int esas2r_write_fs(struct esas2r_adapter *a, const char *buf, long off,
int count)
{
if (off == 0) {
struct esas2r_ioctl_fs *fs = (struct esas2r_ioctl_fs *)buf;
u32 length = fs->command.length + offsetof(
struct esas2r_ioctl_fs,
data);
/*
* Special case, for BEGIN commands, the length field
* is lying to us, so just get enough for the header.
*/
if (fs->command.command == ESAS2R_FS_CMD_BEGINW)
length = offsetof(struct esas2r_ioctl_fs, data);
/*
* Beginning a command. We assume we'll get at least
* enough in the first write so we can look at the
* header and see how much we need to alloc.
*/
if (count < offsetof(struct esas2r_ioctl_fs, data))
return -EINVAL;
/* Allocate a buffer or use the existing buffer. */
if (a->fs_api_buffer) {
if (a->fs_api_buffer_size < length) {
/* Free too-small buffer and get a new one */
dma_free_coherent(&a->pcid->dev,
(size_t)a->fs_api_buffer_size,
a->fs_api_buffer,
(dma_addr_t)a->ppfs_api_buffer);
goto re_allocate_buffer;
}
} else {
re_allocate_buffer:
a->fs_api_buffer_size = length;
a->fs_api_buffer = (u8 *)dma_alloc_coherent(
&a->pcid->dev,
(size_t)a->fs_api_buffer_size,
(dma_addr_t *)&a->ppfs_api_buffer,
GFP_KERNEL);
}
}
if (!a->fs_api_buffer)
return -ENOMEM;
if (off > a->fs_api_buffer_size)
return 0;
if (count + off > a->fs_api_buffer_size)
count = a->fs_api_buffer_size - off;
if (count < 1)
return 0;
memcpy(a->fs_api_buffer + off, buf, count);
return count;
}
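/*
 * Unlike the fixed-size VDA buffer, the FS_API buffer is sized from
 * the incoming command header and persists between commands; it is
 * only freed and reallocated above when a larger request arrives.
 */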
/*
* linux/drivers/scsi/esas2r/esas2r_log.c
* For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
*
* Copyright (c) 2001-2013 ATTO Technology, Inc.
* (mailto:linuxdrivers@attotech.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* NO WARRANTY
* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
* solely responsible for determining the appropriateness of using and
* distributing the Program and assumes all risks associated with its
* exercise of rights under this Agreement, including but not limited to
* the risks and costs of program errors, damage to or loss of data,
* programs or equipment, and unavailability or interruption of operations.
*
* DISCLAIMER OF LIABILITY
* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
* USA.
*/
#include "esas2r.h"
/*
* this module within the driver is tasked with providing logging functionality.
* the event_log_level module parameter controls the level of messages that are
* written to the system log. the default level of messages that are written
* are critical and warning messages. if other types of messages are desired,
* one simply needs to load the module with the correct value for the
* event_log_level module parameter. for example:
*
* insmod <module> event_log_level=1
*
* will load the module and only critical events will be written by this module
* to the system log. if critical, warning, and information-level messages are
* desired, the correct value for the event_log_level module parameter
* would be as follows:
*
* insmod <module> event_log_level=3
*/
#define EVENT_LOG_BUFF_SIZE 1024
static long event_log_level = ESAS2R_LOG_DFLT;
module_param(event_log_level, long, S_IRUGO | S_IRUSR);
MODULE_PARM_DESC(event_log_level,
"Specifies the level of events to report to the system log. Critical and warning level events are logged by default.");
/* A shared buffer to use for formatting messages. */
static char event_buffer[EVENT_LOG_BUFF_SIZE];
/* A lock to protect the shared buffer used for formatting messages. */
static DEFINE_SPINLOCK(event_buffer_lock);
/**
* translates an esas2r-defined logging event level to a kernel logging level.
*
* @param [in] level the esas2r-defined logging event level to translate
*
* @return the corresponding kernel logging level prefix string.
*/
static const char *translate_esas2r_event_level_to_kernel(const long level)
{
switch (level) {
case ESAS2R_LOG_CRIT:
return KERN_CRIT;
case ESAS2R_LOG_WARN:
return KERN_WARNING;
case ESAS2R_LOG_INFO:
return KERN_INFO;
case ESAS2R_LOG_DEBG:
case ESAS2R_LOG_TRCE:
default:
return KERN_DEBUG;
}
}
/**
* the master logging function. this function will format the message as
* outlined by the formatting string, the input device information and the
* substitution arguments and output the resulting string to the system log.
*
* @param [in] level the event log level of the message
* @param [in] dev the device information
* @param [in] format the formatting string for the message
* @param [in] args the substitution arguments to the formatting string
*
* @return 0 on success, or -1 if an error occurred.
*/
static int esas2r_log_master(const long level,
const struct device *dev,
const char *format,
va_list args)
{
if (level <= event_log_level) {
unsigned long flags = 0;
int retval = 0;
char *buffer = event_buffer;
size_t buflen = EVENT_LOG_BUFF_SIZE;
const char *fmt_nodev = "%s%s: ";
const char *fmt_dev = "%s%s [%s, %s, %s]";
const char *slevel =
translate_esas2r_event_level_to_kernel(level);
spin_lock_irqsave(&event_buffer_lock, flags);
if (buffer == NULL) {
spin_unlock_irqrestore(&event_buffer_lock, flags);
return -1;
}
memset(buffer, 0, buflen);
/*
* format the level onto the beginning of the string and do
* some pointer arithmetic to move the pointer to the point
* where the actual message can be inserted.
*/
if (dev == NULL) {
snprintf(buffer, buflen, fmt_nodev, slevel,
ESAS2R_DRVR_NAME);
} else {
snprintf(buffer, buflen, fmt_dev, slevel,
ESAS2R_DRVR_NAME,
(dev->driver ? dev->driver->name : "unknown"),
(dev->bus ? dev->bus->name : "unknown"),
dev_name(dev));
}
buffer += strlen(event_buffer);
buflen -= strlen(event_buffer);
retval = vsnprintf(buffer, buflen, format, args);
if (retval < 0) {
spin_unlock_irqrestore(&event_buffer_lock, flags);
return -1;
}
/*
* Put a line break at the end of the formatted string so that
* we don't wind up with run-on messages. only append if there
* is enough space in the buffer.
*/
if (strlen(event_buffer) < buflen)
strcat(buffer, "\n");
printk("%s", event_buffer);
spin_unlock_irqrestore(&event_buffer_lock, flags);
}
return 0;
}
/**
* formats and logs a message to the system log.
*
* @param [in] level the event level of the message
* @param [in] format the formatting string for the message
* @param [in] ... the substitution arguments to the formatting string
*
* @return 0 on success, or -1 if an error occurred.
*/
int esas2r_log(const long level, const char *format, ...)
{
int retval = 0;
va_list args;
va_start(args, format);
retval = esas2r_log_master(level, NULL, format, args);
va_end(args);
return retval;
}
/**
* formats and logs a message to the system log. this message will include
* device information.
*
* @param [in] level the event level of the message
* @param [in] dev the device information
* @param [in] format the formatting string for the message
* @param [in] ... the substitution arguments to the formatting string
*
* @return 0 on success, or -1 if an error occurred.
*/
int esas2r_log_dev(const long level,
const struct device *dev,
const char *format,
...)
{
int retval = 0;
va_list args;
va_start(args, format);
retval = esas2r_log_master(level, dev, format, args);
va_end(args);
return retval;
}
/**
 * formats and logs a hex dump of a buffer to the system log.
 *
 * @param [in] level the event level of the message
 * @param [in] buf the buffer to dump
 * @param [in] len the number of bytes to dump
 *
 * @return always 1; the dump is suppressed if level is above
 *         event_log_level.
 */
int esas2r_log_hexdump(const long level,
const void *buf,
size_t len)
{
if (level <= event_log_level) {
print_hex_dump(translate_esas2r_event_level_to_kernel(level),
"", DUMP_PREFIX_OFFSET, 16, 1, buf,
len, true);
}
return 1;
}
/*
* linux/drivers/scsi/esas2r/esas2r_log.h
* For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
*
* Copyright (c) 2001-2013 ATTO Technology, Inc.
* (mailto:linuxdrivers@attotech.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* NO WARRANTY
* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
* solely responsible for determining the appropriateness of using and
* distributing the Program and assumes all risks associated with its
* exercise of rights under this Agreement, including but not limited to
* the risks and costs of program errors, damage to or loss of data,
* programs or equipment, and unavailability or interruption of operations.
*
* DISCLAIMER OF LIABILITY
* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
* USA.
*/
#ifndef __esas2r_log_h__
#define __esas2r_log_h__
struct device;
enum {
ESAS2R_LOG_NONE = 0, /* no events logged */
ESAS2R_LOG_CRIT = 1, /* critical events */
ESAS2R_LOG_WARN = 2, /* warning events */
ESAS2R_LOG_INFO = 3, /* info events */
ESAS2R_LOG_DEBG = 4, /* debugging events */
ESAS2R_LOG_TRCE = 5, /* tracing events */
#ifdef ESAS2R_TRACE
ESAS2R_LOG_DFLT = ESAS2R_LOG_TRCE
#else
ESAS2R_LOG_DFLT = ESAS2R_LOG_WARN
#endif
};
int esas2r_log(const long level, const char *format, ...);
int esas2r_log_dev(const long level,
const struct device *dev,
const char *format,
...);
int esas2r_log_hexdump(const long level,
const void *buf,
size_t len);
/*
* the following macros are provided specifically for debugging and tracing
* messages. esas2r_debug() is provided for generic non-hardware layer
* debugging and tracing events. esas2r_hdebug is provided specifically for
* hardware layer debugging and tracing events.
*/
#ifdef ESAS2R_DEBUG
#define esas2r_debug(f, args ...) esas2r_log(ESAS2R_LOG_DEBG, f, ## args)
#define esas2r_hdebug(f, args ...) esas2r_log(ESAS2R_LOG_DEBG, f, ## args)
#else
#define esas2r_debug(f, args ...)
#define esas2r_hdebug(f, args ...)
#endif /* ESAS2R_DEBUG */
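/*
 * Example: with ESAS2R_DEBUG defined,
 *     esas2r_debug("read: off %ld count %d", off, count);
 * expands to esas2r_log(ESAS2R_LOG_DEBG, ...); without it, the call
 * compiles away entirely, so arguments to these macros must not have
 * side effects.
 */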
/*
* the following macros are provided in order to trace the driver and catch
* some more serious bugs. be warned, enabling these macros may *severely*
* impact performance.
*/
#ifdef ESAS2R_TRACE
#define esas2r_bugon() \
do { \
esas2r_log(ESAS2R_LOG_TRCE, "esas2r_bugon() called in %s:%d" \
" - dumping stack and stopping kernel", __func__, \
__LINE__); \
dump_stack(); \
BUG(); \
} while (0)
#define esas2r_trace_enter() esas2r_log(ESAS2R_LOG_TRCE, "entered %s (%s:%d)", \
__func__, __FILE__, __LINE__)
#define esas2r_trace_exit() esas2r_log(ESAS2R_LOG_TRCE, "exited %s (%s:%d)", \
__func__, __FILE__, __LINE__)
#define esas2r_trace(f, args ...) esas2r_log(ESAS2R_LOG_TRCE, "(%s:%s:%d): " \
f, __func__, __FILE__, __LINE__, \
## args)
#else
#define esas2r_bugon()
#define esas2r_trace_enter()
#define esas2r_trace_exit()
#define esas2r_trace(f, args ...)
#endif /* ESAS2R_TRACE */
#endif /* __esas2r_log_h__ */
/*
* linux/drivers/scsi/esas2r/esas2r_main.c
* For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
*
* Copyright (c) 2001-2013 ATTO Technology, Inc.
* (mailto:linuxdrivers@attotech.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* NO WARRANTY
* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
* solely responsible for determining the appropriateness of using and
* distributing the Program and assumes all risks associated with its
* exercise of rights under this Agreement, including but not limited to
* the risks and costs of program errors, damage to or loss of data,
* programs or equipment, and unavailability or interruption of operations.
*
* DISCLAIMER OF LIABILITY
* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
* USA.
*/
#include "esas2r.h"
MODULE_DESCRIPTION(ESAS2R_DRVR_NAME ": " ESAS2R_LONGNAME " driver");
MODULE_AUTHOR("ATTO Technology, Inc.");
MODULE_LICENSE("GPL");
MODULE_VERSION(ESAS2R_VERSION_STR);
/* global definitions */
static int found_adapters;
struct esas2r_adapter *esas2r_adapters[MAX_ADAPTERS];
#define ESAS2R_VDA_EVENT_PORT1 54414
#define ESAS2R_VDA_EVENT_PORT2 54415
#define ESAS2R_VDA_EVENT_SOCK_COUNT 2
static struct esas2r_adapter *esas2r_adapter_from_kobj(struct kobject *kobj)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct Scsi_Host *host = class_to_shost(dev);
return (struct esas2r_adapter *)host->hostdata;
}
static ssize_t read_fw(struct file *file, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
return esas2r_read_fw(a, buf, off, count);
}
static ssize_t write_fw(struct file *file, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
return esas2r_write_fw(a, buf, off, count);
}
static ssize_t read_fs(struct file *file, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
return esas2r_read_fs(a, buf, off, count);
}
static ssize_t write_fs(struct file *file, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
int length = min(sizeof(struct esas2r_ioctl_fs), count);
int result = esas2r_write_fs(a, buf, off, count);
if (result < 0)
return result;
return length;
}
static ssize_t read_vda(struct file *file, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
return esas2r_read_vda(a, buf, off, count);
}
static ssize_t write_vda(struct file *file, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
return esas2r_write_vda(a, buf, off, count);
}
static ssize_t read_live_nvram(struct file *file, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
int length = min_t(size_t, sizeof(struct esas2r_sas_nvram), PAGE_SIZE);
memcpy(buf, a->nvram, length);
return length;
}
static ssize_t write_live_nvram(struct file *file, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
struct esas2r_request *rq;
int result = -EFAULT;
rq = esas2r_alloc_request(a);
if (rq == NULL)
return -ENOMEM;
if (esas2r_write_params(a, rq, (struct esas2r_sas_nvram *)buf))
result = count;
esas2r_free_request(a, rq);
return result;
}
static ssize_t read_default_nvram(struct file *file, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
esas2r_nvram_get_defaults(a, (struct esas2r_sas_nvram *)buf);
return sizeof(struct esas2r_sas_nvram);
}
static ssize_t read_hw(struct file *file, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
int length = min_t(size_t, sizeof(struct atto_ioctl), PAGE_SIZE);
if (!a->local_atto_ioctl)
return -ENOMEM;
if (handle_hba_ioctl(a, a->local_atto_ioctl) != IOCTL_SUCCESS)
return -ENOMEM;
memcpy(buf, a->local_atto_ioctl, length);
return length;
}
static ssize_t write_hw(struct file *file, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
int length = min(sizeof(struct atto_ioctl), count);
if (!a->local_atto_ioctl) {
a->local_atto_ioctl = kzalloc(sizeof(struct atto_ioctl),
GFP_KERNEL);
if (a->local_atto_ioctl == NULL) {
esas2r_log(ESAS2R_LOG_WARN,
"write_hw kzalloc failed for %d bytes",
sizeof(struct atto_ioctl));
return -ENOMEM;
}
}
memset(a->local_atto_ioctl, 0, sizeof(struct atto_ioctl));
memcpy(a->local_atto_ioctl, buf, length);
return length;
}
#define ESAS2R_RW_BIN_ATTR(_name) \
struct bin_attribute bin_attr_ ## _name = { \
.attr = \
{ .name = __stringify(_name), .mode = S_IRUSR | S_IWUSR }, \
.size = 0, \
.read = read_ ## _name, \
.write = write_ ## _name }
ESAS2R_RW_BIN_ATTR(fw);
ESAS2R_RW_BIN_ATTR(fs);
ESAS2R_RW_BIN_ATTR(vda);
ESAS2R_RW_BIN_ATTR(hw);
ESAS2R_RW_BIN_ATTR(live_nvram);
struct bin_attribute bin_attr_default_nvram = {
.attr = { .name = "default_nvram", .mode = S_IRUGO },
.size = 0,
.read = read_default_nvram,
.write = NULL
};
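/*
 * These binary attributes are registered against the Scsi_Host class
 * device in esas2r_probe(), so they typically appear as
 * /sys/class/scsi_host/hostN/{fw,fs,vda,hw,live_nvram,default_nvram}.
 */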
static struct scsi_host_template driver_template = {
.module = THIS_MODULE,
.show_info = esas2r_show_info,
.name = ESAS2R_LONGNAME,
.release = esas2r_release,
.info = esas2r_info,
.ioctl = esas2r_ioctl,
.queuecommand = esas2r_queuecommand,
.eh_abort_handler = esas2r_eh_abort,
.eh_device_reset_handler = esas2r_device_reset,
.eh_bus_reset_handler = esas2r_bus_reset,
.eh_host_reset_handler = esas2r_host_reset,
.eh_target_reset_handler = esas2r_target_reset,
.can_queue = 128,
.this_id = -1,
.sg_tablesize = SCSI_MAX_SG_SEGMENTS,
.cmd_per_lun = ESAS2R_DEFAULT_CMD_PER_LUN,
.present = 0,
.unchecked_isa_dma = 0,
.use_clustering = ENABLE_CLUSTERING,
.emulated = 0,
.proc_name = ESAS2R_DRVR_NAME,
.slave_configure = esas2r_slave_configure,
.slave_alloc = esas2r_slave_alloc,
.slave_destroy = esas2r_slave_destroy,
.change_queue_depth = esas2r_change_queue_depth,
.change_queue_type = esas2r_change_queue_type,
.max_sectors = 0xFFFF,
};
int sgl_page_size = 512;
module_param(sgl_page_size, int, 0);
MODULE_PARM_DESC(sgl_page_size,
"Scatter/gather list (SGL) page size in number of S/G "
"entries. If your application is doing a lot of very large "
"transfers, you may want to increase the SGL page size. "
"Default 512.");
int num_sg_lists = 1024;
module_param(num_sg_lists, int, 0);
MODULE_PARM_DESC(num_sg_lists,
"Number of scatter/gather lists. Default 1024.");
int sg_tablesize = SCSI_MAX_SG_SEGMENTS;
module_param(sg_tablesize, int, 0);
MODULE_PARM_DESC(sg_tablesize,
"Maximum number of entries in a scatter/gather table.");
int num_requests = 256;
module_param(num_requests, int, 0);
MODULE_PARM_DESC(num_requests,
"Number of requests. Default 256.");
int num_ae_requests = 4;
module_param(num_ae_requests, int, 0);
MODULE_PARM_DESC(num_ae_requests,
"Number of VDA asynchromous event requests. Default 4.");
int cmd_per_lun = ESAS2R_DEFAULT_CMD_PER_LUN;
module_param(cmd_per_lun, int, 0);
MODULE_PARM_DESC(cmd_per_lun,
"Maximum number of commands per LUN. Default "
DEFINED_NUM_TO_STR(ESAS2R_DEFAULT_CMD_PER_LUN) ".");
int can_queue = 128;
module_param(can_queue, int, 0);
MODULE_PARM_DESC(can_queue,
"Maximum number of commands per adapter. Default 128.");
int esas2r_max_sectors = 0xFFFF;
module_param(esas2r_max_sectors, int, 0);
MODULE_PARM_DESC(esas2r_max_sectors,
"Maximum number of disk sectors in a single data transfer. "
"Default 65535 (largest possible setting).");
int interrupt_mode = 1;
module_param(interrupt_mode, int, 0);
MODULE_PARM_DESC(interrupt_mode,
"Defines the interrupt mode to use. 0 for legacy"
", 1 for MSI. Default is MSI (1).");
static struct pci_device_id
esas2r_pci_table[] = {
{ ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x0049,
0,
0, 0 },
{ ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004A,
0,
0, 0 },
{ ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004B,
0,
0, 0 },
{ ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004C,
0,
0, 0 },
{ ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004D,
0,
0, 0 },
{ ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004E,
0,
0, 0 },
{ 0, 0, 0, 0,
0,
0, 0 }
};
MODULE_DEVICE_TABLE(pci, esas2r_pci_table);
static int
esas2r_probe(struct pci_dev *pcid, const struct pci_device_id *id);
static void
esas2r_remove(struct pci_dev *pcid);
static struct pci_driver
esas2r_pci_driver = {
.name = ESAS2R_DRVR_NAME,
.id_table = esas2r_pci_table,
.probe = esas2r_probe,
.remove = esas2r_remove,
.suspend = esas2r_suspend,
.resume = esas2r_resume,
};
static int esas2r_probe(struct pci_dev *pcid,
const struct pci_device_id *id)
{
struct Scsi_Host *host = NULL;
struct esas2r_adapter *a;
int err;
size_t host_alloc_size = sizeof(struct esas2r_adapter)
+ ((num_requests) +
1) * sizeof(struct esas2r_request);
esas2r_log_dev(ESAS2R_LOG_DEBG, &(pcid->dev),
"esas2r_probe() 0x%02x 0x%02x 0x%02x 0x%02x",
pcid->vendor,
pcid->device,
pcid->subsystem_vendor,
pcid->subsystem_device);
esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev),
"before pci_enable_device() "
"enable_cnt: %d",
pcid->enable_cnt.counter);
err = pci_enable_device(pcid);
if (err != 0) {
esas2r_log_dev(ESAS2R_LOG_CRIT, &(pcid->dev),
"pci_enable_device() FAIL (%d)",
err);
return -ENODEV;
}
esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev),
"pci_enable_device() OK");
esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev),
"after pci_device_enable() enable_cnt: %d",
pcid->enable_cnt.counter);
host = scsi_host_alloc(&driver_template, host_alloc_size);
if (host == NULL) {
esas2r_log(ESAS2R_LOG_CRIT, "scsi_host_alloc() FAIL");
return -ENODEV;
}
memset(host->hostdata, 0, host_alloc_size);
a = (struct esas2r_adapter *)host->hostdata;
esas2r_log(ESAS2R_LOG_INFO, "scsi_host_alloc() OK host: %p", host);
/* override max LUN and max target id */
host->max_id = ESAS2R_MAX_ID + 1;
host->max_lun = 255;
/* we can handle 16-byte CDBs */
host->max_cmd_len = 16;
host->can_queue = can_queue;
host->cmd_per_lun = cmd_per_lun;
host->this_id = host->max_id + 1;
host->max_channel = 0;
host->unique_id = found_adapters;
host->sg_tablesize = sg_tablesize;
host->max_sectors = esas2r_max_sectors;
/* set to bus master for BIOses that don't do it for us */
esas2r_log(ESAS2R_LOG_INFO, "pci_set_master() called");
pci_set_master(pcid);
if (!esas2r_init_adapter(host, pcid, found_adapters)) {
esas2r_log(ESAS2R_LOG_CRIT,
"unable to initialize device at PCI bus %x:%x",
pcid->bus->number,
pcid->devfn);
esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev),
"scsi_host_put() called");
scsi_host_put(host);
return -ENODEV;
}
esas2r_log(ESAS2R_LOG_INFO, "pci_set_drvdata(%p, %p) called", pcid,
host->hostdata);
pci_set_drvdata(pcid, host);
esas2r_log(ESAS2R_LOG_INFO, "scsi_add_host() called");
err = scsi_add_host(host, &pcid->dev);
if (err) {
esas2r_log(ESAS2R_LOG_CRIT, "scsi_add_host returned %d", err);
esas2r_log_dev(ESAS2R_LOG_CRIT, &(host->shost_gendev),
"scsi_add_host() FAIL");
esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev),
"scsi_host_put() called");
scsi_host_put(host);
esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev),
"pci_set_drvdata(%p, NULL) called",
pcid);
pci_set_drvdata(pcid, NULL);
return -ENODEV;
}
esas2r_fw_event_on(a);
esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev),
"scsi_scan_host() called");
scsi_scan_host(host);
/* Add sysfs binary files */
if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_fw))
esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
"Failed to create sysfs binary file: fw");
else
a->sysfs_fw_created = 1;
if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_fs))
esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
"Failed to create sysfs binary file: fs");
else
a->sysfs_fs_created = 1;
if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_vda))
esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
"Failed to create sysfs binary file: vda");
else
a->sysfs_vda_created = 1;
if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_hw))
esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
"Failed to create sysfs binary file: hw");
else
a->sysfs_hw_created = 1;
if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_live_nvram))
esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
"Failed to create sysfs binary file: live_nvram");
else
a->sysfs_live_nvram_created = 1;
if (sysfs_create_bin_file(&host->shost_dev.kobj,
&bin_attr_default_nvram))
esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
"Failed to create sysfs binary file: default_nvram");
else
a->sysfs_default_nvram_created = 1;
found_adapters++;
return 0;
}
static void esas2r_remove(struct pci_dev *pdev)
{
struct Scsi_Host *host;
int index;
if (pdev == NULL) {
esas2r_log(ESAS2R_LOG_WARN, "esas2r_remove pdev==NULL");
return;
}
host = pci_get_drvdata(pdev);
if (host == NULL) {
/*
* this can happen if pci_set_drvdata was already called
* to clear the host pointer. if this is the case, we
* are okay; this channel has already been cleaned up.
*/
return;
}
esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
"esas2r_remove(%p) called; "
"host:%p", pdev,
host);
index = esas2r_cleanup(host);
if (index < 0)
esas2r_log_dev(ESAS2R_LOG_WARN, &(pdev->dev),
"unknown host in %s",
__func__);
found_adapters--;
/* if this was the last adapter, clean up the rest of the driver */
if (found_adapters == 0)
esas2r_cleanup(NULL);
}
static int __init esas2r_init(void)
{
int i;
esas2r_log(ESAS2R_LOG_INFO, "%s called", __func__);
/* verify valid parameters */
if (can_queue < 1) {
esas2r_log(ESAS2R_LOG_WARN,
"warning: can_queue must be at least 1, value "
"forced.");
can_queue = 1;
} else if (can_queue > 2048) {
esas2r_log(ESAS2R_LOG_WARN,
"warning: can_queue must be no larger than 2048, "
"value forced.");
can_queue = 2048;
}
if (cmd_per_lun < 1) {
esas2r_log(ESAS2R_LOG_WARN,
"warning: cmd_per_lun must be at least 1, value "
"forced.");
cmd_per_lun = 1;
} else if (cmd_per_lun > 2048) {
esas2r_log(ESAS2R_LOG_WARN,
"warning: cmd_per_lun must be no larger than "
"2048, value forced.");
cmd_per_lun = 2048;
}
if (sg_tablesize < 32) {
esas2r_log(ESAS2R_LOG_WARN,
"warning: sg_tablesize must be at least 32, "
"value forced.");
sg_tablesize = 32;
}
if (esas2r_max_sectors < 1) {
esas2r_log(ESAS2R_LOG_WARN,
"warning: esas2r_max_sectors must be at least "
"1, value forced.");
esas2r_max_sectors = 1;
} else if (esas2r_max_sectors > 0xffff) {
esas2r_log(ESAS2R_LOG_WARN,
"warning: esas2r_max_sectors must be no larger "
"than 0xffff, value forced.");
esas2r_max_sectors = 0xffff;
}
sgl_page_size &= ~(ESAS2R_SGL_ALIGN - 1);
if (sgl_page_size < SGL_PG_SZ_MIN)
sgl_page_size = SGL_PG_SZ_MIN;
else if (sgl_page_size > SGL_PG_SZ_MAX)
sgl_page_size = SGL_PG_SZ_MAX;
if (num_sg_lists < NUM_SGL_MIN)
num_sg_lists = NUM_SGL_MIN;
else if (num_sg_lists > NUM_SGL_MAX)
num_sg_lists = NUM_SGL_MAX;
if (num_requests < NUM_REQ_MIN)
num_requests = NUM_REQ_MIN;
else if (num_requests > NUM_REQ_MAX)
num_requests = NUM_REQ_MAX;
if (num_ae_requests < NUM_AE_MIN)
num_ae_requests = NUM_AE_MIN;
else if (num_ae_requests > NUM_AE_MAX)
num_ae_requests = NUM_AE_MAX;
/* set up other globals */
for (i = 0; i < MAX_ADAPTERS; i++)
esas2r_adapters[i] = NULL;
/* initialize */
driver_template.module = THIS_MODULE;
if (pci_register_driver(&esas2r_pci_driver) != 0)
esas2r_log(ESAS2R_LOG_CRIT, "pci_register_driver FAILED");
else
esas2r_log(ESAS2R_LOG_INFO, "pci_register_driver() OK");
if (!found_adapters) {
pci_unregister_driver(&esas2r_pci_driver);
esas2r_cleanup(NULL);
esas2r_log(ESAS2R_LOG_CRIT,
"driver will not be loaded because no ATTO "
"%s devices were found",
ESAS2R_DRVR_NAME);
return -ENODEV;
} else {
esas2r_log(ESAS2R_LOG_INFO, "found %d adapters",
found_adapters);
}
return 0;
}
/* Handle ioctl calls to "/proc/scsi/esas2r/ATTOnode" */
static const struct file_operations esas2r_proc_fops = {
.compat_ioctl = esas2r_proc_ioctl,
.unlocked_ioctl = esas2r_proc_ioctl,
};
static struct Scsi_Host *esas2r_proc_host;
static int esas2r_proc_major;
long esas2r_proc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
{
return esas2r_ioctl_handler(esas2r_proc_host->hostdata,
(int)cmd, (void __user *)arg);
}
static void __exit esas2r_exit(void)
{
esas2r_log(ESAS2R_LOG_INFO, "%s called", __func__);
if (esas2r_proc_major > 0) {
esas2r_log(ESAS2R_LOG_INFO, "unregister proc");
remove_proc_entry(ATTONODE_NAME,
esas2r_proc_host->hostt->proc_dir);
unregister_chrdev(esas2r_proc_major, ESAS2R_DRVR_NAME);
esas2r_proc_major = 0;
}
esas2r_log(ESAS2R_LOG_INFO, "pci_unregister_driver() called");
pci_unregister_driver(&esas2r_pci_driver);
}
int esas2r_show_info(struct seq_file *m, struct Scsi_Host *sh)
{
struct esas2r_adapter *a = (struct esas2r_adapter *)sh->hostdata;
struct esas2r_target *t;
int dev_count = 0;
esas2r_log(ESAS2R_LOG_DEBG, "esas2r_show_info (%p,%d)", m, sh->host_no);
seq_printf(m, ESAS2R_LONGNAME "\n"
"Driver version: "ESAS2R_VERSION_STR "\n"
"Flash version: %s\n"
"Firmware version: %s\n"
"Copyright "ESAS2R_COPYRIGHT_YEARS "\n"
"http://www.attotech.com\n"
"\n",
a->flash_rev,
a->fw_rev[0] ? a->fw_rev : "(none)");
seq_printf(m, "Adapter information:\n"
"--------------------\n"
"Model: %s\n"
"SAS address: %02X%02X%02X%02X:%02X%02X%02X%02X\n",
esas2r_get_model_name(a),
a->nvram->sas_addr[0],
a->nvram->sas_addr[1],
a->nvram->sas_addr[2],
a->nvram->sas_addr[3],
a->nvram->sas_addr[4],
a->nvram->sas_addr[5],
a->nvram->sas_addr[6],
a->nvram->sas_addr[7]);
seq_puts(m, "\n"
"Discovered devices:\n"
"\n"
" # Target ID\n"
"---------------\n");
for (t = a->targetdb; t < a->targetdb_end; t++)
if (t->buffered_target_state == TS_PRESENT) {
seq_printf(m, " %3d %3d\n",
++dev_count,
(u16)(uintptr_t)(t - a->targetdb));
}
if (dev_count == 0)
seq_puts(m, "none\n");
seq_puts(m, "\n");
return 0;
}
int esas2r_release(struct Scsi_Host *sh)
{
esas2r_log_dev(ESAS2R_LOG_INFO, &(sh->shost_gendev),
"esas2r_release() called");
esas2r_cleanup(sh);
if (sh->irq)
free_irq(sh->irq, NULL);
scsi_unregister(sh);
return 0;
}
const char *esas2r_info(struct Scsi_Host *sh)
{
struct esas2r_adapter *a = (struct esas2r_adapter *)sh->hostdata;
static char esas2r_info_str[512];
esas2r_log_dev(ESAS2R_LOG_INFO, &(sh->shost_gendev),
"esas2r_info() called");
/*
* if we haven't done so already, register as a char driver
* and stick a node under "/proc/scsi/esas2r/ATTOnode"
*/
if (esas2r_proc_major <= 0) {
esas2r_proc_host = sh;
esas2r_proc_major = register_chrdev(0, ESAS2R_DRVR_NAME,
&esas2r_proc_fops);
esas2r_log_dev(ESAS2R_LOG_DEBG, &(sh->shost_gendev),
"register_chrdev (major %d)",
esas2r_proc_major);
if (esas2r_proc_major > 0) {
struct proc_dir_entry *pde;
pde = proc_create(ATTONODE_NAME, 0,
sh->hostt->proc_dir,
&esas2r_proc_fops);
if (!pde) {
esas2r_log_dev(ESAS2R_LOG_WARN,
&(sh->shost_gendev),
"failed to create_proc_entry");
esas2r_proc_major = -1;
}
}
}
sprintf(esas2r_info_str,
ESAS2R_LONGNAME " (bus 0x%02X, device 0x%02X, IRQ 0x%02X)"
" driver version: "ESAS2R_VERSION_STR " firmware version: "
"%s\n",
a->pcid->bus->number, a->pcid->devfn, a->pcid->irq,
a->fw_rev[0] ? a->fw_rev : "(none)");
return esas2r_info_str;
}
/* Callback for building a request scatter/gather list */
static u32 get_physaddr_from_sgc(struct esas2r_sg_context *sgc, u64 *addr)
{
u32 len;
if (likely(sgc->cur_offset == sgc->exp_offset)) {
/*
* the normal case: caller used all bytes from previous call, so
* expected offset is the same as the current offset.
*/
if (sgc->sgel_count < sgc->num_sgel) {
/* retrieve next segment, except for first time */
if (sgc->exp_offset) {
/* advance current segment */
sgc->cur_sgel = sg_next(sgc->cur_sgel);
++(sgc->sgel_count);
}
len = sg_dma_len(sgc->cur_sgel);
(*addr) = sg_dma_address(sgc->cur_sgel);
/* save the total # bytes returned to caller so far */
sgc->exp_offset += len;
} else {
len = 0;
}
} else if (sgc->cur_offset < sgc->exp_offset) {
/*
* caller did not use all bytes from previous call. need to
* compute the address based on current segment.
*/
len = sg_dma_len(sgc->cur_sgel);
(*addr) = sg_dma_address(sgc->cur_sgel);
sgc->exp_offset -= len;
/* calculate PA based on prev segment address and offsets */
*addr = *addr +
(sgc->cur_offset - sgc->exp_offset);
sgc->exp_offset += len;
/* re-calculate length based on offset */
len = lower_32_bits(
sgc->exp_offset - sgc->cur_offset);
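/*
 * Worked example (hypothetical numbers): if the previous call
 * returned a 4096-byte segment at bus address 0x1000 but the caller
 * consumed only 1024 bytes, cur_offset now trails exp_offset by
 * 3072; the arithmetic above yields *addr == 0x1400 and len == 3072.
 */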
} else { /* if ( sgc->cur_offset > sgc->exp_offset ) */
/*
* we don't expect the caller to skip ahead.
* cur_offset will never exceed the len we return
*/
len = 0;
}
return len;
}
int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
struct esas2r_adapter *a =
(struct esas2r_adapter *)cmd->device->host->hostdata;
struct esas2r_request *rq;
struct esas2r_sg_context sgc;
unsigned bufflen;
/* Assume success, if it fails we will fix the result later. */
cmd->result = DID_OK << 16;
if (unlikely(a->flags & AF_DEGRADED_MODE)) {
cmd->result = DID_NO_CONNECT << 16;
cmd->scsi_done(cmd);
return 0;
}
rq = esas2r_alloc_request(a);
if (unlikely(rq == NULL)) {
esas2r_debug("esas2r_alloc_request failed");
return SCSI_MLQUEUE_HOST_BUSY;
}
rq->cmd = cmd;
bufflen = scsi_bufflen(cmd);
if (likely(bufflen != 0)) {
if (cmd->sc_data_direction == DMA_TO_DEVICE)
rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_WRD);
else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_RDD);
}
memcpy(rq->vrq->scsi.cdb, cmd->cmnd, cmd->cmd_len);
rq->vrq->scsi.length = cpu_to_le32(bufflen);
rq->target_id = cmd->device->id;
rq->vrq->scsi.flags |= cpu_to_le32(cmd->device->lun);
rq->sense_buf = cmd->sense_buffer;
rq->sense_len = SCSI_SENSE_BUFFERSIZE;
esas2r_sgc_init(&sgc, a, rq, NULL);
sgc.length = bufflen;
sgc.cur_offset = NULL;
sgc.cur_sgel = scsi_sglist(cmd);
sgc.exp_offset = NULL;
sgc.num_sgel = scsi_dma_map(cmd);
sgc.sgel_count = 0;
if (unlikely(sgc.num_sgel < 0)) {
esas2r_free_request(a, rq);
return SCSI_MLQUEUE_HOST_BUSY;
}
sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_from_sgc;
if (unlikely(!esas2r_build_sg_list(a, rq, &sgc))) {
scsi_dma_unmap(cmd);
esas2r_free_request(a, rq);
return SCSI_MLQUEUE_HOST_BUSY;
}
esas2r_debug("start request %p to %d:%d\n", rq, (int)cmd->device->id,
(int)cmd->device->lun);
esas2r_start_request(a, rq);
return 0;
}
static void complete_task_management_request(struct esas2r_adapter *a,
struct esas2r_request *rq)
{
(*rq->task_management_status_ptr) = rq->req_stat;
esas2r_free_request(a, rq);
}
/**
 * Searches the specified queue for the SCSI command to abort.
 *
 * @param [in] a the adapter containing the queue
 * @param [in,out] abort_request set to a newly allocated abort request
 *                 if the command is active on the firmware
 * @param [in] cmd the SCSI command to abort
 * @param [in] queue the queue to search (defer or active list)
 *
 * @return 0 on failure, 1 if command was not found, 2 if command was found
 */
static int esas2r_check_active_queue(struct esas2r_adapter *a,
struct esas2r_request **abort_request,
struct scsi_cmnd *cmd,
struct list_head *queue)
{
bool found = false;
struct esas2r_request *ar = *abort_request;
struct esas2r_request *rq;
struct list_head *element, *next;
list_for_each_safe(element, next, queue) {
rq = list_entry(element, struct esas2r_request, req_list);
if (rq->cmd == cmd) {
/* Found the request. See what to do with it. */
if (queue == &a->active_list) {
/*
* We are searching the active queue, which
* means that we need to send an abort request
* to the firmware.
*/
ar = esas2r_alloc_request(a);
if (ar == NULL) {
esas2r_log_dev(ESAS2R_LOG_WARN,
&(a->host->shost_gendev),
"unable to allocate an abort request for cmd %p",
cmd);
return 0; /* Failure */
}
/*
* Task management request must be formatted
* with a lock held.
*/
ar->sense_len = 0;
ar->vrq->scsi.length = 0;
ar->target_id = rq->target_id;
ar->vrq->scsi.flags |= cpu_to_le32(
(u8)le32_to_cpu(rq->vrq->scsi.flags));
memset(ar->vrq->scsi.cdb, 0,
sizeof(ar->vrq->scsi.cdb));
ar->vrq->scsi.flags |= cpu_to_le32(
FCP_CMND_TRM);
ar->vrq->scsi.u.abort_handle =
rq->vrq->scsi.handle;
} else {
/*
* The request is pending but not active on
* the firmware. Just free it now and we'll
* report the successful abort below.
*/
list_del_init(&rq->req_list);
esas2r_free_request(a, rq);
}
found = true;
break;
}
}
if (!found)
return 1; /* Not found */
/* hand any newly allocated abort request back to the caller */
if (ar)
*abort_request = ar;
return 2; /* found */
}
int esas2r_eh_abort(struct scsi_cmnd *cmd)
{
struct esas2r_adapter *a =
(struct esas2r_adapter *)cmd->device->host->hostdata;
struct esas2r_request *abort_request = NULL;
unsigned long flags;
struct list_head *queue;
int result;
esas2r_log(ESAS2R_LOG_INFO, "eh_abort (%p)", cmd);
if (a->flags & AF_DEGRADED_MODE) {
cmd->result = DID_ABORT << 16;
scsi_set_resid(cmd, 0);
cmd->scsi_done(cmd);
return 0;
}
spin_lock_irqsave(&a->queue_lock, flags);
/*
* Run through the defer and active queues looking for the request
* to abort.
*/
queue = &a->defer_list;
check_active_queue:
result = esas2r_check_active_queue(a, &abort_request, cmd, queue);
if (!result) {
spin_unlock_irqrestore(&a->queue_lock, flags);
return FAILED;
} else if (result == 1 && (queue == &a->defer_list)) {
queue = &a->active_list;
goto check_active_queue;
}
spin_unlock_irqrestore(&a->queue_lock, flags);
if (abort_request) {
u8 task_management_status = RS_PENDING;
/*
* the request is already active, so we need to tell
* the firmware to abort it and wait for the response.
*/
abort_request->comp_cb = complete_task_management_request;
abort_request->task_management_status_ptr =
&task_management_status;
esas2r_start_request(a, abort_request);
if (atomic_read(&a->disable_cnt) == 0)
esas2r_do_deferred_processes(a);
while (task_management_status == RS_PENDING)
msleep(10);
/*
* Once we get here, the original request will have been
* completed by the firmware and the abort request will have
* been cleaned up. we're done!
*/
return SUCCESS;
}
/*
* If we get here, either we found the inactive request and
* freed it, or we didn't find it at all. Either way, success!
*/
cmd->result = DID_ABORT << 16;
scsi_set_resid(cmd, 0);
cmd->scsi_done(cmd);
return SUCCESS;
}
static int esas2r_host_bus_reset(struct scsi_cmnd *cmd, bool host_reset)
{
struct esas2r_adapter *a =
(struct esas2r_adapter *)cmd->device->host->hostdata;
if (a->flags & AF_DEGRADED_MODE)
return FAILED;
if (host_reset)
esas2r_reset_adapter(a);
else
esas2r_reset_bus(a);
/* above call sets the AF_OS_RESET flag. wait for it to clear. */
while (a->flags & AF_OS_RESET) {
msleep(10);
if (a->flags & AF_DEGRADED_MODE)
return FAILED;
}
if (a->flags & AF_DEGRADED_MODE)
return FAILED;
return SUCCESS;
}
int esas2r_host_reset(struct scsi_cmnd *cmd)
{
esas2r_log(ESAS2R_LOG_INFO, "host_reset (%p)", cmd);
return esas2r_host_bus_reset(cmd, true);
}
int esas2r_bus_reset(struct scsi_cmnd *cmd)
{
esas2r_log(ESAS2R_LOG_INFO, "bus_reset (%p)", cmd);
return esas2r_host_bus_reset(cmd, false);
}
static int esas2r_dev_targ_reset(struct scsi_cmnd *cmd, bool target_reset)
{
struct esas2r_adapter *a =
(struct esas2r_adapter *)cmd->device->host->hostdata;
struct esas2r_request *rq;
u8 task_management_status = RS_PENDING;
bool completed;
if (a->flags & AF_DEGRADED_MODE)
return FAILED;
retry:
rq = esas2r_alloc_request(a);
if (rq == NULL) {
if (target_reset) {
esas2r_log(ESAS2R_LOG_CRIT,
"unable to allocate a request for a "
"target reset (%d)!",
cmd->device->id);
} else {
esas2r_log(ESAS2R_LOG_CRIT,
"unable to allocate a request for a "
"device reset (%d:%d)!",
cmd->device->id,
cmd->device->lun);
}
return FAILED;
}
rq->target_id = cmd->device->id;
rq->vrq->scsi.flags |= cpu_to_le32(cmd->device->lun);
rq->req_stat = RS_PENDING;
rq->comp_cb = complete_task_management_request;
rq->task_management_status_ptr = &task_management_status;
if (target_reset) {
esas2r_debug("issuing target reset (%p) to id %d", rq,
cmd->device->id);
completed = esas2r_send_task_mgmt(a, rq, 0x20);
} else {
esas2r_debug("issuing device reset (%p) to id %d lun %d", rq,
cmd->device->id, cmd->device->lun);
completed = esas2r_send_task_mgmt(a, rq, 0x10);
}
if (completed) {
/* Task management cmd completed right away, need to free it. */
esas2r_free_request(a, rq);
} else {
/*
* Wait for firmware to complete the request. Completion
* callback will free it.
*/
while (task_management_status == RS_PENDING)
msleep(10);
}
if (a->flags & AF_DEGRADED_MODE)
return FAILED;
if (task_management_status == RS_BUSY) {
/*
* Busy, probably because we are flashing. Wait a bit and
* try again.
*/
msleep(100);
goto retry;
}
return SUCCESS;
}
int esas2r_device_reset(struct scsi_cmnd *cmd)
{
esas2r_log(ESAS2R_LOG_INFO, "device_reset (%p)", cmd);
return esas2r_dev_targ_reset(cmd, false);
}
int esas2r_target_reset(struct scsi_cmnd *cmd)
{
esas2r_log(ESAS2R_LOG_INFO, "target_reset (%p)", cmd);
return esas2r_dev_targ_reset(cmd, true);
}
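/* Adjust a device's queue depth, preserving its current tag type. */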
int esas2r_change_queue_depth(struct scsi_device *dev, int depth, int reason)
{
esas2r_log(ESAS2R_LOG_INFO, "change_queue_depth %p, %d", dev, depth);
scsi_adjust_queue_depth(dev, scsi_get_tag_type(dev), depth);
return dev->queue_depth;
}
int esas2r_change_queue_type(struct scsi_device *dev, int type)
{
esas2r_log(ESAS2R_LOG_INFO, "change_queue_type %p, %d", dev, type);
if (dev->tagged_supported) {
scsi_set_tag_type(dev, type);
if (type)
scsi_activate_tcq(dev, dev->queue_depth);
else
scsi_deactivate_tcq(dev, dev->queue_depth);
} else {
type = 0;
}
return type;
}
int esas2r_slave_alloc(struct scsi_device *dev)
{
return 0;
}
int esas2r_slave_configure(struct scsi_device *dev)
{
esas2r_log_dev(ESAS2R_LOG_INFO, &(dev->sdev_gendev),
"esas2r_slave_configure()");
if (dev->tagged_supported) {
scsi_set_tag_type(dev, MSG_SIMPLE_TAG);
scsi_activate_tcq(dev, cmd_per_lun);
} else {
scsi_set_tag_type(dev, 0);
scsi_deactivate_tcq(dev, cmd_per_lun);
}
return 0;
}
void esas2r_slave_destroy(struct scsi_device *dev)
{
esas2r_log_dev(ESAS2R_LOG_INFO, &(dev->sdev_gendev),
"esas2r_slave_destroy()");
}
void esas2r_log_request_failure(struct esas2r_adapter *a,
struct esas2r_request *rq)
{
u8 reqstatus = rq->req_stat;
if (reqstatus == RS_SUCCESS)
return;
if (rq->vrq->scsi.function == VDA_FUNC_SCSI) {
if (reqstatus == RS_SCSI_ERROR) {
			/* Need sense key, ASC and ASCQ (bytes 2, 12 and 13). */
			if (rq->func_rsp.scsi_rsp.sense_len >= 14) {
esas2r_log(ESAS2R_LOG_WARN,
"request failure - SCSI error %x ASC:%x ASCQ:%x CDB:%x",
rq->sense_buf[2], rq->sense_buf[12],
rq->sense_buf[13],
rq->vrq->scsi.cdb[0]);
} else {
esas2r_log(ESAS2R_LOG_WARN,
"request failure - SCSI error CDB:%x\n",
rq->vrq->scsi.cdb[0]);
}
} else if ((rq->vrq->scsi.cdb[0] != INQUIRY
&& rq->vrq->scsi.cdb[0] != REPORT_LUNS)
|| (reqstatus != RS_SEL
&& reqstatus != RS_SEL2)) {
if ((reqstatus == RS_UNDERRUN) &&
(rq->vrq->scsi.cdb[0] == INQUIRY)) {
/* Don't log inquiry underruns */
} else {
esas2r_log(ESAS2R_LOG_WARN,
"request failure - cdb:%x reqstatus:%d target:%d",
rq->vrq->scsi.cdb[0], reqstatus,
rq->target_id);
}
}
}
}
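/*
 * Poll a request to completion, servicing adapter interrupts by hand.
 * If the request does not finish within its timeout (default 5000 ms),
 * it is marked RS_TIMEOUT and a local adapter reset is performed.
 */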
void esas2r_wait_request(struct esas2r_adapter *a, struct esas2r_request *rq)
{
u32 starttime;
u32 timeout;
starttime = jiffies_to_msecs(jiffies);
timeout = rq->timeout ? rq->timeout : 5000;
while (true) {
esas2r_polled_interrupt(a);
if (rq->req_stat != RS_STARTED)
break;
schedule_timeout_interruptible(msecs_to_jiffies(100));
if ((jiffies_to_msecs(jiffies) - starttime) > timeout) {
esas2r_hdebug("request TMO");
esas2r_bugon();
rq->req_stat = RS_TIMEOUT;
esas2r_local_reset_adapter(a);
return;
}
}
}
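/*
 * Map a 32-bit bus address into the PCI data window, reprogramming the
 * window base register only when the address falls outside the window
 * that is currently mapped.  Returns the offset within the window.
 */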
u32 esas2r_map_data_window(struct esas2r_adapter *a, u32 addr_lo)
{
u32 offset = addr_lo & (MW_DATA_WINDOW_SIZE - 1);
u32 base = addr_lo & -(signed int)MW_DATA_WINDOW_SIZE;
if (a->window_base != base) {
esas2r_write_register_dword(a, MVR_PCI_WIN1_REMAP,
base | MVRPW1R_ENABLE);
esas2r_flush_register_dword(a, MVR_PCI_WIN1_REMAP);
a->window_base = base;
}
return offset;
}
/* Read a block of data from chip memory */
bool esas2r_read_mem_block(struct esas2r_adapter *a,
void *to,
u32 from,
u32 size)
{
u8 *end = (u8 *)to;
while (size) {
u32 len;
u32 offset;
u32 iatvr;
iatvr = (from & -(signed int)MW_DATA_WINDOW_SIZE);
esas2r_map_data_window(a, iatvr);
offset = from & (MW_DATA_WINDOW_SIZE - 1);
len = size;
if (len > MW_DATA_WINDOW_SIZE - offset)
len = MW_DATA_WINDOW_SIZE - offset;
from += len;
size -= len;
while (len--) {
*end++ = esas2r_read_data_byte(a, offset);
offset++;
}
}
return true;
}
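/*
 * The "nuxi" routines byte-swap firmware structures in place, converting
 * little-endian wire fields to CPU order.  Because the swap is symmetric,
 * the same routine serves both directions (before sending a request and
 * after receiving its response).
 */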
void esas2r_nuxi_mgt_data(u8 function, void *data)
{
struct atto_vda_grp_info *g;
struct atto_vda_devinfo *d;
struct atto_vdapart_info *p;
struct atto_vda_dh_info *h;
struct atto_vda_metrics_info *m;
struct atto_vda_schedule_info *s;
struct atto_vda_buzzer_info *b;
u8 i;
switch (function) {
case VDAMGT_BUZZER_INFO:
case VDAMGT_BUZZER_SET:
b = (struct atto_vda_buzzer_info *)data;
b->duration = le32_to_cpu(b->duration);
break;
case VDAMGT_SCHEDULE_INFO:
case VDAMGT_SCHEDULE_EVENT:
s = (struct atto_vda_schedule_info *)data;
s->id = le32_to_cpu(s->id);
break;
case VDAMGT_DEV_INFO:
case VDAMGT_DEV_CLEAN:
case VDAMGT_DEV_PT_INFO:
case VDAMGT_DEV_FEATURES:
case VDAMGT_DEV_PT_FEATURES:
case VDAMGT_DEV_OPERATION:
d = (struct atto_vda_devinfo *)data;
d->capacity = le64_to_cpu(d->capacity);
d->block_size = le32_to_cpu(d->block_size);
d->ses_dev_index = le16_to_cpu(d->ses_dev_index);
d->target_id = le16_to_cpu(d->target_id);
d->lun = le16_to_cpu(d->lun);
d->features = le16_to_cpu(d->features);
break;
case VDAMGT_GRP_INFO:
case VDAMGT_GRP_CREATE:
case VDAMGT_GRP_DELETE:
case VDAMGT_ADD_STORAGE:
case VDAMGT_MEMBER_ADD:
case VDAMGT_GRP_COMMIT:
case VDAMGT_GRP_REBUILD:
case VDAMGT_GRP_COMMIT_INIT:
case VDAMGT_QUICK_RAID:
case VDAMGT_GRP_FEATURES:
case VDAMGT_GRP_COMMIT_INIT_AUTOMAP:
case VDAMGT_QUICK_RAID_INIT_AUTOMAP:
case VDAMGT_SPARE_LIST:
case VDAMGT_SPARE_ADD:
case VDAMGT_SPARE_REMOVE:
case VDAMGT_LOCAL_SPARE_ADD:
case VDAMGT_GRP_OPERATION:
g = (struct atto_vda_grp_info *)data;
g->capacity = le64_to_cpu(g->capacity);
g->block_size = le32_to_cpu(g->block_size);
g->interleave = le32_to_cpu(g->interleave);
g->features = le16_to_cpu(g->features);
for (i = 0; i < 32; i++)
g->members[i] = le16_to_cpu(g->members[i]);
break;
case VDAMGT_PART_INFO:
case VDAMGT_PART_MAP:
case VDAMGT_PART_UNMAP:
case VDAMGT_PART_AUTOMAP:
case VDAMGT_PART_SPLIT:
case VDAMGT_PART_MERGE:
p = (struct atto_vdapart_info *)data;
p->part_size = le64_to_cpu(p->part_size);
p->start_lba = le32_to_cpu(p->start_lba);
p->block_size = le32_to_cpu(p->block_size);
p->target_id = le16_to_cpu(p->target_id);
break;
case VDAMGT_DEV_HEALTH_REQ:
h = (struct atto_vda_dh_info *)data;
h->med_defect_cnt = le32_to_cpu(h->med_defect_cnt);
h->info_exc_cnt = le32_to_cpu(h->info_exc_cnt);
break;
case VDAMGT_DEV_METRICS:
m = (struct atto_vda_metrics_info *)data;
for (i = 0; i < 32; i++)
m->dev_indexes[i] = le16_to_cpu(m->dev_indexes[i]);
break;
default:
break;
}
}
void esas2r_nuxi_cfg_data(u8 function, void *data)
{
struct atto_vda_cfg_init *ci;
switch (function) {
case VDA_CFG_INIT:
case VDA_CFG_GET_INIT:
case VDA_CFG_GET_INIT2:
ci = (struct atto_vda_cfg_init *)data;
ci->date_time.year = le16_to_cpu(ci->date_time.year);
ci->sgl_page_size = le32_to_cpu(ci->sgl_page_size);
ci->vda_version = le32_to_cpu(ci->vda_version);
ci->epoch_time = le32_to_cpu(ci->epoch_time);
ci->ioctl_tunnel = le32_to_cpu(ci->ioctl_tunnel);
ci->num_targets_backend = le32_to_cpu(ci->num_targets_backend);
break;
default:
break;
}
}
void esas2r_nuxi_ae_data(union atto_vda_ae *ae)
{
struct atto_vda_ae_raid *r = &ae->raid;
struct atto_vda_ae_lu *l = &ae->lu;
switch (ae->hdr.bytype) {
case VDAAE_HDR_TYPE_RAID:
r->dwflags = le32_to_cpu(r->dwflags);
break;
case VDAAE_HDR_TYPE_LU:
l->dwevent = le32_to_cpu(l->dwevent);
l->wphys_target_id = le16_to_cpu(l->wphys_target_id);
l->id.tgtlun.wtarget_id = le16_to_cpu(l->id.tgtlun.wtarget_id);
if (l->hdr.bylength >= offsetof(struct atto_vda_ae_lu, id)
+ sizeof(struct atto_vda_ae_lu_tgt_lun_raid)) {
l->id.tgtlun_raid.dwinterleave
= le32_to_cpu(l->id.tgtlun_raid.dwinterleave);
l->id.tgtlun_raid.dwblock_size
= le32_to_cpu(l->id.tgtlun_raid.dwblock_size);
}
break;
case VDAAE_HDR_TYPE_DISK:
default:
break;
}
}
void esas2r_free_request(struct esas2r_adapter *a, struct esas2r_request *rq)
{
unsigned long flags;
esas2r_rq_destroy_request(rq, a);
spin_lock_irqsave(&a->request_lock, flags);
list_add(&rq->comp_list, &a->avail_request);
spin_unlock_irqrestore(&a->request_lock, flags);
}
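/* Pull a request off the adapter's free list, or return NULL if empty. */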
struct esas2r_request *esas2r_alloc_request(struct esas2r_adapter *a)
{
struct esas2r_request *rq;
unsigned long flags;
spin_lock_irqsave(&a->request_lock, flags);
if (unlikely(list_empty(&a->avail_request))) {
spin_unlock_irqrestore(&a->request_lock, flags);
return NULL;
}
rq = list_first_entry(&a->avail_request, struct esas2r_request,
comp_list);
list_del(&rq->comp_list);
spin_unlock_irqrestore(&a->request_lock, flags);
esas2r_rq_init_request(rq, a);
return rq;
}
void esas2r_complete_request_cb(struct esas2r_adapter *a,
struct esas2r_request *rq)
{
esas2r_debug("completing request %p\n", rq);
scsi_dma_unmap(rq->cmd);
if (unlikely(rq->req_stat != RS_SUCCESS)) {
esas2r_debug("[%x STATUS %x:%x (%x)]", rq->target_id,
rq->req_stat,
rq->func_rsp.scsi_rsp.scsi_stat,
rq->cmd);
rq->cmd->result =
((esas2r_req_status_to_error(rq->req_stat) << 16)
| (rq->func_rsp.scsi_rsp.scsi_stat & STATUS_MASK));
if (rq->req_stat == RS_UNDERRUN)
scsi_set_resid(rq->cmd,
le32_to_cpu(rq->func_rsp.scsi_rsp.
residual_length));
else
scsi_set_resid(rq->cmd, 0);
}
rq->cmd->scsi_done(rq->cmd);
esas2r_free_request(a, rq);
}
/* Tasklet to handle deferred adapter work outside of hard interrupt context. */
void esas2r_adapter_tasklet(unsigned long context)
{
struct esas2r_adapter *a = (struct esas2r_adapter *)context;
if (unlikely(a->flags2 & AF2_TIMER_TICK)) {
esas2r_lock_clear_flags(&a->flags2, AF2_TIMER_TICK);
esas2r_timer_tick(a);
}
if (likely(a->flags2 & AF2_INT_PENDING)) {
esas2r_lock_clear_flags(&a->flags2, AF2_INT_PENDING);
esas2r_adapter_interrupt(a);
}
if (esas2r_is_tasklet_pending(a))
esas2r_do_tasklet_tasks(a);
if (esas2r_is_tasklet_pending(a)
|| (a->flags2 & AF2_INT_PENDING)
|| (a->flags2 & AF2_TIMER_TICK)) {
esas2r_lock_clear_flags(&a->flags, AF_TASKLET_SCHEDULED);
esas2r_schedule_tasklet(a);
} else {
esas2r_lock_clear_flags(&a->flags, AF_TASKLET_SCHEDULED);
}
}
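/*
 * A free-running 100 ms timer drives periodic work: each expiry sets
 * AF2_TIMER_TICK, schedules the tasklet and re-arms itself.
 */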
static void esas2r_timer_callback(unsigned long context);
void esas2r_kickoff_timer(struct esas2r_adapter *a)
{
init_timer(&a->timer);
a->timer.function = esas2r_timer_callback;
a->timer.data = (unsigned long)a;
a->timer.expires = jiffies +
msecs_to_jiffies(100);
add_timer(&a->timer);
}
static void esas2r_timer_callback(unsigned long context)
{
struct esas2r_adapter *a = (struct esas2r_adapter *)context;
esas2r_lock_set_flags(&a->flags2, AF2_TIMER_TICK);
esas2r_schedule_tasklet(a);
esas2r_kickoff_timer(a);
}
/*
* Firmware events need to be handled outside of interrupt context
* so we schedule a delayed_work to handle them.
*/
static void
esas2r_free_fw_event(struct esas2r_fw_event_work *fw_event)
{
unsigned long flags;
struct esas2r_adapter *a = fw_event->a;
spin_lock_irqsave(&a->fw_event_lock, flags);
list_del(&fw_event->list);
kfree(fw_event);
spin_unlock_irqrestore(&a->fw_event_lock, flags);
}
void
esas2r_fw_event_off(struct esas2r_adapter *a)
{
unsigned long flags;
spin_lock_irqsave(&a->fw_event_lock, flags);
a->fw_events_off = 1;
spin_unlock_irqrestore(&a->fw_event_lock, flags);
}
void
esas2r_fw_event_on(struct esas2r_adapter *a)
{
unsigned long flags;
spin_lock_irqsave(&a->fw_event_lock, flags);
a->fw_events_off = 0;
spin_unlock_irqrestore(&a->fw_event_lock, flags);
}
static void esas2r_add_device(struct esas2r_adapter *a, u16 target_id)
{
int ret;
struct scsi_device *scsi_dev;
scsi_dev = scsi_device_lookup(a->host, 0, target_id, 0);
	if (scsi_dev) {
		esas2r_log_dev(ESAS2R_LOG_WARN, &(scsi_dev->sdev_gendev),
			       "scsi device already exists at id %d",
			       target_id);
		scsi_device_put(scsi_dev);
	} else {
		esas2r_log_dev(ESAS2R_LOG_INFO, &(a->host->shost_gendev),
			       "scsi_add_device() called for 0:%d:0",
			       target_id);
		ret = scsi_add_device(a->host, 0, target_id, 0);
		if (ret) {
			esas2r_log_dev(ESAS2R_LOG_CRIT,
				       &(a->host->shost_gendev),
				       "scsi_add_device failed with %d for id %d",
				       ret, target_id);
		}
	}
}
static void esas2r_remove_device(struct esas2r_adapter *a, u16 target_id)
{
struct scsi_device *scsi_dev;
scsi_dev = scsi_device_lookup(a->host, 0, target_id, 0);
	if (scsi_dev) {
		scsi_device_set_state(scsi_dev, SDEV_OFFLINE);
		esas2r_log_dev(ESAS2R_LOG_INFO, &(scsi_dev->sdev_gendev),
			       "scsi_remove_device() called for 0:%d:0",
			       target_id);
		scsi_remove_device(scsi_dev);
		esas2r_log_dev(ESAS2R_LOG_INFO, &(scsi_dev->sdev_gendev),
			       "scsi_device_put() called");
		scsi_device_put(scsi_dev);
	} else {
		esas2r_log_dev(ESAS2R_LOG_WARN, &(a->host->shost_gendev),
			       "no target found at id %d", target_id);
	}
}
/*
* Sends a firmware asynchronous event to anyone who happens to be
* listening on the defined ATTO VDA event ports.
*/
static void esas2r_send_ae_event(struct esas2r_fw_event_work *fw_event)
{
struct esas2r_vda_ae *ae = (struct esas2r_vda_ae *)fw_event->data;
char *type;
switch (ae->vda_ae.hdr.bytype) {
case VDAAE_HDR_TYPE_RAID:
type = "RAID group state change";
break;
case VDAAE_HDR_TYPE_LU:
type = "Mapped destination LU change";
break;
case VDAAE_HDR_TYPE_DISK:
type = "Physical disk inventory change";
break;
case VDAAE_HDR_TYPE_RESET:
type = "Firmware reset";
break;
case VDAAE_HDR_TYPE_LOG_INFO:
type = "Event Log message (INFO level)";
break;
case VDAAE_HDR_TYPE_LOG_WARN:
type = "Event Log message (WARN level)";
break;
case VDAAE_HDR_TYPE_LOG_CRIT:
type = "Event Log message (CRIT level)";
break;
case VDAAE_HDR_TYPE_LOG_FAIL:
type = "Event Log message (FAIL level)";
break;
case VDAAE_HDR_TYPE_NVC:
type = "NVCache change";
break;
case VDAAE_HDR_TYPE_TLG_INFO:
type = "Time stamped log message (INFO level)";
break;
case VDAAE_HDR_TYPE_TLG_WARN:
type = "Time stamped log message (WARN level)";
break;
case VDAAE_HDR_TYPE_TLG_CRIT:
type = "Time stamped log message (CRIT level)";
break;
case VDAAE_HDR_TYPE_PWRMGT:
type = "Power management";
break;
case VDAAE_HDR_TYPE_MUTE:
type = "Mute button pressed";
break;
case VDAAE_HDR_TYPE_DEV:
type = "Device attribute change";
break;
default:
type = "Unknown";
break;
}
esas2r_log(ESAS2R_LOG_WARN,
"An async event of type \"%s\" was received from the firmware. The event contents are:",
type);
esas2r_log_hexdump(ESAS2R_LOG_WARN, &ae->vda_ae,
ae->vda_ae.hdr.bylength);
}
static void
esas2r_firmware_event_work(struct work_struct *work)
{
struct esas2r_fw_event_work *fw_event =
container_of(work, struct esas2r_fw_event_work, work.work);
struct esas2r_adapter *a = fw_event->a;
u16 target_id = *(u16 *)&fw_event->data[0];
if (a->fw_events_off)
goto done;
switch (fw_event->type) {
case fw_event_null:
break; /* do nothing */
case fw_event_lun_change:
esas2r_remove_device(a, target_id);
esas2r_add_device(a, target_id);
break;
case fw_event_present:
esas2r_add_device(a, target_id);
break;
case fw_event_not_present:
esas2r_remove_device(a, target_id);
break;
case fw_event_vda_ae:
esas2r_send_ae_event(fw_event);
break;
}
done:
esas2r_free_fw_event(fw_event);
}
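/*
 * Allocate (GFP_ATOMIC, since callers may hold locks) and queue a
 * firmware event work item.  VDA async events get the signature, bus
 * number and devfn recorded ahead of the copied AE payload; all other
 * event types carry their raw data.
 */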
void esas2r_queue_fw_event(struct esas2r_adapter *a,
enum fw_event_type type,
void *data,
int data_sz)
{
struct esas2r_fw_event_work *fw_event;
unsigned long flags;
fw_event = kzalloc(sizeof(struct esas2r_fw_event_work), GFP_ATOMIC);
if (!fw_event) {
esas2r_log(ESAS2R_LOG_WARN,
"esas2r_queue_fw_event failed to alloc");
return;
}
if (type == fw_event_vda_ae) {
struct esas2r_vda_ae *ae =
(struct esas2r_vda_ae *)fw_event->data;
ae->signature = ESAS2R_VDA_EVENT_SIG;
ae->bus_number = a->pcid->bus->number;
ae->devfn = a->pcid->devfn;
memcpy(&ae->vda_ae, data, sizeof(ae->vda_ae));
} else {
memcpy(fw_event->data, data, data_sz);
}
fw_event->type = type;
fw_event->a = a;
spin_lock_irqsave(&a->fw_event_lock, flags);
list_add_tail(&fw_event->list, &a->fw_event_list);
INIT_DELAYED_WORK(&fw_event->work, esas2r_firmware_event_work);
queue_delayed_work_on(
smp_processor_id(), a->fw_event_q, &fw_event->work,
msecs_to_jiffies(1));
spin_unlock_irqrestore(&a->fw_event_lock, flags);
}
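/* Queue the firmware event that corresponds to a target state change. */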
void esas2r_target_state_changed(struct esas2r_adapter *a, u16 targ_id,
u8 state)
{
if (state == TS_LUN_CHANGE)
esas2r_queue_fw_event(a, fw_event_lun_change, &targ_id,
sizeof(targ_id));
else if (state == TS_PRESENT)
esas2r_queue_fw_event(a, fw_event_present, &targ_id,
sizeof(targ_id));
else if (state == TS_NOT_PRESENT)
esas2r_queue_fw_event(a, fw_event_not_present, &targ_id,
sizeof(targ_id));
}
/* Translate status to a Linux SCSI mid-layer error code */
int esas2r_req_status_to_error(u8 req_stat)
{
switch (req_stat) {
case RS_OVERRUN:
case RS_UNDERRUN:
case RS_SUCCESS:
/*
* NOTE: SCSI mid-layer wants a good status for a SCSI error, because
* it will check the scsi_stat value in the completion anyway.
*/
case RS_SCSI_ERROR:
return DID_OK;
case RS_SEL:
case RS_SEL2:
return DID_NO_CONNECT;
case RS_RESET:
return DID_RESET;
case RS_ABORTED:
return DID_ABORT;
case RS_BUSY:
return DID_BUS_BUSY;
}
/* everything else is just an error. */
return DID_ERROR;
}
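/*
 * Illustrative usage (a sketch, not additional driver code): the host
 * byte produced here is combined with the SCSI status byte when a
 * command completes, as esas2r_complete_request_cb() does above:
 *
 *	cmd->result = (esas2r_req_status_to_error(rq->req_stat) << 16) |
 *		      (rq->func_rsp.scsi_rsp.scsi_stat & STATUS_MASK);
 */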
module_init(esas2r_init);
module_exit(esas2r_exit);
/*
* linux/drivers/scsi/esas2r/esas2r_targdb.c
* For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
*
* Copyright (c) 2001-2013 ATTO Technology, Inc.
* (mailto:linuxdrivers@attotech.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* NO WARRANTY
* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
* solely responsible for determining the appropriateness of using and
* distributing the Program and assumes all risks associated with its
* exercise of rights under this Agreement, including but not limited to
* the risks and costs of program errors, damage to or loss of data,
* programs or equipment, and unavailability or interruption of operations.
*
* DISCLAIMER OF LIABILITY
* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
* USA.
*/
#include "esas2r.h"
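/* Reset every entry in the target database to the not-present state. */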
void esas2r_targ_db_initialize(struct esas2r_adapter *a)
{
struct esas2r_target *t;
for (t = a->targetdb; t < a->targetdb_end; t++) {
memset(t, 0, sizeof(struct esas2r_target));
t->target_state = TS_NOT_PRESENT;
t->buffered_target_state = TS_NOT_PRESENT;
t->new_target_state = TS_INVALID;
}
}
void esas2r_targ_db_remove_all(struct esas2r_adapter *a, bool notify)
{
struct esas2r_target *t;
unsigned long flags;
for (t = a->targetdb; t < a->targetdb_end; t++) {
if (t->target_state != TS_PRESENT)
continue;
spin_lock_irqsave(&a->mem_lock, flags);
esas2r_targ_db_remove(a, t);
spin_unlock_irqrestore(&a->mem_lock, flags);
		if (notify) {
			esas2r_trace("remove id:%d",
				     esas2r_targ_get_id(t, a));
			esas2r_target_state_changed(a,
						    esas2r_targ_get_id(t, a),
						    TS_NOT_PRESENT);
		}
}
}
void esas2r_targ_db_report_changes(struct esas2r_adapter *a)
{
struct esas2r_target *t;
unsigned long flags;
esas2r_trace_enter();
if (a->flags & AF_DISC_PENDING) {
esas2r_trace_exit();
return;
}
for (t = a->targetdb; t < a->targetdb_end; t++) {
u8 state = TS_INVALID;
spin_lock_irqsave(&a->mem_lock, flags);
if (t->buffered_target_state != t->target_state)
state = t->buffered_target_state = t->target_state;
spin_unlock_irqrestore(&a->mem_lock, flags);
		if (state != TS_INVALID) {
			esas2r_trace("targ_db_report_changes:%d",
				     esas2r_targ_get_id(t, a));
			esas2r_trace("state:%d", state);
			esas2r_target_state_changed(a,
						    esas2r_targ_get_id(t, a),
						    state);
		}
}
esas2r_trace_exit();
}
struct esas2r_target *esas2r_targ_db_add_raid(struct esas2r_adapter *a,
					      struct esas2r_disc_context *dc)
{
struct esas2r_target *t;
esas2r_trace_enter();
if (dc->curr_virt_id >= ESAS2R_MAX_TARGETS) {
esas2r_bugon();
esas2r_trace_exit();
return NULL;
}
t = a->targetdb + dc->curr_virt_id;
if (t->target_state == TS_PRESENT) {
esas2r_trace_exit();
return NULL;
}
esas2r_hdebug("add RAID %s, T:%d", dc->raid_grp_name,
esas2r_targ_get_id(
t,
a));
if (dc->interleave == 0
|| dc->block_size == 0) {
/* these are invalid values, don't create the target entry. */
esas2r_hdebug("invalid RAID group dimensions");
esas2r_trace_exit();
return NULL;
}
t->block_size = dc->block_size;
t->inter_byte = dc->interleave;
t->inter_block = dc->interleave / dc->block_size;
t->virt_targ_id = dc->curr_virt_id;
t->phys_targ_id = ESAS2R_TARG_ID_INV;
t->flags &= ~TF_PASS_THRU;
t->flags |= TF_USED;
t->identifier_len = 0;
t->target_state = TS_PRESENT;
return t;
}
struct esas2r_target *esas2r_targ_db_add_pthru(struct esas2r_adapter *a,
struct esas2r_disc_context *dc,
u8 *ident,
u8 ident_len)
{
struct esas2r_target *t;
esas2r_trace_enter();
if (dc->curr_virt_id >= ESAS2R_MAX_TARGETS) {
esas2r_bugon();
esas2r_trace_exit();
return NULL;
}
/* see if we found this device before. */
t = esas2r_targ_db_find_by_ident(a, ident, ident_len);
if (t == NULL) {
t = a->targetdb + dc->curr_virt_id;
if (ident_len > sizeof(t->identifier)
|| t->target_state == TS_PRESENT) {
esas2r_trace_exit();
return NULL;
}
}
esas2r_hdebug("add PT; T:%d, V:%d, P:%d", esas2r_targ_get_id(t, a),
dc->curr_virt_id,
dc->curr_phys_id);
t->block_size = 0;
t->inter_byte = 0;
t->inter_block = 0;
t->virt_targ_id = dc->curr_virt_id;
t->phys_targ_id = dc->curr_phys_id;
t->identifier_len = ident_len;
memcpy(t->identifier, ident, ident_len);
t->flags |= TF_PASS_THRU | TF_USED;
t->target_state = TS_PRESENT;
return t;
}
void esas2r_targ_db_remove(struct esas2r_adapter *a, struct esas2r_target *t)
{
esas2r_trace_enter();
t->target_state = TS_NOT_PRESENT;
esas2r_trace("remove id:%d", esas2r_targ_get_id(t, a));
esas2r_trace_exit();
}
struct esas2r_target *esas2r_targ_db_find_by_sas_addr(struct esas2r_adapter *a,
u64 *sas_addr)
{
struct esas2r_target *t;
for (t = a->targetdb; t < a->targetdb_end; t++)
if (t->sas_addr == *sas_addr)
return t;
return NULL;
}
struct esas2r_target *esas2r_targ_db_find_by_ident(struct esas2r_adapter *a,
void *identifier,
u8 ident_len)
{
struct esas2r_target *t;
for (t = a->targetdb; t < a->targetdb_end; t++) {
if (ident_len == t->identifier_len
&& memcmp(&t->identifier[0], identifier,
ident_len) == 0)
return t;
}
return NULL;
}
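/*
 * Return the ID of the next present target after target_id, or
 * ESAS2R_MAX_TARGETS if no later target is present.
 */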
u16 esas2r_targ_db_find_next_present(struct esas2r_adapter *a, u16 target_id)
{
u16 id = target_id + 1;
while (id < ESAS2R_MAX_TARGETS) {
struct esas2r_target *t = a->targetdb + id;
if (t->target_state == TS_PRESENT)
break;
id++;
}
return id;
}
struct esas2r_target *esas2r_targ_db_find_by_virt_id(struct esas2r_adapter *a,
u16 virt_id)
{
struct esas2r_target *t;
for (t = a->targetdb; t < a->targetdb_end; t++) {
if (t->target_state != TS_PRESENT)
continue;
if (t->virt_targ_id == virt_id)
return t;
}
return NULL;
}
u16 esas2r_targ_db_get_tgt_cnt(struct esas2r_adapter *a)
{
u16 devcnt = 0;
struct esas2r_target *t;
unsigned long flags;
spin_lock_irqsave(&a->mem_lock, flags);
for (t = a->targetdb; t < a->targetdb_end; t++)
if (t->target_state == TS_PRESENT)
devcnt++;
spin_unlock_irqrestore(&a->mem_lock, flags);
return devcnt;
}
/*
* linux/drivers/scsi/esas2r/esas2r_vda.c
* esas2r driver VDA firmware interface functions
*
* Copyright (c) 2001-2013 ATTO Technology, Inc.
* (mailto:linuxdrivers@attotech.com)
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* NO WARRANTY
* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
* solely responsible for determining the appropriateness of using and
* distributing the Program and assumes all risks associated with its
* exercise of rights under this Agreement, including but not limited to
* the risks and costs of program errors, damage to or loss of data,
* programs or equipment, and unavailability or interruption of operations.
*
* DISCLAIMER OF LIABILITY
* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
#include "esas2r.h"
static u8 esas2r_vdaioctl_versions[] = {
ATTO_VDA_VER_UNSUPPORTED,
ATTO_VDA_FLASH_VER,
ATTO_VDA_VER_UNSUPPORTED,
ATTO_VDA_VER_UNSUPPORTED,
ATTO_VDA_CLI_VER,
ATTO_VDA_VER_UNSUPPORTED,
ATTO_VDA_CFG_VER,
ATTO_VDA_MGT_VER,
ATTO_VDA_GSV_VER
};
static void clear_vda_request(struct esas2r_request *rq);
static void esas2r_complete_vda_ioctl(struct esas2r_adapter *a,
struct esas2r_request *rq);
/* Prepare a VDA IOCTL request to be sent to the firmware. */
bool esas2r_process_vda_ioctl(struct esas2r_adapter *a,
struct atto_ioctl_vda *vi,
struct esas2r_request *rq,
struct esas2r_sg_context *sgc)
{
u32 datalen = 0;
struct atto_vda_sge *firstsg = NULL;
u8 vercnt = (u8)ARRAY_SIZE(esas2r_vdaioctl_versions);
vi->status = ATTO_STS_SUCCESS;
vi->vda_status = RS_PENDING;
if (vi->function >= vercnt) {
vi->status = ATTO_STS_INV_FUNC;
return false;
}
if (vi->version > esas2r_vdaioctl_versions[vi->function]) {
vi->status = ATTO_STS_INV_VERSION;
return false;
}
if (a->flags & AF_DEGRADED_MODE) {
vi->status = ATTO_STS_DEGRADED;
return false;
}
if (vi->function != VDA_FUNC_SCSI)
clear_vda_request(rq);
rq->vrq->scsi.function = vi->function;
rq->interrupt_cb = esas2r_complete_vda_ioctl;
rq->interrupt_cx = vi;
switch (vi->function) {
case VDA_FUNC_FLASH:
if (vi->cmd.flash.sub_func != VDA_FLASH_FREAD
&& vi->cmd.flash.sub_func != VDA_FLASH_FWRITE
&& vi->cmd.flash.sub_func != VDA_FLASH_FINFO) {
vi->status = ATTO_STS_INV_FUNC;
return false;
}
if (vi->cmd.flash.sub_func != VDA_FLASH_FINFO)
datalen = vi->data_length;
rq->vrq->flash.length = cpu_to_le32(datalen);
rq->vrq->flash.sub_func = vi->cmd.flash.sub_func;
memcpy(rq->vrq->flash.data.file.file_name,
vi->cmd.flash.data.file.file_name,
sizeof(vi->cmd.flash.data.file.file_name));
firstsg = rq->vrq->flash.data.file.sge;
break;
case VDA_FUNC_CLI:
datalen = vi->data_length;
rq->vrq->cli.cmd_rsp_len =
cpu_to_le32(vi->cmd.cli.cmd_rsp_len);
rq->vrq->cli.length = cpu_to_le32(datalen);
firstsg = rq->vrq->cli.sge;
break;
case VDA_FUNC_MGT:
{
u8 *cmdcurr_offset = sgc->cur_offset
- offsetof(struct atto_ioctl_vda, data)
+ offsetof(struct atto_ioctl_vda, cmd)
+ offsetof(struct atto_ioctl_vda_mgt_cmd,
data);
/*
* build the data payload SGL here first since
* esas2r_sgc_init() will modify the S/G list offset for the
* management SGL (which is built below where the data SGL is
* usually built).
*/
if (vi->data_length) {
u32 payldlen = 0;
if (vi->cmd.mgt.mgt_func == VDAMGT_DEV_HEALTH_REQ
|| vi->cmd.mgt.mgt_func == VDAMGT_DEV_METRICS) {
rq->vrq->mgt.payld_sglst_offset =
(u8)offsetof(struct atto_vda_mgmt_req,
payld_sge);
payldlen = vi->data_length;
datalen = vi->cmd.mgt.data_length;
} else if (vi->cmd.mgt.mgt_func == VDAMGT_DEV_INFO2
|| vi->cmd.mgt.mgt_func ==
VDAMGT_DEV_INFO2_BYADDR) {
datalen = vi->data_length;
cmdcurr_offset = sgc->cur_offset;
} else {
vi->status = ATTO_STS_INV_PARAM;
return false;
}
/* Setup the length so building the payload SGL works */
rq->vrq->mgt.length = cpu_to_le32(datalen);
if (payldlen) {
rq->vrq->mgt.payld_length =
cpu_to_le32(payldlen);
esas2r_sgc_init(sgc, a, rq,
rq->vrq->mgt.payld_sge);
sgc->length = payldlen;
if (!esas2r_build_sg_list(a, rq, sgc)) {
vi->status = ATTO_STS_OUT_OF_RSRC;
return false;
}
}
} else {
datalen = vi->cmd.mgt.data_length;
rq->vrq->mgt.length = cpu_to_le32(datalen);
}
/*
* Now that the payload SGL is built, if any, setup to build
* the management SGL.
*/
firstsg = rq->vrq->mgt.sge;
sgc->cur_offset = cmdcurr_offset;
/* Finish initializing the management request. */
rq->vrq->mgt.mgt_func = vi->cmd.mgt.mgt_func;
rq->vrq->mgt.scan_generation = vi->cmd.mgt.scan_generation;
rq->vrq->mgt.dev_index =
cpu_to_le32(vi->cmd.mgt.dev_index);
esas2r_nuxi_mgt_data(rq->vrq->mgt.mgt_func, &vi->cmd.mgt.data);
break;
}
case VDA_FUNC_CFG:
if (vi->data_length
|| vi->cmd.cfg.data_length == 0) {
vi->status = ATTO_STS_INV_PARAM;
return false;
}
if (vi->cmd.cfg.cfg_func == VDA_CFG_INIT) {
vi->status = ATTO_STS_INV_FUNC;
return false;
}
rq->vrq->cfg.sub_func = vi->cmd.cfg.cfg_func;
rq->vrq->cfg.length = cpu_to_le32(vi->cmd.cfg.data_length);
if (vi->cmd.cfg.cfg_func == VDA_CFG_GET_INIT) {
memcpy(&rq->vrq->cfg.data,
&vi->cmd.cfg.data,
vi->cmd.cfg.data_length);
esas2r_nuxi_cfg_data(rq->vrq->cfg.sub_func,
&rq->vrq->cfg.data);
} else {
vi->status = ATTO_STS_INV_FUNC;
return false;
}
break;
case VDA_FUNC_GSV:
vi->cmd.gsv.rsp_len = vercnt;
memcpy(vi->cmd.gsv.version_info, esas2r_vdaioctl_versions,
vercnt);
vi->vda_status = RS_SUCCESS;
break;
default:
vi->status = ATTO_STS_INV_FUNC;
return false;
}
if (datalen) {
esas2r_sgc_init(sgc, a, rq, firstsg);
sgc->length = datalen;
if (!esas2r_build_sg_list(a, rq, sgc)) {
vi->status = ATTO_STS_OUT_OF_RSRC;
return false;
}
}
esas2r_start_request(a, rq);
return true;
}
static void esas2r_complete_vda_ioctl(struct esas2r_adapter *a,
struct esas2r_request *rq)
{
struct atto_ioctl_vda *vi = (struct atto_ioctl_vda *)rq->interrupt_cx;
vi->vda_status = rq->req_stat;
switch (vi->function) {
case VDA_FUNC_FLASH:
if (vi->cmd.flash.sub_func == VDA_FLASH_FINFO
|| vi->cmd.flash.sub_func == VDA_FLASH_FREAD)
vi->cmd.flash.data.file.file_size =
le32_to_cpu(rq->func_rsp.flash_rsp.file_size);
break;
case VDA_FUNC_MGT:
vi->cmd.mgt.scan_generation =
rq->func_rsp.mgt_rsp.scan_generation;
vi->cmd.mgt.dev_index = le16_to_cpu(
rq->func_rsp.mgt_rsp.dev_index);
if (vi->data_length == 0)
vi->cmd.mgt.data_length =
le32_to_cpu(rq->func_rsp.mgt_rsp.length);
esas2r_nuxi_mgt_data(rq->vrq->mgt.mgt_func, &vi->cmd.mgt.data);
break;
case VDA_FUNC_CFG:
if (vi->cmd.cfg.cfg_func == VDA_CFG_GET_INIT) {
struct atto_ioctl_vda_cfg_cmd *cfg = &vi->cmd.cfg;
struct atto_vda_cfg_rsp *rsp = &rq->func_rsp.cfg_rsp;
cfg->data_length =
cpu_to_le32(sizeof(struct atto_vda_cfg_init));
cfg->data.init.vda_version =
le32_to_cpu(rsp->vda_version);
cfg->data.init.fw_build = rsp->fw_build;
sprintf((char *)&cfg->data.init.fw_release,
"%1d.%02d",
(int)LOBYTE(le16_to_cpu(rsp->fw_release)),
(int)HIBYTE(le16_to_cpu(rsp->fw_release)));
if (LOWORD(LOBYTE(cfg->data.init.fw_build)) == 'A')
cfg->data.init.fw_version =
cfg->data.init.fw_build;
else
cfg->data.init.fw_version =
cfg->data.init.fw_release;
} else {
esas2r_nuxi_cfg_data(rq->vrq->cfg.sub_func,
&vi->cmd.cfg.data);
}
break;
case VDA_FUNC_CLI:
vi->cmd.cli.cmd_rsp_len =
le32_to_cpu(rq->func_rsp.cli_rsp.cmd_rsp_len);
break;
default:
break;
}
}
/* Build a flash VDA request. */
void esas2r_build_flash_req(struct esas2r_adapter *a,
struct esas2r_request *rq,
u8 sub_func,
u8 cksum,
u32 addr,
u32 length)
{
struct atto_vda_flash_req *vrq = &rq->vrq->flash;
clear_vda_request(rq);
rq->vrq->scsi.function = VDA_FUNC_FLASH;
if (sub_func == VDA_FLASH_BEGINW
|| sub_func == VDA_FLASH_WRITE
|| sub_func == VDA_FLASH_READ)
vrq->sg_list_offset = (u8)offsetof(struct atto_vda_flash_req,
data.sge);
vrq->length = cpu_to_le32(length);
vrq->flash_addr = cpu_to_le32(addr);
vrq->checksum = cksum;
vrq->sub_func = sub_func;
}
/* Build a VDA management request. */
void esas2r_build_mgt_req(struct esas2r_adapter *a,
struct esas2r_request *rq,
u8 sub_func,
u8 scan_gen,
u16 dev_index,
u32 length,
void *data)
{
struct atto_vda_mgmt_req *vrq = &rq->vrq->mgt;
clear_vda_request(rq);
rq->vrq->scsi.function = VDA_FUNC_MGT;
vrq->mgt_func = sub_func;
vrq->scan_generation = scan_gen;
vrq->dev_index = cpu_to_le16(dev_index);
vrq->length = cpu_to_le32(length);
if (vrq->length) {
if (a->flags & AF_LEGACY_SGE_MODE) {
vrq->sg_list_offset = (u8)offsetof(
struct atto_vda_mgmt_req, sge);
vrq->sge[0].length = cpu_to_le32(SGE_LAST | length);
vrq->sge[0].address = cpu_to_le64(
rq->vrq_md->phys_addr +
sizeof(union atto_vda_req));
} else {
vrq->sg_list_offset = (u8)offsetof(
struct atto_vda_mgmt_req, prde);
vrq->prde[0].ctl_len = cpu_to_le32(length);
vrq->prde[0].address = cpu_to_le64(
rq->vrq_md->phys_addr +
sizeof(union atto_vda_req));
}
}
if (data) {
esas2r_nuxi_mgt_data(sub_func, data);
memcpy(&rq->vda_rsp_data->mgt_data.data.bytes[0], data,
length);
}
}
/* Build a VDA asynchronous event (AE) request. */
void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq)
{
	struct atto_vda_ae_req *vrq = &rq->vrq->ae;
	/*
	 * Keep the length in CPU order; vrq->length is little-endian and
	 * must not be passed back through cpu_to_le32() when building the
	 * SGE/PRDE entries below.
	 */
	u32 length = sizeof(struct atto_vda_ae_data);
	clear_vda_request(rq);
	rq->vrq->scsi.function = VDA_FUNC_AE;
	vrq->length = cpu_to_le32(length);
	if (a->flags & AF_LEGACY_SGE_MODE) {
		vrq->sg_list_offset =
			(u8)offsetof(struct atto_vda_ae_req, sge);
		vrq->sge[0].length = cpu_to_le32(SGE_LAST | length);
		vrq->sge[0].address = cpu_to_le64(
			rq->vrq_md->phys_addr +
			sizeof(union atto_vda_req));
	} else {
		vrq->sg_list_offset = (u8)offsetof(struct atto_vda_ae_req,
						   prde);
		vrq->prde[0].ctl_len = cpu_to_le32(length);
		vrq->prde[0].address = cpu_to_le64(
			rq->vrq_md->phys_addr +
			sizeof(union atto_vda_req));
	}
}
/* Build a VDA CLI request. */
void esas2r_build_cli_req(struct esas2r_adapter *a,
struct esas2r_request *rq,
u32 length,
u32 cmd_rsp_len)
{
struct atto_vda_cli_req *vrq = &rq->vrq->cli;
clear_vda_request(rq);
rq->vrq->scsi.function = VDA_FUNC_CLI;
vrq->length = cpu_to_le32(length);
vrq->cmd_rsp_len = cpu_to_le32(cmd_rsp_len);
vrq->sg_list_offset = (u8)offsetof(struct atto_vda_cli_req, sge);
}
/* Build a VDA IOCTL request. */
void esas2r_build_ioctl_req(struct esas2r_adapter *a,
struct esas2r_request *rq,
u32 length,
u8 sub_func)
{
struct atto_vda_ioctl_req *vrq = &rq->vrq->ioctl;
clear_vda_request(rq);
rq->vrq->scsi.function = VDA_FUNC_IOCTL;
vrq->length = cpu_to_le32(length);
vrq->sub_func = sub_func;
vrq->sg_list_offset = (u8)offsetof(struct atto_vda_ioctl_req, sge);
}
/* Build a VDA configuration request. */
void esas2r_build_cfg_req(struct esas2r_adapter *a,
struct esas2r_request *rq,
u8 sub_func,
u32 length,
void *data)
{
struct atto_vda_cfg_req *vrq = &rq->vrq->cfg;
clear_vda_request(rq);
rq->vrq->scsi.function = VDA_FUNC_CFG;
vrq->sub_func = sub_func;
vrq->length = cpu_to_le32(length);
if (data) {
esas2r_nuxi_cfg_data(sub_func, data);
memcpy(&vrq->data, data, length);
}
}
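/*
 * Reinitialize a VDA request for reuse: everything is cleared except the
 * firmware handle, which must survive across uses of the request slot.
 */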
static void clear_vda_request(struct esas2r_request *rq)
{
u32 handle = rq->vrq->scsi.handle;
memset(rq->vrq, 0, sizeof(*rq->vrq));
rq->vrq->scsi.handle = handle;
rq->req_stat = RS_PENDING;
	/* Since the data buffer is separate, clear that too. */
memset(rq->data_buf, 0, ESAS2R_DATA_BUF_LEN);
/*
* Setup next and prev pointer in case the request is not going through
* esas2r_start_request().
*/
INIT_LIST_HEAD(&rq->req_list);
}