Commit 7df5358d authored by David S. Miller

Merge branch 'qed-iscsi'

Manish Rangankar says:

====================
Add QLogic FastLinQ iSCSI (qedi) driver.

This series introduces a hardware-offload iSCSI initiator driver for the
41000 Series Converged Network Adapters (579xx chip) by QLogic. The overall
driver design includes a common module ('qed') and protocol-specific
dependent modules ('qedi' for iSCSI).

This is an open-iscsi driver; modifications to the open-iscsi user
components 'iscsid', 'iscsiuio', etc. are required for the solution to work.
The user-space changes are also in the process of being submitted.

    https://groups.google.com/forum/#!forum/open-iscsi

The 'qed' common module, under drivers/net/ethernet/qlogic/qed/, is
enhanced with the functionality required for iSCSI support. This series
is based on:

    net tree base: Merge of net and net-next as of 11/29/2016

Changes from RFC v2:

  1. qedi patches are squashed into a single patch to prevent a krobot
     warning.
  2. Fixed 'hw_p_cpuq' incompatible pointer type.
  3. Fixed sparse incompatible types in comparison expression.
  4. Misc fixes with the latest 'checkpatch --strict' option.
  5. Removed the int_mode option from MODULE_PARAM.
  6. Prefixed all MODULE_PARAM params with qedi_*.
  7. Used CONFIG_QED_ISCSI instead of CONFIG_QEDI.
  8. Added a fix for bad task memory access.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents b14945ac 1d6cff4f
@@ -110,4 +110,7 @@ config QEDE
config QED_RDMA
	bool

config QED_ISCSI
	bool

endif # NET_VENDOR_QLOGIC
@@ -6,3 +6,4 @@ qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
qed-$(CONFIG_QED_LL2) += qed_ll2.o
qed-$(CONFIG_QED_RDMA) += qed_roce.o
qed-$(CONFIG_QED_ISCSI) += qed_iscsi.o qed_ooo.o
@@ -35,6 +35,7 @@ extern const struct qed_common_ops qed_common_ops_pass;
#define QED_WFQ_UNIT 100
#define ISCSI_BDQ_ID(_port_id) (_port_id)
#define QED_WID_SIZE (1024)
#define QED_PF_DEMS_SIZE (4)
@@ -382,7 +383,9 @@ struct qed_hwfn {
/* Protocol related */
bool using_ll2;
struct qed_ll2_info *p_ll2_info;
struct qed_ooo_info *p_ooo_info;
struct qed_rdma_info *p_rdma_info;
struct qed_iscsi_info *p_iscsi_info;
struct qed_pf_params pf_params;
bool b_rdma_enabled_in_prs;
@@ -581,6 +584,8 @@ struct qed_dev {
/* Linux specific here */
struct qede_dev *edev;
struct pci_dev *pdev;
u32 flags;
#define QED_FLAG_STORAGE_STARTED (BIT(0))
int msg_enable;
struct pci_params pci_params;
@@ -594,6 +599,7 @@ struct qed_dev {
union {
struct qed_common_cb_ops *common;
struct qed_eth_cb_ops *eth;
struct qed_iscsi_cb_ops *iscsi;
} protocol_ops;
void *ops_cookie;
@@ -603,7 +609,7 @@ struct qed_dev {
struct qed_cb_ll2_info *ll2;
u8 ll2_mac_address[ETH_ALEN];
#endif
DECLARE_HASHTABLE(connections, 10);
const struct firmware *firmware;
u32 rdma_max_sge;
...
@@ -29,8 +29,10 @@
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_iscsi.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
@@ -146,6 +148,10 @@ void qed_resc_free(struct qed_dev *cdev)
#ifdef CONFIG_QED_LL2
qed_ll2_free(p_hwfn, p_hwfn->p_ll2_info);
#endif
if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
qed_iscsi_free(p_hwfn, p_hwfn->p_iscsi_info);
qed_ooo_free(p_hwfn, p_hwfn->p_ooo_info);
}
qed_iov_free(p_hwfn);
qed_dmae_info_free(p_hwfn);
qed_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
@@ -402,6 +408,8 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
int qed_resc_alloc(struct qed_dev *cdev)
{
struct qed_iscsi_info *p_iscsi_info;
struct qed_ooo_info *p_ooo_info;
#ifdef CONFIG_QED_LL2
struct qed_ll2_info *p_ll2_info;
#endif
@@ -507,6 +515,16 @@ int qed_resc_alloc(struct qed_dev *cdev)
p_hwfn->p_ll2_info = p_ll2_info;
}
#endif
if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
p_iscsi_info = qed_iscsi_alloc(p_hwfn);
if (!p_iscsi_info)
goto alloc_no_mem;
p_hwfn->p_iscsi_info = p_iscsi_info;
p_ooo_info = qed_ooo_alloc(p_hwfn);
if (!p_ooo_info)
goto alloc_no_mem;
p_hwfn->p_ooo_info = p_ooo_info;
}
/* DMA info initialization */
rc = qed_dmae_info_alloc(p_hwfn);
@@ -560,6 +578,10 @@ void qed_resc_setup(struct qed_dev *cdev)
if (p_hwfn->using_ll2)
qed_ll2_setup(p_hwfn, p_hwfn->p_ll2_info);
#endif
if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
qed_iscsi_setup(p_hwfn, p_hwfn->p_iscsi_info);
qed_ooo_setup(p_hwfn, p_hwfn->p_ooo_info);
}
}
}
...
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/qed/qed_iscsi_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_iscsi.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_reg_addr.h"
struct qed_iscsi_conn {
struct list_head list_entry;
bool free_on_delete;
u16 conn_id;
u32 icid;
u32 fw_cid;
u8 layer_code;
u8 offl_flags;
u8 connect_mode;
u32 initial_ack;
dma_addr_t sq_pbl_addr;
struct qed_chain r2tq;
struct qed_chain xhq;
struct qed_chain uhq;
struct tcp_upload_params *tcp_upload_params_virt_addr;
dma_addr_t tcp_upload_params_phys_addr;
struct scsi_terminate_extra_params *queue_cnts_virt_addr;
dma_addr_t queue_cnts_phys_addr;
dma_addr_t syn_phy_addr;
u16 syn_ip_payload_length;
u8 local_mac[6];
u8 remote_mac[6];
u16 vlan_id;
u8 tcp_flags;
u8 ip_version;
u32 remote_ip[4];
u32 local_ip[4];
u8 ka_max_probe_cnt;
u8 dup_ack_theshold;
u32 rcv_next;
u32 snd_una;
u32 snd_next;
u32 snd_max;
u32 snd_wnd;
u32 rcv_wnd;
u32 snd_wl1;
u32 cwnd;
u32 ss_thresh;
u16 srtt;
u16 rtt_var;
u32 ts_time;
u32 ts_recent;
u32 ts_recent_age;
u32 total_rt;
u32 ka_timeout_delta;
u32 rt_timeout_delta;
u8 dup_ack_cnt;
u8 snd_wnd_probe_cnt;
u8 ka_probe_cnt;
u8 rt_cnt;
u32 flow_label;
u32 ka_timeout;
u32 ka_interval;
u32 max_rt_time;
u32 initial_rcv_wnd;
u8 ttl;
u8 tos_or_tc;
u16 remote_port;
u16 local_port;
u16 mss;
u8 snd_wnd_scale;
u8 rcv_wnd_scale;
u32 ts_ticks_per_second;
u16 da_timeout_value;
u8 ack_frequency;
u8 update_flag;
u8 default_cq;
u32 max_seq_size;
u32 max_recv_pdu_length;
u32 max_send_pdu_length;
u32 first_seq_length;
u32 exp_stat_sn;
u32 stat_sn;
u16 physical_q0;
u16 physical_q1;
u8 abortive_dsconnect;
};
static int
qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_addr,
void *event_context, iscsi_event_cb_t async_event_cb)
{
struct iscsi_init_ramrod_params *p_ramrod = NULL;
struct scsi_init_func_queues *p_queue = NULL;
struct qed_iscsi_pf_params *p_params = NULL;
struct iscsi_spe_func_init *p_init = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
int rc = 0;
u32 dval;
u16 val;
u8 i;
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
init_data.cid = qed_spq_get_cid(p_hwfn);
init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
init_data.comp_mode = comp_mode;
init_data.p_comp_data = p_comp_addr;
rc = qed_sp_init_request(p_hwfn, &p_ent,
ISCSI_RAMROD_CMD_ID_INIT_FUNC,
PROTOCOLID_ISCSI, &init_data);
if (rc)
return rc;
p_ramrod = &p_ent->ramrod.iscsi_init;
p_init = &p_ramrod->iscsi_init_spe;
p_params = &p_hwfn->pf_params.iscsi_pf_params;
p_queue = &p_init->q_params;
SET_FIELD(p_init->hdr.flags,
ISCSI_SLOW_PATH_HDR_LAYER_CODE, ISCSI_SLOW_PATH_LAYER_CODE);
p_init->hdr.op_code = ISCSI_RAMROD_CMD_ID_INIT_FUNC;
val = p_params->half_way_close_timeout;
p_init->half_way_close_timeout = cpu_to_le16(val);
p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring;
p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring;
p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring;
p_init->func_params.log_page_size = p_params->log_page_size;
val = p_params->num_tasks;
p_init->func_params.num_tasks = cpu_to_le16(val);
p_init->debug_mode.flags = p_params->debug_mode;
DMA_REGPAIR_LE(p_queue->glbl_q_params_addr,
p_params->glbl_q_params_addr);
val = p_params->cq_num_entries;
p_queue->cq_num_entries = cpu_to_le16(val);
val = p_params->cmdq_num_entries;
p_queue->cmdq_num_entries = cpu_to_le16(val);
p_queue->num_queues = p_params->num_queues;
dval = (u8)p_hwfn->hw_info.resc_start[QED_CMDQS_CQS];
p_queue->queue_relative_offset = (u8)dval;
p_queue->cq_sb_pi = p_params->gl_rq_pi;
p_queue->cmdq_sb_pi = p_params->gl_cmd_pi;
for (i = 0; i < p_params->num_queues; i++) {
val = p_hwfn->sbs_info[i]->igu_sb_id;
p_queue->cq_cmdq_sb_num_arr[i] = cpu_to_le16(val);
}
p_queue->bdq_resource_id = ISCSI_BDQ_ID(p_hwfn->port_id);
DMA_REGPAIR_LE(p_queue->bdq_pbl_base_address[BDQ_ID_RQ],
p_params->bdq_pbl_base_addr[BDQ_ID_RQ]);
p_queue->bdq_pbl_num_entries[BDQ_ID_RQ] =
p_params->bdq_pbl_num_entries[BDQ_ID_RQ];
val = p_params->bdq_xoff_threshold[BDQ_ID_RQ];
p_queue->bdq_xoff_threshold[BDQ_ID_RQ] = cpu_to_le16(val);
val = p_params->bdq_xon_threshold[BDQ_ID_RQ];
p_queue->bdq_xon_threshold[BDQ_ID_RQ] = cpu_to_le16(val);
DMA_REGPAIR_LE(p_queue->bdq_pbl_base_address[BDQ_ID_IMM_DATA],
p_params->bdq_pbl_base_addr[BDQ_ID_IMM_DATA]);
p_queue->bdq_pbl_num_entries[BDQ_ID_IMM_DATA] =
p_params->bdq_pbl_num_entries[BDQ_ID_IMM_DATA];
val = p_params->bdq_xoff_threshold[BDQ_ID_IMM_DATA];
p_queue->bdq_xoff_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(val);
val = p_params->bdq_xon_threshold[BDQ_ID_IMM_DATA];
p_queue->bdq_xon_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(val);
val = p_params->rq_buffer_size;
p_queue->rq_buffer_size = cpu_to_le16(val);
if (p_params->is_target) {
SET_FIELD(p_queue->q_validity,
SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1);
if (p_queue->bdq_pbl_num_entries[BDQ_ID_IMM_DATA])
SET_FIELD(p_queue->q_validity,
SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID, 1);
SET_FIELD(p_queue->q_validity,
SCSI_INIT_FUNC_QUEUES_CMD_VALID, 1);
} else {
SET_FIELD(p_queue->q_validity,
SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1);
}
p_ramrod->tcp_init.two_msl_timer = cpu_to_le32(p_params->two_msl_timer);
val = p_params->tx_sws_timer;
p_ramrod->tcp_init.tx_sws_timer = cpu_to_le16(val);
p_ramrod->tcp_init.maxfinrt = p_params->max_fin_rt;
p_hwfn->p_iscsi_info->event_context = event_context;
p_hwfn->p_iscsi_info->event_cb = async_event_cb;
return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
struct qed_iscsi_conn *p_conn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_addr)
{
struct iscsi_spe_conn_offload *p_ramrod = NULL;
struct tcp_offload_params_opt2 *p_tcp2 = NULL;
struct tcp_offload_params *p_tcp = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
union qed_qm_pq_params pq_params;
u16 pq0_id = 0, pq1_id = 0;
dma_addr_t r2tq_pbl_addr;
dma_addr_t xhq_pbl_addr;
dma_addr_t uhq_pbl_addr;
int rc = 0;
u32 dval;
u16 wval;
u8 i;
u16 *p;
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
init_data.cid = p_conn->icid;
init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
init_data.comp_mode = comp_mode;
init_data.p_comp_data = p_comp_addr;
rc = qed_sp_init_request(p_hwfn, &p_ent,
ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN,
PROTOCOLID_ISCSI, &init_data);
if (rc)
return rc;
p_ramrod = &p_ent->ramrod.iscsi_conn_offload;
/* Transmission PQ is the first of the PF */
memset(&pq_params, 0, sizeof(pq_params));
pq0_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_ISCSI, &pq_params);
p_conn->physical_q0 = cpu_to_le16(pq0_id);
p_ramrod->iscsi.physical_q0 = cpu_to_le16(pq0_id);
/* iSCSI Pure-ACK PQ */
pq_params.iscsi.q_idx = 1;
pq1_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_ISCSI, &pq_params);
p_conn->physical_q1 = cpu_to_le16(pq1_id);
p_ramrod->iscsi.physical_q1 = cpu_to_le16(pq1_id);
p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN;
SET_FIELD(p_ramrod->hdr.flags, ISCSI_SLOW_PATH_HDR_LAYER_CODE,
p_conn->layer_code);
p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
p_ramrod->fw_cid = cpu_to_le32(p_conn->icid);
DMA_REGPAIR_LE(p_ramrod->iscsi.sq_pbl_addr, p_conn->sq_pbl_addr);
r2tq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->r2tq);
DMA_REGPAIR_LE(p_ramrod->iscsi.r2tq_pbl_addr, r2tq_pbl_addr);
xhq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->xhq);
DMA_REGPAIR_LE(p_ramrod->iscsi.xhq_pbl_addr, xhq_pbl_addr);
uhq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->uhq);
DMA_REGPAIR_LE(p_ramrod->iscsi.uhq_pbl_addr, uhq_pbl_addr);
p_ramrod->iscsi.initial_ack = cpu_to_le32(p_conn->initial_ack);
p_ramrod->iscsi.flags = p_conn->offl_flags;
p_ramrod->iscsi.default_cq = p_conn->default_cq;
p_ramrod->iscsi.stat_sn = cpu_to_le32(p_conn->stat_sn);
if (!GET_FIELD(p_ramrod->iscsi.flags,
ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B)) {
p_tcp = &p_ramrod->tcp;
p = (u16 *)p_conn->local_mac;
p_tcp->local_mac_addr_hi = swab16(get_unaligned(p));
p_tcp->local_mac_addr_mid = swab16(get_unaligned(p + 1));
p_tcp->local_mac_addr_lo = swab16(get_unaligned(p + 2));
p = (u16 *)p_conn->remote_mac;
p_tcp->remote_mac_addr_hi = swab16(get_unaligned(p));
p_tcp->remote_mac_addr_mid = swab16(get_unaligned(p + 1));
p_tcp->remote_mac_addr_lo = swab16(get_unaligned(p + 2));
p_tcp->vlan_id = cpu_to_le16(p_conn->vlan_id);
p_tcp->flags = p_conn->tcp_flags;
p_tcp->ip_version = p_conn->ip_version;
for (i = 0; i < 4; i++) {
dval = p_conn->remote_ip[i];
p_tcp->remote_ip[i] = cpu_to_le32(dval);
dval = p_conn->local_ip[i];
p_tcp->local_ip[i] = cpu_to_le32(dval);
}
p_tcp->ka_max_probe_cnt = p_conn->ka_max_probe_cnt;
p_tcp->dup_ack_theshold = p_conn->dup_ack_theshold;
p_tcp->rcv_next = cpu_to_le32(p_conn->rcv_next);
p_tcp->snd_una = cpu_to_le32(p_conn->snd_una);
p_tcp->snd_next = cpu_to_le32(p_conn->snd_next);
p_tcp->snd_max = cpu_to_le32(p_conn->snd_max);
p_tcp->snd_wnd = cpu_to_le32(p_conn->snd_wnd);
p_tcp->rcv_wnd = cpu_to_le32(p_conn->rcv_wnd);
p_tcp->snd_wl1 = cpu_to_le32(p_conn->snd_wl1);
p_tcp->cwnd = cpu_to_le32(p_conn->cwnd);
p_tcp->ss_thresh = cpu_to_le32(p_conn->ss_thresh);
p_tcp->srtt = cpu_to_le16(p_conn->srtt);
p_tcp->rtt_var = cpu_to_le16(p_conn->rtt_var);
p_tcp->ts_time = cpu_to_le32(p_conn->ts_time);
p_tcp->ts_recent = cpu_to_le32(p_conn->ts_recent);
p_tcp->ts_recent_age = cpu_to_le32(p_conn->ts_recent_age);
p_tcp->total_rt = cpu_to_le32(p_conn->total_rt);
dval = p_conn->ka_timeout_delta;
p_tcp->ka_timeout_delta = cpu_to_le32(dval);
dval = p_conn->rt_timeout_delta;
p_tcp->rt_timeout_delta = cpu_to_le32(dval);
p_tcp->dup_ack_cnt = p_conn->dup_ack_cnt;
p_tcp->snd_wnd_probe_cnt = p_conn->snd_wnd_probe_cnt;
p_tcp->ka_probe_cnt = p_conn->ka_probe_cnt;
p_tcp->rt_cnt = p_conn->rt_cnt;
p_tcp->flow_label = cpu_to_le32(p_conn->flow_label);
p_tcp->ka_timeout = cpu_to_le32(p_conn->ka_timeout);
p_tcp->ka_interval = cpu_to_le32(p_conn->ka_interval);
p_tcp->max_rt_time = cpu_to_le32(p_conn->max_rt_time);
dval = p_conn->initial_rcv_wnd;
p_tcp->initial_rcv_wnd = cpu_to_le32(dval);
p_tcp->ttl = p_conn->ttl;
p_tcp->tos_or_tc = p_conn->tos_or_tc;
p_tcp->remote_port = cpu_to_le16(p_conn->remote_port);
p_tcp->local_port = cpu_to_le16(p_conn->local_port);
p_tcp->mss = cpu_to_le16(p_conn->mss);
p_tcp->snd_wnd_scale = p_conn->snd_wnd_scale;
p_tcp->rcv_wnd_scale = p_conn->rcv_wnd_scale;
dval = p_conn->ts_ticks_per_second;
p_tcp->ts_ticks_per_second = cpu_to_le32(dval);
wval = p_conn->da_timeout_value;
p_tcp->da_timeout_value = cpu_to_le16(wval);
p_tcp->ack_frequency = p_conn->ack_frequency;
p_tcp->connect_mode = p_conn->connect_mode;
} else {
p_tcp2 =
&((struct iscsi_spe_conn_offload_option2 *)p_ramrod)->tcp;
p = (u16 *)p_conn->local_mac;
p_tcp2->local_mac_addr_hi = swab16(get_unaligned(p));
p_tcp2->local_mac_addr_mid = swab16(get_unaligned(p + 1));
p_tcp2->local_mac_addr_lo = swab16(get_unaligned(p + 2));
p = (u16 *)p_conn->remote_mac;
p_tcp2->remote_mac_addr_hi = swab16(get_unaligned(p));
p_tcp2->remote_mac_addr_mid = swab16(get_unaligned(p + 1));
p_tcp2->remote_mac_addr_lo = swab16(get_unaligned(p + 2));
p_tcp2->vlan_id = cpu_to_le16(p_conn->vlan_id);
p_tcp2->flags = p_conn->tcp_flags;
p_tcp2->ip_version = p_conn->ip_version;
for (i = 0; i < 4; i++) {
dval = p_conn->remote_ip[i];
p_tcp2->remote_ip[i] = cpu_to_le32(dval);
dval = p_conn->local_ip[i];
p_tcp2->local_ip[i] = cpu_to_le32(dval);
}
p_tcp2->flow_label = cpu_to_le32(p_conn->flow_label);
p_tcp2->ttl = p_conn->ttl;
p_tcp2->tos_or_tc = p_conn->tos_or_tc;
p_tcp2->remote_port = cpu_to_le16(p_conn->remote_port);
p_tcp2->local_port = cpu_to_le16(p_conn->local_port);
p_tcp2->mss = cpu_to_le16(p_conn->mss);
p_tcp2->rcv_wnd_scale = p_conn->rcv_wnd_scale;
p_tcp2->connect_mode = p_conn->connect_mode;
wval = p_conn->syn_ip_payload_length;
p_tcp2->syn_ip_payload_length = cpu_to_le16(wval);
p_tcp2->syn_phy_addr_lo = DMA_LO_LE(p_conn->syn_phy_addr);
p_tcp2->syn_phy_addr_hi = DMA_HI_LE(p_conn->syn_phy_addr);
}
return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int qed_sp_iscsi_conn_update(struct qed_hwfn *p_hwfn,
struct qed_iscsi_conn *p_conn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_addr)
{
struct iscsi_conn_update_ramrod_params *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
int rc = -EINVAL;
u32 dval;
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
init_data.cid = p_conn->icid;
init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
init_data.comp_mode = comp_mode;
init_data.p_comp_data = p_comp_addr;
rc = qed_sp_init_request(p_hwfn, &p_ent,
ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
PROTOCOLID_ISCSI, &init_data);
if (rc)
return rc;
p_ramrod = &p_ent->ramrod.iscsi_conn_update;
p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_UPDATE_CONN;
SET_FIELD(p_ramrod->hdr.flags,
ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code);
p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
p_ramrod->fw_cid = cpu_to_le32(p_conn->icid);
p_ramrod->flags = p_conn->update_flag;
p_ramrod->max_seq_size = cpu_to_le32(p_conn->max_seq_size);
dval = p_conn->max_recv_pdu_length;
p_ramrod->max_recv_pdu_length = cpu_to_le32(dval);
dval = p_conn->max_send_pdu_length;
p_ramrod->max_send_pdu_length = cpu_to_le32(dval);
dval = p_conn->first_seq_length;
p_ramrod->first_seq_length = cpu_to_le32(dval);
p_ramrod->exp_stat_sn = cpu_to_le32(p_conn->exp_stat_sn);
return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int qed_sp_iscsi_conn_terminate(struct qed_hwfn *p_hwfn,
struct qed_iscsi_conn *p_conn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_addr)
{
struct iscsi_spe_conn_termination *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
int rc = -EINVAL;
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
init_data.cid = p_conn->icid;
init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
init_data.comp_mode = comp_mode;
init_data.p_comp_data = p_comp_addr;
rc = qed_sp_init_request(p_hwfn, &p_ent,
ISCSI_RAMROD_CMD_ID_TERMINATION_CONN,
PROTOCOLID_ISCSI, &init_data);
if (rc)
return rc;
p_ramrod = &p_ent->ramrod.iscsi_conn_terminate;
p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_TERMINATION_CONN;
SET_FIELD(p_ramrod->hdr.flags,
ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code);
p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
p_ramrod->fw_cid = cpu_to_le32(p_conn->icid);
p_ramrod->abortive = p_conn->abortive_dsconnect;
DMA_REGPAIR_LE(p_ramrod->query_params_addr,
p_conn->tcp_upload_params_phys_addr);
DMA_REGPAIR_LE(p_ramrod->queue_cnts_addr, p_conn->queue_cnts_phys_addr);
return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int qed_sp_iscsi_conn_clear_sq(struct qed_hwfn *p_hwfn,
struct qed_iscsi_conn *p_conn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_addr)
{
struct iscsi_slow_path_hdr *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
int rc = -EINVAL;
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
init_data.cid = p_conn->icid;
init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
init_data.comp_mode = comp_mode;
init_data.p_comp_data = p_comp_addr;
rc = qed_sp_init_request(p_hwfn, &p_ent,
ISCSI_RAMROD_CMD_ID_CLEAR_SQ,
PROTOCOLID_ISCSI, &init_data);
if (rc)
return rc;
p_ramrod = &p_ent->ramrod.iscsi_empty;
p_ramrod->op_code = ISCSI_RAMROD_CMD_ID_CLEAR_SQ;
SET_FIELD(p_ramrod->flags,
ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code);
return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int qed_sp_iscsi_func_stop(struct qed_hwfn *p_hwfn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_addr)
{
struct iscsi_spe_func_dstry *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
int rc = 0;
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
init_data.cid = qed_spq_get_cid(p_hwfn);
init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
init_data.comp_mode = comp_mode;
init_data.p_comp_data = p_comp_addr;
rc = qed_sp_init_request(p_hwfn, &p_ent,
ISCSI_RAMROD_CMD_ID_DESTROY_FUNC,
PROTOCOLID_ISCSI, &init_data);
if (rc)
return rc;
p_ramrod = &p_ent->ramrod.iscsi_destroy;
p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_DESTROY_FUNC;
return qed_spq_post(p_hwfn, p_ent, NULL);
}
static void __iomem *qed_iscsi_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
{
return (u8 __iomem *)p_hwfn->doorbells +
qed_db_addr(cid, DQ_DEMS_LEGACY);
}
static void __iomem *qed_iscsi_get_primary_bdq_prod(struct qed_hwfn *p_hwfn,
u8 bdq_id)
{
u8 bdq_function_id = ISCSI_BDQ_ID(p_hwfn->port_id);
return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_MSDM_RAM +
MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id,
bdq_id);
}
static void __iomem *qed_iscsi_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
u8 bdq_id)
{
u8 bdq_function_id = ISCSI_BDQ_ID(p_hwfn->port_id);
return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_TSDM_RAM +
TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id,
bdq_id);
}
static int qed_iscsi_setup_connection(struct qed_hwfn *p_hwfn,
struct qed_iscsi_conn *p_conn)
{
if (!p_conn->queue_cnts_virt_addr)
goto nomem;
memset(p_conn->queue_cnts_virt_addr, 0,
sizeof(*p_conn->queue_cnts_virt_addr));
if (!p_conn->tcp_upload_params_virt_addr)
goto nomem;
memset(p_conn->tcp_upload_params_virt_addr, 0,
sizeof(*p_conn->tcp_upload_params_virt_addr));
if (!p_conn->r2tq.p_virt_addr)
goto nomem;
qed_chain_pbl_zero_mem(&p_conn->r2tq);
if (!p_conn->uhq.p_virt_addr)
goto nomem;
qed_chain_pbl_zero_mem(&p_conn->uhq);
if (!p_conn->xhq.p_virt_addr)
goto nomem;
qed_chain_pbl_zero_mem(&p_conn->xhq);
return 0;
nomem:
return -ENOMEM;
}
static int qed_iscsi_allocate_connection(struct qed_hwfn *p_hwfn,
struct qed_iscsi_conn **p_out_conn)
{
u16 uhq_num_elements = 0, xhq_num_elements = 0, r2tq_num_elements = 0;
struct scsi_terminate_extra_params *p_q_cnts = NULL;
struct qed_iscsi_pf_params *p_params = NULL;
struct tcp_upload_params *p_tcp = NULL;
struct qed_iscsi_conn *p_conn = NULL;
int rc = 0;
/* Try finding a free connection that can be used */
spin_lock_bh(&p_hwfn->p_iscsi_info->lock);
if (!list_empty(&p_hwfn->p_iscsi_info->free_list))
p_conn = list_first_entry(&p_hwfn->p_iscsi_info->free_list,
struct qed_iscsi_conn, list_entry);
if (p_conn) {
list_del(&p_conn->list_entry);
spin_unlock_bh(&p_hwfn->p_iscsi_info->lock);
*p_out_conn = p_conn;
return 0;
}
spin_unlock_bh(&p_hwfn->p_iscsi_info->lock);
/* Need to allocate a new connection */
p_params = &p_hwfn->pf_params.iscsi_pf_params;
p_conn = kzalloc(sizeof(*p_conn), GFP_KERNEL);
if (!p_conn)
return -ENOMEM;
p_q_cnts = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(*p_q_cnts),
&p_conn->queue_cnts_phys_addr,
GFP_KERNEL);
if (!p_q_cnts)
goto nomem_queue_cnts_param;
p_conn->queue_cnts_virt_addr = p_q_cnts;
p_tcp = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(*p_tcp),
&p_conn->tcp_upload_params_phys_addr,
GFP_KERNEL);
if (!p_tcp)
goto nomem_upload_param;
p_conn->tcp_upload_params_virt_addr = p_tcp;
r2tq_num_elements = p_params->num_r2tq_pages_in_ring *
QED_CHAIN_PAGE_SIZE / 0x80;
rc = qed_chain_alloc(p_hwfn->cdev,
QED_CHAIN_USE_TO_CONSUME_PRODUCE,
QED_CHAIN_MODE_PBL,
QED_CHAIN_CNT_TYPE_U16,
r2tq_num_elements, 0x80, &p_conn->r2tq);
if (rc)
goto nomem_r2tq;
uhq_num_elements = p_params->num_uhq_pages_in_ring *
QED_CHAIN_PAGE_SIZE / sizeof(struct iscsi_uhqe);
rc = qed_chain_alloc(p_hwfn->cdev,
QED_CHAIN_USE_TO_CONSUME_PRODUCE,
QED_CHAIN_MODE_PBL,
QED_CHAIN_CNT_TYPE_U16,
uhq_num_elements,
sizeof(struct iscsi_uhqe), &p_conn->uhq);
if (rc)
goto nomem_uhq;
xhq_num_elements = uhq_num_elements;
rc = qed_chain_alloc(p_hwfn->cdev,
QED_CHAIN_USE_TO_CONSUME_PRODUCE,
QED_CHAIN_MODE_PBL,
QED_CHAIN_CNT_TYPE_U16,
xhq_num_elements,
sizeof(struct iscsi_xhqe), &p_conn->xhq);
if (rc)
goto nomem;
p_conn->free_on_delete = true;
*p_out_conn = p_conn;
return 0;
nomem:
qed_chain_free(p_hwfn->cdev, &p_conn->uhq);
nomem_uhq:
qed_chain_free(p_hwfn->cdev, &p_conn->r2tq);
nomem_r2tq:
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(struct tcp_upload_params),
p_conn->tcp_upload_params_virt_addr,
p_conn->tcp_upload_params_phys_addr);
nomem_upload_param:
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(struct scsi_terminate_extra_params),
p_conn->queue_cnts_virt_addr,
p_conn->queue_cnts_phys_addr);
nomem_queue_cnts_param:
kfree(p_conn);
return -ENOMEM;
}
static int qed_iscsi_acquire_connection(struct qed_hwfn *p_hwfn,
struct qed_iscsi_conn *p_in_conn,
struct qed_iscsi_conn **p_out_conn)
{
struct qed_iscsi_conn *p_conn = NULL;
int rc = 0;
u32 icid;
spin_lock_bh(&p_hwfn->p_iscsi_info->lock);
rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ISCSI, &icid);
spin_unlock_bh(&p_hwfn->p_iscsi_info->lock);
if (rc)
return rc;
/* Use input connection or allocate a new one */
if (p_in_conn)
p_conn = p_in_conn;
else
rc = qed_iscsi_allocate_connection(p_hwfn, &p_conn);
if (!rc)
rc = qed_iscsi_setup_connection(p_hwfn, p_conn);
if (rc) {
spin_lock_bh(&p_hwfn->p_iscsi_info->lock);
qed_cxt_release_cid(p_hwfn, icid);
spin_unlock_bh(&p_hwfn->p_iscsi_info->lock);
return rc;
}
p_conn->icid = icid;
p_conn->conn_id = (u16)icid;
p_conn->fw_cid = (p_hwfn->hw_info.opaque_fid << 16) | icid;
*p_out_conn = p_conn;
return rc;
}
static void qed_iscsi_release_connection(struct qed_hwfn *p_hwfn,
struct qed_iscsi_conn *p_conn)
{
spin_lock_bh(&p_hwfn->p_iscsi_info->lock);
list_add_tail(&p_conn->list_entry, &p_hwfn->p_iscsi_info->free_list);
qed_cxt_release_cid(p_hwfn, p_conn->icid);
spin_unlock_bh(&p_hwfn->p_iscsi_info->lock);
}
struct qed_iscsi_info *qed_iscsi_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_iscsi_info *p_iscsi_info;
p_iscsi_info = kzalloc(sizeof(*p_iscsi_info), GFP_KERNEL);
if (!p_iscsi_info)
return NULL;
INIT_LIST_HEAD(&p_iscsi_info->free_list);
return p_iscsi_info;
}
void qed_iscsi_setup(struct qed_hwfn *p_hwfn,
struct qed_iscsi_info *p_iscsi_info)
{
spin_lock_init(&p_iscsi_info->lock);
}
void qed_iscsi_free(struct qed_hwfn *p_hwfn,
struct qed_iscsi_info *p_iscsi_info)
{
kfree(p_iscsi_info);
}
static void _qed_iscsi_get_tstats(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_iscsi_stats *p_stats)
{
struct tstorm_iscsi_stats_drv tstats;
u32 tstats_addr;
memset(&tstats, 0, sizeof(tstats));
tstats_addr = BAR0_MAP_REG_TSDM_RAM +
TSTORM_ISCSI_RX_STATS_OFFSET(p_hwfn->rel_pf_id);
qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));
p_stats->iscsi_rx_bytes_cnt =
HILO_64_REGPAIR(tstats.iscsi_rx_bytes_cnt);
p_stats->iscsi_rx_packet_cnt =
HILO_64_REGPAIR(tstats.iscsi_rx_packet_cnt);
p_stats->iscsi_cmdq_threshold_cnt =
le32_to_cpu(tstats.iscsi_cmdq_threshold_cnt);
p_stats->iscsi_rq_threshold_cnt =
le32_to_cpu(tstats.iscsi_rq_threshold_cnt);
p_stats->iscsi_immq_threshold_cnt =
le32_to_cpu(tstats.iscsi_immq_threshold_cnt);
}
static void _qed_iscsi_get_mstats(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_iscsi_stats *p_stats)
{
struct mstorm_iscsi_stats_drv mstats;
u32 mstats_addr;
memset(&mstats, 0, sizeof(mstats));
mstats_addr = BAR0_MAP_REG_MSDM_RAM +
MSTORM_ISCSI_RX_STATS_OFFSET(p_hwfn->rel_pf_id);
qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, sizeof(mstats));
p_stats->iscsi_rx_dropped_pdus_task_not_valid =
HILO_64_REGPAIR(mstats.iscsi_rx_dropped_pdus_task_not_valid);
}
static void _qed_iscsi_get_ustats(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_iscsi_stats *p_stats)
{
struct ustorm_iscsi_stats_drv ustats;
u32 ustats_addr;
memset(&ustats, 0, sizeof(ustats));
ustats_addr = BAR0_MAP_REG_USDM_RAM +
USTORM_ISCSI_RX_STATS_OFFSET(p_hwfn->rel_pf_id);
qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));
p_stats->iscsi_rx_data_pdu_cnt =
HILO_64_REGPAIR(ustats.iscsi_rx_data_pdu_cnt);
p_stats->iscsi_rx_r2t_pdu_cnt =
HILO_64_REGPAIR(ustats.iscsi_rx_r2t_pdu_cnt);
p_stats->iscsi_rx_total_pdu_cnt =
HILO_64_REGPAIR(ustats.iscsi_rx_total_pdu_cnt);
}
static void _qed_iscsi_get_xstats(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_iscsi_stats *p_stats)
{
struct xstorm_iscsi_stats_drv xstats;
u32 xstats_addr;
memset(&xstats, 0, sizeof(xstats));
xstats_addr = BAR0_MAP_REG_XSDM_RAM +
XSTORM_ISCSI_TX_STATS_OFFSET(p_hwfn->rel_pf_id);
qed_memcpy_from(p_hwfn, p_ptt, &xstats, xstats_addr, sizeof(xstats));
p_stats->iscsi_tx_go_to_slow_start_event_cnt =
HILO_64_REGPAIR(xstats.iscsi_tx_go_to_slow_start_event_cnt);
p_stats->iscsi_tx_fast_retransmit_event_cnt =
HILO_64_REGPAIR(xstats.iscsi_tx_fast_retransmit_event_cnt);
}
static void _qed_iscsi_get_ystats(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_iscsi_stats *p_stats)
{
struct ystorm_iscsi_stats_drv ystats;
u32 ystats_addr;
memset(&ystats, 0, sizeof(ystats));
ystats_addr = BAR0_MAP_REG_YSDM_RAM +
YSTORM_ISCSI_TX_STATS_OFFSET(p_hwfn->rel_pf_id);
qed_memcpy_from(p_hwfn, p_ptt, &ystats, ystats_addr, sizeof(ystats));
p_stats->iscsi_tx_data_pdu_cnt =
HILO_64_REGPAIR(ystats.iscsi_tx_data_pdu_cnt);
p_stats->iscsi_tx_r2t_pdu_cnt =
HILO_64_REGPAIR(ystats.iscsi_tx_r2t_pdu_cnt);
p_stats->iscsi_tx_total_pdu_cnt =
HILO_64_REGPAIR(ystats.iscsi_tx_total_pdu_cnt);
}
static void _qed_iscsi_get_pstats(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_iscsi_stats *p_stats)
{
struct pstorm_iscsi_stats_drv pstats;
u32 pstats_addr;
memset(&pstats, 0, sizeof(pstats));
pstats_addr = BAR0_MAP_REG_PSDM_RAM +
PSTORM_ISCSI_TX_STATS_OFFSET(p_hwfn->rel_pf_id);
qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));
p_stats->iscsi_tx_bytes_cnt =
HILO_64_REGPAIR(pstats.iscsi_tx_bytes_cnt);
p_stats->iscsi_tx_packet_cnt =
HILO_64_REGPAIR(pstats.iscsi_tx_packet_cnt);
}
static int qed_iscsi_get_stats(struct qed_hwfn *p_hwfn,
struct qed_iscsi_stats *stats)
{
struct qed_ptt *p_ptt;
memset(stats, 0, sizeof(*stats));
p_ptt = qed_ptt_acquire(p_hwfn);
if (!p_ptt) {
DP_ERR(p_hwfn, "Failed to acquire ptt\n");
return -EAGAIN;
}
_qed_iscsi_get_tstats(p_hwfn, p_ptt, stats);
_qed_iscsi_get_mstats(p_hwfn, p_ptt, stats);
_qed_iscsi_get_ustats(p_hwfn, p_ptt, stats);
_qed_iscsi_get_xstats(p_hwfn, p_ptt, stats);
_qed_iscsi_get_ystats(p_hwfn, p_ptt, stats);
_qed_iscsi_get_pstats(p_hwfn, p_ptt, stats);
qed_ptt_release(p_hwfn, p_ptt);
return 0;
}
struct qed_hash_iscsi_con {
struct hlist_node node;
struct qed_iscsi_conn *con;
};
static int qed_fill_iscsi_dev_info(struct qed_dev *cdev,
struct qed_dev_iscsi_info *info)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
int rc;
memset(info, 0, sizeof(*info));
rc = qed_fill_dev_info(cdev, &info->common);
info->primary_dbq_rq_addr =
qed_iscsi_get_primary_bdq_prod(hwfn, BDQ_ID_RQ);
info->secondary_bdq_rq_addr =
qed_iscsi_get_secondary_bdq_prod(hwfn, BDQ_ID_RQ);
return rc;
}
static void qed_register_iscsi_ops(struct qed_dev *cdev,
struct qed_iscsi_cb_ops *ops, void *cookie)
{
cdev->protocol_ops.iscsi = ops;
cdev->ops_cookie = cookie;
}
static struct qed_hash_iscsi_con *qed_iscsi_get_hash(struct qed_dev *cdev,
u32 handle)
{
struct qed_hash_iscsi_con *hash_con = NULL;
if (!(cdev->flags & QED_FLAG_STORAGE_STARTED))
return NULL;
hash_for_each_possible(cdev->connections, hash_con, node, handle) {
if (hash_con->con->icid == handle)
break;
}
if (!hash_con || (hash_con->con->icid != handle))
return NULL;
return hash_con;
}
static int qed_iscsi_stop(struct qed_dev *cdev)
{
int rc;
if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) {
DP_NOTICE(cdev, "iscsi already stopped\n");
return 0;
}
if (!hash_empty(cdev->connections)) {
DP_NOTICE(cdev,
"Can't stop iscsi - not all connections were returned\n");
return -EINVAL;
}
/* Stop the iscsi */
rc = qed_sp_iscsi_func_stop(QED_LEADING_HWFN(cdev),
QED_SPQ_MODE_EBLOCK, NULL);
cdev->flags &= ~QED_FLAG_STORAGE_STARTED;
return rc;
}
static int qed_iscsi_start(struct qed_dev *cdev,
struct qed_iscsi_tid *tasks,
void *event_context,
iscsi_event_cb_t async_event_cb)
{
int rc;
struct qed_tid_mem *tid_info;
if (cdev->flags & QED_FLAG_STORAGE_STARTED) {
DP_NOTICE(cdev, "iscsi already started;\n");
return 0;
}
rc = qed_sp_iscsi_func_start(QED_LEADING_HWFN(cdev),
QED_SPQ_MODE_EBLOCK, NULL, event_context,
async_event_cb);
if (rc) {
DP_NOTICE(cdev, "Failed to start iscsi\n");
return rc;
}
cdev->flags |= QED_FLAG_STORAGE_STARTED;
hash_init(cdev->connections);
if (!tasks)
return 0;
tid_info = kzalloc(sizeof(*tid_info), GFP_KERNEL);
if (!tid_info) {
qed_iscsi_stop(cdev);
return -ENOMEM;
}
rc = qed_cxt_get_tid_mem_info(QED_LEADING_HWFN(cdev),
tid_info);
if (rc) {
DP_NOTICE(cdev, "Failed to gather task information\n");
qed_iscsi_stop(cdev);
kfree(tid_info);
return rc;
}
/* Fill task information */
tasks->size = tid_info->tid_size;
tasks->num_tids_per_block = tid_info->num_tids_per_block;
memcpy(tasks->blocks, tid_info->blocks,
MAX_TID_BLOCKS_ISCSI * sizeof(u8 *));
kfree(tid_info);
return 0;
}
static int qed_iscsi_acquire_conn(struct qed_dev *cdev,
u32 *handle,
u32 *fw_cid, void __iomem **p_doorbell)
{
struct qed_hash_iscsi_con *hash_con;
int rc;
/* Allocate a hashed connection */
hash_con = kzalloc(sizeof(*hash_con), GFP_ATOMIC);
if (!hash_con)
return -ENOMEM;
/* Acquire the connection */
rc = qed_iscsi_acquire_connection(QED_LEADING_HWFN(cdev), NULL,
&hash_con->con);
if (rc) {
DP_NOTICE(cdev, "Failed to acquire Connection\n");
kfree(hash_con);
return rc;
}
/* Added the connection to hash table */
*handle = hash_con->con->icid;
*fw_cid = hash_con->con->fw_cid;
hash_add(cdev->connections, &hash_con->node, *handle);
if (p_doorbell)
*p_doorbell = qed_iscsi_get_db_addr(QED_LEADING_HWFN(cdev),
*handle);
return 0;
}
static int qed_iscsi_release_conn(struct qed_dev *cdev, u32 handle)
{
struct qed_hash_iscsi_con *hash_con;
hash_con = qed_iscsi_get_hash(cdev, handle);
if (!hash_con) {
DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
handle);
return -EINVAL;
}
hlist_del(&hash_con->node);
qed_iscsi_release_connection(QED_LEADING_HWFN(cdev), hash_con->con);
kfree(hash_con);
return 0;
}
static int qed_iscsi_offload_conn(struct qed_dev *cdev,
u32 handle,
struct qed_iscsi_params_offload *conn_info)
{
struct qed_hash_iscsi_con *hash_con;
struct qed_iscsi_conn *con;
hash_con = qed_iscsi_get_hash(cdev, handle);
if (!hash_con) {
DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
handle);
return -EINVAL;
}
/* Update the connection with information from the params */
con = hash_con->con;
ether_addr_copy(con->local_mac, conn_info->src.mac);
ether_addr_copy(con->remote_mac, conn_info->dst.mac);
memcpy(con->local_ip, conn_info->src.ip, sizeof(con->local_ip));
memcpy(con->remote_ip, conn_info->dst.ip, sizeof(con->remote_ip));
con->local_port = conn_info->src.port;
con->remote_port = conn_info->dst.port;
con->layer_code = conn_info->layer_code;
con->sq_pbl_addr = conn_info->sq_pbl_addr;
con->initial_ack = conn_info->initial_ack;
con->vlan_id = conn_info->vlan_id;
con->tcp_flags = conn_info->tcp_flags;
con->ip_version = conn_info->ip_version;
con->default_cq = conn_info->default_cq;
con->ka_max_probe_cnt = conn_info->ka_max_probe_cnt;
con->dup_ack_theshold = conn_info->dup_ack_theshold;
con->rcv_next = conn_info->rcv_next;
con->snd_una = conn_info->snd_una;
con->snd_next = conn_info->snd_next;
con->snd_max = conn_info->snd_max;
con->snd_wnd = conn_info->snd_wnd;
con->rcv_wnd = conn_info->rcv_wnd;
con->snd_wl1 = conn_info->snd_wl1;
con->cwnd = conn_info->cwnd;
con->ss_thresh = conn_info->ss_thresh;
con->srtt = conn_info->srtt;
con->rtt_var = conn_info->rtt_var;
con->ts_time = conn_info->ts_time;
con->ts_recent = conn_info->ts_recent;
con->ts_recent_age = conn_info->ts_recent_age;
con->total_rt = conn_info->total_rt;
con->ka_timeout_delta = conn_info->ka_timeout_delta;
con->rt_timeout_delta = conn_info->rt_timeout_delta;
con->dup_ack_cnt = conn_info->dup_ack_cnt;
con->snd_wnd_probe_cnt = conn_info->snd_wnd_probe_cnt;
con->ka_probe_cnt = conn_info->ka_probe_cnt;
con->rt_cnt = conn_info->rt_cnt;
con->flow_label = conn_info->flow_label;
con->ka_timeout = conn_info->ka_timeout;
con->ka_interval = conn_info->ka_interval;
con->max_rt_time = conn_info->max_rt_time;
con->initial_rcv_wnd = conn_info->initial_rcv_wnd;
con->ttl = conn_info->ttl;
con->tos_or_tc = conn_info->tos_or_tc;
con->remote_port = conn_info->remote_port;
con->local_port = conn_info->local_port;
con->mss = conn_info->mss;
con->snd_wnd_scale = conn_info->snd_wnd_scale;
con->rcv_wnd_scale = conn_info->rcv_wnd_scale;
con->ts_ticks_per_second = conn_info->ts_ticks_per_second;
con->da_timeout_value = conn_info->da_timeout_value;
con->ack_frequency = conn_info->ack_frequency;
/* Set default values on other connection fields */
con->offl_flags = 0x1;
return qed_sp_iscsi_conn_offload(QED_LEADING_HWFN(cdev), con,
QED_SPQ_MODE_EBLOCK, NULL);
}
static int qed_iscsi_update_conn(struct qed_dev *cdev,
u32 handle,
struct qed_iscsi_params_update *conn_info)
{
struct qed_hash_iscsi_con *hash_con;
struct qed_iscsi_conn *con;
hash_con = qed_iscsi_get_hash(cdev, handle);
if (!hash_con) {
DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
handle);
return -EINVAL;
}
/* Update the connection with information from the params */
con = hash_con->con;
con->update_flag = conn_info->update_flag;
con->max_seq_size = conn_info->max_seq_size;
con->max_recv_pdu_length = conn_info->max_recv_pdu_length;
con->max_send_pdu_length = conn_info->max_send_pdu_length;
con->first_seq_length = conn_info->first_seq_length;
con->exp_stat_sn = conn_info->exp_stat_sn;
return qed_sp_iscsi_conn_update(QED_LEADING_HWFN(cdev), con,
QED_SPQ_MODE_EBLOCK, NULL);
}
static int qed_iscsi_clear_conn_sq(struct qed_dev *cdev, u32 handle)
{
struct qed_hash_iscsi_con *hash_con;
hash_con = qed_iscsi_get_hash(cdev, handle);
if (!hash_con) {
DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
handle);
return -EINVAL;
}
return qed_sp_iscsi_conn_clear_sq(QED_LEADING_HWFN(cdev),
hash_con->con,
QED_SPQ_MODE_EBLOCK, NULL);
}
static int qed_iscsi_destroy_conn(struct qed_dev *cdev,
u32 handle, u8 abrt_conn)
{
struct qed_hash_iscsi_con *hash_con;
hash_con = qed_iscsi_get_hash(cdev, handle);
if (!hash_con) {
DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
handle);
return -EINVAL;
}
hash_con->con->abortive_dsconnect = abrt_conn;
return qed_sp_iscsi_conn_terminate(QED_LEADING_HWFN(cdev),
hash_con->con,
QED_SPQ_MODE_EBLOCK, NULL);
}
static int qed_iscsi_stats(struct qed_dev *cdev, struct qed_iscsi_stats *stats)
{
return qed_iscsi_get_stats(QED_LEADING_HWFN(cdev), stats);
}
static const struct qed_iscsi_ops qed_iscsi_ops_pass = {
.common = &qed_common_ops_pass,
.ll2 = &qed_ll2_ops_pass,
.fill_dev_info = &qed_fill_iscsi_dev_info,
.register_ops = &qed_register_iscsi_ops,
.start = &qed_iscsi_start,
.stop = &qed_iscsi_stop,
.acquire_conn = &qed_iscsi_acquire_conn,
.release_conn = &qed_iscsi_release_conn,
.offload_conn = &qed_iscsi_offload_conn,
.update_conn = &qed_iscsi_update_conn,
.destroy_conn = &qed_iscsi_destroy_conn,
.clear_sq = &qed_iscsi_clear_conn_sq,
.get_stats = &qed_iscsi_stats,
};
const struct qed_iscsi_ops *qed_get_iscsi_ops()
{
return &qed_iscsi_ops_pass;
}
EXPORT_SYMBOL(qed_get_iscsi_ops);
void qed_put_iscsi_ops(void)
{
}
EXPORT_SYMBOL(qed_put_iscsi_ops);
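
For orientation, the following is a minimal sketch (not part of this patch) of how a dependent protocol module such as qedi might consume the ops table exported above. Only qed_get_iscsi_ops() and the callback signatures come from this file; the qedi_example_attach() name and its flow are hypothetical, and the sketch assumes the prototype is exposed through qed_iscsi_if.h.

/* Hypothetical consumer sketch; everything except qed_get_iscsi_ops()
 * and the qed_iscsi_ops callbacks is illustrative only.
 */
#include <linux/qed/qed_iscsi_if.h>

static const struct qed_iscsi_ops *qed_ops;

static int qedi_example_attach(struct qed_dev *cdev)
{
	struct qed_dev_iscsi_info dev_info;
	void __iomem *doorbell;
	u32 handle, fw_cid;
	int rc;

	qed_ops = qed_get_iscsi_ops();
	if (!qed_ops)
		return -ENODEV;

	/* Query common device info and the BDQ producer addresses. */
	rc = qed_ops->fill_dev_info(cdev, &dev_info);
	if (rc)
		return rc;

	/* Start the iSCSI PF context; a NULL task pointer skips TID setup. */
	rc = qed_ops->start(cdev, NULL, NULL, NULL);
	if (rc)
		return rc;

	/* Acquire one offloaded connection and its doorbell address. */
	rc = qed_ops->acquire_conn(cdev, &handle, &fw_cid, &doorbell);
	if (rc)
		qed_ops->stop(cdev);

	return rc;
}

A real consumer would typically also call register_ops() to install its qed_iscsi_cb_ops and pass a task array plus an event callback to start().
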
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#ifndef _QED_ISCSI_H
#define _QED_ISCSI_H
#include <linux/types.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/qed/tcp_common.h>
#include <linux/qed/qed_iscsi_if.h>
#include <linux/qed/qed_chain.h>
#include "qed.h"
#include "qed_hsi.h"
#include "qed_mcp.h"
#include "qed_sp.h"
struct qed_iscsi_info {
spinlock_t lock; /* Connection resources. */
struct list_head free_list;
u16 max_num_outstanding_tasks;
void *event_context;
iscsi_event_cb_t event_cb;
};
#ifdef CONFIG_QED_LL2
extern const struct qed_ll2_ops qed_ll2_ops_pass;
#endif
#if IS_ENABLED(CONFIG_QED_ISCSI)
struct qed_iscsi_info *qed_iscsi_alloc(struct qed_hwfn *p_hwfn);
void qed_iscsi_setup(struct qed_hwfn *p_hwfn,
struct qed_iscsi_info *p_iscsi_info);
void qed_iscsi_free(struct qed_hwfn *p_hwfn,
struct qed_iscsi_info *p_iscsi_info);
#else /* IS_ENABLED(CONFIG_QED_ISCSI) */
static inline struct qed_iscsi_info *qed_iscsi_alloc(
struct qed_hwfn *p_hwfn) { return NULL; }
static inline void qed_iscsi_setup(struct qed_hwfn *p_hwfn,
struct qed_iscsi_info *p_iscsi_info) {}
static inline void qed_iscsi_free(struct qed_hwfn *p_hwfn,
struct qed_iscsi_info *p_iscsi_info) {}
#endif /* IS_ENABLED(CONFIG_QED_ISCSI) */
#endif
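
The header pairs real prototypes with inline no-op stubs under IS_ENABLED(CONFIG_QED_ISCSI), which is what lets the common code in qed_dev.c call qed_iscsi_alloc()/setup()/free() without #ifdefs. A self-contained illustration of that pattern follows; the CONFIG_EXAMPLE_FEATURE symbol and example_* names are hypothetical and not part of this patch.

/* Stub pattern sketch: when the feature is compiled out, callers still
 * build because the inline stubs compile away to nothing.
 */
#include <linux/kconfig.h>

struct example_ctx;

#if IS_ENABLED(CONFIG_EXAMPLE_FEATURE)
struct example_ctx *example_alloc(void);
void example_free(struct example_ctx *ctx);
#else
static inline struct example_ctx *example_alloc(void) { return NULL; }
static inline void example_free(struct example_ctx *ctx) {}
#endif
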
@@ -36,6 +36,7 @@
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_roce.h"
@@ -296,13 +297,22 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
list_del(&p_pkt->list_entry);
b_last_packet = list_empty(&p_tx->active_descq);
list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
struct qed_ooo_buffer *p_buffer;
p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
p_buffer);
} else {
p_tx->cur_completing_packet = *p_pkt;
p_tx->cur_completing_bd_idx = 1;
b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
tx_frag = p_pkt->bds_set[0].tx_frag;
if (p_ll2_conn->gsi_enable)
qed_ll2b_release_tx_gsi_packet(p_hwfn,
p_ll2_conn->my_id,
p_pkt->cookie,
tx_frag,
b_last_frag,
@@ -314,7 +324,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
tx_frag,
b_last_frag,
b_last_packet);
}
}
}
@@ -540,13 +550,458 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
struct qed_ooo_buffer *p_buffer;
p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
p_buffer);
} else {
rx_buf_addr = p_pkt->rx_buf_addr;
cookie = p_pkt->cookie;
b_last = list_empty(&p_rx->active_descq);
}
}
}
#if IS_ENABLED(CONFIG_QED_ISCSI)
static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
{
u8 bd_flags = 0;
if (GET_FIELD(parse_flags, PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_VLAN_INSERTION, 1);
return bd_flags;
}
static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn)
{
struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
u16 packet_length = 0, parse_flags = 0, vlan = 0;
struct qed_ll2_rx_packet *p_pkt = NULL;
u32 num_ooo_add_to_peninsula = 0, cid;
union core_rx_cqe_union *cqe = NULL;
u16 cq_new_idx = 0, cq_old_idx = 0;
struct qed_ooo_buffer *p_buffer;
struct ooo_opaque *iscsi_ooo;
u8 placement_offset = 0;
u8 cqe_type;
cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
if (cq_new_idx == cq_old_idx)
return 0;
while (cq_new_idx != cq_old_idx) {
struct core_rx_fast_path_cqe *p_cqe_fp;
cqe = qed_chain_consume(&p_rx->rcq_chain);
cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
cqe_type = cqe->rx_cqe_sp.type;
if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
DP_NOTICE(p_hwfn,
"Got a non-regular LB LL2 completion [type 0x%02x]\n",
cqe_type);
return -EINVAL;
}
p_cqe_fp = &cqe->rx_cqe_fp;
placement_offset = p_cqe_fp->placement_offset;
parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags);
packet_length = le16_to_cpu(p_cqe_fp->packet_length);
vlan = le16_to_cpu(p_cqe_fp->vlan);
iscsi_ooo = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info,
iscsi_ooo);
cid = le32_to_cpu(iscsi_ooo->cid);
/* Process delete isle first */
if (iscsi_ooo->drop_size)
qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid,
iscsi_ooo->drop_isle,
iscsi_ooo->drop_size);
if (iscsi_ooo->ooo_opcode == TCP_EVENT_NOP)
continue;
/* Now process create/add/join isles */
if (list_empty(&p_rx->active_descq)) {
DP_NOTICE(p_hwfn,
"LL2 OOO RX chain has no submitted buffers\n"
);
return -EIO;
}
p_pkt = list_first_entry(&p_rx->active_descq,
struct qed_ll2_rx_packet, list_entry);
if ((iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE) ||
(iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT) ||
(iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT) ||
(iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_PEN) ||
(iscsi_ooo->ooo_opcode == TCP_EVENT_JOIN)) {
if (!p_pkt) {
DP_NOTICE(p_hwfn,
"LL2 OOO RX packet is not valid\n");
return -EIO;
}
list_del(&p_pkt->list_entry);
p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
p_buffer->packet_length = packet_length;
p_buffer->parse_flags = parse_flags;
p_buffer->vlan = vlan;
p_buffer->placement_offset = placement_offset;
qed_chain_consume(&p_rx->rxq_chain);
list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
switch (iscsi_ooo->ooo_opcode) {
case TCP_EVENT_ADD_NEW_ISLE:
qed_ooo_add_new_isle(p_hwfn,
p_hwfn->p_ooo_info,
cid,
iscsi_ooo->ooo_isle,
p_buffer);
break;
case TCP_EVENT_ADD_ISLE_RIGHT:
qed_ooo_add_new_buffer(p_hwfn,
p_hwfn->p_ooo_info,
cid,
iscsi_ooo->ooo_isle,
p_buffer,
QED_OOO_RIGHT_BUF);
break;
case TCP_EVENT_ADD_ISLE_LEFT:
qed_ooo_add_new_buffer(p_hwfn,
p_hwfn->p_ooo_info,
cid,
iscsi_ooo->ooo_isle,
p_buffer,
QED_OOO_LEFT_BUF);
break;
case TCP_EVENT_JOIN:
qed_ooo_add_new_buffer(p_hwfn,
p_hwfn->p_ooo_info,
cid,
iscsi_ooo->ooo_isle +
1,
p_buffer,
QED_OOO_LEFT_BUF);
qed_ooo_join_isles(p_hwfn,
p_hwfn->p_ooo_info,
cid, iscsi_ooo->ooo_isle);
break;
case TCP_EVENT_ADD_PEN:
num_ooo_add_to_peninsula++;
qed_ooo_put_ready_buffer(p_hwfn,
p_hwfn->p_ooo_info,
p_buffer, true);
break;
}
} else {
DP_NOTICE(p_hwfn,
"Unexpected event (%d) TX OOO completion\n",
iscsi_ooo->ooo_opcode);
}
}
return 0;
}
static void
qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn)
{
struct qed_ooo_buffer *p_buffer;
int rc;
u16 l4_hdr_offset_w;
dma_addr_t first_frag;
u16 parse_flags;
u8 bd_flags;
/* Submit Tx buffers here */
while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn,
p_hwfn->p_ooo_info))) {
l4_hdr_offset_w = 0;
bd_flags = 0;
first_frag = p_buffer->rx_buffer_phys_addr +
p_buffer->placement_offset;
parse_flags = p_buffer->parse_flags;
bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags);
SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_FORCE_VLAN_MODE, 1);
SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_L4_PROTOCOL, 1);
rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1,
p_buffer->vlan, bd_flags,
l4_hdr_offset_w,
p_ll2_conn->tx_dest, 0,
first_frag,
p_buffer->packet_length,
p_buffer, true);
if (rc) {
qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info,
p_buffer, false);
break;
}
}
}
static void
qed_ooo_submit_rx_buffers(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn)
{
struct qed_ooo_buffer *p_buffer;
int rc;
while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
p_hwfn->p_ooo_info))) {
rc = qed_ll2_post_rx_buffer(p_hwfn,
p_ll2_conn->my_id,
p_buffer->rx_buffer_phys_addr,
0, p_buffer, true);
if (rc) {
qed_ooo_put_free_buffer(p_hwfn,
p_hwfn->p_ooo_info, p_buffer);
break;
}
}
}
static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
int rc;
rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn);
if (rc)
return rc;
qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);
return 0;
}
static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
struct qed_ll2_tx_packet *p_pkt = NULL;
struct qed_ooo_buffer *p_buffer;
bool b_dont_submit_rx = false;
u16 new_idx = 0, num_bds = 0;
int rc;
new_idx = le16_to_cpu(*p_tx->p_fw_cons);
num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
if (!num_bds)
return 0;
while (num_bds) {
if (list_empty(&p_tx->active_descq))
return -EINVAL;
p_pkt = list_first_entry(&p_tx->active_descq,
struct qed_ll2_tx_packet, list_entry);
if (!p_pkt)
return -EINVAL;
if (p_pkt->bd_used != 1) {
DP_NOTICE(p_hwfn,
"Unexpectedly many BDs(%d) in TX OOO completion\n",
p_pkt->bd_used);
return -EINVAL;
}
list_del(&p_pkt->list_entry);
num_bds--;
p_tx->bds_idx++;
qed_chain_consume(&p_tx->txq_chain);
p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
if (b_dont_submit_rx) {
qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
p_buffer);
continue;
}
rc = qed_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id,
p_buffer->rx_buffer_phys_addr, 0,
p_buffer, true);
if (rc != 0) {
qed_ooo_put_free_buffer(p_hwfn,
p_hwfn->p_ooo_info, p_buffer);
b_dont_submit_rx = true;
}
}
qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);
return 0;
}
static int
qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_info,
u16 rx_num_ooo_buffers, u16 mtu)
{
struct qed_ooo_buffer *p_buf = NULL;
void *p_virt;
u16 buf_idx;
int rc = 0;
if (p_ll2_info->conn_type != QED_LL2_TYPE_ISCSI_OOO)
return rc;
if (!rx_num_ooo_buffers)
return -EINVAL;
for (buf_idx = 0; buf_idx < rx_num_ooo_buffers; buf_idx++) {
p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL);
if (!p_buf) {
rc = -ENOMEM;
goto out;
}
p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE;
p_buf->rx_buffer_size = (p_buf->rx_buffer_size +
ETH_CACHE_LINE_SIZE - 1) &
~(ETH_CACHE_LINE_SIZE - 1);
p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
p_buf->rx_buffer_size,
&p_buf->rx_buffer_phys_addr,
GFP_KERNEL);
if (!p_virt) {
kfree(p_buf);
rc = -ENOMEM;
goto out;
}
p_buf->rx_buffer_virt_addr = p_virt;
qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf);
}
DP_VERBOSE(p_hwfn, QED_MSG_LL2,
"Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
rx_num_ooo_buffers, p_buf->rx_buffer_size);
out:
return rc;
}
static void
qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn)
{
if (p_ll2_conn->conn_type != QED_LL2_TYPE_ISCSI_OOO)
return;
qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
}
static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn)
{
struct qed_ooo_buffer *p_buffer;
if (p_ll2_conn->conn_type != QED_LL2_TYPE_ISCSI_OOO)
return;
qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
p_hwfn->p_ooo_info))) {
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
p_buffer->rx_buffer_size,
p_buffer->rx_buffer_virt_addr,
p_buffer->rx_buffer_phys_addr);
kfree(p_buffer);
}
}
static void qed_ll2_stop_ooo(struct qed_dev *cdev)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
DP_VERBOSE(cdev, QED_MSG_STORAGE, "Stopping LL2 OOO queue [%02x]\n",
*handle);
qed_ll2_terminate_connection(hwfn, *handle);
qed_ll2_release_connection(hwfn, *handle);
*handle = QED_LL2_UNUSED_HANDLE;
}
static int qed_ll2_start_ooo(struct qed_dev *cdev,
struct qed_ll2_params *params)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
struct qed_ll2_info *ll2_info;
int rc;
ll2_info = kzalloc(sizeof(*ll2_info), GFP_KERNEL);
if (!ll2_info)
return -ENOMEM;
ll2_info->conn_type = QED_LL2_TYPE_ISCSI_OOO;
ll2_info->mtu = params->mtu;
ll2_info->rx_drop_ttl0_flg = params->drop_ttl0_packets;
ll2_info->rx_vlan_removal_en = params->rx_vlan_stripping;
ll2_info->tx_tc = OOO_LB_TC;
ll2_info->tx_dest = CORE_TX_DEST_LB;
rc = qed_ll2_acquire_connection(hwfn, ll2_info,
QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
handle);
kfree(ll2_info);
if (rc) {
DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
goto out;
}
rc = qed_ll2_establish_connection(hwfn, *handle);
if (rc) {
DP_INFO(cdev, "Failed to establish LL2 OOO connection\n");
goto fail;
}
return 0;
fail:
qed_ll2_release_connection(hwfn, *handle);
out:
*handle = QED_LL2_UNUSED_HANDLE;
return rc;
}
#else /* IS_ENABLED(CONFIG_QED_ISCSI) */
static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn,
void *p_cookie) { return -EINVAL; }
static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn,
void *p_cookie) { return -EINVAL; }
static inline int
qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_info,
u16 rx_num_ooo_buffers, u16 mtu) { return 0; }
static inline void
qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn) { return; }
static inline void
qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn) { return; }
static inline void qed_ll2_stop_ooo(struct qed_dev *cdev) { return; }
static inline int qed_ll2_start_ooo(struct qed_dev *cdev,
struct qed_ll2_params *params)
{ return -EINVAL; }
#endif /* IS_ENABLED(CONFIG_QED_ISCSI) */
static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn,
u8 action_on_error)
@@ -588,7 +1043,8 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
p_ramrod->drop_ttl0_flg = p_ll2_conn->rx_drop_ttl0_flg;
p_ramrod->inner_vlan_removal_en = p_ll2_conn->rx_vlan_removal_en;
p_ramrod->queue_id = p_ll2_conn->queue_id;
p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0 : 1;
if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) &&
p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE)) {
@@ -619,6 +1075,11 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
return 0;
if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO)
p_ll2_conn->tx_stats_en = 0;
else
p_ll2_conn->tx_stats_en = 1;
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
init_data.cid = p_ll2_conn->cid;
...@@ -636,7 +1097,6 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
p_ramrod->sb_index = p_tx->tx_sb_index;
p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu);
p_ll2_conn->tx_stats_en = 1;
p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
p_ramrod->stats_id = p_ll2_conn->tx_stats_id;
...@@ -860,9 +1320,19 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
if (rc)
goto q_allocate_fail;
rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info,
rx_num_desc * 2, p_params->mtu);
if (rc)
goto q_allocate_fail;
/* Register callbacks for the Rx/Tx queues */
if (p_params->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
comp_rx_cb = qed_ll2_lb_rxq_completion;
comp_tx_cb = qed_ll2_lb_txq_completion;
} else {
comp_rx_cb = qed_ll2_rxq_completion;
comp_tx_cb = qed_ll2_txq_completion;
}
if (rx_num_desc) {
qed_int_register_cb(p_hwfn, comp_rx_cb,
...@@ -975,6 +1445,8 @@ int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
qed_wr(p_hwfn, p_hwfn->p_main_ptt, PRS_REG_USE_LIGHT_L2, 1);
qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);
return rc;
}
...@@ -1213,6 +1685,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
u16 vlan,
u8 bd_flags,
u16 l4_hdr_offset_w,
enum qed_ll2_tx_dest e_tx_dest,
enum qed_ll2_roce_flavor_type qed_roce_flavor,
dma_addr_t first_frag,
u16 first_frag_len, void *cookie, u8 notify_fw)
...@@ -1222,6 +1695,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
enum core_roce_flavor_type roce_flavor;
struct qed_ll2_tx_queue *p_tx;
struct qed_chain *p_tx_chain;
enum core_tx_dest tx_dest;
unsigned long flags;
int rc = 0;
...@@ -1252,6 +1726,8 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
goto out;
}
tx_dest = e_tx_dest == QED_LL2_TX_DEST_NW ? CORE_TX_DEST_NW :
CORE_TX_DEST_LB;
if (qed_roce_flavor == QED_LL2_ROCE) {
roce_flavor = CORE_ROCE;
} else if (qed_roce_flavor == QED_LL2_RROCE) {
...@@ -1266,7 +1742,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
num_of_bds, first_frag,
first_frag_len, cookie, notify_fw);
qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp,
num_of_bds, CORE_TX_DEST_NW,
num_of_bds, tx_dest,
vlan, bd_flags, l4_hdr_offset_w,
roce_flavor,
first_frag, first_frag_len);
...@@ -1341,6 +1817,9 @@ int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
qed_ll2_rxq_flush(p_hwfn, connection_handle);
}
if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO)
qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
return rc;
}
...@@ -1371,6 +1850,8 @@ void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);
qed_ll2_release_connection_ooo(p_hwfn, p_ll2_conn);
mutex_lock(&p_ll2_conn->mutex);
p_ll2_conn->b_active = false;
mutex_unlock(&p_ll2_conn->mutex);
...@@ -1517,6 +1998,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
enum qed_ll2_conn_type conn_type;
struct qed_ptt *p_ptt;
int rc, i;
u8 gsi_enable = 1;
/* Initialize LL2 locks & lists */
INIT_LIST_HEAD(&cdev->ll2->list);
...@@ -1548,6 +2030,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
switch (QED_LEADING_HWFN(cdev)->hw_info.personality) {
case QED_PCI_ISCSI:
conn_type = QED_LL2_TYPE_ISCSI;
gsi_enable = 0;
break;
case QED_PCI_ETH_ROCE:
conn_type = QED_LL2_TYPE_ROCE;
...@@ -1564,7 +2047,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
ll2_info.tx_tc = 0;
ll2_info.tx_dest = CORE_TX_DEST_NW;
ll2_info.gsi_enable = 1;
ll2_info.gsi_enable = gsi_enable;
rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_info,
QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
...@@ -1611,6 +2094,17 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
goto release_terminate;
}
if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) {
DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
rc = qed_ll2_start_ooo(cdev, params);
if (rc) {
DP_INFO(cdev,
"Failed to initialize the OOO LL2 queue\n");
goto release_terminate;
}
}
p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
if (!p_ptt) {
DP_INFO(cdev, "Failed to acquire PTT\n");
...@@ -1660,6 +2154,10 @@ static int qed_ll2_stop(struct qed_dev *cdev)
qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
eth_zero_addr(cdev->ll2_mac_address);
if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable)
qed_ll2_stop_ooo(cdev);
rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
cdev->ll2->handle);
if (rc)
...@@ -1714,7 +2212,8 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev),
cdev->ll2->handle,
1 + skb_shinfo(skb)->nr_frags,
vlan, flags, 0, 0 /* RoCE FLAVOR */,
vlan, flags, 0, QED_LL2_TX_DEST_NW,
0 /* RoCE FLAVOR */,
mapping, skb->len, skb, 1);
if (rc)
goto err;
......
...@@ -41,6 +41,12 @@ enum qed_ll2_conn_type {
MAX_QED_LL2_RX_CONN_TYPE
};
enum qed_ll2_tx_dest {
QED_LL2_TX_DEST_NW, /* Light L2 TX Destination to the Network */
QED_LL2_TX_DEST_LB, /* Light L2 TX Destination to the Loopback */
QED_LL2_TX_DEST_MAX
};
struct qed_ll2_rx_packet {
struct list_head list_entry;
struct core_rx_bd_with_buff_len *rxq_bd;
...@@ -192,6 +198,8 @@ int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
* @param l4_hdr_offset_w L4 Header Offset from start of packet
* (in words). This is needed if both l4_csum
* and ipv6_ext are set
* @param e_tx_dest indicates if the packet is to be transmitted via
* loopback or to the network
* @param first_frag
* @param first_frag_len
* @param cookie
...@@ -206,6 +214,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
u16 vlan,
u8 bd_flags,
u16 l4_hdr_offset_w,
enum qed_ll2_tx_dest e_tx_dest,
enum qed_ll2_roce_flavor_type qed_roce_flavor,
dma_addr_t first_frag,
u16 first_frag_len, void *cookie, u8 notify_fw);
......
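For reference, a minimal caller sketch (not part of the patch) of the extended qed_ll2_prepare_tx_packet() prototype above. It mirrors the updated call made from qed_ll2_start_xmit(); the wrapper function name and the 'mapping'/'skb' arguments are hypothetical. Loopback traffic, such as the iSCSI OOO queue, would pass QED_LL2_TX_DEST_LB instead of QED_LL2_TX_DEST_NW.

static int example_ll2_xmit_one(struct qed_dev *cdev,
				struct sk_buff *skb, dma_addr_t mapping)
{
	/* Single-BD packet destined to the network port */
	return qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev),
					 cdev->ll2->handle,
					 1 /* num_of_bds */,
					 0 /* vlan */,
					 0 /* bd_flags */,
					 0 /* l4_hdr_offset_w */,
					 QED_LL2_TX_DEST_NW,
					 0 /* RoCE flavor */,
					 mapping, skb->len, skb,
					 1 /* notify_fw */);
}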
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_iscsi.h"
#include "qed_ll2.h"
#include "qed_ooo.h"
static struct qed_ooo_archipelago
*qed_ooo_seek_archipelago(struct qed_hwfn *p_hwfn,
struct qed_ooo_info
*p_ooo_info,
u32 cid)
{
struct qed_ooo_archipelago *p_archipelago = NULL;
list_for_each_entry(p_archipelago,
&p_ooo_info->archipelagos_list, list_entry) {
if (p_archipelago->cid == cid)
return p_archipelago;
}
return NULL;
}
static struct qed_ooo_isle *qed_ooo_seek_isle(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
u32 cid, u8 isle)
{
struct qed_ooo_archipelago *p_archipelago = NULL;
struct qed_ooo_isle *p_isle = NULL;
u8 the_num_of_isle = 1;
p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid);
if (!p_archipelago) {
DP_NOTICE(p_hwfn,
"Connection %d is not found in OOO list\n", cid);
return NULL;
}
list_for_each_entry(p_isle, &p_archipelago->isles_list, list_entry) {
if (the_num_of_isle == isle)
return p_isle;
the_num_of_isle++;
}
return NULL;
}
void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
struct ooo_opaque *p_cqe)
{
struct qed_ooo_history *p_history = &p_ooo_info->ooo_history;
if (p_history->head_idx == p_history->num_of_cqes)
p_history->head_idx = 0;
p_history->p_cqes[p_history->head_idx] = *p_cqe;
p_history->head_idx++;
}
struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_ooo_info *p_ooo_info;
u16 max_num_archipelagos = 0;
u16 max_num_isles = 0;
u32 i;
if (p_hwfn->hw_info.personality != QED_PCI_ISCSI) {
DP_NOTICE(p_hwfn,
"Failed to allocate qed_ooo_info: unknown personality\n");
return NULL;
}
max_num_archipelagos = p_hwfn->pf_params.iscsi_pf_params.num_cons;
max_num_isles = QED_MAX_NUM_ISLES + max_num_archipelagos;
if (!max_num_archipelagos) {
DP_NOTICE(p_hwfn,
"Failed to allocate qed_ooo_info: unknown amount of connections\n");
return NULL;
}
p_ooo_info = kzalloc(sizeof(*p_ooo_info), GFP_KERNEL);
if (!p_ooo_info)
return NULL;
INIT_LIST_HEAD(&p_ooo_info->free_buffers_list);
INIT_LIST_HEAD(&p_ooo_info->ready_buffers_list);
INIT_LIST_HEAD(&p_ooo_info->free_isles_list);
INIT_LIST_HEAD(&p_ooo_info->free_archipelagos_list);
INIT_LIST_HEAD(&p_ooo_info->archipelagos_list);
p_ooo_info->p_isles_mem = kcalloc(max_num_isles,
sizeof(struct qed_ooo_isle),
GFP_KERNEL);
if (!p_ooo_info->p_isles_mem)
goto no_isles_mem;
for (i = 0; i < max_num_isles; i++) {
INIT_LIST_HEAD(&p_ooo_info->p_isles_mem[i].buffers_list);
list_add_tail(&p_ooo_info->p_isles_mem[i].list_entry,
&p_ooo_info->free_isles_list);
}
p_ooo_info->p_archipelagos_mem =
kcalloc(max_num_archipelagos,
sizeof(struct qed_ooo_archipelago),
GFP_KERNEL);
if (!p_ooo_info->p_archipelagos_mem)
goto no_archipelagos_mem;
for (i = 0; i < max_num_archipelagos; i++) {
INIT_LIST_HEAD(&p_ooo_info->p_archipelagos_mem[i].isles_list);
list_add_tail(&p_ooo_info->p_archipelagos_mem[i].list_entry,
&p_ooo_info->free_archipelagos_list);
}
p_ooo_info->ooo_history.p_cqes =
kcalloc(QED_MAX_NUM_OOO_HISTORY_ENTRIES,
sizeof(struct ooo_opaque),
GFP_KERNEL);
if (!p_ooo_info->ooo_history.p_cqes)
goto no_history_mem;
return p_ooo_info;
no_history_mem:
kfree(p_ooo_info->p_archipelagos_mem);
no_archipelagos_mem:
kfree(p_ooo_info->p_isles_mem);
no_isles_mem:
kfree(p_ooo_info);
return NULL;
}
void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info, u32 cid)
{
struct qed_ooo_archipelago *p_archipelago;
struct qed_ooo_buffer *p_buffer;
struct qed_ooo_isle *p_isle;
bool b_found = false;
if (list_empty(&p_ooo_info->archipelagos_list))
return;
list_for_each_entry(p_archipelago,
&p_ooo_info->archipelagos_list, list_entry) {
if (p_archipelago->cid == cid) {
list_del(&p_archipelago->list_entry);
b_found = true;
break;
}
}
if (!b_found)
return;
while (!list_empty(&p_archipelago->isles_list)) {
p_isle = list_first_entry(&p_archipelago->isles_list,
struct qed_ooo_isle, list_entry);
list_del(&p_isle->list_entry);
while (!list_empty(&p_isle->buffers_list)) {
p_buffer = list_first_entry(&p_isle->buffers_list,
struct qed_ooo_buffer,
list_entry);
if (!p_buffer)
break;
list_del(&p_buffer->list_entry);
list_add_tail(&p_buffer->list_entry,
&p_ooo_info->free_buffers_list);
}
list_add_tail(&p_isle->list_entry,
&p_ooo_info->free_isles_list);
}
list_add_tail(&p_archipelago->list_entry,
&p_ooo_info->free_archipelagos_list);
}
void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info)
{
struct qed_ooo_archipelago *p_arch;
struct qed_ooo_buffer *p_buffer;
struct qed_ooo_isle *p_isle;
while (!list_empty(&p_ooo_info->archipelagos_list)) {
p_arch = list_first_entry(&p_ooo_info->archipelagos_list,
struct qed_ooo_archipelago,
list_entry);
list_del(&p_arch->list_entry);
while (!list_empty(&p_arch->isles_list)) {
p_isle = list_first_entry(&p_arch->isles_list,
struct qed_ooo_isle,
list_entry);
list_del(&p_isle->list_entry);
while (!list_empty(&p_isle->buffers_list)) {
p_buffer =
list_first_entry(&p_isle->buffers_list,
struct qed_ooo_buffer,
list_entry);
if (!p_buffer)
break;
list_del(&p_buffer->list_entry);
list_add_tail(&p_buffer->list_entry,
&p_ooo_info->free_buffers_list);
}
list_add_tail(&p_isle->list_entry,
&p_ooo_info->free_isles_list);
}
list_add_tail(&p_arch->list_entry,
&p_ooo_info->free_archipelagos_list);
}
if (!list_empty(&p_ooo_info->ready_buffers_list))
list_splice_tail_init(&p_ooo_info->ready_buffers_list,
&p_ooo_info->free_buffers_list);
}
void qed_ooo_setup(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info)
{
qed_ooo_release_all_isles(p_hwfn, p_ooo_info);
memset(p_ooo_info->ooo_history.p_cqes, 0,
p_ooo_info->ooo_history.num_of_cqes *
sizeof(struct ooo_opaque));
p_ooo_info->ooo_history.head_idx = 0;
}
void qed_ooo_free(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info)
{
struct qed_ooo_buffer *p_buffer;
qed_ooo_release_all_isles(p_hwfn, p_ooo_info);
while (!list_empty(&p_ooo_info->free_buffers_list)) {
p_buffer = list_first_entry(&p_ooo_info->free_buffers_list,
struct qed_ooo_buffer, list_entry);
if (!p_buffer)
break;
list_del(&p_buffer->list_entry);
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
p_buffer->rx_buffer_size,
p_buffer->rx_buffer_virt_addr,
p_buffer->rx_buffer_phys_addr);
kfree(p_buffer);
}
kfree(p_ooo_info->p_isles_mem);
kfree(p_ooo_info->p_archipelagos_mem);
kfree(p_ooo_info->ooo_history.p_cqes);
kfree(p_ooo_info);
}
void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
struct qed_ooo_buffer *p_buffer)
{
list_add_tail(&p_buffer->list_entry, &p_ooo_info->free_buffers_list);
}
struct qed_ooo_buffer *qed_ooo_get_free_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info)
{
struct qed_ooo_buffer *p_buffer = NULL;
if (!list_empty(&p_ooo_info->free_buffers_list)) {
p_buffer = list_first_entry(&p_ooo_info->free_buffers_list,
struct qed_ooo_buffer, list_entry);
list_del(&p_buffer->list_entry);
}
return p_buffer;
}
void qed_ooo_put_ready_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
struct qed_ooo_buffer *p_buffer, u8 on_tail)
{
if (on_tail)
list_add_tail(&p_buffer->list_entry,
&p_ooo_info->ready_buffers_list);
else
list_add(&p_buffer->list_entry,
&p_ooo_info->ready_buffers_list);
}
struct qed_ooo_buffer *qed_ooo_get_ready_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info)
{
struct qed_ooo_buffer *p_buffer = NULL;
if (!list_empty(&p_ooo_info->ready_buffers_list)) {
p_buffer = list_first_entry(&p_ooo_info->ready_buffers_list,
struct qed_ooo_buffer, list_entry);
list_del(&p_buffer->list_entry);
}
return p_buffer;
}
void qed_ooo_delete_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
u32 cid, u8 drop_isle, u8 drop_size)
{
struct qed_ooo_archipelago *p_archipelago = NULL;
struct qed_ooo_isle *p_isle = NULL;
u8 isle_idx;
p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid);
for (isle_idx = 0; isle_idx < drop_size; isle_idx++) {
p_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, drop_isle);
if (!p_isle) {
DP_NOTICE(p_hwfn,
"Isle %d is not found(cid %d)\n",
drop_isle, cid);
return;
}
if (list_empty(&p_isle->buffers_list))
DP_NOTICE(p_hwfn,
"Isle %d is empty(cid %d)\n", drop_isle, cid);
else
list_splice_tail_init(&p_isle->buffers_list,
&p_ooo_info->free_buffers_list);
list_del(&p_isle->list_entry);
p_ooo_info->cur_isles_number--;
list_add(&p_isle->list_entry, &p_ooo_info->free_isles_list);
}
if (list_empty(&p_archipelago->isles_list)) {
list_del(&p_archipelago->list_entry);
list_add(&p_archipelago->list_entry,
&p_ooo_info->free_archipelagos_list);
}
}
void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
u32 cid, u8 ooo_isle,
struct qed_ooo_buffer *p_buffer)
{
struct qed_ooo_archipelago *p_archipelago = NULL;
struct qed_ooo_isle *p_prev_isle = NULL;
struct qed_ooo_isle *p_isle = NULL;
if (ooo_isle > 1) {
p_prev_isle = qed_ooo_seek_isle(p_hwfn,
p_ooo_info, cid, ooo_isle - 1);
if (!p_prev_isle) {
DP_NOTICE(p_hwfn,
"Isle %d is not found(cid %d)\n",
ooo_isle - 1, cid);
return;
}
}
p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid);
if (!p_archipelago && (ooo_isle != 1)) {
DP_NOTICE(p_hwfn,
"Connection %d is not found in OOO list\n", cid);
return;
}
if (!list_empty(&p_ooo_info->free_isles_list)) {
p_isle = list_first_entry(&p_ooo_info->free_isles_list,
struct qed_ooo_isle, list_entry);
list_del(&p_isle->list_entry);
if (!list_empty(&p_isle->buffers_list)) {
DP_NOTICE(p_hwfn, "Free isle is not empty\n");
INIT_LIST_HEAD(&p_isle->buffers_list);
}
} else {
DP_NOTICE(p_hwfn, "No more free isles\n");
return;
}
if (!p_archipelago &&
!list_empty(&p_ooo_info->free_archipelagos_list)) {
p_archipelago =
list_first_entry(&p_ooo_info->free_archipelagos_list,
struct qed_ooo_archipelago, list_entry);
list_del(&p_archipelago->list_entry);
if (!list_empty(&p_archipelago->isles_list)) {
DP_NOTICE(p_hwfn,
"Free OOO connection is not empty\n");
INIT_LIST_HEAD(&p_archipelago->isles_list);
}
p_archipelago->cid = cid;
list_add(&p_archipelago->list_entry,
&p_ooo_info->archipelagos_list);
} else if (!p_archipelago) {
DP_NOTICE(p_hwfn, "No more free OOO connections\n");
list_add(&p_isle->list_entry,
&p_ooo_info->free_isles_list);
list_add(&p_buffer->list_entry,
&p_ooo_info->free_buffers_list);
return;
}
list_add(&p_buffer->list_entry, &p_isle->buffers_list);
p_ooo_info->cur_isles_number++;
p_ooo_info->gen_isles_number++;
if (p_ooo_info->cur_isles_number > p_ooo_info->max_isles_number)
p_ooo_info->max_isles_number = p_ooo_info->cur_isles_number;
if (!p_prev_isle)
list_add(&p_isle->list_entry, &p_archipelago->isles_list);
else
list_add(&p_isle->list_entry, &p_prev_isle->list_entry);
}
void qed_ooo_add_new_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
u32 cid,
u8 ooo_isle,
struct qed_ooo_buffer *p_buffer, u8 buffer_side)
{
struct qed_ooo_isle *p_isle = NULL;
p_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, ooo_isle);
if (!p_isle) {
DP_NOTICE(p_hwfn,
"Isle %d is not found(cid %d)\n", ooo_isle, cid);
return;
}
if (buffer_side == QED_OOO_LEFT_BUF)
list_add(&p_buffer->list_entry, &p_isle->buffers_list);
else
list_add_tail(&p_buffer->list_entry, &p_isle->buffers_list);
}
void qed_ooo_join_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info, u32 cid, u8 left_isle)
{
struct qed_ooo_archipelago *p_archipelago = NULL;
struct qed_ooo_isle *p_right_isle = NULL;
struct qed_ooo_isle *p_left_isle = NULL;
p_right_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid,
left_isle + 1);
if (!p_right_isle) {
DP_NOTICE(p_hwfn,
"Right isle %d is not found(cid %d)\n",
left_isle + 1, cid);
return;
}
p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid);
list_del(&p_right_isle->list_entry);
p_ooo_info->cur_isles_number--;
if (left_isle) {
p_left_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid,
left_isle);
if (!p_left_isle) {
DP_NOTICE(p_hwfn,
"Left isle %d is not found(cid %d)\n",
left_isle, cid);
return;
}
list_splice_tail_init(&p_right_isle->buffers_list,
&p_left_isle->buffers_list);
} else {
list_splice_tail_init(&p_right_isle->buffers_list,
&p_ooo_info->ready_buffers_list);
if (list_empty(&p_archipelago->isles_list)) {
list_del(&p_archipelago->list_entry);
list_add(&p_archipelago->list_entry,
&p_ooo_info->free_archipelagos_list);
}
}
list_add_tail(&p_right_isle->list_entry, &p_ooo_info->free_isles_list);
}
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#ifndef _QED_OOO_H
#define _QED_OOO_H
#include <linux/types.h>
#include <linux/list.h>
#include <linux/slab.h>
#include "qed.h"
#define QED_MAX_NUM_ISLES 256
#define QED_MAX_NUM_OOO_HISTORY_ENTRIES 512
#define QED_OOO_LEFT_BUF 0
#define QED_OOO_RIGHT_BUF 1
struct qed_ooo_buffer {
struct list_head list_entry;
void *rx_buffer_virt_addr;
dma_addr_t rx_buffer_phys_addr;
u32 rx_buffer_size;
u16 packet_length;
u16 parse_flags;
u16 vlan;
u8 placement_offset;
};
struct qed_ooo_isle {
struct list_head list_entry;
struct list_head buffers_list;
};
struct qed_ooo_archipelago {
struct list_head list_entry;
struct list_head isles_list;
u32 cid;
};
struct qed_ooo_history {
struct ooo_opaque *p_cqes;
u32 head_idx;
u32 num_of_cqes;
};
struct qed_ooo_info {
struct list_head free_buffers_list;
struct list_head ready_buffers_list;
struct list_head free_isles_list;
struct list_head free_archipelagos_list;
struct list_head archipelagos_list;
struct qed_ooo_archipelago *p_archipelagos_mem;
struct qed_ooo_isle *p_isles_mem;
struct qed_ooo_history ooo_history;
u32 cur_isles_number;
u32 max_isles_number;
u32 gen_isles_number;
};
#if IS_ENABLED(CONFIG_QED_ISCSI)
void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
struct ooo_opaque *p_cqe);
struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn);
void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
u32 cid);
void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info);
void qed_ooo_setup(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info);
void qed_ooo_free(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info);
void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
struct qed_ooo_buffer *p_buffer);
struct qed_ooo_buffer *
qed_ooo_get_free_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info);
void qed_ooo_put_ready_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
struct qed_ooo_buffer *p_buffer, u8 on_tail);
struct qed_ooo_buffer *
qed_ooo_get_ready_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info);
void qed_ooo_delete_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
u32 cid, u8 drop_isle, u8 drop_size);
void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
u32 cid,
u8 ooo_isle, struct qed_ooo_buffer *p_buffer);
void qed_ooo_add_new_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
u32 cid,
u8 ooo_isle,
struct qed_ooo_buffer *p_buffer, u8 buffer_side);
void qed_ooo_join_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info, u32 cid,
u8 left_isle);
#else /* IS_ENABLED(CONFIG_QED_ISCSI) */
static inline void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
struct ooo_opaque *p_cqe) {}
static inline struct qed_ooo_info *qed_ooo_alloc(
struct qed_hwfn *p_hwfn) { return NULL; }
static inline void
qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
u32 cid) {}
static inline void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info)
{}
static inline void qed_ooo_setup(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info) {}
static inline void qed_ooo_free(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info) {}
static inline void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
struct qed_ooo_buffer *p_buffer) {}
static inline struct qed_ooo_buffer *
qed_ooo_get_free_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info) { return NULL; }
static inline void qed_ooo_put_ready_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
struct qed_ooo_buffer *p_buffer,
u8 on_tail) {}
static inline struct qed_ooo_buffer *
qed_ooo_get_ready_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info) { return NULL; }
static inline void qed_ooo_delete_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
u32 cid, u8 drop_isle, u8 drop_size) {}
static inline void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
u32 cid, u8 ooo_isle,
struct qed_ooo_buffer *p_buffer) {}
static inline void qed_ooo_add_new_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
u32 cid, u8 ooo_isle,
struct qed_ooo_buffer *p_buffer,
u8 buffer_side) {}
static inline void qed_ooo_join_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info, u32 cid,
u8 left_isle) {}
#endif /* IS_ENABLED(CONFIG_QED_ISCSI) */
#endif
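To make the naming above easier to follow: a qed_ooo_buffer holds one received out-of-order packet, an isle is an ordered list of buffers that are contiguous with one another but not yet with the connection's expected sequence, and an archipelago groups all isles of a single connection (cid). Below is an illustrative sketch, not taken from qed_ll2.c, of how the LL2 loopback Rx path is expected to drive these helpers; the 'cid', 'isle' and 'starts_new_isle' values are assumed to come from the firmware completion.

static void example_ooo_rx(struct qed_hwfn *p_hwfn,
			   struct qed_ooo_info *p_ooo_info,
			   u32 cid, u8 isle, bool starts_new_isle,
			   struct qed_ooo_buffer *p_buffer)
{
	if (starts_new_isle)
		/* Packet opens a new sequence hole on this connection */
		qed_ooo_add_new_isle(p_hwfn, p_ooo_info, cid, isle, p_buffer);
	else
		/* Packet extends isle 'isle' on its right edge */
		qed_ooo_add_new_buffer(p_hwfn, p_ooo_info, cid, isle,
				       p_buffer, QED_OOO_RIGHT_BUF);
}

Once firmware reports that the gap to the left of an isle has closed, qed_ooo_join_isles() splices its buffers either into the isle on its left or, when left_isle is 0, straight onto ready_buffers_list so they can be replayed in order over the loopback Tx queue.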
...@@ -82,6 +82,8 @@
0x1c80000UL
#define BAR0_MAP_REG_XSDM_RAM \
0x1e00000UL
#define BAR0_MAP_REG_YSDM_RAM \
0x1e80000UL
#define NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF \
0x5011f4UL
#define PRS_REG_SEARCH_TCP \
......
...@@ -2771,6 +2771,7 @@ static int qed_roce_ll2_tx(struct qed_dev *cdev,
/* Tx header */
rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), roce_ll2->handle,
1 + pkt->n_seg, 0, flags, 0,
QED_LL2_TX_DEST_NW,
qed_roce_flavor, pkt->header.baddr,
pkt->header.len, pkt, 1);
if (rc) {
......
...@@ -24,7 +24,9 @@
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_iscsi.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
...@@ -277,6 +279,28 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
return qed_sriov_eqe_event(p_hwfn,
p_eqe->opcode,
p_eqe->echo, &p_eqe->data);
case PROTOCOLID_ISCSI:
if (!IS_ENABLED(CONFIG_QED_ISCSI))
return -EINVAL;
if (p_eqe->opcode == ISCSI_EVENT_TYPE_ASYN_DELETE_OOO_ISLES) {
u32 cid = le32_to_cpu(p_eqe->data.iscsi_info.cid);
qed_ooo_release_connection_isles(p_hwfn,
p_hwfn->p_ooo_info,
cid);
return 0;
}
if (p_hwfn->p_iscsi_info->event_cb) {
struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
return p_iscsi->event_cb(p_iscsi->event_context,
p_eqe->opcode, &p_eqe->data);
} else {
DP_NOTICE(p_hwfn,
"iSCSI async completion is not set\n");
return -EINVAL;
}
default:
DP_NOTICE(p_hwfn,
"Unknown Async completion for protocol: %d\n",
......
...@@ -166,6 +166,7 @@ struct qed_iscsi_pf_params {
u32 max_cwnd;
u16 cq_num_entries;
u16 cmdq_num_entries;
u32 two_msl_timer;
u16 dup_ack_threshold;
u16 tx_sws_timer;
u16 min_rto;
...@@ -275,6 +276,7 @@ struct qed_dev_info {
enum qed_sb_type {
QED_SB_TYPE_L2_QUEUE,
QED_SB_TYPE_CNQ,
QED_SB_TYPE_STORAGE,
};
enum qed_protocol {
......
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#ifndef _QED_ISCSI_IF_H
#define _QED_ISCSI_IF_H
#include <linux/types.h>
#include <linux/qed/qed_if.h>
typedef int (*iscsi_event_cb_t) (void *context,
u8 fw_event_code, void *fw_handle);
struct qed_iscsi_stats {
u64 iscsi_rx_bytes_cnt;
u64 iscsi_rx_packet_cnt;
u64 iscsi_rx_new_ooo_isle_events_cnt;
u32 iscsi_cmdq_threshold_cnt;
u32 iscsi_rq_threshold_cnt;
u32 iscsi_immq_threshold_cnt;
u64 iscsi_rx_dropped_pdus_task_not_valid;
u64 iscsi_rx_data_pdu_cnt;
u64 iscsi_rx_r2t_pdu_cnt;
u64 iscsi_rx_total_pdu_cnt;
u64 iscsi_tx_go_to_slow_start_event_cnt;
u64 iscsi_tx_fast_retransmit_event_cnt;
u64 iscsi_tx_data_pdu_cnt;
u64 iscsi_tx_r2t_pdu_cnt;
u64 iscsi_tx_total_pdu_cnt;
u64 iscsi_tx_bytes_cnt;
u64 iscsi_tx_packet_cnt;
};
struct qed_dev_iscsi_info {
struct qed_dev_info common;
void __iomem *primary_dbq_rq_addr;
void __iomem *secondary_bdq_rq_addr;
};
struct qed_iscsi_id_params {
u8 mac[ETH_ALEN];
u32 ip[4];
u16 port;
};
struct qed_iscsi_params_offload {
u8 layer_code;
dma_addr_t sq_pbl_addr;
u32 initial_ack;
struct qed_iscsi_id_params src;
struct qed_iscsi_id_params dst;
u16 vlan_id;
u8 tcp_flags;
u8 ip_version;
u8 default_cq;
u8 ka_max_probe_cnt;
u8 dup_ack_theshold;
u32 rcv_next;
u32 snd_una;
u32 snd_next;
u32 snd_max;
u32 snd_wnd;
u32 rcv_wnd;
u32 snd_wl1;
u32 cwnd;
u32 ss_thresh;
u16 srtt;
u16 rtt_var;
u32 ts_time;
u32 ts_recent;
u32 ts_recent_age;
u32 total_rt;
u32 ka_timeout_delta;
u32 rt_timeout_delta;
u8 dup_ack_cnt;
u8 snd_wnd_probe_cnt;
u8 ka_probe_cnt;
u8 rt_cnt;
u32 flow_label;
u32 ka_timeout;
u32 ka_interval;
u32 max_rt_time;
u32 initial_rcv_wnd;
u8 ttl;
u8 tos_or_tc;
u16 remote_port;
u16 local_port;
u16 mss;
u8 snd_wnd_scale;
u8 rcv_wnd_scale;
u32 ts_ticks_per_second;
u16 da_timeout_value;
u8 ack_frequency;
};
struct qed_iscsi_params_update {
u8 update_flag;
#define QED_ISCSI_CONN_HD_EN BIT(0)
#define QED_ISCSI_CONN_DD_EN BIT(1)
#define QED_ISCSI_CONN_INITIAL_R2T BIT(2)
#define QED_ISCSI_CONN_IMMEDIATE_DATA BIT(3)
u32 max_seq_size;
u32 max_recv_pdu_length;
u32 max_send_pdu_length;
u32 first_seq_length;
u32 exp_stat_sn;
};
#define MAX_TID_BLOCKS_ISCSI (512)
struct qed_iscsi_tid {
u32 size; /* In bytes per task */
u32 num_tids_per_block;
u8 *blocks[MAX_TID_BLOCKS_ISCSI];
};
struct qed_iscsi_cb_ops {
struct qed_common_cb_ops common;
};
/**
* struct qed_iscsi_ops - qed iSCSI operations.
* @common: common operations pointer
* @ll2: light L2 operations pointer
* @fill_dev_info: fills iSCSI specific information
* @param cdev
* @param info
* @return 0 on success, otherwise error value.
* @register_ops: register iscsi operations
* @param cdev
* @param ops - specified using qed_iscsi_cb_ops
* @param cookie - driver private
* @start: iscsi in FW
* @param cdev
* @param tasks - qed will fill information about tasks
* return 0 on success, otherwise error value.
* @stop: iscsi in FW
* @param cdev
* return 0 on success, otherwise error value.
* @acquire_conn: acquire a new iscsi connection
* @param cdev
* @param handle - qed will fill handle that should be
* used henceforth as identifier of the
* connection.
* @param p_doorbell - qed will fill the address of the
* doorbell.
* @return 0 on success, otherwise error value.
* @release_conn: release a previously acquired iscsi connection
* @param cdev
* @param handle - the connection handle.
* @return 0 on success, otherwise error value.
* @offload_conn: configures an offloaded connection
* @param cdev
* @param handle - the connection handle.
* @param conn_info - the configuration to use for the
* offload.
* @return 0 on success, otherwise error value.
* @update_conn: updates an offloaded connection
* @param cdev
* @param handle - the connection handle.
* @param conn_info - the configuration to use for the
* offload.
* @return 0 on success, otherwise error value.
* @destroy_conn: stops an offloaded connection
* @param cdev
* @param handle - the connection handle.
* @return 0 on success, otherwise error value.
* @clear_sq: clear all task in sq
* @param cdev
* @param handle - the connection handle.
* @return 0 on success, otherwise error value.
* @get_stats: iSCSI related statistics
* @param cdev
* @param stats - pointer to struct that would be filled
* with stats
* @return 0 on success, error otherwise.
*/
struct qed_iscsi_ops {
const struct qed_common_ops *common;
const struct qed_ll2_ops *ll2;
int (*fill_dev_info)(struct qed_dev *cdev,
struct qed_dev_iscsi_info *info);
void (*register_ops)(struct qed_dev *cdev,
struct qed_iscsi_cb_ops *ops, void *cookie);
int (*start)(struct qed_dev *cdev,
struct qed_iscsi_tid *tasks,
void *event_context, iscsi_event_cb_t async_event_cb);
int (*stop)(struct qed_dev *cdev);
int (*acquire_conn)(struct qed_dev *cdev,
u32 *handle,
u32 *fw_cid, void __iomem **p_doorbell);
int (*release_conn)(struct qed_dev *cdev, u32 handle);
int (*offload_conn)(struct qed_dev *cdev,
u32 handle,
struct qed_iscsi_params_offload *conn_info);
int (*update_conn)(struct qed_dev *cdev,
u32 handle,
struct qed_iscsi_params_update *conn_info);
int (*destroy_conn)(struct qed_dev *cdev, u32 handle, u8 abrt_conn);
int (*clear_sq)(struct qed_dev *cdev, u32 handle);
int (*get_stats)(struct qed_dev *cdev,
struct qed_iscsi_stats *stats);
};
const struct qed_iscsi_ops *qed_get_iscsi_ops(void);
void qed_put_iscsi_ops(void);
#endif
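An illustrative consumer sketch (not taken from the qedi sources) of how a dependent protocol driver might bind to the ops exported above; every 'example_*' identifier is hypothetical, while the ops, structures and qed_get_iscsi_ops()/qed_put_iscsi_ops() calls are the ones declared in this header.

static const struct qed_iscsi_ops *qed_ops;
static struct qed_iscsi_tid example_tasks;	/* filled in by qed on start() */

static int example_event_cb(void *context, u8 fw_event_code, void *fw_handle)
{
	/* Dispatch firmware iSCSI async events to the upper driver here */
	return 0;
}

static int example_iscsi_probe(struct qed_dev *cdev)
{
	struct qed_dev_iscsi_info dev_info;
	int rc;

	qed_ops = qed_get_iscsi_ops();
	if (!qed_ops)
		return -EINVAL;

	rc = qed_ops->fill_dev_info(cdev, &dev_info);
	if (rc)
		goto put_ops;

	/* Hand qed the task-memory layout and the async event callback */
	rc = qed_ops->start(cdev, &example_tasks, cdev, example_event_cb);
	if (rc)
		goto put_ops;

	return 0;

put_ops:
	qed_put_iscsi_ops();
	return rc;
}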