Commit f013820f authored by David S. Miller's avatar David S. Miller

Merge branch 'cxgb4-hw-debug-logs'

Rahul Lakkireddy says:

====================
cxgb4: add support to get hardware debug logs via ethtool

This series of patches adds support to collect hardware debug logs
via the ethtool --get-dump facility.

Currently supports:

Memory dumps - Collects on-chip EDC0 and EDC1 dumps.
Hardware dumps - Collects firmware and hardware dumps.

Patch 1 adds ethtool set/get dump data.  It also adds template header
that precedes dump data.  This template header gives additional
information needed for extracting and decoding the collected dump
data.

Patch 2 adds base to collect dumps.  Also collects regdump.

Patch 3 collects on-chip EDC0 and EDC1 memory dumps.

Patch 4 collects firmware mbox log and device log.

Patch 5 updates base API for accessing TP indirect registers.

Patch 6 collects hardware TP module dump.

Patch 7 collects hardware SGE, PCIE, PM, UP CIM, MA, and HMA
module dumps.

Patch 8 collects hardware IBQ and OBQ dump.

Thanks,
Rahul

---
v2:
- Prefix symbols that pollute global namespace in files starting
  with cxgb4_* with "cxgb4_"
- Prefix symbols that pollute global namespace in files starting
  with cudbg_* with "cudbg_"
- Make cudbg_collect_mem_info() static.
====================
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 4c7787ba 7c075ce2
......@@ -6,7 +6,8 @@ obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o \
cxgb4_uld.o sched.o cxgb4_filter.o cxgb4_tc_u32.o \
cxgb4_ptp.o cxgb4_tc_flower.o
cxgb4_ptp.o cxgb4_tc_flower.o cxgb4_cudbg.o \
cudbg_common.o cudbg_lib.o
cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o
cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o
cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
/*
* Copyright (C) 2017 Chelsio Communications. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
*/
#include "cxgb4.h"
#include "cudbg_if.h"
#include "cudbg_lib_common.h"
/* Reserve @size bytes of free space in @pdbg_buff and describe the
 * reserved region in @pin_buff.
 *
 * Returns 0 on success, or CUDBG_STATUS_NO_MEM if the debug buffer
 * cannot hold @size more bytes.
 */
int cudbg_get_buff(struct cudbg_buffer *pdbg_buff, u32 size,
		   struct cudbg_buffer *pin_buff)
{
	u32 off = pdbg_buff->offset;

	if (pdbg_buff->size < off + size)
		return CUDBG_STATUS_NO_MEM;

	/* Point the child descriptor at the start of the free region. */
	pin_buff->data = (char *)pdbg_buff->data + off;
	pin_buff->offset = off;
	pin_buff->size = size;

	/* Account the reservation against the parent's remaining capacity. */
	pdbg_buff->size -= size;
	return 0;
}
/* Release a region previously reserved with cudbg_get_buff(): give its
 * bytes back to @pdbg_buff and clear @pin_buff so it no longer
 * references the region.
 */
void cudbg_put_buff(struct cudbg_buffer *pin_buff,
		    struct cudbg_buffer *pdbg_buff)
{
	/* Return the reserved bytes to the parent buffer's capacity. */
	pdbg_buff->size += pin_buff->size;

	/* Invalidate the child descriptor. */
	pin_buff->size = 0;
	pin_buff->offset = 0;
	pin_buff->data = NULL;
}
/* Advance the output buffer's write offset past the region that was just
 * filled in.  No data is copied here: collection routines write directly
 * into the buffer that ethtool provided.
 */
void cudbg_update_buff(struct cudbg_buffer *pin_buff,
		       struct cudbg_buffer *pout_buff)
{
	/* We already write to buffer provided by ethtool, so just
	 * increment offset to next free space.
	 */
	pout_buff->offset += pin_buff->size;
}
/*
* Copyright (C) 2017 Chelsio Communications. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
*/
#ifndef __CUDBG_ENTITY_H__
#define __CUDBG_ENTITY_H__
/* Bit positions used in card_mem::mem_flag to mark which on-chip
 * memories are enabled on this adapter.
 */
#define EDC0_FLAG 3
#define EDC1_FLAG 4

/* On-chip memory configuration read from the MA registers by
 * cudbg_collect_mem_info().
 */
struct card_mem {
	u16 size_edc0; /* EDRAM0_SIZE field; converted to bytes via cudbg_mbytes_to_bytes() */
	u16 size_edc1; /* EDRAM1_SIZE field; same MB interpretation */
	u16 mem_flag;  /* bitmask of (1 << EDC0_FLAG) / (1 << EDC1_FLAG) */
};
/* One dumped firmware mailbox command: the raw log entry plus its
 * command flits split into 32-bit high/low halves.
 */
struct cudbg_mbox_log {
	struct mbox_cmd entry;
	u32 hi[MBOX_LEN / 8]; /* upper 32 bits of each 64-bit flit */
	u32 lo[MBOX_LEN / 8]; /* lower 32 bits of each 64-bit flit */
};

/* Describes one group of indirect registers: the address/data register
 * pair used to access them, plus the local offset and count to read.
 */
struct ireg_field {
	u32 ireg_addr;         /* indirect address register */
	u32 ireg_data;         /* indirect data register */
	u32 ireg_local_offset; /* first local offset to read */
	u32 ireg_offset_range; /* number of values to read */
};

/* Dump unit: the group descriptor followed by the values read for it. */
struct ireg_buf {
	struct ireg_field tp_pio;
	u32 outbuf[32];
};
/* Each table row is {addr reg, data reg, local offset, count} — see
 * struct ireg_field for the meaning of the four elements.
 */
#define IREG_NUM_ELEM 4

/* TP_PIO indirect register groups for T6 adapters. */
static const u32 t6_tp_pio_array[][IREG_NUM_ELEM] = {
	{0x7e40, 0x7e44, 0x020, 28}, /* t6_tp_pio_regs_20_to_3b */
	{0x7e40, 0x7e44, 0x040, 10}, /* t6_tp_pio_regs_40_to_49 */
	{0x7e40, 0x7e44, 0x050, 10}, /* t6_tp_pio_regs_50_to_59 */
	{0x7e40, 0x7e44, 0x060, 14}, /* t6_tp_pio_regs_60_to_6d */
	{0x7e40, 0x7e44, 0x06F, 1}, /* t6_tp_pio_regs_6f */
	{0x7e40, 0x7e44, 0x070, 6}, /* t6_tp_pio_regs_70_to_75 */
	{0x7e40, 0x7e44, 0x130, 18}, /* t6_tp_pio_regs_130_to_141 */
	{0x7e40, 0x7e44, 0x145, 19}, /* t6_tp_pio_regs_145_to_157 */
	{0x7e40, 0x7e44, 0x160, 1}, /* t6_tp_pio_regs_160 */
	{0x7e40, 0x7e44, 0x230, 25}, /* t6_tp_pio_regs_230_to_248 */
	{0x7e40, 0x7e44, 0x24a, 3}, /* t6_tp_pio_regs_24c */
	{0x7e40, 0x7e44, 0x8C0, 1} /* t6_tp_pio_regs_8c0 */
};

/* TP_PIO indirect register groups for T5 adapters. */
static const u32 t5_tp_pio_array[][IREG_NUM_ELEM] = {
	{0x7e40, 0x7e44, 0x020, 28}, /* t5_tp_pio_regs_20_to_3b */
	{0x7e40, 0x7e44, 0x040, 19}, /* t5_tp_pio_regs_40_to_52 */
	{0x7e40, 0x7e44, 0x054, 2}, /* t5_tp_pio_regs_54_to_55 */
	{0x7e40, 0x7e44, 0x060, 13}, /* t5_tp_pio_regs_60_to_6c */
	{0x7e40, 0x7e44, 0x06F, 1}, /* t5_tp_pio_regs_6f */
	{0x7e40, 0x7e44, 0x120, 4}, /* t5_tp_pio_regs_120_to_123 */
	{0x7e40, 0x7e44, 0x12b, 2}, /* t5_tp_pio_regs_12b_to_12c */
	{0x7e40, 0x7e44, 0x12f, 21}, /* t5_tp_pio_regs_12f_to_143 */
	{0x7e40, 0x7e44, 0x145, 19}, /* t5_tp_pio_regs_145_to_157 */
	{0x7e40, 0x7e44, 0x230, 25}, /* t5_tp_pio_regs_230_to_248 */
	{0x7e40, 0x7e44, 0x8C0, 1} /* t5_tp_pio_regs_8c0 */
};

/* TP_TM_PIO indirect register groups (identical layout on T5 and T6). */
static const u32 t6_tp_tm_pio_array[][IREG_NUM_ELEM] = {
	{0x7e18, 0x7e1c, 0x0, 12}
};

static const u32 t5_tp_tm_pio_array[][IREG_NUM_ELEM] = {
	{0x7e18, 0x7e1c, 0x0, 12}
};

/* TP_MIB_INDEX indirect register groups. */
static const u32 t6_tp_mib_index_array[6][IREG_NUM_ELEM] = {
	{0x7e50, 0x7e54, 0x0, 13},
	{0x7e50, 0x7e54, 0x10, 6},
	{0x7e50, 0x7e54, 0x18, 21},
	{0x7e50, 0x7e54, 0x30, 32},
	{0x7e50, 0x7e54, 0x50, 22},
	{0x7e50, 0x7e54, 0x68, 12}
};

static const u32 t5_tp_mib_index_array[9][IREG_NUM_ELEM] = {
	{0x7e50, 0x7e54, 0x0, 13},
	{0x7e50, 0x7e54, 0x10, 6},
	{0x7e50, 0x7e54, 0x18, 8},
	{0x7e50, 0x7e54, 0x20, 13},
	{0x7e50, 0x7e54, 0x30, 16},
	{0x7e50, 0x7e54, 0x40, 16},
	{0x7e50, 0x7e54, 0x50, 16},
	{0x7e50, 0x7e54, 0x60, 6},
	{0x7e50, 0x7e54, 0x68, 4}
};

/* SGE debug data registers; the two rows use different data registers. */
static const u32 t5_sge_dbg_index_array[2][IREG_NUM_ELEM] = {
	{0x10cc, 0x10d0, 0x0, 16},
	{0x10cc, 0x10d4, 0x0, 16},
};
/* PCIE PDBG indirect register groups (T5 addresses). */
static const u32 t5_pcie_pdbg_array[][IREG_NUM_ELEM] = {
	{0x5a04, 0x5a0c, 0x00, 0x20}, /* t5_pcie_pdbg_regs_00_to_20 */
	{0x5a04, 0x5a0c, 0x21, 0x20}, /* t5_pcie_pdbg_regs_21_to_40 */
	{0x5a04, 0x5a0c, 0x41, 0x10}, /* t5_pcie_pdbg_regs_41_to_50 */
};

/* PCIE CDBG indirect register groups. */
static const u32 t5_pcie_cdbg_array[][IREG_NUM_ELEM] = {
	{0x5a10, 0x5a18, 0x00, 0x20}, /* t5_pcie_cdbg_regs_00_to_20 */
	{0x5a10, 0x5a18, 0x21, 0x18}, /* t5_pcie_cdbg_regs_21_to_37 */
};

/* PM RX indirect register groups. */
static const u32 t5_pm_rx_array[][IREG_NUM_ELEM] = {
	{0x8FD0, 0x8FD4, 0x10000, 0x20}, /* t5_pm_rx_regs_10000_to_10020 */
	{0x8FD0, 0x8FD4, 0x10021, 0x0D}, /* t5_pm_rx_regs_10021_to_1002c */
};

/* PM TX indirect register groups. */
static const u32 t5_pm_tx_array[][IREG_NUM_ELEM] = {
	{0x8FF0, 0x8FF4, 0x10000, 0x20}, /* t5_pm_tx_regs_10000_to_10020 */
	{0x8FF0, 0x8FF4, 0x10021, 0x1D}, /* t5_pm_tx_regs_10021_to_1003c */
};

/* MA indirect register groups (T6 only). */
static const u32 t6_ma_ireg_array[][IREG_NUM_ELEM] = {
	{0x78f8, 0x78fc, 0xa000, 23}, /* t6_ma_regs_a000_to_a016 */
	{0x78f8, 0x78fc, 0xa400, 30}, /* t6_ma_regs_a400_to_a41e */
	{0x78f8, 0x78fc, 0xa800, 20} /* t6_ma_regs_a800_to_a813 */
};

/* Second MA group; read one value per 0x20 stride by the collector. */
static const u32 t6_ma_ireg_array2[][IREG_NUM_ELEM] = {
	{0x78f8, 0x78fc, 0xe400, 17}, /* t6_ma_regs_e400_to_e600 */
	{0x78f8, 0x78fc, 0xe640, 13} /* t6_ma_regs_e640_to_e7c0 */
};
/* UP CIM indirect register groups for T6; differs from T5 only in the
 * length of the 0x2080 group.
 */
static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM] = {
	{0x7b50, 0x7b54, 0x2000, 0x20}, /* up_cim_2000_to_207c */
	{0x7b50, 0x7b54, 0x2080, 0x1d}, /* up_cim_2080_to_20fc */
	{0x7b50, 0x7b54, 0x00, 0x20}, /* up_cim_00_to_7c */
	{0x7b50, 0x7b54, 0x80, 0x20}, /* up_cim_80_to_fc */
	{0x7b50, 0x7b54, 0x100, 0x11}, /* up_cim_100_to_14c */
	{0x7b50, 0x7b54, 0x200, 0x10}, /* up_cim_200_to_23c */
	{0x7b50, 0x7b54, 0x240, 0x2}, /* up_cim_240_to_244 */
	{0x7b50, 0x7b54, 0x250, 0x2}, /* up_cim_250_to_254 */
	{0x7b50, 0x7b54, 0x260, 0x2}, /* up_cim_260_to_264 */
	{0x7b50, 0x7b54, 0x270, 0x2}, /* up_cim_270_to_274 */
	{0x7b50, 0x7b54, 0x280, 0x20}, /* up_cim_280_to_2fc */
	{0x7b50, 0x7b54, 0x300, 0x20}, /* up_cim_300_to_37c */
	{0x7b50, 0x7b54, 0x380, 0x14}, /* up_cim_380_to_3cc */
};

/* UP CIM indirect register groups for T5. */
static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM] = {
	{0x7b50, 0x7b54, 0x2000, 0x20}, /* up_cim_2000_to_207c */
	{0x7b50, 0x7b54, 0x2080, 0x19}, /* up_cim_2080_to_20ec */
	{0x7b50, 0x7b54, 0x00, 0x20}, /* up_cim_00_to_7c */
	{0x7b50, 0x7b54, 0x80, 0x20}, /* up_cim_80_to_fc */
	{0x7b50, 0x7b54, 0x100, 0x11}, /* up_cim_100_to_14c */
	{0x7b50, 0x7b54, 0x200, 0x10}, /* up_cim_200_to_23c */
	{0x7b50, 0x7b54, 0x240, 0x2}, /* up_cim_240_to_244 */
	{0x7b50, 0x7b54, 0x250, 0x2}, /* up_cim_250_to_254 */
	{0x7b50, 0x7b54, 0x260, 0x2}, /* up_cim_260_to_264 */
	{0x7b50, 0x7b54, 0x270, 0x2}, /* up_cim_270_to_274 */
	{0x7b50, 0x7b54, 0x280, 0x20}, /* up_cim_280_to_2fc */
	{0x7b50, 0x7b54, 0x300, 0x20}, /* up_cim_300_to_37c */
	{0x7b50, 0x7b54, 0x380, 0x14}, /* up_cim_380_to_3cc */
};

/* HMA indirect register group (T6 only). */
static const u32 t6_hma_ireg_array[][IREG_NUM_ELEM] = {
	{0x51320, 0x51324, 0xa000, 32} /* t6_hma_regs_a000_to_a01f */
};
#endif /* __CUDBG_ENTITY_H__ */
/*
* Copyright (C) 2017 Chelsio Communications. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
*/
#ifndef __CUDBG_IF_H__
#define __CUDBG_IF_H__
/* Error codes returned by the cudbg collection routines (negative, in
 * the style of kernel errno values but cudbg-specific).
 */
#define CUDBG_STATUS_NO_MEM -19
#define CUDBG_STATUS_ENTITY_NOT_FOUND -24
#define CUDBG_SYSTEM_ERROR -29

/* Version of the cudbg dump format written into the dump header. */
#define CUDBG_MAJOR_VERSION 1
#define CUDBG_MINOR_VERSION 14
/* Identifiers for the debug entities that can be collected.  The numeric
 * values are part of the dump format (they are recorded in the entity
 * headers), so they must not be renumbered; gaps correspond to entities
 * not collected by this driver.
 */
enum cudbg_dbg_entity_type {
	CUDBG_REG_DUMP = 1,
	CUDBG_DEV_LOG = 2,
	CUDBG_CIM_IBQ_TP0 = 6,
	CUDBG_CIM_IBQ_TP1 = 7,
	CUDBG_CIM_IBQ_ULP = 8,
	CUDBG_CIM_IBQ_SGE0 = 9,
	CUDBG_CIM_IBQ_SGE1 = 10,
	CUDBG_CIM_IBQ_NCSI = 11,
	CUDBG_CIM_OBQ_ULP0 = 12,
	CUDBG_CIM_OBQ_ULP1 = 13,
	CUDBG_CIM_OBQ_ULP2 = 14,
	CUDBG_CIM_OBQ_ULP3 = 15,
	CUDBG_CIM_OBQ_SGE = 16,
	CUDBG_CIM_OBQ_NCSI = 17,
	CUDBG_EDC0 = 18,
	CUDBG_EDC1 = 19,
	CUDBG_TP_INDIRECT = 36,
	CUDBG_SGE_INDIRECT = 37,
	CUDBG_CIM_OBQ_RXQ0 = 47,
	CUDBG_CIM_OBQ_RXQ1 = 48,
	CUDBG_PCIE_INDIRECT = 50,
	CUDBG_PM_INDIRECT = 51,
	CUDBG_MA_INDIRECT = 61,
	CUDBG_UP_CIM_INDIRECT = 64,
	CUDBG_MBOX_LOG = 66,
	CUDBG_HMA_INDIRECT = 67,
	CUDBG_MAX_ENTITY = 70,
};
/* Context handed to every collection routine: the adapter to read from
 * and the (ethtool-provided) buffer the dump is written into.
 */
struct cudbg_init {
	struct adapter *adap; /* Pointer to adapter structure */
	void *outbuf; /* Output buffer */
	u32 outbuf_size; /* Output buffer size */
};
/* Convert a size expressed in megabytes into bytes. */
static inline unsigned int cudbg_mbytes_to_bytes(unsigned int size)
{
	/* 1 MB == 2^20 bytes; unsigned shift is equivalent to *1024*1024. */
	return size << 20;
}
#endif /* __CUDBG_IF_H__ */
/*
* Copyright (C) 2017 Chelsio Communications. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
*/
#include "t4_regs.h"
#include "cxgb4.h"
#include "cudbg_if.h"
#include "cudbg_lib_common.h"
#include "cudbg_lib.h"
#include "cudbg_entity.h"
/* Commit a filled temporary region to the output buffer (advance the
 * write offset) and then release the reservation back to @dbg_buff.
 */
static void cudbg_write_and_release_buff(struct cudbg_buffer *pin_buff,
					 struct cudbg_buffer *dbg_buff)
{
	cudbg_update_buff(pin_buff, dbg_buff);
	cudbg_put_buff(pin_buff, dbg_buff);
}
/* Return 1 when the firmware is up (FW_OK) and the driver is not in
 * backdoor-access mode (use_bd), i.e. firmware services are usable.
 */
static int is_fw_attached(struct cudbg_init *pdbg_init)
{
	struct adapter *padap = pdbg_init->adap;

	return (padap->flags & FW_OK) && !padap->use_bd;
}
/* This function will add additional padding bytes into debug_buffer to make it
 * 4 byte aligned.
 */
void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
			      struct cudbg_entity_hdr *entity_hdr)
{
	u8 zero_buf[4] = {0};
	u8 padding, remain;

	/* Bytes written for this entity past the last 4-byte boundary. */
	remain = (dbg_buff->offset - entity_hdr->start_offset) % 4;
	padding = 4 - remain;
	if (remain) {
		/* remain is 1..3 here, so padding is at most 3 and
		 * always fits within zero_buf.
		 */
		memcpy(((u8 *)dbg_buff->data) + dbg_buff->offset, &zero_buf,
		       padding);
		dbg_buff->offset += padding;
		entity_hdr->num_pad = padding;
	}
	/* Record the final entity size, including any padding added. */
	entity_hdr->size = dbg_buff->offset - entity_hdr->start_offset;
}
/* Return a pointer to the @i'th entity header in the dump buffer.
 * Entity headers follow the cudbg header; @i is 1-based (the first
 * entity header is at index 1).
 */
struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i)
{
	struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;

	return (struct cudbg_entity_hdr *)
	       ((char *)outbuf + cudbg_hdr->hdr_len +
		(sizeof(struct cudbg_entity_hdr) * (i - 1)));
}
/* Collect the full adapter register dump (same data as t4_get_regs()).
 * Returns 0 on success or a cudbg/negative error code.
 */
int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	u32 buf_size = 0;
	int rc = 0;

	/* Regmap size depends on chip generation.  NOTE(review): for an
	 * unrecognized chip buf_size stays 0 and a zero-byte region is
	 * reserved — presumably unreachable in practice; confirm.
	 */
	if (is_t4(padap->params.chip))
		buf_size = T4_REGMAP_SIZE;
	else if (is_t5(padap->params.chip) || is_t6(padap->params.chip))
		buf_size = T5_REGMAP_SIZE;
	rc = cudbg_get_buff(dbg_buff, buf_size, &temp_buff);
	if (rc)
		return rc;
	t4_get_regs(padap, (void *)temp_buff.data, temp_buff.size);
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
/* Collect the firmware device log from adapter memory.
 * Hardware/firmware errors are recorded in @cudbg_err->sys_err and also
 * returned.
 */
int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct devlog_params *dparams;
	int rc = 0;

	/* Refresh devlog location/size parameters from the firmware. */
	rc = t4_init_devlog_params(padap);
	if (rc < 0) {
		cudbg_err->sys_err = rc;
		return rc;
	}

	dparams = &padap->params.devlog;
	rc = cudbg_get_buff(dbg_buff, dparams->size, &temp_buff);
	if (rc)
		return rc;

	/* Collect FW devlog */
	if (dparams->start != 0) {
		/* win0_lock serializes access to memory window 0. */
		spin_lock(&padap->win0_lock);
		rc = t4_memory_rw(padap, padap->params.drv_memwin,
				  dparams->memtype, dparams->start,
				  dparams->size,
				  (__be32 *)(char *)temp_buff.data,
				  1);
		spin_unlock(&padap->win0_lock);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(&temp_buff, dbg_buff);
			return rc;
		}
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
/* Dump one CIM inbound queue (selected by @qid) into the debug buffer.
 * Returns 0 on success; on failure records the error in
 * @cudbg_err->sys_err and releases the reservation.
 */
static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err, int qid)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int no_of_read_words, rc = 0;
	u32 qsize;

	/* collect CIM IBQ */
	qsize = CIM_IBQ_SIZE * 4 * sizeof(u32);
	rc = cudbg_get_buff(dbg_buff, qsize, &temp_buff);
	if (rc)
		return rc;

	/* t4_read_cim_ibq will return no. of read words or error */
	/* NOTE(review): temp_buff.offset is a byte offset but is added to
	 * a u32 pointer here (scaled by 4), and qsize is a byte count —
	 * confirm the units t4_read_cim_ibq expects for its buffer and
	 * capacity arguments.
	 */
	no_of_read_words = t4_read_cim_ibq(padap, qid,
					   (u32 *)((u32 *)temp_buff.data +
					   temp_buff.offset), qsize);
	/* no_of_read_words is less than or equal to 0 means error */
	if (no_of_read_words <= 0) {
		if (!no_of_read_words)
			rc = CUDBG_SYSTEM_ERROR;
		else
			rc = no_of_read_words;
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
/* The following collectors each dump one CIM inbound queue; the integer
 * passed to cudbg_read_cim_ibq() is the hardware IBQ index
 * (0=TP0, 1=TP1, 2=ULP, 3=SGE0, 4=SGE1, 5=NC-SI).
 */
int cudbg_collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 0);
}

int cudbg_collect_cim_ibq_tp1(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 1);
}

int cudbg_collect_cim_ibq_ulp(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 2);
}

int cudbg_collect_cim_ibq_sge0(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 3);
}

int cudbg_collect_cim_ibq_sge1(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 4);
}

int cudbg_collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 5);
}
/* Dump one CIM outbound queue (selected by @qid).  The reservation is
 * sized for the largest possible OBQ; after the read, temp_buff.size is
 * trimmed to the number of words actually returned.
 */
static int cudbg_read_cim_obq(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err, int qid)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int no_of_read_words, rc = 0;
	u32 qsize;

	/* collect CIM OBQ */
	qsize =  6 * CIM_OBQ_SIZE * 4 *  sizeof(u32);
	rc = cudbg_get_buff(dbg_buff, qsize, &temp_buff);
	if (rc)
		return rc;

	/* t4_read_cim_obq will return no. of read words or error */
	/* NOTE(review): as in cudbg_read_cim_ibq, the byte offset is
	 * added to a u32 pointer (scaled by 4) and qsize is in bytes —
	 * confirm against t4_read_cim_obq's expectations.
	 */
	no_of_read_words = t4_read_cim_obq(padap, qid,
					   (u32 *)((u32 *)temp_buff.data +
					   temp_buff.offset), qsize);
	/* no_of_read_words is less than or equal to 0 means error */
	if (no_of_read_words <= 0) {
		if (!no_of_read_words)
			rc = CUDBG_SYSTEM_ERROR;
		else
			rc = no_of_read_words;
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}
	/* Shrink to the data actually read.  NOTE(review): the unread
	 * remainder of the original qsize reservation is never returned
	 * to dbg_buff by cudbg_put_buff — looks like a small capacity
	 * leak per OBQ; verify this is intended.
	 */
	temp_buff.size = no_of_read_words * 4;
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
/* The following collectors each dump one CIM outbound queue; the
 * integer passed to cudbg_read_cim_obq() is the hardware OBQ index
 * (0-3=ULP0-3, 4=SGE, 5=NC-SI, 6-7=SGE RX queues).
 */
int cudbg_collect_cim_obq_ulp0(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 0);
}

int cudbg_collect_cim_obq_ulp1(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 1);
}

int cudbg_collect_cim_obq_ulp2(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 2);
}

int cudbg_collect_cim_obq_ulp3(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 3);
}

int cudbg_collect_cim_obq_sge(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 4);
}

int cudbg_collect_cim_obq_ncsi(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 5);
}

int cudbg_collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 6);
}

int cudbg_collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 7);
}
/* Copy @tot_len bytes of adapter memory of type @mem_type (e.g. EDC0)
 * into the debug buffer, in chunks of at most CUDBG_CHUNK_SIZE so that
 * only one chunk needs to be reserved at a time.
 */
static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init,
			     struct cudbg_buffer *dbg_buff, u8 mem_type,
			     unsigned long tot_len,
			     struct cudbg_error *cudbg_err)
{
	unsigned long bytes, bytes_left, bytes_read = 0;
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int rc = 0;

	bytes_left = tot_len;
	while (bytes_left > 0) {
		bytes = min_t(unsigned long, bytes_left,
			      (unsigned long)CUDBG_CHUNK_SIZE);
		rc = cudbg_get_buff(dbg_buff, bytes, &temp_buff);
		if (rc)
			return rc;
		/* win0_lock serializes access to memory window 0. */
		spin_lock(&padap->win0_lock);
		rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type,
				  bytes_read, bytes,
				  (__be32 *)temp_buff.data,
				  1);
		spin_unlock(&padap->win0_lock);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(&temp_buff, dbg_buff);
			return rc;
		}
		bytes_left -= bytes;
		bytes_read += bytes;
		cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	}
	return rc;
}
/* Read the MA registers to learn the sizes of the on-chip EDC memories
 * and which of them are enabled, filling in @mem_info.
 */
static void cudbg_collect_mem_info(struct cudbg_init *pdbg_init,
				   struct card_mem *mem_info)
{
	struct adapter *padap = pdbg_init->adap;
	u32 value;

	/* EDRAM sizes come from the BAR registers' SIZE fields. */
	value = t4_read_reg(padap, MA_EDRAM0_BAR_A);
	value = EDRAM0_SIZE_G(value);
	mem_info->size_edc0 = (u16)value;

	value = t4_read_reg(padap, MA_EDRAM1_BAR_A);
	value = EDRAM1_SIZE_G(value);
	mem_info->size_edc1 = (u16)value;

	/* Enable bits come from MA_TARGET_MEM_ENABLE. */
	value = t4_read_reg(padap, MA_TARGET_MEM_ENABLE_A);
	if (value & EDRAM0_ENABLE_F)
		mem_info->mem_flag |= (1 << EDC0_FLAG);
	if (value & EDRAM1_ENABLE_F)
		mem_info->mem_flag |= (1 << EDC1_FLAG);
}
/* Ask the firmware to flush its uP dcache so memory dumps read current
 * data.  A failure is only recorded as a warning in @cudbg_err, not
 * propagated.
 */
static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
			     struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	int rc;

	if (is_fw_attached(pdbg_init)) {
		/* Flush uP dcache before reading edcX/mcX */
		rc = t4_fwcache(padap, FW_PARAM_DEV_FWCACHE_FLUSH);
		if (rc)
			cudbg_err->sys_warn = rc;
	}
}
/* Dump one on-chip memory region (EDC0 or EDC1) into the debug buffer.
 *
 * Returns 0 on success, CUDBG_STATUS_ENTITY_NOT_FOUND when @mem_type is
 * unknown or the memory is not enabled on this adapter, or the error
 * from cudbg_read_fw_mem().
 */
static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
				    struct cudbg_buffer *dbg_buff,
				    struct cudbg_error *cudbg_err,
				    u8 mem_type)
{
	struct card_mem mem_info = {0};
	unsigned long flag, size;

	/* Flush uP dcache, then read sizes/enable bits from hardware. */
	cudbg_t4_fwcache(pdbg_init, cudbg_err);
	cudbg_collect_mem_info(pdbg_init, &mem_info);

	switch (mem_type) {
	case MEM_EDC0:
		flag = 1 << EDC0_FLAG;
		size = cudbg_mbytes_to_bytes(mem_info.size_edc0);
		break;
	case MEM_EDC1:
		flag = 1 << EDC1_FLAG;
		size = cudbg_mbytes_to_bytes(mem_info.size_edc1);
		break;
	default:
		return CUDBG_STATUS_ENTITY_NOT_FOUND;
	}

	/* Nothing to dump if this memory is not enabled on the card. */
	if (!(mem_info.mem_flag & flag))
		return CUDBG_STATUS_ENTITY_NOT_FOUND;

	return cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type, size,
				 cudbg_err);
}
/* Entity collectors for the two EDC memories; both defer to
 * cudbg_collect_mem_region() with the appropriate memory type.
 */
int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_EDC0);
}

int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_EDC1);
}
/* Collect the TP indirect registers: TP_PIO, TP_TM_PIO and
 * TP_MIB_INDEX groups.  The chip-specific tables (t5_*/t6_*) describe
 * each group; one struct ireg_buf is emitted per table row.
 */
int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_tp_pio;
	int i, rc, n = 0;
	u32 size;

	/* Total reservation covers all three table sets for this chip. */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_pio_array) +
		    sizeof(t5_tp_tm_pio_array) +
		    sizeof(t5_tp_mib_index_array);
	else
		n = sizeof(t6_tp_pio_array) +
		    sizeof(t6_tp_tm_pio_array) +
		    sizeof(t6_tp_mib_index_array);
	n = n / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ch_tp_pio = (struct ireg_buf *)temp_buff.data;

	/* TP_PIO */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		/* Copy the table row into the dump, then read the group. */
		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_pio_array[i][0];
			tp_pio->ireg_data = t5_tp_pio_array[i][1];
			tp_pio->ireg_local_offset = t5_tp_pio_array[i][2];
			tp_pio->ireg_offset_range = t5_tp_pio_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_pio_array[i][0];
			tp_pio->ireg_data = t6_tp_pio_array[i][1];
			tp_pio->ireg_local_offset = t6_tp_pio_array[i][2];
			tp_pio->ireg_offset_range = t6_tp_pio_array[i][3];
		}
		t4_tp_pio_read(padap, buff, tp_pio->ireg_offset_range,
			       tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}

	/* TP_TM_PIO */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_tm_pio_array[i][0];
			tp_pio->ireg_data = t5_tp_tm_pio_array[i][1];
			tp_pio->ireg_local_offset = t5_tp_tm_pio_array[i][2];
			tp_pio->ireg_offset_range = t5_tp_tm_pio_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_tm_pio_array[i][0];
			tp_pio->ireg_data = t6_tp_tm_pio_array[i][1];
			tp_pio->ireg_local_offset = t6_tp_tm_pio_array[i][2];
			tp_pio->ireg_offset_range = t6_tp_tm_pio_array[i][3];
		}
		t4_tp_tm_pio_read(padap, buff, tp_pio->ireg_offset_range,
				  tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}

	/* TP_MIB_INDEX */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_mib_index_array) /
		    (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_mib_index_array) /
		    (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n ; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_mib_index_array[i][0];
			tp_pio->ireg_data = t5_tp_mib_index_array[i][1];
			tp_pio->ireg_local_offset =
				t5_tp_mib_index_array[i][2];
			tp_pio->ireg_offset_range =
				t5_tp_mib_index_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_mib_index_array[i][0];
			tp_pio->ireg_data = t6_tp_mib_index_array[i][1];
			tp_pio->ireg_local_offset =
				t6_tp_mib_index_array[i][2];
			tp_pio->ireg_offset_range =
				t6_tp_mib_index_array[i][3];
		}
		t4_tp_mib_read(padap, buff, tp_pio->ireg_offset_range,
			       tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
/* Collect the two SGE debug register groups described by
 * t5_sge_dbg_index_array, one struct ireg_buf per group.
 */
int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_sge_dbg;
	int i, rc;

	rc = cudbg_get_buff(dbg_buff, sizeof(*ch_sge_dbg) * 2, &temp_buff);
	if (rc)
		return rc;

	ch_sge_dbg = (struct ireg_buf *)temp_buff.data;
	for (i = 0; i < 2; i++) {
		struct ireg_field *sge_pio = &ch_sge_dbg->tp_pio;
		u32 *buff = ch_sge_dbg->outbuf;

		/* Record which group this is, then read its values. */
		sge_pio->ireg_addr = t5_sge_dbg_index_array[i][0];
		sge_pio->ireg_data = t5_sge_dbg_index_array[i][1];
		sge_pio->ireg_local_offset = t5_sge_dbg_index_array[i][2];
		sge_pio->ireg_offset_range = t5_sge_dbg_index_array[i][3];
		t4_read_indirect(padap,
				 sge_pio->ireg_addr,
				 sge_pio->ireg_data,
				 buff,
				 sge_pio->ireg_offset_range,
				 sge_pio->ireg_local_offset);
		ch_sge_dbg++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
/* Collect the PCIE PDBG and CDBG indirect register groups.  The
 * reservation is sized as n * 2 because the PDBG and CDBG tables have
 * the same number of rows here.
 */
int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_pcie;
	int i, rc, n;
	u32 size;

	n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n * 2;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ch_pcie = (struct ireg_buf *)temp_buff.data;
	/* PCIE_PDBG */
	for (i = 0; i < n; i++) {
		struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
		u32 *buff = ch_pcie->outbuf;

		pcie_pio->ireg_addr = t5_pcie_pdbg_array[i][0];
		pcie_pio->ireg_data = t5_pcie_pdbg_array[i][1];
		pcie_pio->ireg_local_offset = t5_pcie_pdbg_array[i][2];
		pcie_pio->ireg_offset_range = t5_pcie_pdbg_array[i][3];
		t4_read_indirect(padap,
				 pcie_pio->ireg_addr,
				 pcie_pio->ireg_data,
				 buff,
				 pcie_pio->ireg_offset_range,
				 pcie_pio->ireg_local_offset);
		ch_pcie++;
	}

	/* PCIE_CDBG */
	n = sizeof(t5_pcie_cdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
	for (i = 0; i < n; i++) {
		struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
		u32 *buff = ch_pcie->outbuf;

		pcie_pio->ireg_addr = t5_pcie_cdbg_array[i][0];
		pcie_pio->ireg_data = t5_pcie_cdbg_array[i][1];
		pcie_pio->ireg_local_offset = t5_pcie_cdbg_array[i][2];
		pcie_pio->ireg_offset_range = t5_pcie_cdbg_array[i][3];
		t4_read_indirect(padap,
				 pcie_pio->ireg_addr,
				 pcie_pio->ireg_data,
				 buff,
				 pcie_pio->ireg_offset_range,
				 pcie_pio->ireg_local_offset);
		ch_pcie++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
/* Collect the PM RX and PM TX indirect register groups.  The
 * reservation is sized as n * 2 because the RX and TX tables have the
 * same number of rows here.
 */
int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_pm;
	int i, rc, n;
	u32 size;

	n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n * 2;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ch_pm = (struct ireg_buf *)temp_buff.data;
	/* PM_RX */
	for (i = 0; i < n; i++) {
		struct ireg_field *pm_pio = &ch_pm->tp_pio;
		u32 *buff = ch_pm->outbuf;

		pm_pio->ireg_addr = t5_pm_rx_array[i][0];
		pm_pio->ireg_data = t5_pm_rx_array[i][1];
		pm_pio->ireg_local_offset = t5_pm_rx_array[i][2];
		pm_pio->ireg_offset_range = t5_pm_rx_array[i][3];
		t4_read_indirect(padap,
				 pm_pio->ireg_addr,
				 pm_pio->ireg_data,
				 buff,
				 pm_pio->ireg_offset_range,
				 pm_pio->ireg_local_offset);
		ch_pm++;
	}

	/* PM_TX */
	n = sizeof(t5_pm_tx_array) / (IREG_NUM_ELEM * sizeof(u32));
	for (i = 0; i < n; i++) {
		struct ireg_field *pm_pio = &ch_pm->tp_pio;
		u32 *buff = ch_pm->outbuf;

		pm_pio->ireg_addr = t5_pm_tx_array[i][0];
		pm_pio->ireg_data = t5_pm_tx_array[i][1];
		pm_pio->ireg_local_offset = t5_pm_tx_array[i][2];
		pm_pio->ireg_offset_range = t5_pm_tx_array[i][3];
		t4_read_indirect(padap,
				 pm_pio->ireg_addr,
				 pm_pio->ireg_data,
				 buff,
				 pm_pio->ireg_offset_range,
				 pm_pio->ireg_local_offset);
		ch_pm++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
/* Collect the MA indirect registers (T6 and later only).  The second
 * table (t6_ma_ireg_array2) is read one register at a time with a 0x20
 * stride between local offsets.
 */
int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ma_indr;
	int i, rc, n;
	u32 size, j;

	/* MA indirect registers only exist on T6 and later. */
	if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6)
		return CUDBG_STATUS_ENTITY_NOT_FOUND;

	/* NOTE(review): size assumes both tables have the same row count
	 * (n * 2); t6_ma_ireg_array has 3 rows and t6_ma_ireg_array2 has
	 * 2, so this over-reserves by one ireg_buf — confirm intended.
	 */
	n = sizeof(t6_ma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n * 2;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ma_indr = (struct ireg_buf *)temp_buff.data;
	for (i = 0; i < n; i++) {
		struct ireg_field *ma_fli = &ma_indr->tp_pio;
		u32 *buff = ma_indr->outbuf;

		ma_fli->ireg_addr = t6_ma_ireg_array[i][0];
		ma_fli->ireg_data = t6_ma_ireg_array[i][1];
		ma_fli->ireg_local_offset = t6_ma_ireg_array[i][2];
		ma_fli->ireg_offset_range = t6_ma_ireg_array[i][3];
		t4_read_indirect(padap, ma_fli->ireg_addr, ma_fli->ireg_data,
				 buff, ma_fli->ireg_offset_range,
				 ma_fli->ireg_local_offset);
		ma_indr++;
	}

	n = sizeof(t6_ma_ireg_array2) / (IREG_NUM_ELEM * sizeof(u32));
	for (i = 0; i < n; i++) {
		struct ireg_field *ma_fli = &ma_indr->tp_pio;
		u32 *buff = ma_indr->outbuf;

		ma_fli->ireg_addr = t6_ma_ireg_array2[i][0];
		ma_fli->ireg_data = t6_ma_ireg_array2[i][1];
		ma_fli->ireg_local_offset = t6_ma_ireg_array2[i][2];
		/* Read one value per 0x20-spaced local offset. */
		for (j = 0; j < t6_ma_ireg_array2[i][3]; j++) {
			t4_read_indirect(padap, ma_fli->ireg_addr,
					 ma_fli->ireg_data, buff, 1,
					 ma_fli->ireg_local_offset);
			buff++;
			ma_fli->ireg_local_offset += 0x20;
		}
		ma_indr++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
/* Collect the UP CIM indirect registers via t4_cim_read(), using the
 * T5 or T6 group table as appropriate (both tables have the same
 * number of rows, so sizing by the T5 table is valid for both).
 */
int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
				  struct cudbg_buffer *dbg_buff,
				  struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *up_cim;
	int i, rc, n;
	u32 size;

	n = sizeof(t5_up_cim_reg_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	up_cim = (struct ireg_buf *)temp_buff.data;
	for (i = 0; i < n; i++) {
		struct ireg_field *up_cim_reg = &up_cim->tp_pio;
		u32 *buff = up_cim->outbuf;

		if (is_t5(padap->params.chip)) {
			up_cim_reg->ireg_addr = t5_up_cim_reg_array[i][0];
			up_cim_reg->ireg_data = t5_up_cim_reg_array[i][1];
			up_cim_reg->ireg_local_offset =
						t5_up_cim_reg_array[i][2];
			up_cim_reg->ireg_offset_range =
						t5_up_cim_reg_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0];
			up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1];
			up_cim_reg->ireg_local_offset =
						t6_up_cim_reg_array[i][2];
			up_cim_reg->ireg_offset_range =
						t6_up_cim_reg_array[i][3];
		}

		/* Unlike t4_read_indirect(), t4_cim_read() can fail;
		 * release the reservation and bail on error.
		 */
		rc = t4_cim_read(padap, up_cim_reg->ireg_local_offset,
				 up_cim_reg->ireg_offset_range, buff);
		if (rc) {
			cudbg_put_buff(&temp_buff, dbg_buff);
			return rc;
		}
		up_cim++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
/* Collect a snapshot of the firmware mailbox command log.
 *
 * Walks the adapter's circular mbox log starting at the current cursor,
 * copying each used entry (timestamp != 0) into the dump buffer together
 * with the command flits split into hi/lo 32-bit words for the decoder.
 */
int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_mbox_log *mboxlog = NULL;
	struct cudbg_buffer temp_buff = { 0 };
	struct mbox_cmd_log *log = NULL;
	struct mbox_cmd *entry;
	unsigned int entry_idx;
	u16 mbox_cmds;
	int i, k, rc;
	u64 flit;
	u32 size;

	log = padap->mbox_log;
	mbox_cmds = padap->mbox_log->size;
	/* Worst case: every log slot is in use. */
	size = sizeof(struct cudbg_mbox_log) * mbox_cmds;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	mboxlog = (struct cudbg_mbox_log *)temp_buff.data;
	for (k = 0; k < mbox_cmds; k++) {
		/* The log is a ring buffer; wrap the index past the end. */
		entry_idx = log->cursor + k;
		if (entry_idx >= log->size)
			entry_idx -= log->size;

		entry = mbox_cmd_log_entry(log, entry_idx);
		/* skip over unused entries */
		if (entry->timestamp == 0)
			continue;

		memcpy(&mboxlog->entry, entry, sizeof(struct mbox_cmd));
		/* Split each 64-bit command flit into 32-bit halves so the
		 * decoder does not depend on host endianness/word size.
		 */
		for (i = 0; i < MBOX_LEN / 8; i++) {
			flit = entry->cmd[i];
			mboxlog->hi[i] = (u32)(flit >> 32);
			mboxlog->lo[i] = (u32)flit;
		}
		mboxlog++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
/* Collect the HMA indirect register dump.  HMA exists only on T6 and
 * later chips; earlier chips report the entity as not found.
 */
int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *hma_indr;
	int n, i, rc;
	u32 size;

	if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6)
		return CUDBG_STATUS_ENTITY_NOT_FOUND;

	n = sizeof(t6_hma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = n * sizeof(struct ireg_buf);
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	hma_indr = (struct ireg_buf *)temp_buff.data;
	for (i = 0; i < n; i++, hma_indr++) {
		struct ireg_field *fld = &hma_indr->tp_pio;

		/* Each table row is {addr, data, local offset, range}. */
		fld->ireg_addr = t6_hma_ireg_array[i][0];
		fld->ireg_data = t6_hma_ireg_array[i][1];
		fld->ireg_local_offset = t6_hma_ireg_array[i][2];
		fld->ireg_offset_range = t6_hma_ireg_array[i][3];
		t4_read_indirect(padap, fld->ireg_addr, fld->ireg_data,
				 hma_indr->outbuf, fld->ireg_offset_range,
				 fld->ireg_local_offset);
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
/*
* Copyright (C) 2017 Chelsio Communications. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
*/
#ifndef __CUDBG_LIB_H__
#define __CUDBG_LIB_H__
int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
int cudbg_collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
int cudbg_collect_cim_ibq_tp1(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
int cudbg_collect_cim_ibq_ulp(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
int cudbg_collect_cim_ibq_sge0(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
int cudbg_collect_cim_ibq_sge1(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
int cudbg_collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
int cudbg_collect_cim_obq_ulp0(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
int cudbg_collect_cim_obq_ulp1(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
int cudbg_collect_cim_obq_ulp2(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
int cudbg_collect_cim_obq_ulp3(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
int cudbg_collect_cim_obq_sge(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
int cudbg_collect_cim_obq_ncsi(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
int cudbg_collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
int cudbg_collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i);
void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
struct cudbg_entity_hdr *entity_hdr);
#endif /* __CUDBG_LIB_H__ */
/*
* Copyright (C) 2017 Chelsio Communications. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
*/
#ifndef __CUDBG_LIB_COMMON_H__
#define __CUDBG_LIB_COMMON_H__
#define CUDBG_SIGNATURE 67856866 /* CUDB in ascii */
enum cudbg_dump_type {
CUDBG_DUMP_TYPE_MINI = 1,
};
enum cudbg_compression_type {
CUDBG_COMPRESSION_NONE = 1,
};
/* Global header placed at the very start of a cudbg dump.  Identifies
 * the dump format for the decoder and locates the entity header area
 * that immediately follows it.
 */
struct cudbg_hdr {
	u32 signature;		/* must be CUDBG_SIGNATURE */
	u32 hdr_len;		/* size of this header in bytes */
	u16 major_ver;		/* dump format major version */
	u16 minor_ver;		/* dump format minor version */
	u32 data_len;		/* total dump length, headers included */
	u32 hdr_flags;
	u16 max_entities;	/* number of entity headers that follow */
	u8 chip_ver;		/* adapter chip revision (params.chip) */
	u8 dump_type:3;		/* enum cudbg_dump_type */
	u8 reserved1:1;
	u8 compress_type:4;	/* enum cudbg_compression_type */
	u32 reserved[8];
};
/* Per-entity header stored in the entity header area of the dump;
 * records where the entity's data lives and how its collection went.
 */
struct cudbg_entity_hdr {
	u32 entity_type;	/* enum cudbg_dbg_entity_type */
	u32 start_offset;	/* byte offset of the entity data in the dump */
	u32 size;		/* collected size in bytes; 0 if collection failed */
	int hdr_flags;		/* collection return/status code */
	u32 sys_warn;
	u32 sys_err;
	u8 num_pad;		/* alignment padding — presumably set by
				 * cudbg_align_debug_buffer(); confirm there
				 */
	u8 flag; /* bit 0 is used to indicate ext data */
	u8 reserved1[2];
	u32 next_ext_offset; /* pointer to next extended entity meta data */
	u32 reserved[5];
};
struct cudbg_buffer {
u32 size;
u32 offset;
char *data;
};
struct cudbg_error {
int sys_err;
int sys_warn;
int app_err;
};
#define CDUMP_MAX_COMP_BUF_SIZE ((64 * 1024) - 1)
#define CUDBG_CHUNK_SIZE ((CDUMP_MAX_COMP_BUF_SIZE / 1024) * 1024)
int cudbg_get_buff(struct cudbg_buffer *pdbg_buff, u32 size,
struct cudbg_buffer *pin_buff);
void cudbg_put_buff(struct cudbg_buffer *pin_buff,
struct cudbg_buffer *pdbg_buff);
void cudbg_update_buff(struct cudbg_buffer *pin_buff,
struct cudbg_buffer *pout_buff);
#endif /* __CUDBG_LIB_COMMON_H__ */
......@@ -909,6 +909,9 @@ struct adapter {
/* TC flower offload */
DECLARE_HASHTABLE(flower_anymatch_tbl, 9);
struct timer_list flower_stats_timer;
/* Ethtool Dump */
struct ethtool_dump eth_dump;
};
/* Support for "sched-class" command to allow a TX Scheduling Class to be
......@@ -1456,7 +1459,7 @@ unsigned int qtimer_val(const struct adapter *adap,
int t4_init_devlog_params(struct adapter *adapter);
int t4_init_sge_params(struct adapter *adapter);
int t4_init_tp_params(struct adapter *adap);
int t4_init_tp_params(struct adapter *adap, bool sleep_ok);
int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
int t4_init_rss_mode(struct adapter *adap, int mbox);
int t4_init_portinfo(struct port_info *pi, int mbox,
......@@ -1470,14 +1473,15 @@ int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
unsigned int flags, unsigned int defq);
int t4_read_rss(struct adapter *adapter, u16 *entries);
void t4_read_rss_key(struct adapter *adapter, u32 *key);
void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx);
void t4_read_rss_key(struct adapter *adapter, u32 *key, bool sleep_ok);
void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
bool sleep_ok);
void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
u32 *valp);
u32 *valp, bool sleep_ok);
void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
u32 *vfl, u32 *vfh);
u32 t4_read_rss_pf_map(struct adapter *adapter);
u32 t4_read_rss_pf_mask(struct adapter *adapter);
u32 *vfl, u32 *vfh, bool sleep_ok);
u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok);
u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok);
unsigned int t4_get_mps_bg_map(struct adapter *adapter, int pidx);
unsigned int t4_get_tp_ch_map(struct adapter *adapter, int pidx);
......@@ -1508,14 +1512,18 @@ void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN]);
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
unsigned int mask, unsigned int val);
void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr);
void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st);
void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st);
void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st);
void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st);
void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
bool sleep_ok);
void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
bool sleep_ok);
void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
bool sleep_ok);
void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
bool sleep_ok);
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
struct tp_tcp_stats *v6);
struct tp_tcp_stats *v6, bool sleep_ok);
void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
struct tp_fcoe_stats *st);
struct tp_fcoe_stats *st, bool sleep_ok);
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
const unsigned short *alpha, const unsigned short *beta);
......@@ -1624,6 +1632,13 @@ void t4_idma_monitor(struct adapter *adapter,
int hz, int ticks);
int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
unsigned int naddr, u8 *addr);
void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
u32 start_index, bool sleep_ok);
void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
u32 start_index, bool sleep_ok);
void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs,
u32 start_index, bool sleep_ok);
void t4_uld_mem_free(struct adapter *adap);
int t4_uld_mem_alloc(struct adapter *adap);
void t4_uld_clean_up(struct adapter *adap);
......
/*
* Copyright (C) 2017 Chelsio Communications. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
*/
#include "t4_regs.h"
#include "cxgb4.h"
#include "cxgb4_cudbg.h"
#include "cudbg_entity.h"
static const struct cxgb4_collect_entity cxgb4_collect_mem_dump[] = {
{ CUDBG_EDC0, cudbg_collect_edc0_meminfo },
{ CUDBG_EDC1, cudbg_collect_edc1_meminfo },
};
static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = {
{ CUDBG_MBOX_LOG, cudbg_collect_mbox_log },
{ CUDBG_DEV_LOG, cudbg_collect_fw_devlog },
{ CUDBG_REG_DUMP, cudbg_collect_reg_dump },
{ CUDBG_CIM_IBQ_TP0, cudbg_collect_cim_ibq_tp0 },
{ CUDBG_CIM_IBQ_TP1, cudbg_collect_cim_ibq_tp1 },
{ CUDBG_CIM_IBQ_ULP, cudbg_collect_cim_ibq_ulp },
{ CUDBG_CIM_IBQ_SGE0, cudbg_collect_cim_ibq_sge0 },
{ CUDBG_CIM_IBQ_SGE1, cudbg_collect_cim_ibq_sge1 },
{ CUDBG_CIM_IBQ_NCSI, cudbg_collect_cim_ibq_ncsi },
{ CUDBG_CIM_OBQ_ULP0, cudbg_collect_cim_obq_ulp0 },
{ CUDBG_CIM_OBQ_ULP1, cudbg_collect_cim_obq_ulp1 },
{ CUDBG_CIM_OBQ_ULP2, cudbg_collect_cim_obq_ulp2 },
{ CUDBG_CIM_OBQ_ULP3, cudbg_collect_cim_obq_ulp3 },
{ CUDBG_CIM_OBQ_SGE, cudbg_collect_cim_obq_sge },
{ CUDBG_CIM_OBQ_NCSI, cudbg_collect_cim_obq_ncsi },
{ CUDBG_TP_INDIRECT, cudbg_collect_tp_indirect },
{ CUDBG_SGE_INDIRECT, cudbg_collect_sge_indirect },
{ CUDBG_CIM_OBQ_RXQ0, cudbg_collect_obq_sge_rx_q0 },
{ CUDBG_CIM_OBQ_RXQ1, cudbg_collect_obq_sge_rx_q1 },
{ CUDBG_PCIE_INDIRECT, cudbg_collect_pcie_indirect },
{ CUDBG_PM_INDIRECT, cudbg_collect_pm_indirect },
{ CUDBG_MA_INDIRECT, cudbg_collect_ma_indirect },
{ CUDBG_UP_CIM_INDIRECT, cudbg_collect_up_cim_indirect },
{ CUDBG_HMA_INDIRECT, cudbg_collect_hma_indirect },
};
/* Return the worst-case buffer size in bytes needed to dump the given
 * cudbg @entity on this adapter.  Returns 0 for entities that are
 * unknown or not present on the current chip revision.
 */
static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
{
	u32 value, n = 0, len = 0;

	switch (entity) {
	case CUDBG_REG_DUMP:
		/* Register map size depends on the chip generation. */
		switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
		case CHELSIO_T4:
			len = T4_REGMAP_SIZE;
			break;
		case CHELSIO_T5:
		case CHELSIO_T6:
			len = T5_REGMAP_SIZE;
			break;
		default:
			break;
		}
		break;
	case CUDBG_DEV_LOG:
		len = adap->params.devlog.size;
		break;
	case CUDBG_CIM_IBQ_TP0:
	case CUDBG_CIM_IBQ_TP1:
	case CUDBG_CIM_IBQ_ULP:
	case CUDBG_CIM_IBQ_SGE0:
	case CUDBG_CIM_IBQ_SGE1:
	case CUDBG_CIM_IBQ_NCSI:
		len = CIM_IBQ_SIZE * 4 * sizeof(u32);
		break;
	case CUDBG_CIM_OBQ_ULP0:
	case CUDBG_CIM_OBQ_ULP1:
	case CUDBG_CIM_OBQ_ULP2:
	case CUDBG_CIM_OBQ_ULP3:
	case CUDBG_CIM_OBQ_SGE:
	case CUDBG_CIM_OBQ_NCSI:
	case CUDBG_CIM_OBQ_RXQ0:
	case CUDBG_CIM_OBQ_RXQ1:
		len = 6 * CIM_OBQ_SIZE * 4 * sizeof(u32);
		break;
	case CUDBG_EDC0:
		/* EDC0 size (in MB) comes from the BAR register, and only
		 * if the memory is enabled in MA_TARGET_MEM_ENABLE.
		 */
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EDRAM0_ENABLE_F) {
			value = t4_read_reg(adap, MA_EDRAM0_BAR_A);
			len = EDRAM0_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_EDC1:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EDRAM1_ENABLE_F) {
			value = t4_read_reg(adap, MA_EDRAM1_BAR_A);
			len = EDRAM1_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_TP_INDIRECT:
		/* Sum of the chip's TP PIO, TM PIO and MIB tables. */
		switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
		case CHELSIO_T5:
			n = sizeof(t5_tp_pio_array) +
			    sizeof(t5_tp_tm_pio_array) +
			    sizeof(t5_tp_mib_index_array);
			break;
		case CHELSIO_T6:
			n = sizeof(t6_tp_pio_array) +
			    sizeof(t6_tp_tm_pio_array) +
			    sizeof(t6_tp_mib_index_array);
			break;
		default:
			break;
		}
		n = n / (IREG_NUM_ELEM * sizeof(u32));
		len = sizeof(struct ireg_buf) * n;
		break;
	case CUDBG_SGE_INDIRECT:
		len = sizeof(struct ireg_buf) * 2;
		break;
	case CUDBG_PCIE_INDIRECT:
		n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
		len = sizeof(struct ireg_buf) * n * 2;
		break;
	case CUDBG_PM_INDIRECT:
		n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
		len = sizeof(struct ireg_buf) * n * 2;
		break;
	case CUDBG_MA_INDIRECT:
		/* MA indirect registers exist on T6 and later only. */
		if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
			n = sizeof(t6_ma_ireg_array) /
			    (IREG_NUM_ELEM * sizeof(u32));
			len = sizeof(struct ireg_buf) * n * 2;
		}
		break;
	case CUDBG_UP_CIM_INDIRECT:
		n = sizeof(t5_up_cim_reg_array) / (IREG_NUM_ELEM * sizeof(u32));
		len = sizeof(struct ireg_buf) * n;
		break;
	case CUDBG_MBOX_LOG:
		len = sizeof(struct cudbg_mbox_log) * adap->mbox_log->size;
		break;
	case CUDBG_HMA_INDIRECT:
		/* HMA exists on T6 and later only. */
		if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
			n = sizeof(t6_hma_ireg_array) /
			    (IREG_NUM_ELEM * sizeof(u32));
			len = sizeof(struct ireg_buf) * n;
		}
		break;
	default:
		break;
	}
	return len;
}
/* Return the total number of bytes needed for all dump entities selected
 * by @flag (HW and/or on-chip memory groups), excluding the cudbg global
 * header and the entity header area.
 */
u32 cxgb4_get_dump_length(struct adapter *adap, u32 flag)
{
	u32 len = 0;
	u32 i;

	if (flag & CXGB4_ETH_DUMP_HW)
		for (i = 0; i < ARRAY_SIZE(cxgb4_collect_hw_dump); i++)
			len += cxgb4_get_entity_length(adap,
					cxgb4_collect_hw_dump[i].entity);

	if (flag & CXGB4_ETH_DUMP_MEM)
		for (i = 0; i < ARRAY_SIZE(cxgb4_collect_mem_dump); i++)
			len += cxgb4_get_entity_length(adap,
					cxgb4_collect_mem_dump[i].entity);

	return len;
}
/* Run the collect callback of every entity in @e_arr, filling in the
 * corresponding entity header inside @buf and accounting the bytes
 * written into *@tot_size.  Entities that would overflow the output
 * buffer are skipped; a collection error is recorded in the entity
 * header and does not abort the remaining entities.
 */
static void cxgb4_cudbg_collect_entity(struct cudbg_init *pdbg_init,
				       struct cudbg_buffer *dbg_buff,
				       const struct cxgb4_collect_entity *e_arr,
				       u32 arr_size, void *buf, u32 *tot_size)
{
	struct adapter *adap = pdbg_init->adap;
	struct cudbg_error cudbg_err = { 0 };
	struct cudbg_entity_hdr *entity_hdr;
	u32 entity_size, i;
	u32 total_size = 0;
	int ret;

	for (i = 0; i < arr_size; i++) {
		const struct cxgb4_collect_entity *e = &e_arr[i];

		/* Skip entities that won't fit in output buffer */
		entity_size = cxgb4_get_entity_length(adap, e->entity);
		if (entity_size >
		    pdbg_init->outbuf_size - *tot_size - total_size)
			continue;

		entity_hdr = cudbg_get_entity_hdr(buf, e->entity);
		entity_hdr->entity_type = e->entity;
		entity_hdr->start_offset = dbg_buff->offset;
		memset(&cudbg_err, 0, sizeof(struct cudbg_error));
		ret = e->collect_cb(pdbg_init, dbg_buff, &cudbg_err);
		if (ret) {
			/* Drop any partial data for this entity by
			 * rewinding the buffer offset to where it began.
			 */
			entity_hdr->size = 0;
			dbg_buff->offset = entity_hdr->start_offset;
		} else {
			cudbg_align_debug_buffer(dbg_buff, entity_hdr);
		}

		/* Log error and continue with next entity */
		if (cudbg_err.sys_err)
			ret = CUDBG_SYSTEM_ERROR;

		entity_hdr->hdr_flags = ret;
		entity_hdr->sys_err = cudbg_err.sys_err;
		entity_hdr->sys_warn = cudbg_err.sys_warn;
		total_size += entity_hdr->size;
	}
	*tot_size += total_size;
}
/* Collect a cudbg dump into @buf.
 *
 * Writes the global cudbg header, reserves one entity header per
 * supported entity, then runs the HW and/or memory entity collectors
 * selected by @flag.  On success *@buf_size is updated to the number of
 * bytes actually used.
 *
 * Returns 0 on success or -ENOMEM if @buf cannot even hold the header
 * area.
 */
int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size,
			u32 flag)
{
	struct cudbg_init cudbg_init = { 0 };
	struct cudbg_buffer dbg_buff = { 0 };
	u32 size, min_size, total_size = 0;
	struct cudbg_hdr *cudbg_hdr;

	size = *buf_size;

	/* Check that the buffer can hold the global header plus the full
	 * entity header area *before* writing anything into it.  The
	 * original code filled in the header first, which would scribble
	 * past the end of an undersized buffer.
	 */
	min_size = sizeof(struct cudbg_hdr) +
		   sizeof(struct cudbg_entity_hdr) * CUDBG_MAX_ENTITY;
	if (size < min_size)
		return -ENOMEM;

	cudbg_init.adap = adap;
	cudbg_init.outbuf = buf;
	cudbg_init.outbuf_size = size;

	dbg_buff.data = buf;
	dbg_buff.size = size;
	dbg_buff.offset = 0;

	cudbg_hdr = (struct cudbg_hdr *)buf;
	cudbg_hdr->signature = CUDBG_SIGNATURE;
	cudbg_hdr->hdr_len = sizeof(struct cudbg_hdr);
	cudbg_hdr->major_ver = CUDBG_MAJOR_VERSION;
	cudbg_hdr->minor_ver = CUDBG_MINOR_VERSION;
	cudbg_hdr->max_entities = CUDBG_MAX_ENTITY;
	cudbg_hdr->chip_ver = adap->params.chip;
	cudbg_hdr->dump_type = CUDBG_DUMP_TYPE_MINI;
	cudbg_hdr->compress_type = CUDBG_COMPRESSION_NONE;

	/* Entity data starts right after the header area. */
	dbg_buff.offset += min_size;
	total_size = dbg_buff.offset;

	if (flag & CXGB4_ETH_DUMP_HW)
		cxgb4_cudbg_collect_entity(&cudbg_init, &dbg_buff,
					   cxgb4_collect_hw_dump,
					   ARRAY_SIZE(cxgb4_collect_hw_dump),
					   buf,
					   &total_size);

	if (flag & CXGB4_ETH_DUMP_MEM)
		cxgb4_cudbg_collect_entity(&cudbg_init, &dbg_buff,
					   cxgb4_collect_mem_dump,
					   ARRAY_SIZE(cxgb4_collect_mem_dump),
					   buf,
					   &total_size);

	cudbg_hdr->data_len = total_size;
	*buf_size = total_size;
	return 0;
}
void cxgb4_init_ethtool_dump(struct adapter *adapter)
{
adapter->eth_dump.flag = CXGB4_ETH_DUMP_NONE;
adapter->eth_dump.version = adapter->params.fw_vers;
adapter->eth_dump.len = 0;
}
/*
* Copyright (C) 2017 Chelsio Communications. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
*/
#ifndef __CXGB4_CUDBG_H__
#define __CXGB4_CUDBG_H__
#include "cudbg_if.h"
#include "cudbg_lib_common.h"
#include "cudbg_lib.h"
typedef int (*cudbg_collect_callback_t)(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
struct cxgb4_collect_entity {
enum cudbg_dbg_entity_type entity;
cudbg_collect_callback_t collect_cb;
};
enum CXGB4_ETHTOOL_DUMP_FLAGS {
CXGB4_ETH_DUMP_NONE = ETH_FW_DUMP_DISABLE,
CXGB4_ETH_DUMP_MEM = (1 << 0), /* On-Chip Memory Dumps */
CXGB4_ETH_DUMP_HW = (1 << 1), /* various FW and HW dumps */
};
u32 cxgb4_get_dump_length(struct adapter *adap, u32 flag);
int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size,
u32 flag);
void cxgb4_init_ethtool_dump(struct adapter *adapter);
#endif /* __CXGB4_CUDBG_H__ */
......@@ -2211,7 +2211,7 @@ static int rss_key_show(struct seq_file *seq, void *v)
{
u32 key[10];
t4_read_rss_key(seq->private, key);
t4_read_rss_key(seq->private, key, true);
seq_printf(seq, "%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x\n",
key[9], key[8], key[7], key[6], key[5], key[4], key[3],
key[2], key[1], key[0]);
......@@ -2248,7 +2248,7 @@ static ssize_t rss_key_write(struct file *file, const char __user *buf,
}
}
t4_write_rss_key(adap, key, -1);
t4_write_rss_key(adap, key, -1, true);
return count;
}
......@@ -2325,12 +2325,13 @@ static int rss_pf_config_open(struct inode *inode, struct file *file)
return -ENOMEM;
pfconf = (struct rss_pf_conf *)p->data;
rss_pf_map = t4_read_rss_pf_map(adapter);
rss_pf_mask = t4_read_rss_pf_mask(adapter);
rss_pf_map = t4_read_rss_pf_map(adapter, true);
rss_pf_mask = t4_read_rss_pf_mask(adapter, true);
for (pf = 0; pf < 8; pf++) {
pfconf[pf].rss_pf_map = rss_pf_map;
pfconf[pf].rss_pf_mask = rss_pf_mask;
t4_read_rss_pf_config(adapter, pf, &pfconf[pf].rss_pf_config);
t4_read_rss_pf_config(adapter, pf, &pfconf[pf].rss_pf_config,
true);
}
return 0;
}
......@@ -2393,7 +2394,7 @@ static int rss_vf_config_open(struct inode *inode, struct file *file)
vfconf = (struct rss_vf_conf *)p->data;
for (vf = 0; vf < vfcount; vf++) {
t4_read_rss_vf_config(adapter, vf, &vfconf[vf].rss_vf_vfl,
&vfconf[vf].rss_vf_vfh);
&vfconf[vf].rss_vf_vfh, true);
}
return 0;
}
......
......@@ -21,6 +21,7 @@
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4fw_api.h"
#include "cxgb4_cudbg.h"
#define EEPROM_MAGIC 0x38E2F10C
......@@ -335,10 +336,10 @@ static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)
memset(s, 0, sizeof(*s));
spin_lock(&adap->stats_lock);
t4_tp_get_tcp_stats(adap, &v4, &v6);
t4_tp_get_rdma_stats(adap, &rdma_stats);
t4_get_usm_stats(adap, &usm_stats);
t4_tp_get_err_stats(adap, &err_stats);
t4_tp_get_tcp_stats(adap, &v4, &v6, false);
t4_tp_get_rdma_stats(adap, &rdma_stats, false);
t4_get_usm_stats(adap, &usm_stats, false);
t4_tp_get_err_stats(adap, &err_stats, false);
spin_unlock(&adap->stats_lock);
s->db_drop = adap->db_stats.db_drop;
......@@ -388,9 +389,9 @@ static void collect_channel_stats(struct adapter *adap, struct channel_stats *s,
memset(s, 0, sizeof(*s));
spin_lock(&adap->stats_lock);
t4_tp_get_cpl_stats(adap, &cpl_stats);
t4_tp_get_err_stats(adap, &err_stats);
t4_get_fcoe_stats(adap, i, &fcoe_stats);
t4_tp_get_cpl_stats(adap, &cpl_stats, false);
t4_tp_get_err_stats(adap, &err_stats, false);
t4_get_fcoe_stats(adap, i, &fcoe_stats, false);
spin_unlock(&adap->stats_lock);
s->cpl_req = cpl_stats.req[i];
......@@ -1374,6 +1375,56 @@ static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
return -EOPNOTSUPP;
}
static int set_dump(struct net_device *dev, struct ethtool_dump *eth_dump)
{
struct adapter *adapter = netdev2adap(dev);
u32 len = 0;
len = sizeof(struct cudbg_hdr) +
sizeof(struct cudbg_entity_hdr) * CUDBG_MAX_ENTITY;
len += cxgb4_get_dump_length(adapter, eth_dump->flag);
adapter->eth_dump.flag = eth_dump->flag;
adapter->eth_dump.len = len;
return 0;
}
static int get_dump_flag(struct net_device *dev, struct ethtool_dump *eth_dump)
{
struct adapter *adapter = netdev2adap(dev);
eth_dump->flag = adapter->eth_dump.flag;
eth_dump->len = adapter->eth_dump.len;
eth_dump->version = adapter->eth_dump.version;
return 0;
}
/* ethtool .get_dump_data: collect the selected dump into @buf.
 *
 * Returns -ENOENT if no dump type has been selected via set_dump,
 * -ENOMEM if the caller's buffer is too small for the current required
 * size, or the error from cxgb4_cudbg_collect().
 */
static int get_dump_data(struct net_device *dev, struct ethtool_dump *eth_dump,
			 void *buf)
{
	struct adapter *adapter = netdev2adap(dev);
	u32 len = 0;
	int ret = 0;

	if (adapter->eth_dump.flag == CXGB4_ETH_DUMP_NONE)
		return -ENOENT;

	/* Recompute the required size rather than trusting the cached
	 * eth_dump.len; entity lengths may have changed since set_dump.
	 */
	len = sizeof(struct cudbg_hdr) +
	      sizeof(struct cudbg_entity_hdr) * CUDBG_MAX_ENTITY;
	len += cxgb4_get_dump_length(adapter, adapter->eth_dump.flag);
	if (eth_dump->len < len)
		return -ENOMEM;

	/* cxgb4_cudbg_collect() updates @len to the bytes actually used. */
	ret = cxgb4_cudbg_collect(adapter, buf, &len, adapter->eth_dump.flag);
	if (ret)
		return ret;

	eth_dump->flag = adapter->eth_dump.flag;
	eth_dump->len = len;
	eth_dump->version = adapter->eth_dump.version;
	return 0;
}
static const struct ethtool_ops cxgb_ethtool_ops = {
.get_link_ksettings = get_link_ksettings,
.set_link_ksettings = set_link_ksettings,
......@@ -1404,7 +1455,10 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
.get_rxfh = get_rss_table,
.set_rxfh = set_rss_table,
.flash_device = set_flash,
.get_ts_info = get_ts_info
.get_ts_info = get_ts_info,
.set_dump = set_dump,
.get_dump_flag = get_dump_flag,
.get_dump_data = get_dump_data,
};
void cxgb4_set_ethtool_ops(struct net_device *netdev)
......
......@@ -81,6 +81,7 @@
#include "cxgb4_tc_u32.h"
#include "cxgb4_tc_flower.h"
#include "cxgb4_ptp.h"
#include "cxgb4_cudbg.h"
char cxgb4_driver_name[] = KBUILD_MODNAME;
......@@ -1638,7 +1639,7 @@ void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
struct adapter *adap = pci_get_drvdata(pdev);
spin_lock(&adap->stats_lock);
t4_tp_get_tcp_stats(adap, v4, v6);
t4_tp_get_tcp_stats(adap, v4, v6, false);
spin_unlock(&adap->stats_lock);
}
EXPORT_SYMBOL(cxgb4_get_tcp_stats);
......@@ -4076,7 +4077,7 @@ static int adap_init0(struct adapter *adap)
}
t4_init_sge_params(adap);
adap->flags |= FW_OK;
t4_init_tp_params(adap);
t4_init_tp_params(adap, true);
return 0;
/*
......@@ -5035,6 +5036,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
cxgb4_set_ethtool_ops(netdev);
}
cxgb4_init_ethtool_dump(adapter);
pci_set_drvdata(pdev, adapter);
if (adapter->flags & FW_OK) {
......
......@@ -5052,23 +5052,26 @@ static unsigned int t4_use_ldst(struct adapter *adap)
}
/**
* t4_fw_tp_pio_rw - Access TP PIO through LDST
* @adap: the adapter
* @vals: where the indirect register values are stored/written
* @nregs: how many indirect registers to read/write
* @start_idx: index of first indirect register to read/write
* @rw: Read (1) or Write (0)
* t4_tp_fw_ldst_rw - Access TP indirect register through LDST
* @adap: the adapter
* @cmd: TP fw ldst address space type
* @vals: where the indirect register values are stored/written
* @nregs: how many indirect registers to read/write
* @start_idx: index of first indirect register to read/write
* @rw: Read (1) or Write (0)
* @sleep_ok: if true we may sleep while awaiting command completion
*
* Access TP PIO registers through LDST
* Access TP indirect registers through LDST
*/
static void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
unsigned int start_index, unsigned int rw)
static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals,
unsigned int nregs, unsigned int start_index,
unsigned int rw, bool sleep_ok)
{
int ret, i;
int cmd = FW_LDST_ADDRSPC_TP_PIO;
int ret = 0;
unsigned int i;
struct fw_ldst_cmd c;
for (i = 0 ; i < nregs; i++) {
for (i = 0; i < nregs; i++) {
memset(&c, 0, sizeof(c));
c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
FW_CMD_REQUEST_F |
......@@ -5079,26 +5082,147 @@ static void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
c.u.addrval.addr = cpu_to_be32(start_index + i);
c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
if (!ret && rw)
ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c,
sleep_ok);
if (ret)
return ret;
if (rw)
vals[i] = be32_to_cpu(c.u.addrval.val);
}
return 0;
}
/**
* t4_tp_indirect_rw - Read/Write TP indirect register through LDST or backdoor
* @adap: the adapter
* @reg_addr: Address Register
* @reg_data: Data register
* @buff: where the indirect register values are stored/written
* @nregs: how many indirect registers to read/write
* @start_index: index of first indirect register to read/write
* @rw: READ(1) or WRITE(0)
* @sleep_ok: if true we may sleep while awaiting command completion
*
* Read/Write TP indirect registers through LDST if possible.
* Else, use backdoor access
**/
static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data,
			      u32 *buff, u32 nregs, u32 start_index, int rw,
			      bool sleep_ok)
{
	int rc = -EINVAL;
	int cmd;

	/* Map the indirect address register to the matching firmware LDST
	 * address-space type.  Registers with no LDST equivalent can only
	 * be reached through backdoor (direct) access.
	 */
	switch (reg_addr) {
	case TP_PIO_ADDR_A:
		cmd = FW_LDST_ADDRSPC_TP_PIO;
		break;
	case TP_TM_PIO_ADDR_A:
		cmd = FW_LDST_ADDRSPC_TP_TM_PIO;
		break;
	case TP_MIB_INDEX_A:
		cmd = FW_LDST_ADDRSPC_TP_MIB;
		break;
	default:
		goto indirect_access;
	}

	if (t4_use_ldst(adap))
		rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw,
				      sleep_ok);

indirect_access:

	/* Fall back to direct register access when LDST is unavailable or
	 * the firmware access failed (rc is still non-zero either way).
	 */
	if (rc) {
		if (rw)
			t4_read_indirect(adap, reg_addr, reg_data, buff, nregs,
					 start_index);
		else
			t4_write_indirect(adap, reg_addr, reg_data, buff, nregs,
					  start_index);
	}
}
/**
* t4_tp_pio_read - Read TP PIO registers
* @adap: the adapter
* @buff: where the indirect register values are written
* @nregs: how many indirect registers to read
* @start_index: index of first indirect register to read
* @sleep_ok: if true we may sleep while awaiting command completion
*
* Read TP PIO Registers
**/
void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
		    u32 start_index, bool sleep_ok)
{
	/* rw = 1 selects a read via LDST with backdoor fallback. */
	t4_tp_indirect_rw(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, buff, nregs,
			  start_index, 1, sleep_ok);
}
/**
* t4_tp_pio_write - Write TP PIO registers
* @adap: the adapter
* @buff: where the indirect register values are stored
* @nregs: how many indirect registers to write
* @start_index: index of first indirect register to write
* @sleep_ok: if true we may sleep while awaiting command completion
*
* Write TP PIO Registers
**/
static void t4_tp_pio_write(struct adapter *adap, u32 *buff, u32 nregs,
			    u32 start_index, bool sleep_ok)
{
	/* rw = 0 selects a write via LDST with backdoor fallback. */
	t4_tp_indirect_rw(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, buff, nregs,
			  start_index, 0, sleep_ok);
}
/**
* t4_tp_tm_pio_read - Read TP TM PIO registers
* @adap: the adapter
* @buff: where the indirect register values are written
* @nregs: how many indirect registers to read
* @start_index: index of first indirect register to read
* @sleep_ok: if true we may sleep while awaiting command completion
*
* Read TP TM PIO Registers
**/
void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
		       u32 start_index, bool sleep_ok)
{
	/* rw = 1 selects a read via LDST with backdoor fallback. */
	t4_tp_indirect_rw(adap, TP_TM_PIO_ADDR_A, TP_TM_PIO_DATA_A, buff,
			  nregs, start_index, 1, sleep_ok);
}
/**
* t4_tp_mib_read - Read TP MIB registers
* @adap: the adapter
* @buff: where the indirect register values are written
* @nregs: how many indirect registers to read
* @start_index: index of first indirect register to read
* @sleep_ok: if true we may sleep while awaiting command completion
*
* Read TP MIB Registers
**/
void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index,
		    bool sleep_ok)
{
	/* rw = 1 selects a read via LDST with backdoor fallback. */
	t4_tp_indirect_rw(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, buff, nregs,
			  start_index, 1, sleep_ok);
}
/**
* t4_read_rss_key - read the global RSS key
* @adap: the adapter
* @key: 10-entry array holding the 320-bit RSS key
* @sleep_ok: if true we may sleep while awaiting command completion
*
* Reads the global 320-bit RSS key.
*/
void t4_read_rss_key(struct adapter *adap, u32 *key)
void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok)
{
if (t4_use_ldst(adap))
t4_fw_tp_pio_rw(adap, key, 10, TP_RSS_SECRET_KEY0_A, 1);
else
t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
TP_RSS_SECRET_KEY0_A);
t4_tp_pio_read(adap, key, 10, TP_RSS_SECRET_KEY0_A, sleep_ok);
}
/**
......@@ -5106,12 +5230,14 @@ void t4_read_rss_key(struct adapter *adap, u32 *key)
* @adap: the adapter
* @key: 10-entry array holding the 320-bit RSS key
* @idx: which RSS key to write
* @sleep_ok: if true we may sleep while awaiting command completion
*
* Writes one of the RSS keys with the given 320-bit value. If @idx is
* 0..15 the corresponding entry in the RSS key table is written,
* otherwise the global RSS key is written.
*/
void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
bool sleep_ok)
{
u8 rss_key_addr_cnt = 16;
u32 vrt = t4_read_reg(adap, TP_RSS_CONFIG_VRT_A);
......@@ -5124,11 +5250,7 @@ void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
(vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3))
rss_key_addr_cnt = 32;
if (t4_use_ldst(adap))
t4_fw_tp_pio_rw(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, 0);
else
t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
TP_RSS_SECRET_KEY0_A);
t4_tp_pio_write(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, sleep_ok);
if (idx >= 0 && idx < rss_key_addr_cnt) {
if (rss_key_addr_cnt > 16)
......@@ -5146,19 +5268,15 @@ void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
* @adapter: the adapter
* @index: the entry in the PF RSS table to read
* @valp: where to store the returned value
* @sleep_ok: if true we may sleep while awaiting command completion
*
* Reads the PF RSS Configuration Table at the specified index and returns
* the value found there.
*/
void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
u32 *valp)
u32 *valp, bool sleep_ok)
{
if (t4_use_ldst(adapter))
t4_fw_tp_pio_rw(adapter, valp, 1,
TP_RSS_PF0_CONFIG_A + index, 1);
else
t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
valp, 1, TP_RSS_PF0_CONFIG_A + index);
t4_tp_pio_read(adapter, valp, 1, TP_RSS_PF0_CONFIG_A + index, sleep_ok);
}
/**
......@@ -5167,12 +5285,13 @@ void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
* @index: the entry in the VF RSS table to read
* @vfl: where to store the returned VFL
* @vfh: where to store the returned VFH
* @sleep_ok: if true we may sleep while awaiting command completion
*
* Reads the VF RSS Configuration Table at the specified index and returns
* the (VFL, VFH) values found there.
*/
void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
u32 *vfl, u32 *vfh)
u32 *vfl, u32 *vfh, bool sleep_ok)
{
u32 vrt, mask, data;
......@@ -5193,50 +5312,37 @@ void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
/* Grab the VFL/VFH values ...
*/
if (t4_use_ldst(adapter)) {
t4_fw_tp_pio_rw(adapter, vfl, 1, TP_RSS_VFL_CONFIG_A, 1);
t4_fw_tp_pio_rw(adapter, vfh, 1, TP_RSS_VFH_CONFIG_A, 1);
} else {
t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
vfl, 1, TP_RSS_VFL_CONFIG_A);
t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
vfh, 1, TP_RSS_VFH_CONFIG_A);
}
t4_tp_pio_read(adapter, vfl, 1, TP_RSS_VFL_CONFIG_A, sleep_ok);
t4_tp_pio_read(adapter, vfh, 1, TP_RSS_VFH_CONFIG_A, sleep_ok);
}
/**
 *	t4_read_rss_pf_map - read PF RSS Map
 *	@adapter: the adapter
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Reads the PF RSS Map register and returns its value.
 */
u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok)
{
	u32 pfmap;

	/* Stale pre-refactor body (t4_use_ldst branch and old signature)
	 * removed; t4_tp_pio_read() handles the access-method choice.
	 */
	t4_tp_pio_read(adapter, &pfmap, 1, TP_RSS_PF_MAP_A, sleep_ok);

	return pfmap;
}
/**
 *	t4_read_rss_pf_mask - read PF RSS Mask
 *	@adapter: the adapter
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Reads the PF RSS Mask register and returns its value.
 */
u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok)
{
	u32 pfmask;

	/* Stale pre-refactor body (t4_use_ldst branch and old signature)
	 * removed; t4_tp_pio_read() handles the access-method choice.
	 */
	t4_tp_pio_read(adapter, &pfmask, 1, TP_RSS_PF_MSK_A, sleep_ok);

	return pfmask;
}
......@@ -5245,12 +5351,13 @@ u32 t4_read_rss_pf_mask(struct adapter *adapter)
* @adap: the adapter
* @v4: holds the TCP/IP counter values
* @v6: holds the TCP/IPv6 counter values
* @sleep_ok: if true we may sleep while awaiting command completion
*
* Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
* Either @v4 or @v6 may be %NULL to skip the corresponding stats.
*/
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
struct tp_tcp_stats *v6)
struct tp_tcp_stats *v6, bool sleep_ok)
{
u32 val[TP_MIB_TCP_RXT_SEG_LO_A - TP_MIB_TCP_OUT_RST_A + 1];
......@@ -5259,16 +5366,16 @@ void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
if (v4) {
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST_A);
t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
TP_MIB_TCP_OUT_RST_A, sleep_ok);
v4->tcp_out_rsts = STAT(OUT_RST);
v4->tcp_in_segs = STAT64(IN_SEG);
v4->tcp_out_segs = STAT64(OUT_SEG);
v4->tcp_retrans_segs = STAT64(RXT_SEG);
}
if (v6) {
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST_A);
t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
TP_MIB_TCP_V6OUT_RST_A, sleep_ok);
v6->tcp_out_rsts = STAT(OUT_RST);
v6->tcp_in_segs = STAT64(IN_SEG);
v6->tcp_out_segs = STAT64(OUT_SEG);
......@@ -5283,63 +5390,66 @@ void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
* t4_tp_get_err_stats - read TP's error MIB counters
* @adap: the adapter
* @st: holds the counter values
* @sleep_ok: if true we may sleep while awaiting command completion
*
* Returns the values of TP's error counters.
*/
void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
bool sleep_ok)
{
int nchan = adap->params.arch.nchan;
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
st->mac_in_errs, nchan, TP_MIB_MAC_IN_ERR_0_A);
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
st->hdr_in_errs, nchan, TP_MIB_HDR_IN_ERR_0_A);
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
st->tcp_in_errs, nchan, TP_MIB_TCP_IN_ERR_0_A);
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
st->tnl_cong_drops, nchan, TP_MIB_TNL_CNG_DROP_0_A);
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
st->ofld_chan_drops, nchan, TP_MIB_OFD_CHN_DROP_0_A);
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
st->tnl_tx_drops, nchan, TP_MIB_TNL_DROP_0_A);
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
st->ofld_vlan_drops, nchan, TP_MIB_OFD_VLN_DROP_0_A);
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
st->tcp6_in_errs, nchan, TP_MIB_TCP_V6IN_ERR_0_A);
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
&st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A);
t4_tp_mib_read(adap, st->mac_in_errs, nchan, TP_MIB_MAC_IN_ERR_0_A,
sleep_ok);
t4_tp_mib_read(adap, st->hdr_in_errs, nchan, TP_MIB_HDR_IN_ERR_0_A,
sleep_ok);
t4_tp_mib_read(adap, st->tcp_in_errs, nchan, TP_MIB_TCP_IN_ERR_0_A,
sleep_ok);
t4_tp_mib_read(adap, st->tnl_cong_drops, nchan,
TP_MIB_TNL_CNG_DROP_0_A, sleep_ok);
t4_tp_mib_read(adap, st->ofld_chan_drops, nchan,
TP_MIB_OFD_CHN_DROP_0_A, sleep_ok);
t4_tp_mib_read(adap, st->tnl_tx_drops, nchan, TP_MIB_TNL_DROP_0_A,
sleep_ok);
t4_tp_mib_read(adap, st->ofld_vlan_drops, nchan,
TP_MIB_OFD_VLN_DROP_0_A, sleep_ok);
t4_tp_mib_read(adap, st->tcp6_in_errs, nchan,
TP_MIB_TCP_V6IN_ERR_0_A, sleep_ok);
t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A,
sleep_ok);
}
/**
 *	t4_tp_get_cpl_stats - read TP's CPL MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's CPL counters.
 */
void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
			 bool sleep_ok)
{
	int nchan = adap->params.arch.nchan;

	/* Stale t4_read_indirect() calls (diff residue) replaced by the
	 * t4_tp_mib_read() wrapper, which honors @sleep_ok.
	 */
	t4_tp_mib_read(adap, st->req, nchan, TP_MIB_CPL_IN_REQ_0_A, sleep_ok);
	t4_tp_mib_read(adap, st->rsp, nchan, TP_MIB_CPL_OUT_RSP_0_A, sleep_ok);
}
/**
 *	t4_tp_get_rdma_stats - read TP's RDMA MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's RDMA counters.
 */
void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
			  bool sleep_ok)
{
	/* Reads two consecutive MIB registers starting at RQE_DFR_PKT;
	 * stale t4_read_indirect() line (diff residue) removed.
	 */
	t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, TP_MIB_RQE_DFR_PKT_A,
		       sleep_ok);
}
/**
......@@ -5347,20 +5457,24 @@ void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
* @adap: the adapter
* @idx: the port index
* @st: holds the counter values
* @sleep_ok: if true we may sleep while awaiting command completion
*
* Returns the values of TP's FCoE counters for the selected port.
*/
void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
struct tp_fcoe_stats *st)
struct tp_fcoe_stats *st, bool sleep_ok)
{
u32 val[2];
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->frames_ddp,
1, TP_MIB_FCOE_DDP_0_A + idx);
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->frames_drop,
1, TP_MIB_FCOE_DROP_0_A + idx);
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
2, TP_MIB_FCOE_BYTE_0_HI_A + 2 * idx);
t4_tp_mib_read(adap, &st->frames_ddp, 1, TP_MIB_FCOE_DDP_0_A + idx,
sleep_ok);
t4_tp_mib_read(adap, &st->frames_drop, 1,
TP_MIB_FCOE_DROP_0_A + idx, sleep_ok);
t4_tp_mib_read(adap, val, 2, TP_MIB_FCOE_BYTE_0_HI_A + 2 * idx,
sleep_ok);
st->octets_ddp = ((u64)val[0] << 32) | val[1];
}
......@@ -5368,15 +5482,16 @@ void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
* t4_get_usm_stats - read TP's non-TCP DDP MIB counters
* @adap: the adapter
* @st: holds the counter values
* @sleep_ok: if true we may sleep while awaiting command completion
*
* Returns the values of TP's counters for non-TCP directly-placed packets.
*/
void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
bool sleep_ok)
{
u32 val[4];
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val, 4,
TP_MIB_USM_PKTS_A);
t4_tp_mib_read(adap, val, 4, TP_MIB_USM_PKTS_A, sleep_ok);
st->frames = val[0];
st->drops = val[1];
st->octets = ((u64)val[2] << 32) | val[3];
......@@ -8663,10 +8778,11 @@ int t4_init_sge_params(struct adapter *adapter)
/**
* t4_init_tp_params - initialize adap->params.tp
* @adap: the adapter
* @sleep_ok: if true we may sleep while awaiting command completion
*
* Initialize various fields of the adapter's TP Parameters structure.
*/
int t4_init_tp_params(struct adapter *adap)
int t4_init_tp_params(struct adapter *adap, bool sleep_ok)
{
int chan;
u32 v;
......@@ -8682,19 +8798,11 @@ int t4_init_tp_params(struct adapter *adap)
/* Cache the adapter's Compressed Filter Mode and global Incress
* Configuration.
*/
if (t4_use_ldst(adap)) {
t4_fw_tp_pio_rw(adap, &adap->params.tp.vlan_pri_map, 1,
TP_VLAN_PRI_MAP_A, 1);
t4_fw_tp_pio_rw(adap, &adap->params.tp.ingress_config, 1,
TP_INGRESS_CONFIG_A, 1);
} else {
t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
&adap->params.tp.vlan_pri_map, 1,
TP_VLAN_PRI_MAP_A);
t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
&adap->params.tp.ingress_config, 1,
TP_INGRESS_CONFIG_A);
}
t4_tp_pio_read(adap, &adap->params.tp.vlan_pri_map, 1,
TP_VLAN_PRI_MAP_A, sleep_ok);
t4_tp_pio_read(adap, &adap->params.tp.ingress_config, 1,
TP_INGRESS_CONFIG_A, sleep_ok);
/* For T6, cache the adapter's compressed error vector
* and passing outer header info for encapsulated packets.
*/
......
......@@ -1447,6 +1447,8 @@
#define LKPTBLQUEUE0_M 0x3ffU
#define LKPTBLQUEUE0_G(x) (((x) >> LKPTBLQUEUE0_S) & LKPTBLQUEUE0_M)
#define TP_TM_PIO_ADDR_A 0x7e18
#define TP_TM_PIO_DATA_A 0x7e1c
#define TP_PIO_ADDR_A 0x7e40
#define TP_PIO_DATA_A 0x7e44
#define TP_MIB_INDEX_A 0x7e50
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment