Commit d4d0249a authored by David S. Miller

Merge tag 'mlx5-updates-2017-06-23' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2017-06-23

This series provides some updates to the mlx5 core and netdevice drivers.

Three patches from Tariq introduce a page-reuse mechanism in the non-Striding
RQ RX datapath: an RX descriptor keeps reusing its allocated page until the
page is fully consumed. Page reuse reduces the stress on the page allocator
and improves RX performance, especially at high speeds (100Gb/s).
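
[Editor's note] A rough sizing sketch of the reuse math, assuming a typical
x86_64 config (64B cache lines, ~320B struct skb_shared_info, 4K pages);
the exact numbers depend on the kernel config:

    /* Stand-in for SKB_DATA_ALIGN with SMP_CACHE_BYTES == 64. */
    #define ALIGN64(x) (((x) + 63u) & ~63u)

    static unsigned int frag_sz_example(void)
    {
            unsigned int headroom   = 64 + 2; /* NET_SKB_PAD + NET_IP_ALIGN */
            unsigned int byte_count = 1522;   /* HW MTU for a 1500B SW MTU */
            unsigned int shinfo     = 320;    /* sizeof(struct skb_shared_info) */

            /* MLX5_SKB_FRAG_SZ(len) = SKB_DATA_ALIGN(len) + SKB_DATA_ALIGN(shinfo) */
            return ALIGN64(headroom + byte_count) + ALIGN64(shinfo); /* 1600 + 320 = 1920 */
    }

With ~1920B fragments, two consecutive WQEs can be served from one 4K page
before it is released, roughly halving page-allocator pressure at MTU 1500.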

The next four patches of the series, from Or, allow offloading tc flower
matching on ttl/hoplimit, and header re-write of the hoplimit field.
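
[Editor's note] For reference, the flower 'tos' byte carries DSCP and ECN
together; the new matching code splits it before writing the FTE match
fields. A sketch mirroring the MLX5_SET() calls in __parse_cls_flower():

    #include <stdint.h>

    /* tos/traffic-class byte: DSCP in the high 6 bits, ECN in the low 2 */
    static void split_tos(uint8_t tos, uint8_t *dscp, uint8_t *ecn)
    {
            *ecn  = tos & 0x3; /* -> fte_match ip_ecn */
            *dscp = tos >> 2;  /* -> fte_match ip_dscp */
    }

ttl/hoplimit is matched as a plain byte (ttl_hoplimit), gated on the new
outer_ipv4_ttl FW capability.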

The rest of the series, from Yotam and Or, enhances mlx5 to support FW
flashing through the mlxfw module, in the same manner as the mlxsw driver.
Currently only ethtool-based flashing is implemented; both Eth and IB ports
are supported.
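
[Editor's note] The mlxfw flow the new mlx5 glue plugs into, sketched with
the mlxfw_dev_ops callbacks wired up in this series (control flow
simplified; real mlxfw walks the MFA2 components and downloads in
max_write_size chunks):

    static int flash_component_sketch(struct mlxfw_dev *md, u16 comp_idx,
                                      u8 *data, u16 size)
    {
            u32 handle;
            int err;

            err = md->ops->fsm_lock(md, &handle); /* MCC: lock update handle */
            if (err)
                    return err;

            err = md->ops->fsm_component_update(md, handle, comp_idx, size);
            if (!err)
                    err = md->ops->fsm_block_download(md, handle, data, size, 0); /* MCDA */
            if (!err)
                    err = md->ops->fsm_component_verify(md, handle, comp_idx);
            if (!err)
                    err = md->ops->fsm_activate(md, handle); /* MCC: activate new FW */

            md->ops->fsm_release(md, handle); /* MCC: release update handle */
            return err;
    }

On mlx5, each callback maps to an access-register command: MCQI for the
capability query, MCC for FSM control, MCDA for data download.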
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 8f46d467 137ffd15
@@ -72,6 +72,8 @@
 #define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW 0x6
 #define MLX5_RX_HEADROOM NET_SKB_PAD
+#define MLX5_SKB_FRAG_SZ(len)	(SKB_DATA_ALIGN(len) +	\
+				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
 #define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \
 	(6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */
@@ -215,6 +217,7 @@ struct mlx5e_cq_moder {
 struct mlx5e_params {
 	u8  log_sq_size;
 	u8  rq_wq_type;
+	u16 rq_headroom;
 	u8  mpwqe_log_stride_sz;
 	u8  mpwqe_log_num_strides;
 	u8  log_rq_size;
@@ -445,6 +448,11 @@ struct mlx5e_dma_info {
 	dma_addr_t	addr;
 };
 
+struct mlx5e_wqe_frag_info {
+	struct mlx5e_dma_info di;
+	u32 offset;
+};
+
 struct mlx5e_umr_dma_info {
 	__be64                *mtt;
 	dma_addr_t             mtt_addr;
@@ -506,7 +514,12 @@ struct mlx5e_rq {
 	struct mlx5_wq_ll      wq;
 
 	union {
-		struct mlx5e_dma_info *dma_info;
+		struct {
+			struct mlx5e_wqe_frag_info *frag_info;
+			u32 frag_sz;	/* max possible skb frag_sz */
+			bool page_reuse;
+			bool xdp_xmit;
+		} wqe;
 		struct {
 			struct mlx5e_mpw_info *info;
 			void                  *mtt_no_align;
@@ -1047,6 +1060,8 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
 			       struct ethtool_coalesce *coal);
 int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
 			      struct ethtool_ts_info *info);
+int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
+			       struct ethtool_flash *flash);
 
 /* mlx5e generic netdev management API */
 struct net_device*
...
@@ -1795,6 +1795,40 @@ static int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
 	return err;
 }
 
+int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
+			       struct ethtool_flash *flash)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	struct net_device *dev = priv->netdev;
+	const struct firmware *fw;
+	int err;
+
+	if (flash->region != ETHTOOL_FLASH_ALL_REGIONS)
+		return -EOPNOTSUPP;
+
+	err = request_firmware_direct(&fw, flash->data, &dev->dev);
+	if (err)
+		return err;
+
+	dev_hold(dev);
+	rtnl_unlock();
+
+	err = mlx5_firmware_flash(mdev, fw);
+	release_firmware(fw);
+
+	rtnl_lock();
+	dev_put(dev);
+	return err;
+}
+
+static int mlx5e_flash_device(struct net_device *dev,
+			      struct ethtool_flash *flash)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+
+	return mlx5e_ethtool_flash_device(priv, flash);
+}
+
 const struct ethtool_ops mlx5e_ethtool_ops = {
 	.get_drvinfo       = mlx5e_get_drvinfo,
 	.get_link          = ethtool_op_get_link,
@@ -1815,6 +1849,7 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
 	.set_rxfh          = mlx5e_set_rxfh,
 	.get_rxnfc         = mlx5e_get_rxnfc,
 	.set_rxnfc         = mlx5e_set_rxnfc,
+	.flash_device      = mlx5e_flash_device,
 	.get_tunable       = mlx5e_get_tunable,
 	.set_tunable       = mlx5e_set_tunable,
 	.get_pauseparam    = mlx5e_get_pauseparam,
...
@@ -96,9 +96,12 @@ void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
 		params->log_rq_size = is_kdump_kernel() ?
 			MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
 			MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
+		params->rq_headroom = params->xdp_prog ?
+			XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
+		params->rq_headroom += NET_IP_ALIGN;
 
 		/* Extra room needed for build_skb */
-		params->lro_wqe_sz -= MLX5_RX_HEADROOM +
+		params->lro_wqe_sz -= params->rq_headroom +
 			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 	}
@@ -197,6 +200,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
 		s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
 		s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
 		s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
+		s->rx_page_reuse  += rq_stats->page_reuse;
 		s->rx_cache_reuse += rq_stats->cache_reuse;
 		s->rx_cache_full  += rq_stats->cache_full;
 		s->rx_cache_empty += rq_stats->cache_empty;
@@ -547,7 +551,6 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 	void *rqc = rqp->rqc;
 	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
 	u32 byte_count;
-	u32 frag_sz;
 	int npages;
 	int wq_sz;
 	int err;
@@ -579,13 +582,8 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 		goto err_rq_wq_destroy;
 	}
 
-	if (rq->xdp_prog) {
-		rq->buff.map_dir = DMA_BIDIRECTIONAL;
-		rq->rx_headroom = XDP_PACKET_HEADROOM;
-	} else {
-		rq->buff.map_dir = DMA_FROM_DEVICE;
-		rq->rx_headroom = MLX5_RX_HEADROOM;
-	}
+	rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
+	rq->rx_headroom = params->rq_headroom;
 
 	switch (rq->wq_type) {
 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
@@ -616,9 +614,10 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 			goto err_destroy_umr_mkey;
 		break;
 	default: /* MLX5_WQ_TYPE_LINKED_LIST */
-		rq->dma_info = kzalloc_node(wq_sz * sizeof(*rq->dma_info),
-					    GFP_KERNEL, cpu_to_node(c->cpu));
-		if (!rq->dma_info) {
+		rq->wqe.frag_info =
+			kzalloc_node(wq_sz * sizeof(*rq->wqe.frag_info),
+				     GFP_KERNEL, cpu_to_node(c->cpu));
+		if (!rq->wqe.frag_info) {
 			err = -ENOMEM;
 			goto err_rq_wq_destroy;
 		}
@@ -627,7 +626,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 
 		rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe;
 		if (!rq->handle_rx_cqe) {
-			kfree(rq->dma_info);
+			kfree(rq->wqe.frag_info);
 			err = -EINVAL;
 			netdev_err(c->netdev, "RX handler of RQ is not set, err %d\n", err);
 			goto err_rq_wq_destroy;
@@ -636,15 +635,12 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 		rq->buff.wqe_sz = params->lro_en  ?
 				params->lro_wqe_sz :
 				MLX5E_SW2HW_MTU(c->priv, c->netdev->mtu);
+		rq->wqe.page_reuse = !params->xdp_prog && !params->lro_en;
 		byte_count = rq->buff.wqe_sz;
 
 		/* calc the required page order */
-		frag_sz = rq->rx_headroom +
-			  byte_count /* packet data */ +
-			  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-		frag_sz = SKB_DATA_ALIGN(frag_sz);
-
-		npages = DIV_ROUND_UP(frag_sz, PAGE_SIZE);
+		rq->wqe.frag_sz = MLX5_SKB_FRAG_SZ(rq->rx_headroom + byte_count);
+		npages = DIV_ROUND_UP(rq->wqe.frag_sz, PAGE_SIZE);
 		rq->buff.page_order = order_base_2(npages);
 
 		byte_count |= MLX5_HW_START_PADDING;
@@ -689,7 +685,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
 		mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
 		break;
 	default: /* MLX5_WQ_TYPE_LINKED_LIST */
-		kfree(rq->dma_info);
+		kfree(rq->wqe.frag_info);
 	}
 
 	for (i = rq->page_cache.head; i != rq->page_cache.tail;
@@ -871,6 +867,16 @@ static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
 		mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
 			       &wqe->next.next_wqe_index);
 	}
+
+	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST && rq->wqe.page_reuse) {
+		/* Clean outstanding pages on handled WQEs that decided to do page-reuse,
+		 * but yet to be re-posted.
+		 */
+		int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
+
+		for (wqe_ix = 0; wqe_ix < wq_sz; wqe_ix++)
+			rq->dealloc_wqe(rq, wqe_ix);
+	}
 }
 
 static int mlx5e_open_rq(struct mlx5e_channel *c,
...
@@ -160,6 +160,11 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
 
 #define RQ_PAGE_SIZE(rq) ((1 << rq->buff.page_order) << PAGE_SHIFT)
 
+static inline bool mlx5e_page_is_reserved(struct page *page)
+{
+	return page_is_pfmemalloc(page) || page_to_nid(page) != numa_node_id();
+}
+
 static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
 				      struct mlx5e_dma_info *dma_info)
 {
@@ -238,22 +243,54 @@ void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
 	put_page(dma_info->page);
 }
 
+static inline bool mlx5e_page_reuse(struct mlx5e_rq *rq,
+				    struct mlx5e_wqe_frag_info *wi)
+{
+	return rq->wqe.page_reuse && wi->di.page &&
+		(wi->offset + rq->wqe.frag_sz <= RQ_PAGE_SIZE(rq)) &&
+		!mlx5e_page_is_reserved(wi->di.page);
+}
+
 int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
 {
-	struct mlx5e_dma_info *di = &rq->dma_info[ix];
+	struct mlx5e_wqe_frag_info *wi = &rq->wqe.frag_info[ix];
 
-	if (unlikely(mlx5e_page_alloc_mapped(rq, di)))
-		return -ENOMEM;
+	/* check if page exists, hence can be reused */
+	if (!wi->di.page) {
+		if (unlikely(mlx5e_page_alloc_mapped(rq, &wi->di)))
+			return -ENOMEM;
+		wi->offset = 0;
+	}
 
-	wqe->data.addr = cpu_to_be64(di->addr + rq->rx_headroom);
+	wqe->data.addr = cpu_to_be64(wi->di.addr + wi->offset +
+				     rq->rx_headroom);
 	return 0;
 }
 
+static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
+				     struct mlx5e_wqe_frag_info *wi)
+{
+	mlx5e_page_release(rq, &wi->di, true);
+	wi->di.page = NULL;
+}
+
+static inline void mlx5e_free_rx_wqe_reuse(struct mlx5e_rq *rq,
+					   struct mlx5e_wqe_frag_info *wi)
+{
+	if (mlx5e_page_reuse(rq, wi)) {
+		rq->stats.page_reuse++;
+		return;
+	}
+
+	mlx5e_free_rx_wqe(rq, wi);
+}
+
 void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
 {
-	struct mlx5e_dma_info *di = &rq->dma_info[ix];
+	struct mlx5e_wqe_frag_info *wi = &rq->wqe.frag_info[ix];
 
-	mlx5e_page_release(rq, di, true);
+	if (wi->di.page)
+		mlx5e_free_rx_wqe(rq, wi);
 }
 
 static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq)
@@ -650,7 +687,6 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
 	if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE ||
 		     MLX5E_SW2HW_MTU(rq->channel->priv, rq->netdev->mtu) < dma_len)) {
 		rq->stats.xdp_drop++;
-		mlx5e_page_release(rq, di, true);
 		return false;
 	}
@@ -661,7 +697,6 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
 			sq->db.doorbell = false;
 		}
 		rq->stats.xdp_tx_full++;
-		mlx5e_page_release(rq, di, true);
 		return false;
 	}
@@ -686,10 +721,15 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
 	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
 
+	/* move page to reference to sq responsibility,
+	 * and mark so it's not put back in page-cache.
+	 */
+	rq->wqe.xdp_xmit = true;
 	sq->db.di[pi] = *di;
 	sq->pc++;
 
 	sq->db.doorbell = true;
+
 	rq->stats.xdp_tx++;
 	return true;
 }
@@ -726,35 +766,34 @@ static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq,
 		trace_xdp_exception(rq->netdev, prog, act);
 	case XDP_DROP:
 		rq->stats.xdp_drop++;
-		mlx5e_page_release(rq, di, true);
 		return true;
 	}
 }
 
 static inline
 struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
-			     u16 wqe_counter, u32 cqe_bcnt)
+			     struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
 {
-	struct mlx5e_dma_info *di;
+	struct mlx5e_dma_info *di = &wi->di;
 	struct sk_buff *skb;
 	void *va, *data;
 	u16 rx_headroom = rq->rx_headroom;
 	bool consumed;
+	u32 frag_size;
 
-	di             = &rq->dma_info[wqe_counter];
-	va             = page_address(di->page);
+	va             = page_address(di->page) + wi->offset;
 	data           = va + rx_headroom;
+	frag_size      = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
 
 	dma_sync_single_range_for_cpu(rq->pdev,
-				      di->addr,
-				      rx_headroom,
-				      rq->buff.wqe_sz,
+				      di->addr + wi->offset,
+				      0, frag_size,
 				      DMA_FROM_DEVICE);
 	prefetch(data);
+	wi->offset += frag_size;
 
 	if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
 		rq->stats.wqe_err++;
-		mlx5e_page_release(rq, di, true);
 		return NULL;
 	}
@@ -764,16 +803,14 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 	if (consumed)
 		return NULL; /* page/packet was consumed by XDP */
 
-	skb = build_skb(va, RQ_PAGE_SIZE(rq));
+	skb = build_skb(va, frag_size);
 	if (unlikely(!skb)) {
 		rq->stats.buff_alloc_err++;
-		mlx5e_page_release(rq, di, true);
 		return NULL;
 	}
 
-	/* queue up for recycling ..*/
+	/* queue up for recycling/reuse */
 	page_ref_inc(di->page);
-	mlx5e_page_release(rq, di, true);
 
 	skb_reserve(skb, rx_headroom);
 	skb_put(skb, cqe_bcnt);
@@ -783,6 +820,7 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 
 void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 {
+	struct mlx5e_wqe_frag_info *wi;
 	struct mlx5e_rx_wqe *wqe;
 	__be16 wqe_counter_be;
 	struct sk_buff *skb;
@@ -792,15 +830,27 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 	wqe_counter_be = cqe->wqe_counter;
 	wqe_counter    = be16_to_cpu(wqe_counter_be);
 	wqe            = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
+	wi             = &rq->wqe.frag_info[wqe_counter];
 	cqe_bcnt       = be32_to_cpu(cqe->byte_cnt);
 
-	skb = skb_from_cqe(rq, cqe, wqe_counter, cqe_bcnt);
-	if (!skb)
+	skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt);
+	if (!skb) {
+		/* probably for XDP */
+		if (rq->wqe.xdp_xmit) {
+			wi->di.page = NULL;
+			rq->wqe.xdp_xmit = false;
+			/* do not return page to cache, it will be returned on XDP_TX completion */
+			goto wq_ll_pop;
+		}
+		/* probably an XDP_DROP, save the page-reuse checks */
+		mlx5e_free_rx_wqe(rq, wi);
 		goto wq_ll_pop;
+	}
 
 	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
 	napi_gro_receive(rq->cq.napi, skb);
 
+	mlx5e_free_rx_wqe_reuse(rq, wi);
 wq_ll_pop:
 	mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
 		       &wqe->next.next_wqe_index);
@@ -812,6 +862,7 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5e_rep_priv *rpriv  = priv->ppriv;
 	struct mlx5_eswitch_rep *rep = rpriv->rep;
+	struct mlx5e_wqe_frag_info *wi;
 	struct mlx5e_rx_wqe *wqe;
 	struct sk_buff *skb;
 	__be16 wqe_counter_be;
@@ -821,11 +872,21 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 	wqe_counter_be = cqe->wqe_counter;
 	wqe_counter    = be16_to_cpu(wqe_counter_be);
 	wqe            = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
+	wi             = &rq->wqe.frag_info[wqe_counter];
 	cqe_bcnt       = be32_to_cpu(cqe->byte_cnt);
 
-	skb = skb_from_cqe(rq, cqe, wqe_counter, cqe_bcnt);
-	if (!skb)
+	skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt);
+	if (!skb) {
+		if (rq->wqe.xdp_xmit) {
+			wi->di.page = NULL;
+			rq->wqe.xdp_xmit = false;
+			/* do not return page to cache, it will be returned on XDP_TX completion */
+			goto wq_ll_pop;
+		}
+		/* probably an XDP_DROP, save the page-reuse checks */
+		mlx5e_free_rx_wqe(rq, wi);
 		goto wq_ll_pop;
+	}
 
 	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
@@ -834,6 +895,7 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 
 	napi_gro_receive(rq->cq.napi, skb);
 
+	mlx5e_free_rx_wqe_reuse(rq, wi);
 wq_ll_pop:
 	mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
 		       &wqe->next.next_wqe_index);
@@ -1094,6 +1156,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
 
 void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 {
+	struct mlx5e_wqe_frag_info *wi;
 	struct mlx5e_rx_wqe *wqe;
 	__be16 wqe_counter_be;
 	struct sk_buff *skb;
@@ -1103,16 +1166,18 @@ void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 	wqe_counter_be = cqe->wqe_counter;
 	wqe_counter    = be16_to_cpu(wqe_counter_be);
 	wqe            = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
+	wi             = &rq->wqe.frag_info[wqe_counter];
 	cqe_bcnt       = be32_to_cpu(cqe->byte_cnt);
 
-	skb = skb_from_cqe(rq, cqe, wqe_counter, cqe_bcnt);
+	skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt);
 	if (!skb)
-		goto wq_ll_pop;
+		goto wq_free_wqe;
 
 	mlx5i_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
 	napi_gro_receive(rq->cq.napi, skb);
 
-wq_ll_pop:
+wq_free_wqe:
+	mlx5e_free_rx_wqe_reuse(rq, wi);
 	mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
 		       &wqe->next.next_wqe_index);
 }
...
@@ -79,6 +79,7 @@ struct mlx5e_sw_stats {
 	u64 rx_buff_alloc_err;
 	u64 rx_cqe_compress_blks;
 	u64 rx_cqe_compress_pkts;
+	u64 rx_page_reuse;
 	u64 rx_cache_reuse;
 	u64 rx_cache_full;
 	u64 rx_cache_empty;
@@ -117,6 +118,7 @@ static const struct counter_desc sw_stats_desc[] = {
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_page_reuse) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
@@ -319,6 +321,7 @@ struct mlx5e_rq_stats {
 	u64 buff_alloc_err;
 	u64 cqe_compress_blks;
 	u64 cqe_compress_pkts;
+	u64 page_reuse;
 	u64 cache_reuse;
 	u64 cache_full;
 	u64 cache_empty;
@@ -341,6 +344,7 @@ static const struct counter_desc rq_stats_desc[] = {
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, page_reuse) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
...
@@ -888,6 +888,34 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 			*min_inline = MLX5_INLINE_MODE_IP;
 	}
 
+	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP)) {
+		struct flow_dissector_key_ip *key =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_IP,
+						  f->key);
+		struct flow_dissector_key_ip *mask =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_IP,
+						  f->mask);
+
+		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3);
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3);
+
+		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2);
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos >> 2);
+
+		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl);
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl);
+
+		if (mask->ttl &&
+		    !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
+						ft_field_support.outer_ipv4_ttl))
+			return -EOPNOTSUPP;
+
+		if (mask->tos || mask->ttl)
+			*min_inline = MLX5_INLINE_MODE_IP;
+	}
+
 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
 		struct flow_dissector_key_ports *key =
 			skb_flow_dissector_target(f->dissector,
@@ -931,29 +959,6 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 			*min_inline = MLX5_INLINE_MODE_TCP_UDP;
 	}
 
-	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP)) {
-		struct flow_dissector_key_ip *key =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_IP,
-						  f->key);
-		struct flow_dissector_key_ip *mask =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_IP,
-						  f->mask);
-
-		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3);
-		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3);
-
-		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2);
-		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos >> 2);
-
-		if (mask->tos)
-			*min_inline = MLX5_INLINE_MODE_IP;
-
-		if (mask->ttl) /* currently not supported */
-			return -EOPNOTSUPP;
-	}
-
 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP)) {
 		struct flow_dissector_key_tcp *key =
 			skb_flow_dissector_target(f->dissector,
@@ -1053,32 +1058,37 @@ struct mlx5_fields {
 	u32 offset;
 };
 
+#define OFFLOAD(fw_field, size, field, off) \
+		{MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, size, offsetof(struct pedit_headers, field) + (off)}
+
 static struct mlx5_fields fields[] = {
-	{MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16, 4, offsetof(struct pedit_headers, eth.h_dest[0])},
-	{MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0,  2, offsetof(struct pedit_headers, eth.h_dest[4])},
-	{MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16, 4, offsetof(struct pedit_headers, eth.h_source[0])},
-	{MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0,  2, offsetof(struct pedit_headers, eth.h_source[4])},
-	{MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE,  2, offsetof(struct pedit_headers, eth.h_proto)},
-
-	{MLX5_ACTION_IN_FIELD_OUT_IP_TTL, 1, offsetof(struct pedit_headers, ip4.ttl)},
-	{MLX5_ACTION_IN_FIELD_OUT_SIPV4,  4, offsetof(struct pedit_headers, ip4.saddr)},
-	{MLX5_ACTION_IN_FIELD_OUT_DIPV4,  4, offsetof(struct pedit_headers, ip4.daddr)},
-
-	{MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96, 4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[0])},
-	{MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64,  4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[1])},
-	{MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32,  4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[2])},
-	{MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0,   4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[3])},
-	{MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96, 4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[0])},
-	{MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64,  4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[1])},
-	{MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32,  4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[2])},
-	{MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0,   4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[3])},
-
-	{MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT, 2, offsetof(struct pedit_headers, tcp.source)},
-	{MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT, 2, offsetof(struct pedit_headers, tcp.dest)},
-	{MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS, 1, offsetof(struct pedit_headers, tcp.ack_seq) + 5},
-
-	{MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT, 2, offsetof(struct pedit_headers, udp.source)},
-	{MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT, 2, offsetof(struct pedit_headers, udp.dest)},
+	OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0),
+	OFFLOAD(DMAC_15_0,  2, eth.h_dest[4], 0),
+	OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0),
+	OFFLOAD(SMAC_15_0,  2, eth.h_source[4], 0),
+	OFFLOAD(ETHERTYPE,  2, eth.h_proto, 0),
+
+	OFFLOAD(IP_TTL, 1, ip4.ttl,   0),
+	OFFLOAD(SIPV4,  4, ip4.saddr, 0),
+	OFFLOAD(DIPV4,  4, ip4.daddr, 0),
+
+	OFFLOAD(SIPV6_127_96, 4, ip6.saddr.s6_addr32[0], 0),
+	OFFLOAD(SIPV6_95_64,  4, ip6.saddr.s6_addr32[1], 0),
+	OFFLOAD(SIPV6_63_32,  4, ip6.saddr.s6_addr32[2], 0),
+	OFFLOAD(SIPV6_31_0,   4, ip6.saddr.s6_addr32[3], 0),
+	OFFLOAD(DIPV6_127_96, 4, ip6.daddr.s6_addr32[0], 0),
+	OFFLOAD(DIPV6_95_64,  4, ip6.daddr.s6_addr32[1], 0),
+	OFFLOAD(DIPV6_63_32,  4, ip6.daddr.s6_addr32[2], 0),
+	OFFLOAD(DIPV6_31_0,   4, ip6.daddr.s6_addr32[3], 0),
+	OFFLOAD(IPV6_HOPLIMIT, 1, ip6.hop_limit, 0),
+
+	OFFLOAD(TCP_SPORT, 2, tcp.source,  0),
+	OFFLOAD(TCP_DPORT, 2, tcp.dest,    0),
+	OFFLOAD(TCP_FLAGS, 1, tcp.ack_seq, 5),
+
+	OFFLOAD(UDP_SPORT, 2, udp.source, 0),
+	OFFLOAD(UDP_DPORT, 2, udp.dest,   0),
 };
 
 /* On input attr->num_mod_hdr_actions tells how many HW actions can be parsed at
...
@@ -34,6 +34,7 @@
 #include <linux/mlx5/cmd.h>
 #include <linux/module.h>
 #include "mlx5_core.h"
+#include "../../mlxfw/mlxfw.h"
 
 static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out,
 				  int outlen)
@@ -223,3 +224,270 @@ int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev)
 
 	return 0;
 }
+
+enum mlxsw_reg_mcc_instruction {
+	MLX5_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE = 0x01,
+	MLX5_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE = 0x02,
+	MLX5_REG_MCC_INSTRUCTION_UPDATE_COMPONENT = 0x03,
+	MLX5_REG_MCC_INSTRUCTION_VERIFY_COMPONENT = 0x04,
+	MLX5_REG_MCC_INSTRUCTION_ACTIVATE = 0x06,
+	MLX5_REG_MCC_INSTRUCTION_CANCEL = 0x08,
+};
+
+static int mlx5_reg_mcc_set(struct mlx5_core_dev *dev,
+			    enum mlxsw_reg_mcc_instruction instr,
+			    u16 component_index, u32 update_handle,
+			    u32 component_size)
+{
+	u32 out[MLX5_ST_SZ_DW(mcc_reg)];
+	u32 in[MLX5_ST_SZ_DW(mcc_reg)];
+
+	memset(in, 0, sizeof(in));
+
+	MLX5_SET(mcc_reg, in, instruction, instr);
+	MLX5_SET(mcc_reg, in, component_index, component_index);
+	MLX5_SET(mcc_reg, in, update_handle, update_handle);
+	MLX5_SET(mcc_reg, in, component_size, component_size);
+
+	return mlx5_core_access_reg(dev, in, sizeof(in), out,
+				    sizeof(out), MLX5_REG_MCC, 0, 1);
+}
+
+static int mlx5_reg_mcc_query(struct mlx5_core_dev *dev,
+			      u32 *update_handle, u8 *error_code,
+			      u8 *control_state)
+{
+	u32 out[MLX5_ST_SZ_DW(mcc_reg)];
+	u32 in[MLX5_ST_SZ_DW(mcc_reg)];
+	int err;
+
+	memset(in, 0, sizeof(in));
+	memset(out, 0, sizeof(out));
+	MLX5_SET(mcc_reg, in, update_handle, *update_handle);
+
+	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+				   sizeof(out), MLX5_REG_MCC, 0, 0);
+	if (err)
+		goto out;
+
+	*update_handle = MLX5_GET(mcc_reg, out, update_handle);
+	*error_code = MLX5_GET(mcc_reg, out, error_code);
+	*control_state = MLX5_GET(mcc_reg, out, control_state);
+
+out:
+	return err;
+}
+
+static int mlx5_reg_mcda_set(struct mlx5_core_dev *dev,
+			     u32 update_handle,
+			     u32 offset, u16 size,
+			     u8 *data)
+{
+	int err, in_size = MLX5_ST_SZ_BYTES(mcda_reg) + size;
+	u32 out[MLX5_ST_SZ_DW(mcda_reg)];
+	int i, j, dw_size = size >> 2;
+	__be32 data_element;
+	u32 *in;
+
+	in = kzalloc(in_size, GFP_KERNEL);
+	if (!in)
+		return -ENOMEM;
+
+	MLX5_SET(mcda_reg, in, update_handle, update_handle);
+	MLX5_SET(mcda_reg, in, offset, offset);
+	MLX5_SET(mcda_reg, in, size, size);
+
+	for (i = 0; i < dw_size; i++) {
+		j = i * 4;
+		data_element = htonl(*(u32 *)&data[j]);
+		memcpy(MLX5_ADDR_OF(mcda_reg, in, data) + j, &data_element, 4);
+	}
+
+	err = mlx5_core_access_reg(dev, in, in_size, out,
+				   sizeof(out), MLX5_REG_MCDA, 0, 1);
+	kfree(in);
+	return err;
+}
+
+static int mlx5_reg_mcqi_query(struct mlx5_core_dev *dev,
+			       u16 component_index,
+			       u32 *max_component_size,
+			       u8 *log_mcda_word_size,
+			       u16 *mcda_max_write_size)
+{
+	u32 out[MLX5_ST_SZ_DW(mcqi_reg) + MLX5_ST_SZ_DW(mcqi_cap)];
+	int offset = MLX5_ST_SZ_DW(mcqi_reg);
+	u32 in[MLX5_ST_SZ_DW(mcqi_reg)];
+	int err;
+
+	memset(in, 0, sizeof(in));
+	memset(out, 0, sizeof(out));
+
+	MLX5_SET(mcqi_reg, in, component_index, component_index);
+	MLX5_SET(mcqi_reg, in, data_size, MLX5_ST_SZ_BYTES(mcqi_cap));
+
+	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+				   sizeof(out), MLX5_REG_MCQI, 0, 0);
+	if (err)
+		goto out;
+
+	*max_component_size = MLX5_GET(mcqi_cap, out + offset, max_component_size);
+	*log_mcda_word_size = MLX5_GET(mcqi_cap, out + offset, log_mcda_word_size);
+	*mcda_max_write_size = MLX5_GET(mcqi_cap, out + offset, mcda_max_write_size);
+
+out:
+	return err;
+}
+
+struct mlx5_mlxfw_dev {
+	struct mlxfw_dev mlxfw_dev;
+	struct mlx5_core_dev *mlx5_core_dev;
+};
+
+static int mlx5_component_query(struct mlxfw_dev *mlxfw_dev,
+				u16 component_index, u32 *p_max_size,
+				u8 *p_align_bits, u16 *p_max_write_size)
+{
+	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
+		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
+	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
+
+	return mlx5_reg_mcqi_query(dev, component_index, p_max_size,
+				   p_align_bits, p_max_write_size);
+}
+
+static int mlx5_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
+{
+	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
+		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
+	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
+	u8 control_state, error_code;
+	int err;
+
+	*fwhandle = 0;
+	err = mlx5_reg_mcc_query(dev, fwhandle, &error_code, &control_state);
+	if (err)
+		return err;
+
+	if (control_state != MLXFW_FSM_STATE_IDLE)
+		return -EBUSY;
+
+	return mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
+				0, *fwhandle, 0);
+}
+
+static int mlx5_fsm_component_update(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+				     u16 component_index, u32 component_size)
+{
+	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
+		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
+	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
+
+	return mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
+				component_index, fwhandle, component_size);
+}
+
+static int mlx5_fsm_block_download(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+				   u8 *data, u16 size, u32 offset)
+{
+	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
+		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
+	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
+
+	return mlx5_reg_mcda_set(dev, fwhandle, offset, size, data);
+}
+
+static int mlx5_fsm_component_verify(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+				     u16 component_index)
+{
+	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
+		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
+	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
+
+	return mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
+				component_index, fwhandle, 0);
+}
+
+static int mlx5_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
+{
+	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
+		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
+	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
+
+	return mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_ACTIVATE, 0,
+				fwhandle, 0);
+}
+
+static int mlx5_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+				enum mlxfw_fsm_state *fsm_state,
+				enum mlxfw_fsm_state_err *fsm_state_err)
+{
+	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
+		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
+	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
+	u8 control_state, error_code;
+	int err;
+
+	err = mlx5_reg_mcc_query(dev, &fwhandle, &error_code, &control_state);
+	if (err)
+		return err;
+
+	*fsm_state = control_state;
+	*fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
+			       MLXFW_FSM_STATE_ERR_MAX);
+	return 0;
+}
+
+static void mlx5_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
+{
+	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
+		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
+	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
+
+	mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_CANCEL, 0, fwhandle, 0);
+}
+
+static void mlx5_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
+{
+	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
+		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
+	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
+
+	mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
+			 fwhandle, 0);
+}
+
+static const struct mlxfw_dev_ops mlx5_mlxfw_dev_ops = {
+	.component_query	= mlx5_component_query,
+	.fsm_lock		= mlx5_fsm_lock,
+	.fsm_component_update	= mlx5_fsm_component_update,
+	.fsm_block_download	= mlx5_fsm_block_download,
+	.fsm_component_verify	= mlx5_fsm_component_verify,
+	.fsm_activate		= mlx5_fsm_activate,
+	.fsm_query_state	= mlx5_fsm_query_state,
+	.fsm_cancel		= mlx5_fsm_cancel,
+	.fsm_release		= mlx5_fsm_release
+};
+
+int mlx5_firmware_flash(struct mlx5_core_dev *dev,
+			const struct firmware *firmware)
+{
+	struct mlx5_mlxfw_dev mlx5_mlxfw_dev = {
+		.mlxfw_dev = {
+			.ops = &mlx5_mlxfw_dev_ops,
+			.psid = dev->board_id,
+			.psid_size = strlen(dev->board_id),
+		},
+		.mlx5_core_dev = dev
+	};
+
+	if (!MLX5_CAP_GEN(dev, mcam_reg)  ||
+	    !MLX5_CAP_MCAM_REG(dev, mcqi) ||
+	    !MLX5_CAP_MCAM_REG(dev, mcc) ||
+	    !MLX5_CAP_MCAM_REG(dev, mcda)) {
+		pr_info("%s flashing isn't supported by the running FW\n", __func__);
+		return -EOPNOTSUPP;
+	}
+
+	return mlxfw_firmware_flash(&mlx5_mlxfw_dev.mlxfw_dev, firmware);
+}
@@ -121,6 +121,14 @@ static int mlx5i_get_ts_info(struct net_device *netdev,
 	return mlx5e_ethtool_get_ts_info(priv, info);
 }
 
+static int mlx5i_flash_device(struct net_device *netdev,
+			      struct ethtool_flash *flash)
+{
+	struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+
+	return mlx5e_ethtool_flash_device(priv, flash);
+}
+
 const struct ethtool_ops mlx5i_ethtool_ops = {
 	.get_drvinfo       = mlx5i_get_drvinfo,
 	.get_strings       = mlx5i_get_strings,
@@ -128,6 +136,7 @@ const struct ethtool_ops mlx5i_ethtool_ops = {
 	.get_ethtool_stats = mlx5i_get_ethtool_stats,
 	.get_ringparam     = mlx5i_get_ringparam,
 	.set_ringparam     = mlx5i_set_ringparam,
+	.flash_device      = mlx5i_flash_device,
 	.get_channels      = mlx5i_get_channels,
 	.set_channels      = mlx5i_set_channels,
 	.get_coalesce      = mlx5i_get_coalesce,
...
@@ -37,6 +37,7 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/if_link.h>
+#include <linux/firmware.h>
 
 #define DRIVER_NAME "mlx5_core"
 #define DRIVER_VERSION "5.0-0"
@@ -153,6 +154,8 @@ int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size);
 int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode);
 int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode);
 
+int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw);
+
 void mlx5e_init(void);
 void mlx5e_cleanup(void);
...
@@ -3,5 +3,11 @@
 #
 
 config MLXFW
-	tristate "mlxfw" if COMPILE_TEST
+	tristate "Mellanox Technologies firmware flash module"
+	---help---
+	  This driver supports Mellanox Technologies Firmware
+	  flashing common logic.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called mlxfw.
 	select XZ_DEC
@@ -96,7 +96,16 @@ struct mlxfw_dev {
 	u16 psid_size;
 };
 
+#if IS_ENABLED(CONFIG_MLXFW)
 int mlxfw_firmware_flash(struct mlxfw_dev *mlxfw_dev,
 			 const struct firmware *firmware);
+#else
+static inline
+int mlxfw_firmware_flash(struct mlxfw_dev *mlxfw_dev,
+			 const struct firmware *firmware)
+{
+	return -EOPNOTSUPP;
+}
+#endif
 
 #endif
@@ -1094,6 +1094,9 @@ enum mlx5_mcam_feature_groups {
 #define MLX5_CAP_PCAM_FEATURE(mdev, fld) \
 	MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld)
 
+#define MLX5_CAP_MCAM_REG(mdev, reg) \
+	MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_access_reg_cap_mask.access_regs.reg)
+
 #define MLX5_CAP_MCAM_FEATURE(mdev, fld) \
 	MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld)
...
@@ -131,6 +131,9 @@ enum {
 	MLX5_REG_MPCNT		 = 0x9051,
 	MLX5_REG_MTPPS		 = 0x9053,
 	MLX5_REG_MTPPSE		 = 0x9054,
+	MLX5_REG_MCQI		 = 0x9061,
+	MLX5_REG_MCC		 = 0x9062,
+	MLX5_REG_MCDA		 = 0x9063,
 	MLX5_REG_MCAM		 = 0x907f,
 };
...
@@ -243,7 +243,7 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
 	u8         outer_first_prio[0x1];
 	u8         outer_first_cfi[0x1];
 	u8         outer_first_vid[0x1];
-	u8         reserved_at_7[0x1];
+	u8         outer_ipv4_ttl[0x1];
 	u8         outer_second_prio[0x1];
 	u8         outer_second_cfi[0x1];
 	u8         outer_second_vid[0x1];
@@ -380,7 +380,8 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
 	u8         tcp_sport[0x10];
 	u8         tcp_dport[0x10];
 
-	u8         reserved_at_c0[0x20];
+	u8         reserved_at_c0[0x18];
+	u8         ttl_hoplimit[0x8];
 
 	u8         udp_sport[0x10];
 	u8         udp_dport[0x10];
@@ -823,7 +824,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8         cc_modify_allowed[0x1];
 	u8         start_pad[0x1];
 	u8         cache_line_128byte[0x1];
-	u8         reserved_at_163[0xb];
+	u8         reserved_at_165[0xb];
 	u8         gid_table_size[0x10];
 
 	u8         out_of_seq_cnt[0x1];
@@ -4619,6 +4620,7 @@ enum {
 	MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0    = 0x14,
 	MLX5_ACTION_IN_FIELD_OUT_SIPV4         = 0x15,
 	MLX5_ACTION_IN_FIELD_OUT_DIPV4         = 0x16,
+	MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT = 0x47,
 };
 
 struct mlx5_ifc_alloc_modify_header_context_out_bits {
@@ -7743,6 +7745,18 @@ struct mlx5_ifc_mcam_enhanced_features_bits {
 	u8         pcie_performance_group[0x1];
 };
 
+struct mlx5_ifc_mcam_access_reg_bits {
+	u8         reserved_at_0[0x1c];
+	u8         mcda[0x1];
+	u8         mcc[0x1];
+	u8         mcqi[0x1];
+	u8         reserved_at_1f[0x1];
+
+	u8         regs_95_to_64[0x20];
+	u8         regs_63_to_32[0x20];
+	u8         regs_31_to_0[0x20];
+};
+
 struct mlx5_ifc_mcam_reg_bits {
 	u8         reserved_at_0[0x8];
 	u8         feature_group[0x8];
@@ -7752,6 +7766,7 @@ struct mlx5_ifc_mcam_reg_bits {
 	u8         reserved_at_20[0x20];
 
 	union {
+		struct mlx5_ifc_mcam_access_reg_bits access_regs;
 		u8         reserved_at_0[0x80];
 	} mng_access_reg_cap_mask;
@@ -8163,6 +8178,85 @@ struct mlx5_ifc_mtppse_reg_bits {
 	u8         reserved_at_40[0x40];
 };
 
+struct mlx5_ifc_mcqi_cap_bits {
+	u8         supported_info_bitmask[0x20];
+
+	u8         component_size[0x20];
+
+	u8         max_component_size[0x20];
+
+	u8         log_mcda_word_size[0x4];
+	u8         reserved_at_64[0xc];
+	u8         mcda_max_write_size[0x10];
+
+	u8         rd_en[0x1];
+	u8         reserved_at_81[0x1];
+	u8         match_chip_id[0x1];
+	u8         match_psid[0x1];
+	u8         check_user_timestamp[0x1];
+	u8         match_base_guid_mac[0x1];
+	u8         reserved_at_86[0x1a];
+};
+
+struct mlx5_ifc_mcqi_reg_bits {
+	u8         read_pending_component[0x1];
+	u8         reserved_at_1[0xf];
+	u8         component_index[0x10];
+
+	u8         reserved_at_20[0x20];
+
+	u8         reserved_at_40[0x1b];
+	u8         info_type[0x5];
+
+	u8         info_size[0x20];
+
+	u8         offset[0x20];
+
+	u8         reserved_at_a0[0x10];
+	u8         data_size[0x10];
+
+	u8         data[0][0x20];
+};
+
+struct mlx5_ifc_mcc_reg_bits {
+	u8         reserved_at_0[0x4];
+	u8         time_elapsed_since_last_cmd[0xc];
+	u8         reserved_at_10[0x8];
+	u8         instruction[0x8];
+
+	u8         reserved_at_20[0x10];
+	u8         component_index[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         update_handle[0x18];
+
+	u8         handle_owner_type[0x4];
+	u8         handle_owner_host_id[0x4];
+	u8         reserved_at_68[0x1];
+	u8         control_progress[0x7];
+	u8         error_code[0x8];
+	u8         reserved_at_78[0x4];
+	u8         control_state[0x4];
+
+	u8         component_size[0x20];
+
+	u8         reserved_at_a0[0x60];
+};
+
+struct mlx5_ifc_mcda_reg_bits {
+	u8         reserved_at_0[0x8];
+	u8         update_handle[0x18];
+
+	u8         offset[0x20];
+
+	u8         reserved_at_40[0x10];
+	u8         size[0x10];
+
+	u8         reserved_at_60[0x20];
+
+	u8         data[0][0x20];
+};
+
 union mlx5_ifc_ports_control_registers_document_bits {
 	struct mlx5_ifc_bufferx_reg_bits bufferx_reg;
 	struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
@@ -8212,6 +8306,9 @@ union mlx5_ifc_ports_control_registers_document_bits {
 	struct mlx5_ifc_mtppse_reg_bits mtppse_reg;
 	struct mlx5_ifc_fpga_ctrl_bits fpga_ctrl_bits;
 	struct mlx5_ifc_fpga_cap_bits fpga_cap_bits;
+	struct mlx5_ifc_mcqi_reg_bits mcqi_reg;
+	struct mlx5_ifc_mcc_reg_bits mcc_reg;
+	struct mlx5_ifc_mcda_reg_bits mcda_reg;
 
 	u8         reserved_at_0[0x60e0];
 };
...