Commit 2c758cef authored by Linus Torvalds

Merge tag 'platform-drivers-x86-v6.6-2' of git://git.kernel.org/pub/scm/linux/kernel/git/pdx86/platform-drivers-x86

Pull x86 platform driver fixes from Hans de Goede:

 - various platform/mellanox fixes

 - one new DMI quirk for asus-wmi

* tag 'platform-drivers-x86-v6.6-2' of git://git.kernel.org/pub/scm/linux/kernel/git/pdx86/platform-drivers-x86:
  platform/x86: asus-wmi: Support 2023 ROG X16 tablet mode
  platform/mellanox: NVSW_SN2201 should depend on ACPI
  platform/mellanox: mlxbf-bootctl: add NET dependency into Kconfig
  platform/mellanox: mlxbf-pmc: Fix reading of unprogrammed events
  platform/mellanox: mlxbf-pmc: Fix potential buffer overflows
  platform/mellanox: mlxbf-tmfifo: Drop jumbo frames
  platform/mellanox: mlxbf-tmfifo: Drop the Rx packet if no more descriptors
parents a747acc0 4106a70d
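
The "Drop jumbo frames" fix listed above caps what the tmfifo Rx path will accept at the configured MTU plus Ethernet/VLAN overhead; the mlxbf-tmfifo hunks below redefine MLXBF_TMFIFO_NET_L2_OVERHEAD as ETH_HLEN + VLAN_HLEN for exactly that comparison. A minimal userspace sketch of the bound check follows; the ETH_HLEN/VLAN_HLEN values are copied from their kernel definitions, while L2_OVERHEAD and rx_len_ok() are illustrative stand-ins rather than the driver's API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative constants mirroring the kernel's values. */
#define ETH_HLEN	14			/* Ethernet header */
#define VLAN_HLEN	4			/* optional 802.1Q tag */
#define L2_OVERHEAD	(ETH_HLEN + VLAN_HLEN)

/* Hypothetical helper: accept an Rx frame only if it fits mtu + L2 overhead. */
static bool rx_len_ok(uint32_t l2_len, uint16_t mtu)
{
	return l2_len <= (uint32_t)mtu + L2_OVERHEAD;
}

int main(void)
{
	uint16_t mtu = 1500;

	printf("1514-byte frame accepted: %d\n", rx_len_ok(1514, mtu));
	printf("9018-byte frame accepted: %d\n", rx_len_ok(9018, mtu));	/* jumbo: dropped */
	return 0;
}

Frames over the cap are simply discarded instead of being copied into an Rx buffer that was never sized for them.
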
@@ -60,6 +60,7 @@ config MLXBF_BOOTCTL
tristate "Mellanox BlueField Firmware Boot Control driver"
depends on ARM64
depends on ACPI
depends on NET
help
The Mellanox BlueField firmware implements functionality to
request swapping the primary and alternate eMMC boot partition,
@@ -80,8 +81,8 @@ config MLXBF_PMC
config NVSW_SN2201
tristate "Nvidia SN2201 platform driver support"
depends on HWMON
depends on I2C
depends on HWMON && I2C
depends on ACPI || COMPILE_TEST
select REGMAP_I2C
help
This driver provides support for the Nvidia SN2201 platform.
......
@@ -191,6 +191,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_smgen_events[] = {
};
static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_1[] = {
{ 0x0, "DISABLE" },
{ 0xa0, "TPIO_DATA_BEAT" },
{ 0xa1, "TDMA_DATA_BEAT" },
{ 0xa2, "MAP_DATA_BEAT" },
@@ -214,6 +215,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_1[] = {
};
static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_2[] = {
{ 0x0, "DISABLE" },
{ 0xa0, "TPIO_DATA_BEAT" },
{ 0xa1, "TDMA_DATA_BEAT" },
{ 0xa2, "MAP_DATA_BEAT" },
@@ -246,6 +248,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_2[] = {
};
static const struct mlxbf_pmc_events mlxbf_pmc_ecc_events[] = {
{ 0x0, "DISABLE" },
{ 0x100, "ECC_SINGLE_ERROR_CNT" },
{ 0x104, "ECC_DOUBLE_ERROR_CNT" },
{ 0x114, "SERR_INJ" },
@@ -258,6 +261,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_ecc_events[] = {
};
static const struct mlxbf_pmc_events mlxbf_pmc_mss_events[] = {
{ 0x0, "DISABLE" },
{ 0xc0, "RXREQ_MSS" },
{ 0xc1, "RXDAT_MSS" },
{ 0xc2, "TXRSP_MSS" },
@@ -265,6 +269,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_mss_events[] = {
};
static const struct mlxbf_pmc_events mlxbf_pmc_hnf_events[] = {
{ 0x0, "DISABLE" },
{ 0x45, "HNF_REQUESTS" },
{ 0x46, "HNF_REJECTS" },
{ 0x47, "ALL_BUSY" },
@@ -323,6 +328,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_hnf_events[] = {
};
static const struct mlxbf_pmc_events mlxbf_pmc_hnfnet_events[] = {
{ 0x0, "DISABLE" },
{ 0x12, "CDN_REQ" },
{ 0x13, "DDN_REQ" },
{ 0x14, "NDN_REQ" },
@@ -892,7 +898,7 @@ static int mlxbf_pmc_read_event(int blk_num, uint32_t cnt_num, bool is_l3,
uint64_t *result)
{
uint32_t perfcfg_offset, perfval_offset;
uint64_t perfmon_cfg, perfevt, perfctl;
uint64_t perfmon_cfg, perfevt;
if (cnt_num >= pmc->block[blk_num].counters)
return -EINVAL;
@@ -904,25 +910,6 @@ static int mlxbf_pmc_read_event(int blk_num, uint32_t cnt_num, bool is_l3,
perfval_offset = perfcfg_offset +
pmc->block[blk_num].counters * MLXBF_PMC_REG_SIZE;
/* Set counter in "read" mode */
perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
MLXBF_PMC_PERFCTL);
perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1);
perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 0);
if (mlxbf_pmc_write(pmc->block[blk_num].mmio_base + perfcfg_offset,
MLXBF_PMC_WRITE_REG_64, perfmon_cfg))
return -EFAULT;
/* Check if the counter is enabled */
if (mlxbf_pmc_read(pmc->block[blk_num].mmio_base + perfval_offset,
MLXBF_PMC_READ_REG_64, &perfctl))
return -EFAULT;
if (!FIELD_GET(MLXBF_PMC_PERFCTL_EN0, perfctl))
return -EINVAL;
/* Set counter in "read" mode */
perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
MLXBF_PMC_PERFEVT);
@@ -1008,7 +995,7 @@ static ssize_t mlxbf_pmc_counter_show(struct device *dev,
} else
return -EINVAL;
return sprintf(buf, "0x%llx\n", value);
return sysfs_emit(buf, "0x%llx\n", value);
}
/* Store function for "counter" sysfs files */
@@ -1078,13 +1065,13 @@ static ssize_t mlxbf_pmc_event_show(struct device *dev,
err = mlxbf_pmc_read_event(blk_num, cnt_num, is_l3, &evt_num);
if (err)
return sprintf(buf, "No event being monitored\n");
return sysfs_emit(buf, "No event being monitored\n");
evt_name = mlxbf_pmc_get_event_name(pmc->block_name[blk_num], evt_num);
if (!evt_name)
return -EINVAL;
return sprintf(buf, "0x%llx: %s\n", evt_num, evt_name);
return sysfs_emit(buf, "0x%llx: %s\n", evt_num, evt_name);
}
/* Store function for "event" sysfs files */
@@ -1139,9 +1126,9 @@ static ssize_t mlxbf_pmc_event_list_show(struct device *dev,
return -EINVAL;
for (i = 0, buf[0] = '\0'; i < size; ++i) {
len += sprintf(e_info, "0x%x: %s\n", events[i].evt_num,
events[i].evt_name);
if (len > PAGE_SIZE)
len += snprintf(e_info, sizeof(e_info), "0x%x: %s\n",
events[i].evt_num, events[i].evt_name);
if (len >= PAGE_SIZE)
break;
strcat(buf, e_info);
ret = len;
@@ -1168,7 +1155,7 @@ static ssize_t mlxbf_pmc_enable_show(struct device *dev,
value = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_CFG_EN, perfcnt_cfg);
return sprintf(buf, "%d\n", value);
return sysfs_emit(buf, "%d\n", value);
}
/* Store function for "enable" sysfs files - only for l3cache */
......
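
The mlxbf-pmc hunks above bound every write into the sysfs page: single values go through sysfs_emit() instead of sprintf(), and the event-list loop uses a size-checked snprintf() with a PAGE_SIZE cutoff. A small userspace sketch of that bounding pattern follows; PAGE_SIZE here, the sample event table and format_event_list() are stand-ins for illustration, not the driver's code.

#include <stdio.h>

#define PAGE_SIZE 4096	/* stand-in for the kernel's one-page sysfs buffer */

struct event {
	unsigned int evt_num;
	const char *evt_name;
};

/* Append one "0xNN: NAME" line per event without ever overrunning the buffer. */
static size_t format_event_list(char *buf, size_t bufsz,
				const struct event *ev, size_t count)
{
	size_t len = 0;

	if (!bufsz)
		return 0;
	buf[0] = '\0';

	for (size_t i = 0; i < count; i++) {
		int n = snprintf(buf + len, bufsz - len, "0x%x: %s\n",
				 ev[i].evt_num, ev[i].evt_name);
		if (n < 0 || (size_t)n >= bufsz - len)
			break;	/* no room left: stop instead of overflowing */
		len += (size_t)n;
	}
	return len;
}

int main(void)
{
	static const struct event events[] = {
		{ 0x0, "DISABLE" },
		{ 0xa0, "TPIO_DATA_BEAT" },
	};
	char page[PAGE_SIZE];

	format_event_list(page, sizeof(page), events, 2);
	fputs(page, stdout);
	return 0;
}

The essential change is that each append is limited to the space actually remaining, and formatting stops cleanly once the page is full.
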
@@ -59,6 +59,7 @@ struct mlxbf_tmfifo;
* @vq: pointer to the virtio virtqueue
* @desc: current descriptor of the pending packet
* @desc_head: head descriptor of the pending packet
* @drop_desc: dummy desc for packet dropping
* @cur_len: processed length of the current descriptor
* @rem_len: remaining length of the pending packet
* @pkt_len: total length of the pending packet
@@ -75,6 +76,7 @@ struct mlxbf_tmfifo_vring {
struct virtqueue *vq;
struct vring_desc *desc;
struct vring_desc *desc_head;
struct vring_desc drop_desc;
int cur_len;
int rem_len;
u32 pkt_len;
@@ -86,6 +88,14 @@ struct mlxbf_tmfifo_vring {
struct mlxbf_tmfifo *fifo;
};
/* Check whether vring is in drop mode. */
#define IS_VRING_DROP(_r) ({ \
typeof(_r) (r) = (_r); \
(r->desc_head == &r->drop_desc ? true : false); })
/* A stub length to drop maximum length packet. */
#define VRING_DROP_DESC_MAX_LEN GENMASK(15, 0)
/* Interrupt types. */
enum {
MLXBF_TM_RX_LWM_IRQ,
@@ -214,7 +224,7 @@ static u8 mlxbf_tmfifo_net_default_mac[ETH_ALEN] = {
static efi_char16_t mlxbf_tmfifo_efi_name[] = L"RshimMacAddr";
/* Maximum L2 header length. */
#define MLXBF_TMFIFO_NET_L2_OVERHEAD 36
#define MLXBF_TMFIFO_NET_L2_OVERHEAD (ETH_HLEN + VLAN_HLEN)
/* Supported virtio-net features. */
#define MLXBF_TMFIFO_NET_FEATURES \
@@ -262,6 +272,7 @@ static int mlxbf_tmfifo_alloc_vrings(struct mlxbf_tmfifo *fifo,
vring->align = SMP_CACHE_BYTES;
vring->index = i;
vring->vdev_id = tm_vdev->vdev.id.device;
vring->drop_desc.len = VRING_DROP_DESC_MAX_LEN;
dev = &tm_vdev->vdev.dev;
size = vring_size(vring->num, vring->align);
@@ -367,7 +378,7 @@ static u32 mlxbf_tmfifo_get_pkt_len(struct mlxbf_tmfifo_vring *vring,
return len;
}
static void mlxbf_tmfifo_release_pending_pkt(struct mlxbf_tmfifo_vring *vring)
static void mlxbf_tmfifo_release_pkt(struct mlxbf_tmfifo_vring *vring)
{
struct vring_desc *desc_head;
u32 len = 0;
@@ -596,19 +607,25 @@ static void mlxbf_tmfifo_rxtx_word(struct mlxbf_tmfifo_vring *vring,
if (vring->cur_len + sizeof(u64) <= len) {
/* The whole word. */
if (!IS_VRING_DROP(vring)) {
if (is_rx)
memcpy(addr + vring->cur_len, &data, sizeof(u64));
memcpy(addr + vring->cur_len, &data,
sizeof(u64));
else
memcpy(&data, addr + vring->cur_len, sizeof(u64));
memcpy(&data, addr + vring->cur_len,
sizeof(u64));
}
vring->cur_len += sizeof(u64);
} else {
/* Leftover bytes. */
if (!IS_VRING_DROP(vring)) {
if (is_rx)
memcpy(addr + vring->cur_len, &data,
len - vring->cur_len);
else
memcpy(&data, addr + vring->cur_len,
len - vring->cur_len);
}
vring->cur_len = len;
}
@@ -625,13 +642,14 @@ static void mlxbf_tmfifo_rxtx_word(struct mlxbf_tmfifo_vring *vring,
* flag is set.
*/
static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring,
struct vring_desc *desc,
struct vring_desc **desc,
bool is_rx, bool *vring_change)
{
struct mlxbf_tmfifo *fifo = vring->fifo;
struct virtio_net_config *config;
struct mlxbf_tmfifo_msg_hdr hdr;
int vdev_id, hdr_len;
bool drop_rx = false;
/* Read/Write packet header. */
if (is_rx) {
@@ -652,7 +670,7 @@ static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring,
__virtio16_to_cpu(virtio_legacy_is_little_endian(),
config->mtu) +
MLXBF_TMFIFO_NET_L2_OVERHEAD)
return;
drop_rx = true;
} else {
vdev_id = VIRTIO_ID_CONSOLE;
hdr_len = 0;
@@ -667,16 +685,25 @@ static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring,
if (!tm_dev2)
return;
vring->desc = desc;
vring->desc = *desc;
vring = &tm_dev2->vrings[MLXBF_TMFIFO_VRING_RX];
*vring_change = true;
}
if (drop_rx && !IS_VRING_DROP(vring)) {
if (vring->desc_head)
mlxbf_tmfifo_release_pkt(vring);
*desc = &vring->drop_desc;
vring->desc_head = *desc;
vring->desc = *desc;
}
vring->pkt_len = ntohs(hdr.len) + hdr_len;
} else {
/* Network virtio has an extra header. */
hdr_len = (vring->vdev_id == VIRTIO_ID_NET) ?
sizeof(struct virtio_net_hdr) : 0;
vring->pkt_len = mlxbf_tmfifo_get_pkt_len(vring, desc);
vring->pkt_len = mlxbf_tmfifo_get_pkt_len(vring, *desc);
hdr.type = (vring->vdev_id == VIRTIO_ID_NET) ?
VIRTIO_ID_NET : VIRTIO_ID_CONSOLE;
hdr.len = htons(vring->pkt_len - hdr_len);
@@ -709,15 +736,23 @@ static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
/* Get the descriptor of the next packet. */
if (!vring->desc) {
desc = mlxbf_tmfifo_get_next_pkt(vring, is_rx);
if (!desc)
if (!desc) {
/* Drop next Rx packet to avoid stuck. */
if (is_rx) {
desc = &vring->drop_desc;
vring->desc_head = desc;
vring->desc = desc;
} else {
return false;
}
}
} else {
desc = vring->desc;
}
/* Beginning of a packet. Start to Rx/Tx packet header. */
if (vring->pkt_len == 0) {
mlxbf_tmfifo_rxtx_header(vring, desc, is_rx, &vring_change);
mlxbf_tmfifo_rxtx_header(vring, &desc, is_rx, &vring_change);
(*avail)--;
/* Return if new packet is for another ring. */
@@ -743,17 +778,24 @@ static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
vring->rem_len -= len;
/* Get the next desc on the chain. */
if (vring->rem_len > 0 &&
if (!IS_VRING_DROP(vring) && vring->rem_len > 0 &&
(virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT)) {
idx = virtio16_to_cpu(vdev, desc->next);
desc = &vr->desc[idx];
goto mlxbf_tmfifo_desc_done;
}
/* Done and release the pending packet. */
mlxbf_tmfifo_release_pending_pkt(vring);
/* Done and release the packet. */
desc = NULL;
fifo->vring[is_rx] = NULL;
if (!IS_VRING_DROP(vring)) {
mlxbf_tmfifo_release_pkt(vring);
} else {
vring->pkt_len = 0;
vring->desc_head = NULL;
vring->desc = NULL;
return false;
}
/*
* Make sure the load/store are in order before
@@ -933,7 +975,7 @@ static void mlxbf_tmfifo_virtio_del_vqs(struct virtio_device *vdev)
/* Release the pending packet. */
if (vring->desc)
mlxbf_tmfifo_release_pending_pkt(vring);
mlxbf_tmfifo_release_pkt(vring);
vq = vring->vq;
if (vq) {
vring->vq = NULL;
......
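
The mlxbf-tmfifo hunks above introduce a per-vring dummy drop_desc: when no Rx descriptor is available, or an incoming network packet is oversized, data is still read out of the FIFO but discarded, so the FIFO keeps draining instead of stalling. A simplified userspace sketch of that sink-descriptor idea follows; struct rx_desc, consume_word() and the other names are illustrative stand-ins, not the driver's vring structures.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for an Rx descriptor. */
struct rx_desc {
	uint8_t *buf;	/* NULL for the drop descriptor: data is discarded */
	uint32_t len;	/* buffer capacity */
	uint32_t used;	/* bytes consumed so far */
};

#define DROP_DESC_MAX_LEN 0xffff	/* large enough for any single packet */

static struct rx_desc drop_desc = { .buf = NULL, .len = DROP_DESC_MAX_LEN, .used = 0 };

static bool is_drop(const struct rx_desc *d)
{
	return d == &drop_desc;		/* identified by address, as IS_VRING_DROP() does */
}

/* Pull one 64-bit word off the FIFO: store it for a real descriptor, discard otherwise. */
static void consume_word(struct rx_desc *d, uint64_t word)
{
	if (!is_drop(d) && d->used + sizeof(word) <= d->len)
		memcpy(d->buf + d->used, &word, sizeof(word));
	d->used += sizeof(word);	/* always advance so the FIFO keeps draining */
}

int main(void)
{
	uint8_t payload[64];
	struct rx_desc real = { .buf = payload, .len = sizeof(payload), .used = 0 };
	struct rx_desc *posted = NULL;	/* pretend no Rx buffer was posted */
	struct rx_desc *cur = posted ? posted : &drop_desc;

	consume_word(cur, 0x1122334455667788ULL);	/* swallowed, nothing stored */
	consume_word(&real, 0xdeadbeefULL);		/* copied into payload */

	printf("drop descriptor consumed %u bytes, real descriptor stored %u bytes\n",
	       (unsigned int)drop_desc.used, (unsigned int)real.used);
	return 0;
}

Identifying the drop descriptor by address, the same trick the IS_VRING_DROP() macro above uses, keeps the per-word check to a single pointer comparison.
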
@@ -478,6 +478,15 @@ static const struct dmi_system_id asus_quirks[] = {
},
.driver_data = &quirk_asus_tablet_mode,
},
{
.callback = dmi_matched,
.ident = "ASUS ROG FLOW X16",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "GV601V"),
},
.driver_data = &quirk_asus_tablet_mode,
},
{
.callback = dmi_matched,
.ident = "ASUS VivoBook E410MA",
......
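
The asus-wmi hunk above adds one DMI entry so the 2023 ROG Flow X16 (product name containing "GV601V") is given the existing quirk_asus_tablet_mode handling; DMI_MATCH() matches substrings, which is how one entry covers the model's variants. The general shape is a table mapping board identity strings to per-model driver data. A tiny userspace sketch of that lookup pattern follows; the quirk_entry table and find_quirk() are illustrative, not the kernel's dmi_check_system().

#include <stdio.h>
#include <string.h>

/* Per-model data the driver wants to attach, e.g. "use tablet-mode handling". */
struct quirk {
	int tablet_mode;
};

static const struct quirk quirk_tablet_mode = { .tablet_mode = 1 };

struct quirk_entry {
	const char *vendor;		/* compared against the board vendor string */
	const char *product;		/* matched as a substring of the product name */
	const struct quirk *data;
};

static const struct quirk_entry quirks[] = {
	{ "ASUSTeK COMPUTER INC.", "GV601V", &quirk_tablet_mode },
	{ NULL, NULL, NULL }		/* table terminator */
};

/* Return the quirk for this board, or NULL if no entry matches. */
static const struct quirk *find_quirk(const char *vendor, const char *product)
{
	for (const struct quirk_entry *e = quirks; e->vendor; e++) {
		if (strcmp(vendor, e->vendor) == 0 && strstr(product, e->product))
			return e->data;
	}
	return NULL;
}

int main(void)
{
	const struct quirk *q = find_quirk("ASUSTeK COMPUTER INC.", "GV601VV");

	printf("tablet-mode quirk applies: %s\n", q && q->tablet_mode ? "yes" : "no");
	return 0;
}

Keeping the per-model behaviour behind driver_data means a new machine usually only needs a new table entry, not new code.
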