Commit 18ec9392 authored by Greg Kroah-Hartman's avatar Greg Kroah-Hartman

Merge tag 'thunderbolt-for-v5.4' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt into char-misc-next

Mika writes:

thunderbolt: Changes for v5.4 merge window

The biggest change is the addition of Intel Ice Lake integrated
Thunderbolt support. There are also a couple of smaller changes, such as
converting the driver to the newer device property helpers and using the
correct format string in the service key attribute.

* tag 'thunderbolt-for-v5.4' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt:
  ACPI / property: Add two new Thunderbolt property GUIDs to the list
  thunderbolt: Add support for Intel Ice Lake
  thunderbolt: Expose active parts of NVM even if upgrade is not supported
  thunderbolt: Hide switch attributes that are not set
  thunderbolt: Do not fail adding switch if some port is not implemented
  thunderbolt: Use 32-bit writes when writing ring producer/consumer
  thunderbolt: Move NVM upgrade support flag to struct icm
  thunderbolt: Correct path indices for PCIe tunnel
  thunderbolt: Show key using %*pE not %*pEp
  thunderbolt: Switch to use device_property_count_uXX()
parents e6508c7e dfda2041
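
The common thread in the Ice Lake patches below is an optional per-controller ops table (struct tb_nhi_ops) that nhi.c consults around init, suspend/resume and shutdown. The standalone C sketch below mirrors that dispatch pattern; the hook names follow the diff, but the scaffolding (plain C, no kernel headers, made-up icl_init body) is illustrative only.

#include <stdio.h>

struct nhi;

/* Optional per-controller hooks; any of them may be NULL. */
struct nhi_ops {
	int (*init)(struct nhi *nhi);
	void (*shutdown)(struct nhi *nhi);
};

struct nhi {
	const struct nhi_ops *ops;	/* taken from the PCI id driver_data */
};

static int icl_init(struct nhi *nhi)
{
	(void)nhi;
	printf("ICL: force power and wait for firmware\n");
	return 0;
}

static const struct nhi_ops icl_ops = {
	.init = icl_init,
	/* .shutdown left NULL: callers must tolerate missing hooks */
};

/* Callers test both the table and the hook, as nhi_probe() does below. */
static int probe_like(struct nhi *nhi)
{
	if (nhi->ops && nhi->ops->init)
		return nhi->ops->init(nhi);
	return 0;	/* controllers without quirks need no hooks */
}

int main(void)
{
	struct nhi icl = { .ops = &icl_ops };
	struct nhi plain = { .ops = NULL };

	return probe_like(&icl) || probe_like(&plain);
}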
@@ -39,6 +39,12 @@ static const guid_t prp_guids[] = {
 	/* External facing port GUID: efcc06cc-73ac-4bc3-bff0-76143807c389 */
 	GUID_INIT(0xefcc06cc, 0x73ac, 0x4bc3,
 		  0xbf, 0xf0, 0x76, 0x14, 0x38, 0x07, 0xc3, 0x89),
+	/* Thunderbolt GUID for IMR_VALID: c44d002f-69f9-4e7d-a904-a7baabdf43f7 */
+	GUID_INIT(0xc44d002f, 0x69f9, 0x4e7d,
+		  0xa9, 0x04, 0xa7, 0xba, 0xab, 0xdf, 0x43, 0xf7),
+	/* Thunderbolt GUID for WAKE_SUPPORTED: 6c501103-c189-4296-ba72-9bf5a26ebe5d */
+	GUID_INIT(0x6c501103, 0xc189, 0x4296,
+		  0xba, 0x72, 0x9b, 0xf5, 0xa2, 0x6e, 0xbe, 0x5d),
 };

 /* ACPI _DSD data subnodes GUID: dbb8e3e6-5886-4ba6-8795-1319f52a966b */
...
 # SPDX-License-Identifier: GPL-2.0-only
 obj-${CONFIG_THUNDERBOLT} := thunderbolt.o
-thunderbolt-objs := nhi.o ctl.o tb.o switch.o cap.o path.o tunnel.o eeprom.o
+thunderbolt-objs := nhi.o nhi_ops.o ctl.o tb.o switch.o cap.o path.o tunnel.o eeprom.o
 thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o
@@ -930,6 +930,23 @@ struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer,
 	return res;
 }

+static int tb_cfg_get_error(struct tb_ctl *ctl, enum tb_cfg_space space,
+			    const struct tb_cfg_result *res)
+{
+	/*
+	 * For unimplemented ports access to port config space may return
+	 * TB_CFG_ERROR_INVALID_CONFIG_SPACE (alternatively their type is
+	 * set to TB_TYPE_INACTIVE). In the former case return -ENODEV so
+	 * that the caller can mark the port as disabled.
+	 */
+	if (space == TB_CFG_PORT &&
+	    res->tb_error == TB_CFG_ERROR_INVALID_CONFIG_SPACE)
+		return -ENODEV;
+
+	tb_cfg_print_error(ctl, res);
+	return -EIO;
+}
+
 int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
 		enum tb_cfg_space space, u32 offset, u32 length)
 {
@@ -942,8 +959,7 @@ int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
 	case 1:
 		/* Thunderbolt error, tb_error holds the actual number */
-		tb_cfg_print_error(ctl, &res);
-		return -EIO;
+		return tb_cfg_get_error(ctl, space, &res);

 	case -ETIMEDOUT:
 		tb_ctl_warn(ctl, "timeout reading config space %u from %#x\n",
@@ -969,8 +985,7 @@ int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
 	case 1:
 		/* Thunderbolt error, tb_error holds the actual number */
-		tb_cfg_print_error(ctl, &res);
-		return -EIO;
+		return tb_cfg_get_error(ctl, space, &res);

 	case -ETIMEDOUT:
 		tb_ctl_warn(ctl, "timeout writing config space %u to %#x\n",
...
@@ -414,7 +414,7 @@ static int tb_drom_copy_efi(struct tb_switch *sw, u16 *size)
 	struct device *dev = &sw->tb->nhi->pdev->dev;
 	int len, res;

-	len = device_property_read_u8_array(dev, "ThunderboltDROM", NULL, 0);
+	len = device_property_count_u8(dev, "ThunderboltDROM");
 	if (len < 0 || len < sizeof(struct tb_drom_header))
 		return -EINVAL;

@@ -525,10 +525,6 @@ int tb_drom_read(struct tb_switch *sw)
 			sw->ports[3].dual_link_port = &sw->ports[4];
 			sw->ports[4].dual_link_port = &sw->ports[3];

-			/* Port 5 is inaccessible on this gen 1 controller */
-			if (sw->config.device_id == PCI_DEVICE_ID_INTEL_LIGHT_RIDGE)
-				sw->ports[5].disabled = true;
-
 			return 0;
 		}
...
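
As the tb_drom_copy_efi() hunk above shows, device_property_count_u8() is shorthand for the zero-length read-array call it replaces: both return the number of u8 elements in the property, or a negative errno. A minimal kernel-style sketch of the equivalence (the helper names here are hypothetical):

#include <linux/device.h>
#include <linux/property.h>

/* Both helpers return the element count of "ThunderboltDROM", or a
 * negative errno if the property is missing.
 */
static int drom_len_old(struct device *dev)
{
	return device_property_read_u8_array(dev, "ThunderboltDROM", NULL, 0);
}

static int drom_len_new(struct device *dev)
{
	return device_property_count_u8(dev, "ThunderboltDROM");
}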
This diff is collapsed.
@@ -16,6 +16,7 @@
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/delay.h>
+#include <linux/property.h>

 #include "nhi.h"
 #include "nhi_regs.h"
@@ -143,9 +144,20 @@ static void __iomem *ring_options_base(struct tb_ring *ring)
 	return io;
 }

-static void ring_iowrite16desc(struct tb_ring *ring, u32 value, u32 offset)
+static void ring_iowrite_cons(struct tb_ring *ring, u16 cons)
 {
-	iowrite16(value, ring_desc_base(ring) + offset);
+	/*
+	 * The other 16-bits in the register is read-only and writes to it
+	 * are ignored by the hardware so we can save one ioread32() by
+	 * filling the read-only bits with zeroes.
+	 */
+	iowrite32(cons, ring_desc_base(ring) + 8);
+}
+
+static void ring_iowrite_prod(struct tb_ring *ring, u16 prod)
+{
+	/* See ring_iowrite_cons() above for explanation */
+	iowrite32(prod << 16, ring_desc_base(ring) + 8);
 }

 static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
@@ -197,7 +209,10 @@ static void ring_write_descriptors(struct tb_ring *ring)
 			descriptor->sof = frame->sof;
 		}
 		ring->head = (ring->head + 1) % ring->size;
-		ring_iowrite16desc(ring, ring->head, ring->is_tx ? 10 : 8);
+		if (ring->is_tx)
+			ring_iowrite_prod(ring, ring->head);
+		else
+			ring_iowrite_cons(ring, ring->head);
 	}
 }

@@ -662,7 +677,7 @@ void tb_ring_stop(struct tb_ring *ring)
 	ring_iowrite32options(ring, 0, 0);
 	ring_iowrite64desc(ring, 0, 0);
-	ring_iowrite16desc(ring, 0, ring->is_tx ? 10 : 8);
+	ring_iowrite32desc(ring, 0, 8);
 	ring_iowrite32desc(ring, 0, 12);
 	ring->head = 0;
 	ring->tail = 0;
@@ -845,12 +860,52 @@ static irqreturn_t nhi_msi(int irq, void *data)
 	return IRQ_HANDLED;
 }

-static int nhi_suspend_noirq(struct device *dev)
+static int __nhi_suspend_noirq(struct device *dev, bool wakeup)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct tb *tb = pci_get_drvdata(pdev);
+	struct tb_nhi *nhi = tb->nhi;
+	int ret;
+
+	ret = tb_domain_suspend_noirq(tb);
+	if (ret)
+		return ret;
+
+	if (nhi->ops && nhi->ops->suspend_noirq) {
+		ret = nhi->ops->suspend_noirq(tb->nhi, wakeup);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int nhi_suspend_noirq(struct device *dev)
+{
+	return __nhi_suspend_noirq(dev, device_may_wakeup(dev));
+}
+
+static bool nhi_wake_supported(struct pci_dev *pdev)
+{
+	u8 val;
+
+	/*
+	 * If power rails are sustainable for wakeup from S4 this
+	 * property is set by the BIOS.
+	 */
+	if (!device_property_read_u8(&pdev->dev, "WAKE_SUPPORTED", &val))
+		return !!val;
+
+	return true;
+}
+
+static int nhi_poweroff_noirq(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	bool wakeup;

-	return tb_domain_suspend_noirq(tb);
+	wakeup = device_may_wakeup(dev) && nhi_wake_supported(pdev);
+	return __nhi_suspend_noirq(dev, wakeup);
 }

 static void nhi_enable_int_throttling(struct tb_nhi *nhi)
@@ -873,16 +928,24 @@ static int nhi_resume_noirq(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct tb *tb = pci_get_drvdata(pdev);
+	struct tb_nhi *nhi = tb->nhi;
+	int ret;

 	/*
 	 * Check that the device is still there. It may be that the user
 	 * unplugged last device which causes the host controller to go
 	 * away on PCs.
 	 */
-	if (!pci_device_is_present(pdev))
-		tb->nhi->going_away = true;
-	else
+	if (!pci_device_is_present(pdev)) {
+		nhi->going_away = true;
+	} else {
+		if (nhi->ops && nhi->ops->resume_noirq) {
+			ret = nhi->ops->resume_noirq(nhi);
+			if (ret)
+				return ret;
+		}
 		nhi_enable_int_throttling(tb->nhi);
+	}

 	return tb_domain_resume_noirq(tb);
 }
@@ -915,16 +978,35 @@ static int nhi_runtime_suspend(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct tb *tb = pci_get_drvdata(pdev);
+	struct tb_nhi *nhi = tb->nhi;
+	int ret;
+
+	ret = tb_domain_runtime_suspend(tb);
+	if (ret)
+		return ret;

-	return tb_domain_runtime_suspend(tb);
+	if (nhi->ops && nhi->ops->runtime_suspend) {
+		ret = nhi->ops->runtime_suspend(tb->nhi);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
 }

 static int nhi_runtime_resume(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct tb *tb = pci_get_drvdata(pdev);
+	struct tb_nhi *nhi = tb->nhi;
+	int ret;
+
+	if (nhi->ops && nhi->ops->runtime_resume) {
+		ret = nhi->ops->runtime_resume(nhi);
+		if (ret)
+			return ret;
+	}

-	nhi_enable_int_throttling(tb->nhi);
+	nhi_enable_int_throttling(nhi);
 	return tb_domain_runtime_resume(tb);
 }
@@ -952,6 +1034,9 @@ static void nhi_shutdown(struct tb_nhi *nhi)
 		flush_work(&nhi->interrupt_work);
 	}
 	ida_destroy(&nhi->msix_ida);
+
+	if (nhi->ops && nhi->ops->shutdown)
+		nhi->ops->shutdown(nhi);
 }

 static int nhi_init_msi(struct tb_nhi *nhi)
@@ -996,12 +1081,27 @@ static int nhi_init_msi(struct tb_nhi *nhi)
 	return 0;
 }

+static bool nhi_imr_valid(struct pci_dev *pdev)
+{
+	u8 val;
+
+	if (!device_property_read_u8(&pdev->dev, "IMR_VALID", &val))
+		return !!val;
+
+	return true;
+}
+
 static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	struct tb_nhi *nhi;
 	struct tb *tb;
 	int res;

+	if (!nhi_imr_valid(pdev)) {
+		dev_warn(&pdev->dev, "firmware image not valid, aborting\n");
+		return -ENODEV;
+	}
+
 	res = pcim_enable_device(pdev);
 	if (res) {
 		dev_err(&pdev->dev, "cannot enable PCI device, aborting\n");
@@ -1019,6 +1119,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		return -ENOMEM;

 	nhi->pdev = pdev;
+	nhi->ops = (const struct tb_nhi_ops *)id->driver_data;
 	/* cannot fail - table is allocated bin pcim_iomap_regions */
 	nhi->iobase = pcim_iomap_table(pdev)[0];
 	nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
@@ -1051,6 +1152,12 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	pci_set_master(pdev);

+	if (nhi->ops && nhi->ops->init) {
+		res = nhi->ops->init(nhi);
+		if (res)
+			return res;
+	}
+
 	tb = icm_probe(nhi);
 	if (!tb)
 		tb = tb_probe(nhi);
@@ -1111,6 +1218,7 @@ static const struct dev_pm_ops nhi_pm_ops = {
 	.restore_noirq = nhi_resume_noirq,
 	.suspend = nhi_suspend,
 	.freeze = nhi_suspend,
+	.poweroff_noirq = nhi_poweroff_noirq,
 	.poweroff = nhi_suspend,
 	.complete = nhi_complete,
 	.runtime_suspend = nhi_runtime_suspend,
@@ -1158,6 +1266,10 @@ static struct pci_device_id nhi_ids[] = {
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI) },
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI) },
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI0),
+	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI1),
+	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
 	{ 0,}
 };
...
@@ -30,6 +30,26 @@ enum nhi_mailbox_cmd {
 int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data);
 enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi);

+/**
+ * struct tb_nhi_ops - NHI specific optional operations
+ * @init: NHI specific initialization
+ * @suspend_noirq: NHI specific suspend_noirq hook
+ * @resume_noirq: NHI specific resume_noirq hook
+ * @runtime_suspend: NHI specific runtime_suspend hook
+ * @runtime_resume: NHI specific runtime_resume hook
+ * @shutdown: NHI specific shutdown
+ */
+struct tb_nhi_ops {
+	int (*init)(struct tb_nhi *nhi);
+	int (*suspend_noirq)(struct tb_nhi *nhi, bool wakeup);
+	int (*resume_noirq)(struct tb_nhi *nhi);
+	int (*runtime_suspend)(struct tb_nhi *nhi);
+	int (*runtime_resume)(struct tb_nhi *nhi);
+	void (*shutdown)(struct tb_nhi *nhi);
+};
+
+extern const struct tb_nhi_ops icl_nhi_ops;
+
 /*
  * PCI IDs used in this driver from Win Ridge forward. There is no
  * need for the PCI quirk anymore as we will use ICM also on Apple
@@ -51,5 +71,7 @@ enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi);
 #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE	0x15ea
 #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI		0x15eb
 #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE	0x15ef
+#define PCI_DEVICE_ID_INTEL_ICL_NHI1			0x8a0d
+#define PCI_DEVICE_ID_INTEL_ICL_NHI0			0x8a17

 #endif
...
// SPDX-License-Identifier: GPL-2.0
/*
 * NHI specific operations
 *
 * Copyright (C) 2019, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/delay.h>
#include <linux/suspend.h>

#include "nhi.h"
#include "nhi_regs.h"
#include "tb.h"

/* Ice Lake specific NHI operations */

#define ICL_LC_MAILBOX_TIMEOUT	500 /* ms */

static int check_for_device(struct device *dev, void *data)
{
	return tb_is_switch(dev);
}

static bool icl_nhi_is_device_connected(struct tb_nhi *nhi)
{
	struct tb *tb = pci_get_drvdata(nhi->pdev);
	int ret;

	ret = device_for_each_child(&tb->root_switch->dev, NULL,
				    check_for_device);
	return ret > 0;
}

static int icl_nhi_force_power(struct tb_nhi *nhi, bool power)
{
	u32 vs_cap;

	/*
	 * The Thunderbolt host controller is present always in Ice Lake
	 * but the firmware may not be loaded and running (depending
	 * whether there is device connected and so on). Each time the
	 * controller is used we need to "Force Power" it first and wait
	 * for the firmware to indicate it is up and running. This "Force
	 * Power" is really not about actually powering on/off the
	 * controller so it is accessible even if "Force Power" is off.
	 *
	 * The actual power management happens inside shared ACPI power
	 * resources using standard ACPI methods.
	 */
	pci_read_config_dword(nhi->pdev, VS_CAP_22, &vs_cap);
	if (power) {
		vs_cap &= ~VS_CAP_22_DMA_DELAY_MASK;
		vs_cap |= 0x22 << VS_CAP_22_DMA_DELAY_SHIFT;
		vs_cap |= VS_CAP_22_FORCE_POWER;
	} else {
		vs_cap &= ~VS_CAP_22_FORCE_POWER;
	}
	pci_write_config_dword(nhi->pdev, VS_CAP_22, vs_cap);

	if (power) {
		unsigned int retries = 10;
		u32 val;

		/* Wait until the firmware tells it is up and running */
		do {
			pci_read_config_dword(nhi->pdev, VS_CAP_9, &val);
			if (val & VS_CAP_9_FW_READY)
				return 0;
			msleep(250);
		} while (--retries);

		return -ETIMEDOUT;
	}

	return 0;
}

static void icl_nhi_lc_mailbox_cmd(struct tb_nhi *nhi, enum icl_lc_mailbox_cmd cmd)
{
	u32 data;

	pci_read_config_dword(nhi->pdev, VS_CAP_19, &data);
	data = (cmd << VS_CAP_19_CMD_SHIFT) & VS_CAP_19_CMD_MASK;
	pci_write_config_dword(nhi->pdev, VS_CAP_19, data | VS_CAP_19_VALID);
}

static int icl_nhi_lc_mailbox_cmd_complete(struct tb_nhi *nhi, int timeout)
{
	unsigned long end;
	u32 data;

	if (!timeout)
		goto clear;

	end = jiffies + msecs_to_jiffies(timeout);
	do {
		pci_read_config_dword(nhi->pdev, VS_CAP_18, &data);
		if (data & VS_CAP_18_DONE)
			goto clear;
		msleep(100);
	} while (time_before(jiffies, end));

	return -ETIMEDOUT;

clear:
	/* Clear the valid bit */
	pci_write_config_dword(nhi->pdev, VS_CAP_19, 0);
	return 0;
}

static void icl_nhi_set_ltr(struct tb_nhi *nhi)
{
	u32 max_ltr, ltr;

	pci_read_config_dword(nhi->pdev, VS_CAP_16, &max_ltr);
	max_ltr &= 0xffff;
	/* Program the same value for both snoop and no-snoop */
	ltr = max_ltr << 16 | max_ltr;
	pci_write_config_dword(nhi->pdev, VS_CAP_15, ltr);
}

static int icl_nhi_suspend(struct tb_nhi *nhi)
{
	int ret;

	if (icl_nhi_is_device_connected(nhi))
		return 0;

	/*
	 * If there is no device connected we need to perform both: a
	 * handshake through LC mailbox and force power down before
	 * entering D3.
	 */
	icl_nhi_lc_mailbox_cmd(nhi, ICL_LC_PREPARE_FOR_RESET);
	ret = icl_nhi_lc_mailbox_cmd_complete(nhi, ICL_LC_MAILBOX_TIMEOUT);
	if (ret)
		return ret;

	return icl_nhi_force_power(nhi, false);
}

static int icl_nhi_suspend_noirq(struct tb_nhi *nhi, bool wakeup)
{
	enum icl_lc_mailbox_cmd cmd;

	if (!pm_suspend_via_firmware())
		return icl_nhi_suspend(nhi);

	cmd = wakeup ? ICL_LC_GO2SX : ICL_LC_GO2SX_NO_WAKE;
	icl_nhi_lc_mailbox_cmd(nhi, cmd);
	return icl_nhi_lc_mailbox_cmd_complete(nhi, ICL_LC_MAILBOX_TIMEOUT);
}

static int icl_nhi_resume(struct tb_nhi *nhi)
{
	int ret;

	ret = icl_nhi_force_power(nhi, true);
	if (ret)
		return ret;

	icl_nhi_set_ltr(nhi);
	return 0;
}

static void icl_nhi_shutdown(struct tb_nhi *nhi)
{
	icl_nhi_force_power(nhi, false);
}

const struct tb_nhi_ops icl_nhi_ops = {
	.init = icl_nhi_resume,
	.suspend_noirq = icl_nhi_suspend_noirq,
	.resume_noirq = icl_nhi_resume,
	.runtime_suspend = icl_nhi_suspend,
	.runtime_resume = icl_nhi_resume,
	.shutdown = icl_nhi_shutdown,
};
...
@@ -124,4 +124,41 @@ struct ring_desc {
 #define REG_FW_STS_ICM_EN_INVERT	BIT(1)
 #define REG_FW_STS_ICM_EN		BIT(0)

+/* ICL NHI VSEC registers */
+
+/* FW ready */
+#define VS_CAP_9			0xc8
+#define VS_CAP_9_FW_READY		BIT(31)
+/* UUID */
+#define VS_CAP_10			0xcc
+#define VS_CAP_11			0xd0
+/* LTR */
+#define VS_CAP_15			0xe0
+#define VS_CAP_16			0xe4
+/* TBT2PCIe */
+#define VS_CAP_18			0xec
+#define VS_CAP_18_DONE			BIT(0)
+/* PCIe2TBT */
+#define VS_CAP_19			0xf0
+#define VS_CAP_19_VALID			BIT(0)
+#define VS_CAP_19_CMD_SHIFT		1
+#define VS_CAP_19_CMD_MASK		GENMASK(7, 1)
+/* Force power */
+#define VS_CAP_22			0xfc
+#define VS_CAP_22_FORCE_POWER		BIT(1)
+#define VS_CAP_22_DMA_DELAY_MASK	GENMASK(31, 24)
+#define VS_CAP_22_DMA_DELAY_SHIFT	24
+
+/**
+ * enum icl_lc_mailbox_cmd - ICL specific LC mailbox commands
+ * @ICL_LC_GO2SX: Ask LC to enter Sx with wake
+ * @ICL_LC_GO2SX_NO_WAKE: Ask LC to enter Sx without wake
+ * @ICL_LC_PREPARE_FOR_RESET: Prepare LC for reset
+ */
+enum icl_lc_mailbox_cmd {
+	ICL_LC_GO2SX = 0x02,
+	ICL_LC_GO2SX_NO_WAKE = 0x03,
+	ICL_LC_PREPARE_FOR_RESET = 0x21,
+};
+
 #endif
...
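
Using the PCIe2TBT defines from this hunk, a mailbox command word is just the command code shifted into bits 7:1 with the valid bit set. The standalone check below re-declares the constants in plain C (an assumption, to keep it runnable outside the kernel) and verifies the encoding of ICL_LC_GO2SX:

#include <assert.h>
#include <stdint.h>

/* Re-declared locally for illustration; values match the hunk above. */
#define VS_CAP_19_VALID		(1u << 0)
#define VS_CAP_19_CMD_SHIFT	1
#define VS_CAP_19_CMD_MASK	0xfeu	/* GENMASK(7, 1) */

#define ICL_LC_GO2SX		0x02u

int main(void)
{
	uint32_t data = (ICL_LC_GO2SX << VS_CAP_19_CMD_SHIFT) & VS_CAP_19_CMD_MASK;

	/* 0x02 << 1 = 0x04, plus the valid bit -> 0x05 */
	assert((data | VS_CAP_19_VALID) == 0x05);
	return 0;
}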
@@ -364,12 +364,14 @@ static int tb_switch_nvm_add(struct tb_switch *sw)
 		nvm->active = nvm_dev;
 	}

-	nvm_dev = register_nvmem(sw, nvm->id, NVM_MAX_SIZE, false);
-	if (IS_ERR(nvm_dev)) {
-		ret = PTR_ERR(nvm_dev);
-		goto err_nvm_active;
+	if (!sw->no_nvm_upgrade) {
+		nvm_dev = register_nvmem(sw, nvm->id, NVM_MAX_SIZE, false);
+		if (IS_ERR(nvm_dev)) {
+			ret = PTR_ERR(nvm_dev);
+			goto err_nvm_active;
+		}
+		nvm->non_active = nvm_dev;
 	}
-	nvm->non_active = nvm_dev;

 	sw->nvm = nvm;
 	return 0;
@@ -398,7 +400,8 @@ static void tb_switch_nvm_remove(struct tb_switch *sw)
 	if (!nvm->authenticating)
 		nvm_clear_auth_status(sw);

-	nvmem_unregister(nvm->non_active);
+	if (nvm->non_active)
+		nvmem_unregister(nvm->non_active);
 	if (nvm->active)
 		nvmem_unregister(nvm->active);
 	ida_simple_remove(&nvm_ida, nvm->id);
@@ -611,8 +614,14 @@ static int tb_init_port(struct tb_port *port)
 	int cap;

 	res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
-	if (res)
+	if (res) {
+		if (res == -ENODEV) {
+			tb_dbg(port->sw->tb, " Port %d: not implemented\n",
+			       port->port);
+			return 0;
+		}
 		return res;
+	}

 	/* Port 0 is the switch itself and has no PHY. */
 	if (port->config.type == TB_TYPE_PORT && port->port != 0) {
@@ -1331,14 +1340,29 @@ static umode_t switch_attr_is_visible(struct kobject *kobj,
 	struct device *dev = container_of(kobj, struct device, kobj);
 	struct tb_switch *sw = tb_to_switch(dev);

-	if (attr == &dev_attr_key.attr) {
+	if (attr == &dev_attr_device.attr) {
+		if (!sw->device)
+			return 0;
+	} else if (attr == &dev_attr_device_name.attr) {
+		if (!sw->device_name)
+			return 0;
+	} else if (attr == &dev_attr_vendor.attr) {
+		if (!sw->vendor)
+			return 0;
+	} else if (attr == &dev_attr_vendor_name.attr) {
+		if (!sw->vendor_name)
+			return 0;
+	} else if (attr == &dev_attr_key.attr) {
 		if (tb_route(sw) &&
 		    sw->tb->security_level == TB_SECURITY_SECURE &&
 		    sw->security_level == TB_SECURITY_SECURE)
 			return attr->mode;
 		return 0;
-	} else if (attr == &dev_attr_nvm_authenticate.attr ||
-		   attr == &dev_attr_nvm_version.attr) {
+	} else if (attr == &dev_attr_nvm_authenticate.attr) {
+		if (sw->dma_port && !sw->no_nvm_upgrade)
+			return attr->mode;
+		return 0;
+	} else if (attr == &dev_attr_nvm_version.attr) {
 		if (sw->dma_port)
 			return attr->mode;
 		return 0;
@@ -1446,6 +1470,8 @@ static int tb_switch_get_generation(struct tb_switch *sw)
 	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
 	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
 	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
+	case PCI_DEVICE_ID_INTEL_ICL_NHI0:
+	case PCI_DEVICE_ID_INTEL_ICL_NHI1:
 		return 3;

 	default:
@@ -1689,13 +1715,17 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
 		break;
 	}

-	if (sw->no_nvm_upgrade)
+	/* Root switch DMA port requires running firmware */
+	if (!tb_route(sw) && sw->config.enabled)
 		return 0;

 	sw->dma_port = dma_port_alloc(sw);
 	if (!sw->dma_port)
 		return 0;

+	if (sw->no_nvm_upgrade)
+		return 0;
+
 	/*
 	 * Check status of the previous flash authentication. If there
 	 * is one we need to power cycle the switch in any case to make
...
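
The switch_attr_is_visible() rework above follows the standard sysfs is_visible idiom: return 0 to hide an attribute on this particular device, or attr->mode to expose it unchanged. A minimal kernel-style sketch with a hypothetical device and attribute:

#include <linux/device.h>
#include <linux/sysfs.h>

/* Hypothetical device with an optional feature. */
struct demo_dev {
	struct device dev;
	bool has_feature;
};

static ssize_t feature_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "1\n");
}
static DEVICE_ATTR_RO(feature);

static umode_t demo_attr_is_visible(struct kobject *kobj,
				    struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct demo_dev *d = container_of(dev, struct demo_dev, dev);

	/* 0 hides the file; attr->mode keeps the declared permissions */
	if (attr == &dev_attr_feature.attr && !d->has_feature)
		return 0;
	return attr->mode;
}

static struct attribute *demo_attrs[] = {
	&dev_attr_feature.attr,
	NULL,
};

static const struct attribute_group demo_group = {
	.attrs = demo_attrs,
	.is_visible = demo_attr_is_visible,
};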
@@ -104,10 +104,11 @@ enum icm_pkg_code {
 };

 enum icm_event_code {
-	ICM_EVENT_DEVICE_CONNECTED = 3,
-	ICM_EVENT_DEVICE_DISCONNECTED = 4,
-	ICM_EVENT_XDOMAIN_CONNECTED = 6,
-	ICM_EVENT_XDOMAIN_DISCONNECTED = 7,
+	ICM_EVENT_DEVICE_CONNECTED = 0x3,
+	ICM_EVENT_DEVICE_DISCONNECTED = 0x4,
+	ICM_EVENT_XDOMAIN_CONNECTED = 0x6,
+	ICM_EVENT_XDOMAIN_DISCONNECTED = 0x7,
+	ICM_EVENT_RTD3_VETO = 0xa,
 };

 struct icm_pkg_header {
@@ -463,6 +464,13 @@ struct icm_tr_pkg_disconnect_xdomain_response {
 	uuid_t remote_uuid;
 };

+/* Ice Lake messages */
+
+struct icm_icl_event_rtd3_veto {
+	struct icm_pkg_header hdr;
+	u32 veto_reason;
+};
+
 /* XDomain messages */

 struct tb_xdomain_header {
...
@@ -211,7 +211,7 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
 		return NULL;
 	}
 	tb_pci_init_path(path);
-	tunnel->paths[TB_PCI_PATH_UP] = path;
+	tunnel->paths[TB_PCI_PATH_DOWN] = path;

 	path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
 			     "PCIe Up");
@@ -220,7 +220,7 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
 		return NULL;
 	}
 	tb_pci_init_path(path);
-	tunnel->paths[TB_PCI_PATH_DOWN] = path;
+	tunnel->paths[TB_PCI_PATH_UP] = path;

 	return tunnel;
 }
...
@@ -636,7 +636,7 @@ static ssize_t key_show(struct device *dev, struct device_attribute *attr,
 	 * It should be null terminated but anything else is pretty much
 	 * allowed.
 	 */
-	return sprintf(buf, "%*pEp\n", (int)strlen(svc->key), svc->key);
+	return sprintf(buf, "%*pE\n", (int)strlen(svc->key), svc->key);
 }
 static DEVICE_ATTR_RO(key);
...
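
Background on the fix (not part of the diff): with the kernel's escaped-buffer specifier, plain %*pE defaults to escaping special and non-printable characters (ESCAPE_ANY_NP, per Documentation/core-api/printk-formats.rst), which suits a raw key buffer; trailing flags such as the 'p' in %*pEp narrow the escaping rules. A kernel-style sketch with a made-up key:

#include <linux/kernel.h>

static void demo_print_key(void)
{
	/* Hypothetical key containing a non-printable byte */
	static const char key[] = { 'k', 0x01, 'y' };

	/* Non-printable bytes come out escaped rather than raw */
	pr_info("key: %*pE\n", (int)sizeof(key), key);
}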
@@ -429,6 +429,7 @@ static inline struct tb_xdomain *tb_service_parent(struct tb_service *svc)
  * @lock: Must be held during ring creation/destruction. Is acquired by
  *	  interrupt_work when dispatching interrupts to individual rings.
  * @pdev: Pointer to the PCI device
+ * @ops: NHI specific optional ops
  * @iobase: MMIO space of the NHI
  * @tx_rings: All Tx rings available on this host controller
  * @rx_rings: All Rx rings available on this host controller
@@ -442,6 +443,7 @@ static inline struct tb_xdomain *tb_service_parent(struct tb_service *svc)
 struct tb_nhi {
 	spinlock_t lock;
 	struct pci_dev *pdev;
+	const struct tb_nhi_ops *ops;
 	void __iomem *iobase;
 	struct tb_ring **tx_rings;
 	struct tb_ring **rx_rings;
...