Commit 376d6b02 authored by Greg Kroah-Hartman's avatar Greg Kroah-Hartman

Merge tag 'thunderbolt-for-v5.19-rc1' of...

Merge tag 'thunderbolt-for-v5.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt into usb-next

Mika writes:

thunderbolt: Changes for v5.19 merge window

This includes following Thunderbolt/USB4 changes for the v5.19 merge
window:

  * Improvements for Thunderbolt 1 DisplayPort tunneling
  * Link USB4 ports to their USB Type-C connectors
  * Lane bonding support for host-to-host (XDomain) connections
  * Buffer allocation improvement for devices with no DisplayPort
    adapters
  * Few cleanups and minor fixes.

All these have been in linux-next with no reported issues except that
there is a minor merge conflict with the kunit-next tree because one of
the commits touches the driver KUnit tests.

* tag 'thunderbolt-for-v5.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt:
  thunderbolt: Add KUnit test for devices with no DisplayPort adapters
  thunderbolt: Fix buffer allocation of devices with no DisplayPort adapters
  thunderbolt: Add support for XDomain lane bonding
  thunderbolt: Ignore port locked error in tb_port_wait_for_link_width()
  thunderbolt: Split setting link width and lane bonding into own functions
  thunderbolt: Move tb_port_state() prototype to correct place
  thunderbolt: Add debug logging when lane is enabled/disabled
  thunderbolt: Link USB4 ports to their USB Type-C connectors
  misc/mei: Add NULL check to component match callback functions
  thunderbolt: Use different lane for second DisplayPort tunnel
  thunderbolt: Dump path config space entries during discovery
  thunderbolt: Use decimal number with port numbers
  thunderbolt: Fix typo in comment
  thunderbolt: Replace usage of found with dedicated list iterator variable
parents 74f55a62 c7c99a09
@@ -293,6 +293,16 @@ Contact:	thunderbolt-software@lists.01.org
 Description:	This contains XDomain service specific settings as
 		bitmask. Format: %x
 
+What:		/sys/bus/thunderbolt/devices/usb4_portX/connector
+Date:		April 2022
+Contact:	Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+		Symlink to the USB Type-C connector. This link is only
+		created when USB Type-C Connector Class is enabled,
+		and only if the system firmware is capable of
+		describing the connection between a port and its
+		connector.
+
 What:		/sys/bus/thunderbolt/devices/usb4_portX/link
 Date:		Sep 2021
 KernelVersion:	v5.14
...
@@ -784,7 +784,7 @@ static int mei_hdcp_component_match(struct device *dev, int subcomponent,
 {
 	struct device *base = data;
 
-	if (strcmp(dev->driver->name, "i915") ||
+	if (!dev->driver || strcmp(dev->driver->name, "i915") ||
 	    subcomponent != I915_COMPONENT_HDCP)
 		return 0;
...
@@ -131,7 +131,7 @@ static int mei_pxp_component_match(struct device *dev, int subcomponent,
 {
 	struct device *base = data;
 
-	if (strcmp(dev->driver->name, "i915") ||
+	if (!dev->driver || strcmp(dev->driver->name, "i915") ||
 	    subcomponent != I915_COMPONENT_PXP)
 		return 0;
...
@@ -158,21 +158,20 @@ static bool tb_cfg_request_is_active(struct tb_cfg_request *req)
 static struct tb_cfg_request *
 tb_cfg_request_find(struct tb_ctl *ctl, struct ctl_pkg *pkg)
 {
-	struct tb_cfg_request *req;
-	bool found = false;
+	struct tb_cfg_request *req = NULL, *iter;
 
 	mutex_lock(&pkg->ctl->request_queue_lock);
-	list_for_each_entry(req, &pkg->ctl->request_queue, list) {
-		tb_cfg_request_get(req);
-		if (req->match(req, pkg)) {
-			found = true;
+	list_for_each_entry(iter, &pkg->ctl->request_queue, list) {
+		tb_cfg_request_get(iter);
+		if (iter->match(iter, pkg)) {
+			req = iter;
 			break;
 		}
-		tb_cfg_request_put(req);
+		tb_cfg_request_put(iter);
 	}
 	mutex_unlock(&pkg->ctl->request_queue_lock);
 
-	return found ? req : NULL;
+	return req;
 }
 
 /* utility functions */
...
@@ -1207,7 +1207,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	nhi->pdev = pdev;
 	nhi->ops = (const struct tb_nhi_ops *)id->driver_data;
-	/* cannot fail - table is allocated bin pcim_iomap_regions */
+	/* cannot fail - table is allocated in pcim_iomap_regions */
 	nhi->iobase = pcim_iomap_table(pdev)[0];
 	nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
 	dev_dbg(&pdev->dev, "total paths: %d\n", nhi->hop_count);
...
...@@ -166,6 +166,9 @@ struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid, ...@@ -166,6 +166,9 @@ struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
return NULL; return NULL;
} }
tb_dbg(path->tb, "discovering %s path starting from %llx:%u\n",
path->name, tb_route(src->sw), src->port);
p = src; p = src;
h = src_hopid; h = src_hopid;
...@@ -198,10 +201,13 @@ struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid, ...@@ -198,10 +201,13 @@ struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
path->hops[i].out_port = out_port; path->hops[i].out_port = out_port;
path->hops[i].next_hop_index = next_hop; path->hops[i].next_hop_index = next_hop;
tb_dump_hop(&path->hops[i], &hop);
h = next_hop; h = next_hop;
p = out_port->remote; p = out_port->remote;
} }
tb_dbg(path->tb, "path discovery complete\n");
return path; return path;
err: err:
......
...@@ -693,8 +693,14 @@ static int __tb_port_enable(struct tb_port *port, bool enable) ...@@ -693,8 +693,14 @@ static int __tb_port_enable(struct tb_port *port, bool enable)
else else
phy |= LANE_ADP_CS_1_LD; phy |= LANE_ADP_CS_1_LD;
return tb_port_write(port, &phy, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1); ret = tb_port_write(port, &phy, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
if (ret)
return ret;
tb_port_dbg(port, "lane %sabled\n", enable ? "en" : "dis");
return 0;
} }
/** /**
...@@ -993,7 +999,17 @@ static bool tb_port_is_width_supported(struct tb_port *port, int width) ...@@ -993,7 +999,17 @@ static bool tb_port_is_width_supported(struct tb_port *port, int width)
return !!(widths & width); return !!(widths & width);
} }
static int tb_port_set_link_width(struct tb_port *port, unsigned int width) /**
* tb_port_set_link_width() - Set target link width of the lane adapter
* @port: Lane adapter
* @width: Target link width (%1 or %2)
*
* Sets the target link width of the lane adapter to @width. Does not
* enable/disable lane bonding. For that call tb_port_set_lane_bonding().
*
* Return: %0 in case of success and negative errno in case of error
*/
int tb_port_set_link_width(struct tb_port *port, unsigned int width)
{ {
u32 val; u32 val;
int ret; int ret;
...@@ -1020,12 +1036,58 @@ static int tb_port_set_link_width(struct tb_port *port, unsigned int width) ...@@ -1020,12 +1036,58 @@ static int tb_port_set_link_width(struct tb_port *port, unsigned int width)
return -EINVAL; return -EINVAL;
} }
val |= LANE_ADP_CS_1_LB;
return tb_port_write(port, &val, TB_CFG_PORT, return tb_port_write(port, &val, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1); port->cap_phy + LANE_ADP_CS_1, 1);
} }
/**
* tb_port_set_lane_bonding() - Enable/disable lane bonding
* @port: Lane adapter
* @bonding: enable/disable bonding
*
* Enables or disables lane bonding. This should be called after target
* link width has been set (tb_port_set_link_width()). Note in most
* cases one should use tb_port_lane_bonding_enable() instead to enable
* lane bonding.
*
* As a side effect sets @port->bonding accordingly (and does the same
* for lane 1 too).
*
* Return: %0 in case of success and negative errno in case of error
*/
int tb_port_set_lane_bonding(struct tb_port *port, bool bonding)
{
u32 val;
int ret;
if (!port->cap_phy)
return -EINVAL;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
if (ret)
return ret;
if (bonding)
val |= LANE_ADP_CS_1_LB;
else
val &= ~LANE_ADP_CS_1_LB;
ret = tb_port_write(port, &val, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
if (ret)
return ret;
/*
* When lane 0 bonding is set it will affect lane 1 too so
* update both.
*/
port->bonded = bonding;
port->dual_link_port->bonded = bonding;
return 0;
}
/** /**
* tb_port_lane_bonding_enable() - Enable bonding on port * tb_port_lane_bonding_enable() - Enable bonding on port
* @port: port to enable * @port: port to enable
...@@ -1050,22 +1112,27 @@ int tb_port_lane_bonding_enable(struct tb_port *port) ...@@ -1050,22 +1112,27 @@ int tb_port_lane_bonding_enable(struct tb_port *port)
if (ret == 1) { if (ret == 1) {
ret = tb_port_set_link_width(port, 2); ret = tb_port_set_link_width(port, 2);
if (ret) if (ret)
return ret; goto err_lane0;
} }
ret = tb_port_get_link_width(port->dual_link_port); ret = tb_port_get_link_width(port->dual_link_port);
if (ret == 1) { if (ret == 1) {
ret = tb_port_set_link_width(port->dual_link_port, 2); ret = tb_port_set_link_width(port->dual_link_port, 2);
if (ret) { if (ret)
tb_port_set_link_width(port, 1); goto err_lane0;
return ret;
}
} }
port->bonded = true; ret = tb_port_set_lane_bonding(port, true);
port->dual_link_port->bonded = true; if (ret)
goto err_lane1;
return 0; return 0;
err_lane1:
tb_port_set_link_width(port->dual_link_port, 1);
err_lane0:
tb_port_set_link_width(port, 1);
return ret;
} }
/** /**
...@@ -1074,13 +1141,10 @@ int tb_port_lane_bonding_enable(struct tb_port *port) ...@@ -1074,13 +1141,10 @@ int tb_port_lane_bonding_enable(struct tb_port *port)
* *
* Disable bonding by setting the link width of the port and the * Disable bonding by setting the link width of the port and the
* other port in case of dual link port. * other port in case of dual link port.
*
*/ */
void tb_port_lane_bonding_disable(struct tb_port *port) void tb_port_lane_bonding_disable(struct tb_port *port)
{ {
port->dual_link_port->bonded = false; tb_port_set_lane_bonding(port, false);
port->bonded = false;
tb_port_set_link_width(port->dual_link_port, 1); tb_port_set_link_width(port->dual_link_port, 1);
tb_port_set_link_width(port, 1); tb_port_set_link_width(port, 1);
} }
...@@ -1104,10 +1168,17 @@ int tb_port_wait_for_link_width(struct tb_port *port, int width, ...@@ -1104,10 +1168,17 @@ int tb_port_wait_for_link_width(struct tb_port *port, int width,
do { do {
ret = tb_port_get_link_width(port); ret = tb_port_get_link_width(port);
if (ret < 0) if (ret < 0) {
return ret; /*
else if (ret == width) * Sometimes we get port locked error when
* polling the lanes so we can ignore it and
* retry.
*/
if (ret != -EACCES)
return ret;
} else if (ret == width) {
return 0; return 0;
}
usleep_range(1000, 2000); usleep_range(1000, 2000);
} while (ktime_before(ktime_get(), timeout)); } while (ktime_before(ktime_get(), timeout));
......
...@@ -169,12 +169,6 @@ static void tb_discover_tunnels(struct tb *tb) ...@@ -169,12 +169,6 @@ static void tb_discover_tunnels(struct tb *tb)
static int tb_port_configure_xdomain(struct tb_port *port) static int tb_port_configure_xdomain(struct tb_port *port)
{ {
/*
* XDomain paths currently only support single lane so we must
* disable the other lane according to USB4 spec.
*/
tb_port_disable(port->dual_link_port);
if (tb_switch_is_usb4(port->sw)) if (tb_switch_is_usb4(port->sw))
return usb4_port_configure_xdomain(port); return usb4_port_configure_xdomain(port);
return tb_lc_configure_xdomain(port); return tb_lc_configure_xdomain(port);
...@@ -867,7 +861,7 @@ static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in) ...@@ -867,7 +861,7 @@ static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
static void tb_tunnel_dp(struct tb *tb) static void tb_tunnel_dp(struct tb *tb)
{ {
int available_up, available_down, ret; int available_up, available_down, ret, link_nr;
struct tb_cm *tcm = tb_priv(tb); struct tb_cm *tcm = tb_priv(tb);
struct tb_port *port, *in, *out; struct tb_port *port, *in, *out;
struct tb_tunnel *tunnel; struct tb_tunnel *tunnel;
...@@ -912,6 +906,20 @@ static void tb_tunnel_dp(struct tb *tb) ...@@ -912,6 +906,20 @@ static void tb_tunnel_dp(struct tb *tb)
return; return;
} }
/*
* This is only applicable to links that are not bonded (so
* when Thunderbolt 1 hardware is involved somewhere in the
* topology). For these try to share the DP bandwidth between
* the two lanes.
*/
link_nr = 1;
list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
if (tb_tunnel_is_dp(tunnel)) {
link_nr = 0;
break;
}
}
/* /*
* DP stream needs the domain to be active so runtime resume * DP stream needs the domain to be active so runtime resume
* both ends of the tunnel. * both ends of the tunnel.
...@@ -943,7 +951,8 @@ static void tb_tunnel_dp(struct tb *tb) ...@@ -943,7 +951,8 @@ static void tb_tunnel_dp(struct tb *tb)
tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n", tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
available_up, available_down); available_up, available_down);
tunnel = tb_tunnel_alloc_dp(tb, in, out, available_up, available_down); tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
available_down);
if (!tunnel) { if (!tunnel) {
tb_port_dbg(out, "could not allocate DP tunnel\n"); tb_port_dbg(out, "could not allocate DP tunnel\n");
goto err_reclaim; goto err_reclaim;
......
@@ -674,7 +674,7 @@ static inline int tb_port_write(struct tb_port *port, const void *buffer,
 #define __TB_PORT_PRINT(level, _port, fmt, arg...)                      \
 	do {                                                            \
 		const struct tb_port *__port = (_port);                 \
-		level(__port->sw->tb, "%llx:%x: " fmt,                  \
+		level(__port->sw->tb, "%llx:%u: " fmt,                  \
 		      tb_route(__port->sw), __port->port, ## arg);      \
 	} while (0)
 
 #define tb_port_WARN(port, fmt, arg...)                                 \
...@@ -991,6 +991,7 @@ int tb_switch_pcie_l1_enable(struct tb_switch *sw); ...@@ -991,6 +991,7 @@ int tb_switch_pcie_l1_enable(struct tb_switch *sw);
int tb_switch_xhci_connect(struct tb_switch *sw); int tb_switch_xhci_connect(struct tb_switch *sw);
void tb_switch_xhci_disconnect(struct tb_switch *sw); void tb_switch_xhci_disconnect(struct tb_switch *sw);
int tb_port_state(struct tb_port *port);
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged); int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged);
int tb_port_add_nfc_credits(struct tb_port *port, int credits); int tb_port_add_nfc_credits(struct tb_port *port, int credits);
int tb_port_clear_counter(struct tb_port *port, int counter); int tb_port_clear_counter(struct tb_port *port, int counter);
...@@ -1023,7 +1024,8 @@ static inline bool tb_port_use_credit_allocation(const struct tb_port *port) ...@@ -1023,7 +1024,8 @@ static inline bool tb_port_use_credit_allocation(const struct tb_port *port)
int tb_port_get_link_speed(struct tb_port *port); int tb_port_get_link_speed(struct tb_port *port);
int tb_port_get_link_width(struct tb_port *port); int tb_port_get_link_width(struct tb_port *port);
int tb_port_state(struct tb_port *port); int tb_port_set_link_width(struct tb_port *port, unsigned int width);
int tb_port_set_lane_bonding(struct tb_port *port, bool bonding);
int tb_port_lane_bonding_enable(struct tb_port *port); int tb_port_lane_bonding_enable(struct tb_port *port);
void tb_port_lane_bonding_disable(struct tb_port *port); void tb_port_lane_bonding_disable(struct tb_port *port);
int tb_port_wait_for_link_width(struct tb_port *port, int width, int tb_port_wait_for_link_width(struct tb_port *port, int width,
......
...@@ -527,6 +527,10 @@ enum tb_xdp_type { ...@@ -527,6 +527,10 @@ enum tb_xdp_type {
PROPERTIES_CHANGED_RESPONSE, PROPERTIES_CHANGED_RESPONSE,
ERROR_RESPONSE, ERROR_RESPONSE,
UUID_REQUEST = 12, UUID_REQUEST = 12,
LINK_STATE_STATUS_REQUEST = 15,
LINK_STATE_STATUS_RESPONSE,
LINK_STATE_CHANGE_REQUEST,
LINK_STATE_CHANGE_RESPONSE,
}; };
struct tb_xdp_header { struct tb_xdp_header {
...@@ -540,6 +544,41 @@ struct tb_xdp_error_response { ...@@ -540,6 +544,41 @@ struct tb_xdp_error_response {
u32 error; u32 error;
}; };
struct tb_xdp_link_state_status {
struct tb_xdp_header hdr;
};
struct tb_xdp_link_state_status_response {
union {
struct tb_xdp_error_response err;
struct {
struct tb_xdp_header hdr;
u32 status;
u8 slw;
u8 tlw;
u8 sls;
u8 tls;
};
};
};
struct tb_xdp_link_state_change {
struct tb_xdp_header hdr;
u8 tlw;
u8 tls;
u16 reserved;
};
struct tb_xdp_link_state_change_response {
union {
struct tb_xdp_error_response err;
struct {
struct tb_xdp_header hdr;
u32 status;
};
};
};
struct tb_xdp_uuid { struct tb_xdp_uuid {
struct tb_xdp_header hdr; struct tb_xdp_header hdr;
}; };
......
@@ -311,11 +311,16 @@ struct tb_regs_port_header {
 
 /* Lane adapter registers */
 #define LANE_ADP_CS_0				0x00
+#define LANE_ADP_CS_0_SUPPORTED_SPEED_MASK	GENMASK(19, 16)
+#define LANE_ADP_CS_0_SUPPORTED_SPEED_SHIFT	16
 #define LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK	GENMASK(25, 20)
 #define LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT	20
+#define LANE_ADP_CS_0_SUPPORTED_WIDTH_DUAL	0x2
 #define LANE_ADP_CS_0_CL0S_SUPPORT		BIT(26)
 #define LANE_ADP_CS_0_CL1_SUPPORT		BIT(27)
 #define LANE_ADP_CS_1				0x01
+#define LANE_ADP_CS_1_TARGET_SPEED_MASK		GENMASK(3, 0)
+#define LANE_ADP_CS_1_TARGET_SPEED_GEN3		0xc
 #define LANE_ADP_CS_1_TARGET_WIDTH_MASK		GENMASK(9, 4)
 #define LANE_ADP_CS_1_TARGET_WIDTH_SHIFT	4
 #define LANE_ADP_CS_1_TARGET_WIDTH_SINGLE	0x1
...
...@@ -341,6 +341,47 @@ static struct tb_switch *alloc_dev_with_dpin(struct kunit *test, ...@@ -341,6 +341,47 @@ static struct tb_switch *alloc_dev_with_dpin(struct kunit *test,
return sw; return sw;
} }
static struct tb_switch *alloc_dev_without_dp(struct kunit *test,
struct tb_switch *parent,
u64 route, bool bonded)
{
struct tb_switch *sw;
int i;
sw = alloc_dev_default(test, parent, route, bonded);
if (!sw)
return NULL;
/*
* Device with:
* 2x USB4 Adapters (adapters 1,2 and 3,4),
* 1x PCIe Upstream (adapter 9),
* 1x PCIe Downstream (adapter 10),
* 1x USB3 Upstream (adapter 16),
* 1x USB3 Downstream (adapter 17)
*/
for (i = 5; i <= 8; i++)
sw->ports[i].disabled = true;
for (i = 11; i <= 14; i++)
sw->ports[i].disabled = true;
sw->ports[13].cap_adap = 0;
sw->ports[14].cap_adap = 0;
for (i = 18; i <= 19; i++)
sw->ports[i].disabled = true;
sw->generation = 4;
sw->credit_allocation = true;
sw->max_usb3_credits = 109;
sw->min_dp_aux_credits = 0;
sw->min_dp_main_credits = 0;
sw->max_pcie_credits = 30;
sw->max_dma_credits = 1;
return sw;
}
static struct tb_switch *alloc_dev_usb4(struct kunit *test, static struct tb_switch *alloc_dev_usb4(struct kunit *test,
struct tb_switch *parent, struct tb_switch *parent,
u64 route, bool bonded) u64 route, bool bonded)
...@@ -1348,7 +1389,7 @@ static void tb_test_tunnel_dp(struct kunit *test) ...@@ -1348,7 +1389,7 @@ static void tb_test_tunnel_dp(struct kunit *test)
in = &host->ports[5]; in = &host->ports[5];
out = &dev->ports[13]; out = &dev->ports[13];
tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0); tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
KUNIT_ASSERT_TRUE(test, tunnel != NULL); KUNIT_ASSERT_TRUE(test, tunnel != NULL);
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP); KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in); KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
...@@ -1394,7 +1435,7 @@ static void tb_test_tunnel_dp_chain(struct kunit *test) ...@@ -1394,7 +1435,7 @@ static void tb_test_tunnel_dp_chain(struct kunit *test)
in = &host->ports[5]; in = &host->ports[5];
out = &dev4->ports[14]; out = &dev4->ports[14];
tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0); tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
KUNIT_ASSERT_TRUE(test, tunnel != NULL); KUNIT_ASSERT_TRUE(test, tunnel != NULL);
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP); KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in); KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
...@@ -1444,7 +1485,7 @@ static void tb_test_tunnel_dp_tree(struct kunit *test) ...@@ -1444,7 +1485,7 @@ static void tb_test_tunnel_dp_tree(struct kunit *test)
in = &dev2->ports[13]; in = &dev2->ports[13];
out = &dev5->ports[13]; out = &dev5->ports[13];
tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0); tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
KUNIT_ASSERT_TRUE(test, tunnel != NULL); KUNIT_ASSERT_TRUE(test, tunnel != NULL);
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP); KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in); KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
...@@ -1509,7 +1550,7 @@ static void tb_test_tunnel_dp_max_length(struct kunit *test) ...@@ -1509,7 +1550,7 @@ static void tb_test_tunnel_dp_max_length(struct kunit *test)
in = &dev6->ports[13]; in = &dev6->ports[13];
out = &dev12->ports[13]; out = &dev12->ports[13];
tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0); tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
KUNIT_ASSERT_TRUE(test, tunnel != NULL); KUNIT_ASSERT_TRUE(test, tunnel != NULL);
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP); KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in); KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
...@@ -1627,7 +1668,7 @@ static void tb_test_tunnel_port_on_path(struct kunit *test) ...@@ -1627,7 +1668,7 @@ static void tb_test_tunnel_port_on_path(struct kunit *test)
in = &dev2->ports[13]; in = &dev2->ports[13];
out = &dev5->ports[13]; out = &dev5->ports[13];
dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0); dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
KUNIT_ASSERT_TRUE(test, dp_tunnel != NULL); KUNIT_ASSERT_TRUE(test, dp_tunnel != NULL);
KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, in)); KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, in));
...@@ -1996,6 +2037,56 @@ static void tb_test_credit_alloc_pcie(struct kunit *test) ...@@ -1996,6 +2037,56 @@ static void tb_test_credit_alloc_pcie(struct kunit *test)
tb_tunnel_free(tunnel); tb_tunnel_free(tunnel);
} }
static void tb_test_credit_alloc_without_dp(struct kunit *test)
{
struct tb_switch *host, *dev;
struct tb_port *up, *down;
struct tb_tunnel *tunnel;
struct tb_path *path;
host = alloc_host_usb4(test);
dev = alloc_dev_without_dp(test, host, 0x1, true);
/*
* The device has no DP therefore baMinDPmain = baMinDPaux = 0
*
* Create PCIe path with buffers less than baMaxPCIe.
*
* For a device with buffers configurations:
* baMaxUSB3 = 109
* baMinDPaux = 0
* baMinDPmain = 0
* baMaxPCIe = 30
* baMaxHI = 1
* Remaining Buffers = Total - (CP + DP) = 120 - (2 + 0) = 118
* PCIe Credits = Max(6, Min(baMaxPCIe, Remaining Buffers - baMaxUSB3)
* = Max(6, Min(30, 9) = 9
*/
down = &host->ports[8];
up = &dev->ports[9];
tunnel = tb_tunnel_alloc_pci(NULL, up, down);
KUNIT_ASSERT_TRUE(test, tunnel != NULL);
KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
/* PCIe downstream path */
path = tunnel->paths[0];
KUNIT_ASSERT_EQ(test, path->path_length, 2);
KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 9U);
/* PCIe upstream path */
path = tunnel->paths[1];
KUNIT_ASSERT_EQ(test, path->path_length, 2);
KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);
tb_tunnel_free(tunnel);
}
static void tb_test_credit_alloc_dp(struct kunit *test) static void tb_test_credit_alloc_dp(struct kunit *test)
{ {
struct tb_switch *host, *dev; struct tb_switch *host, *dev;
...@@ -2009,7 +2100,7 @@ static void tb_test_credit_alloc_dp(struct kunit *test) ...@@ -2009,7 +2100,7 @@ static void tb_test_credit_alloc_dp(struct kunit *test)
in = &host->ports[5]; in = &host->ports[5];
out = &dev->ports[14]; out = &dev->ports[14];
tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0); tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
KUNIT_ASSERT_TRUE(test, tunnel != NULL); KUNIT_ASSERT_TRUE(test, tunnel != NULL);
KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3); KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
...@@ -2245,7 +2336,7 @@ static struct tb_tunnel *TB_TEST_DP_TUNNEL1(struct kunit *test, ...@@ -2245,7 +2336,7 @@ static struct tb_tunnel *TB_TEST_DP_TUNNEL1(struct kunit *test,
in = &host->ports[5]; in = &host->ports[5];
out = &dev->ports[13]; out = &dev->ports[13];
dp_tunnel1 = tb_tunnel_alloc_dp(NULL, in, out, 0, 0); dp_tunnel1 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
KUNIT_ASSERT_TRUE(test, dp_tunnel1 != NULL); KUNIT_ASSERT_TRUE(test, dp_tunnel1 != NULL);
KUNIT_ASSERT_EQ(test, dp_tunnel1->npaths, (size_t)3); KUNIT_ASSERT_EQ(test, dp_tunnel1->npaths, (size_t)3);
...@@ -2282,7 +2373,7 @@ static struct tb_tunnel *TB_TEST_DP_TUNNEL2(struct kunit *test, ...@@ -2282,7 +2373,7 @@ static struct tb_tunnel *TB_TEST_DP_TUNNEL2(struct kunit *test,
in = &host->ports[6]; in = &host->ports[6];
out = &dev->ports[14]; out = &dev->ports[14];
dp_tunnel2 = tb_tunnel_alloc_dp(NULL, in, out, 0, 0); dp_tunnel2 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
KUNIT_ASSERT_TRUE(test, dp_tunnel2 != NULL); KUNIT_ASSERT_TRUE(test, dp_tunnel2 != NULL);
KUNIT_ASSERT_EQ(test, dp_tunnel2->npaths, (size_t)3); KUNIT_ASSERT_EQ(test, dp_tunnel2->npaths, (size_t)3);
...@@ -2709,6 +2800,7 @@ static struct kunit_case tb_test_cases[] = { ...@@ -2709,6 +2800,7 @@ static struct kunit_case tb_test_cases[] = {
KUNIT_CASE(tb_test_credit_alloc_legacy_not_bonded), KUNIT_CASE(tb_test_credit_alloc_legacy_not_bonded),
KUNIT_CASE(tb_test_credit_alloc_legacy_bonded), KUNIT_CASE(tb_test_credit_alloc_legacy_bonded),
KUNIT_CASE(tb_test_credit_alloc_pcie), KUNIT_CASE(tb_test_credit_alloc_pcie),
KUNIT_CASE(tb_test_credit_alloc_without_dp),
KUNIT_CASE(tb_test_credit_alloc_dp), KUNIT_CASE(tb_test_credit_alloc_dp),
KUNIT_CASE(tb_test_credit_alloc_usb3), KUNIT_CASE(tb_test_credit_alloc_usb3),
KUNIT_CASE(tb_test_credit_alloc_dma), KUNIT_CASE(tb_test_credit_alloc_dma),
......
...@@ -102,8 +102,11 @@ static unsigned int tb_available_credits(const struct tb_port *port, ...@@ -102,8 +102,11 @@ static unsigned int tb_available_credits(const struct tb_port *port,
* Maximum number of DP streams possible through the * Maximum number of DP streams possible through the
* lane adapter. * lane adapter.
*/ */
ndp = (credits - (usb3 + pcie + spare)) / if (sw->min_dp_aux_credits + sw->min_dp_main_credits)
(sw->min_dp_aux_credits + sw->min_dp_main_credits); ndp = (credits - (usb3 + pcie + spare)) /
(sw->min_dp_aux_credits + sw->min_dp_main_credits);
else
ndp = 0;
} else { } else {
ndp = 0; ndp = 0;
} }
...@@ -858,6 +861,7 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in, ...@@ -858,6 +861,7 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
* @tb: Pointer to the domain structure * @tb: Pointer to the domain structure
* @in: DP in adapter port * @in: DP in adapter port
* @out: DP out adapter port * @out: DP out adapter port
* @link_nr: Preferred lane adapter when the link is not bonded
* @max_up: Maximum available upstream bandwidth for the DP tunnel (%0 * @max_up: Maximum available upstream bandwidth for the DP tunnel (%0
* if not limited) * if not limited)
* @max_down: Maximum available downstream bandwidth for the DP tunnel * @max_down: Maximum available downstream bandwidth for the DP tunnel
...@@ -869,8 +873,8 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in, ...@@ -869,8 +873,8 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
* Return: Returns a tb_tunnel on success or NULL on failure. * Return: Returns a tb_tunnel on success or NULL on failure.
*/ */
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in, struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
struct tb_port *out, int max_up, struct tb_port *out, int link_nr,
int max_down) int max_up, int max_down)
{ {
struct tb_tunnel *tunnel; struct tb_tunnel *tunnel;
struct tb_path **paths; struct tb_path **paths;
...@@ -894,21 +898,21 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in, ...@@ -894,21 +898,21 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
paths = tunnel->paths; paths = tunnel->paths;
path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID, path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
1, "Video"); link_nr, "Video");
if (!path) if (!path)
goto err_free; goto err_free;
tb_dp_init_video_path(path); tb_dp_init_video_path(path);
paths[TB_DP_VIDEO_PATH_OUT] = path; paths[TB_DP_VIDEO_PATH_OUT] = path;
path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out, path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
TB_DP_AUX_TX_HOPID, 1, "AUX TX"); TB_DP_AUX_TX_HOPID, link_nr, "AUX TX");
if (!path) if (!path)
goto err_free; goto err_free;
tb_dp_init_aux_path(path); tb_dp_init_aux_path(path);
paths[TB_DP_AUX_PATH_OUT] = path; paths[TB_DP_AUX_PATH_OUT] = path;
path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in, path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
TB_DP_AUX_RX_HOPID, 1, "AUX RX"); TB_DP_AUX_RX_HOPID, link_nr, "AUX RX");
if (!path) if (!path)
goto err_free; goto err_free;
tb_dp_init_aux_path(path); tb_dp_init_aux_path(path);
......
@@ -71,8 +71,8 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
 struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
 					bool alloc_hopid);
 struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
-				     struct tb_port *out, int max_up,
-				     int max_down);
+				     struct tb_port *out, int link_nr,
+				     int max_up, int max_down);
 struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
 				      struct tb_port *dst, int transmit_path,
 				      int transmit_ring, int receive_path,
...
...@@ -7,9 +7,37 @@ ...@@ -7,9 +7,37 @@
*/ */
#include <linux/pm_runtime.h> #include <linux/pm_runtime.h>
#include <linux/component.h>
#include <linux/property.h>
#include "tb.h" #include "tb.h"
static int connector_bind(struct device *dev, struct device *connector, void *data)
{
int ret;
ret = sysfs_create_link(&dev->kobj, &connector->kobj, "connector");
if (ret)
return ret;
ret = sysfs_create_link(&connector->kobj, &dev->kobj, dev_name(dev));
if (ret)
sysfs_remove_link(&dev->kobj, "connector");
return ret;
}
static void connector_unbind(struct device *dev, struct device *connector, void *data)
{
sysfs_remove_link(&connector->kobj, dev_name(dev));
sysfs_remove_link(&dev->kobj, "connector");
}
static const struct component_ops connector_ops = {
.bind = connector_bind,
.unbind = connector_unbind,
};
static ssize_t link_show(struct device *dev, struct device_attribute *attr, static ssize_t link_show(struct device *dev, struct device_attribute *attr,
char *buf) char *buf)
{ {
...@@ -246,6 +274,14 @@ struct usb4_port *usb4_port_device_add(struct tb_port *port) ...@@ -246,6 +274,14 @@ struct usb4_port *usb4_port_device_add(struct tb_port *port)
return ERR_PTR(ret); return ERR_PTR(ret);
} }
if (dev_fwnode(&usb4->dev)) {
ret = component_add(&usb4->dev, &connector_ops);
if (ret) {
dev_err(&usb4->dev, "failed to add component\n");
device_unregister(&usb4->dev);
}
}
pm_runtime_no_callbacks(&usb4->dev); pm_runtime_no_callbacks(&usb4->dev);
pm_runtime_set_active(&usb4->dev); pm_runtime_set_active(&usb4->dev);
pm_runtime_enable(&usb4->dev); pm_runtime_enable(&usb4->dev);
...@@ -265,6 +301,8 @@ struct usb4_port *usb4_port_device_add(struct tb_port *port) ...@@ -265,6 +301,8 @@ struct usb4_port *usb4_port_device_add(struct tb_port *port)
*/ */
void usb4_port_device_remove(struct usb4_port *usb4) void usb4_port_device_remove(struct usb4_port *usb4)
{ {
if (dev_fwnode(&usb4->dev))
component_del(&usb4->dev, &connector_ops);
device_unregister(&usb4->dev); device_unregister(&usb4->dev);
} }
......
This diff is collapsed.
...@@ -198,15 +198,15 @@ void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir); ...@@ -198,15 +198,15 @@ void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir);
* @local_property_block_len: Length of the @local_property_block in dwords * @local_property_block_len: Length of the @local_property_block in dwords
* @remote_properties: Properties exported by the remote domain * @remote_properties: Properties exported by the remote domain
* @remote_property_block_gen: Generation of @remote_properties * @remote_property_block_gen: Generation of @remote_properties
* @get_uuid_work: Work used to retrieve @remote_uuid * @state: Next XDomain discovery state to run
* @uuid_retries: Number of times left @remote_uuid is requested before * @state_work: Work used to run the next state
* giving up * @state_retries: Number of retries remain for the state
* @get_properties_work: Work used to get remote domain properties
* @properties_retries: Number of times left to read properties
* @properties_changed_work: Work used to notify the remote domain that * @properties_changed_work: Work used to notify the remote domain that
* our properties have changed * our properties have changed
* @properties_changed_retries: Number of times left to send properties * @properties_changed_retries: Number of times left to send properties
* changed notification * changed notification
* @bonding_possible: True if lane bonding is possible on local side
* @target_link_width: Target link width from the remote host
* @link: Root switch link the remote domain is connected (ICM only) * @link: Root switch link the remote domain is connected (ICM only)
* @depth: Depth in the chain the remote domain is connected (ICM only) * @depth: Depth in the chain the remote domain is connected (ICM only)
* *
...@@ -244,12 +244,13 @@ struct tb_xdomain { ...@@ -244,12 +244,13 @@ struct tb_xdomain {
u32 local_property_block_len; u32 local_property_block_len;
struct tb_property_dir *remote_properties; struct tb_property_dir *remote_properties;
u32 remote_property_block_gen; u32 remote_property_block_gen;
struct delayed_work get_uuid_work; int state;
int uuid_retries; struct delayed_work state_work;
struct delayed_work get_properties_work; int state_retries;
int properties_retries;
struct delayed_work properties_changed_work; struct delayed_work properties_changed_work;
int properties_changed_retries; int properties_changed_retries;
bool bonding_possible;
u8 target_link_width;
u8 link; u8 link;
u8 depth; u8 depth;
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment