Commit 447e238f authored by Linus Torvalds

Merge tag 'usb-5.14' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb

Pull USB fixes from Greg KH:
 "Here are a few tiny USB fixes for reported issues with some USB
  drivers.

  These fixes include:

   - gadget driver fixes for regressions

   - tcpm driver fix

   - dwc3 driver fixes

   - xhci renesas firmware loading fix, again.

   - usb serial option driver device id addition

   - usb serial ch341 revert for regression

  All of these have been in linux-next with no reported problems"

* tag 'usb-5.14' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb:
  usb: gadget: u_audio: fix race condition on endpoint stop
  usb: gadget: f_uac2: fixup feedback endpoint stop
  usb: typec: tcpm: Raise vdm_sm_running flag only when VDM SM is running
  usb: renesas-xhci: Prefer firmware loading on unknown ROM state
  usb: dwc3: gadget: Stop EP0 transfers during pullup disable
  usb: dwc3: gadget: Fix dwc3_calc_trbs_left()
  Revert "USB: serial: ch341: fix character loss at high transfer rates"
  USB: serial: option: add new VID/PID to support Fibocom FG150
parents 9f73eacd 068fdad2
......@@ -940,19 +940,19 @@ static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index)
static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
{
struct dwc3_trb *tmp;
u8 trbs_left;
/*
* If enqueue & dequeue are equal then it is either full or empty.
*
* One way to know for sure is if the TRB right before us has HWO bit
* set or not. If it has, then we're definitely full and can't fit any
* more transfers in our ring.
* If the enqueue & dequeue are equal then the TRB ring is either full
* or empty. It's considered full when there are DWC3_TRB_NUM-1 of TRBs
* pending to be processed by the driver.
*/
if (dep->trb_enqueue == dep->trb_dequeue) {
tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
if (tmp->ctrl & DWC3_TRB_CTRL_HWO)
/*
* If there is any request remained in the started_list at
* this point, that means there is no TRB available.
*/
if (!list_empty(&dep->started_list))
return 0;
return DWC3_TRB_NUM - 1;
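
For context on dwc3_calc_trbs_left(): when enqueue equals dequeue the ring is either completely empty or completely full, and the hunk above now disambiguates that by checking the endpoint's started_list instead of the previous TRB's HWO bit. Below is a minimal user-space sketch of that accounting, not the driver code itself; the 8-entry ring size and the helper name ring_slots_left() are invented for illustration, and the unequal-pointer branch only mirrors the function's unchanged wrap-around arithmetic.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define RING_SIZE 8u	/* illustrative; the driver uses DWC3_TRB_NUM */

/*
 * enq == deq: ring is either empty or full, resolved here (as in the
 * patch) by whether any request is still on the started list.
 * Otherwise the free count is the wrapped distance from enqueue to
 * dequeue, with one extra slot subtracted when dequeue has wrapped
 * behind enqueue, mirroring the driver's arithmetic.
 */
static unsigned int ring_slots_left(uint8_t enq, uint8_t deq,
				    bool started_list_empty)
{
	unsigned int left;

	if (enq == deq)
		return started_list_empty ? RING_SIZE - 1 : 0;

	left = (uint8_t)(deq - enq) & (RING_SIZE - 1);
	if (deq < enq)
		left--;

	return left;
}

int main(void)
{
	printf("%u\n", ring_slots_left(3, 3, true));	/* empty ring   -> 7 */
	printf("%u\n", ring_slots_left(3, 3, false));	/* full ring    -> 0 */
	printf("%u\n", ring_slots_left(5, 2, true));	/* wrapped case -> 4 */
	return 0;
}
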
......@@ -2243,10 +2243,8 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
if (ret == 0) {
dev_err(dwc->dev, "timed out waiting for SETUP phase\n");
return -ETIMEDOUT;
}
if (ret == 0)
dev_warn(dwc->dev, "timed out waiting for SETUP phase\n");
}
/*
......@@ -2458,6 +2456,7 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
/* begin to receive SETUP packets */
dwc->ep0state = EP0_SETUP_PHASE;
dwc->link_state = DWC3_LINK_STATE_SS_DIS;
dwc->delayed_status = false;
dwc3_ep0_out_start(dwc);
dwc3_gadget_enable_irq(dwc);
......
......@@ -230,7 +230,13 @@ static void u_audio_iso_fback_complete(struct usb_ep *ep,
int status = req->status;
/* i/f shutting down */
if (!prm->fb_ep_enabled || req->status == -ESHUTDOWN)
if (!prm->fb_ep_enabled) {
kfree(req->buf);
usb_ep_free_request(ep, req);
return;
}
if (req->status == -ESHUTDOWN)
return;
/*
......@@ -388,8 +394,6 @@ static inline void free_ep(struct uac_rtd_params *prm, struct usb_ep *ep)
if (!prm->ep_enabled)
return;
prm->ep_enabled = false;
audio_dev = uac->audio_dev;
params = &audio_dev->params;
......@@ -407,6 +411,8 @@ static inline void free_ep(struct uac_rtd_params *prm, struct usb_ep *ep)
}
}
prm->ep_enabled = false;
if (usb_ep_disable(ep))
dev_err(uac->card->dev, "%s:%d Error!\n", __func__, __LINE__);
}
......@@ -418,15 +424,16 @@ static inline void free_ep_fback(struct uac_rtd_params *prm, struct usb_ep *ep)
if (!prm->fb_ep_enabled)
return;
prm->fb_ep_enabled = false;
if (prm->req_fback) {
usb_ep_dequeue(ep, prm->req_fback);
kfree(prm->req_fback->buf);
usb_ep_free_request(ep, prm->req_fback);
if (usb_ep_dequeue(ep, prm->req_fback)) {
kfree(prm->req_fback->buf);
usb_ep_free_request(ep, prm->req_fback);
}
prm->req_fback = NULL;
}
prm->fb_ep_enabled = false;
if (usb_ep_disable(ep))
dev_err(uac->card->dev, "%s:%d Error!\n", __func__, __LINE__);
}
......
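
The u_audio hunks above implement an ownership hand-off: free_ep_fback() clears fb_ep_enabled before dequeuing, the completion handler frees the request itself when it sees the flag already cleared, and the stopper frees it only when usb_ep_dequeue() returns an error. The following is a toy user-space model of that hand-off, with invented names (fake_request, ep_dequeue, stop_fback_ep); the real driver uses struct usb_request and usb_ep_dequeue().

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct fake_request {
	void *buf;
	void (*complete)(struct fake_request *req);
};

static bool fb_ep_enabled;

/* completion handler: takes over freeing once the endpoint is disabled */
static void fback_complete(struct fake_request *req)
{
	if (!fb_ep_enabled) {
		free(req->buf);
		free(req);
		printf("completion freed the request\n");
		return;
	}
	printf("normal completion, request stays in flight\n");
}

/*
 * Pretend dequeue: on "success" (return 0) it runs the completion
 * callback, as the controller would; on failure the caller keeps
 * ownership of the request.
 */
static int ep_dequeue(struct fake_request *req, bool will_succeed)
{
	if (!will_succeed)
		return -1;
	req->complete(req);
	return 0;
}

static void stop_fback_ep(struct fake_request *req, bool dequeue_succeeds)
{
	fb_ep_enabled = false;		/* clear the flag before dequeuing */

	if (ep_dequeue(req, dequeue_succeeds)) {
		/* dequeue failed: completion will not run, free it here */
		free(req->buf);
		free(req);
		printf("stopper freed the request\n");
	}
}

int main(void)
{
	struct fake_request *req = malloc(sizeof(*req));

	req->buf = malloc(16);
	req->complete = fback_complete;
	fb_ep_enabled = true;

	stop_fback_ep(req, true);	/* exactly one side frees the request */
	return 0;
}
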
......@@ -207,7 +207,8 @@ static int renesas_check_rom_state(struct pci_dev *pdev)
return 0;
case RENESAS_ROM_STATUS_NO_RESULT: /* No result yet */
return 0;
dev_dbg(&pdev->dev, "Unknown ROM status ...\n");
return -ENOENT;
case RENESAS_ROM_STATUS_ERROR: /* Error State */
default: /* All other states are marked as "Reserved states" */
......@@ -224,14 +225,6 @@ static int renesas_fw_check_running(struct pci_dev *pdev)
u8 fw_state;
int err;
/* Check if device has ROM and loaded, if so skip everything */
err = renesas_check_rom(pdev);
if (err) { /* we have rom */
err = renesas_check_rom_state(pdev);
if (!err)
return err;
}
/*
* Test if the device is actually needing the firmware. As most
* BIOSes will initialize the device for us. If the device is
......@@ -591,21 +584,39 @@ int renesas_xhci_check_request_fw(struct pci_dev *pdev,
(struct xhci_driver_data *)id->driver_data;
const char *fw_name = driver_data->firmware;
const struct firmware *fw;
bool has_rom;
int err;
/* Check if device has ROM and loaded, if so skip everything */
has_rom = renesas_check_rom(pdev);
if (has_rom) {
err = renesas_check_rom_state(pdev);
if (!err)
return 0;
else if (err != -ENOENT)
has_rom = false;
}
err = renesas_fw_check_running(pdev);
/* Continue ahead, if the firmware is already running. */
if (err == 0)
return 0;
/* no firmware interface available */
if (err != 1)
return err;
return has_rom ? 0 : err;
pci_dev_get(pdev);
err = request_firmware(&fw, fw_name, &pdev->dev);
err = firmware_request_nowarn(&fw, fw_name, &pdev->dev);
pci_dev_put(pdev);
if (err) {
dev_err(&pdev->dev, "request_firmware failed: %d\n", err);
if (has_rom) {
dev_info(&pdev->dev, "failed to load firmware %s, fallback to ROM\n",
fw_name);
return 0;
}
dev_err(&pdev->dev, "failed to load firmware %s: %d\n",
fw_name, err);
return err;
}
......
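
The renesas-xhci hunks above prefer loading firmware when the ROM state is unknown and fall back to an already-present ROM only if the firmware file cannot be loaded. Below is a toy decision table summarizing that flow; the enum, function name, and parameters are invented for illustration (the ROM-error case, where the fallback is also abandoned, is left out for brevity), not the driver's API.

#include <stdio.h>
#include <stdbool.h>

enum action { CONTINUE_WITHOUT_LOADING, LOAD_FIRMWARE, FAIL_PROBE };

static enum action decide(bool has_rom, bool rom_state_ok,
			  bool fw_already_running, bool fw_file_available)
{
	if (has_rom && rom_state_ok)
		return CONTINUE_WITHOUT_LOADING;	/* valid ROM, nothing to do */
	if (fw_already_running)
		return CONTINUE_WITHOUT_LOADING;
	if (fw_file_available)
		return LOAD_FIRMWARE;			/* unknown ROM state: try firmware first */
	/* firmware missing: fall back to the ROM if one is present */
	return has_rom ? CONTINUE_WITHOUT_LOADING : FAIL_PROBE;
}

int main(void)
{
	/* unknown ROM state, firmware file present -> load firmware */
	printf("%d\n", decide(true, false, false, true));
	/* firmware missing but a ROM exists -> fall back to the ROM */
	printf("%d\n", decide(true, false, false, false));
	return 0;
}
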
......@@ -851,7 +851,6 @@ static struct usb_serial_driver ch341_device = {
.owner = THIS_MODULE,
.name = "ch341-uart",
},
.bulk_in_size = 512,
.id_table = id_table,
.num_ports = 1,
.open = ch341_open,
......
......@@ -2074,6 +2074,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(4) | RSVD(5) },
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */
.driver_info = RSVD(6) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
......
......@@ -341,6 +341,7 @@ struct tcpm_port {
bool vbus_source;
bool vbus_charge;
/* Set to true when Discover_Identity Command is expected to be sent in Ready states. */
bool send_discover;
bool op_vsafe5v;
......@@ -370,6 +371,7 @@ struct tcpm_port {
struct hrtimer send_discover_timer;
struct kthread_work send_discover_work;
bool state_machine_running;
/* Set to true when VDM State Machine has following actions. */
bool vdm_sm_running;
struct completion tx_complete;
......@@ -1431,6 +1433,7 @@ static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
/* Set ready, vdm state machine will actually send */
port->vdm_retries = 0;
port->vdm_state = VDM_STATE_READY;
port->vdm_sm_running = true;
mod_vdm_delayed_work(port, 0);
}
......@@ -1673,7 +1676,6 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
rlen = 1;
} else {
tcpm_register_partner_altmodes(port);
port->vdm_sm_running = false;
}
break;
case CMD_ENTER_MODE:
......@@ -1721,14 +1723,12 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
(VDO_SVDM_VERS(svdm_version));
break;
}
port->vdm_sm_running = false;
break;
default:
response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
rlen = 1;
response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
(VDO_SVDM_VERS(svdm_version));
port->vdm_sm_running = false;
break;
}
......@@ -1769,6 +1769,20 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port,
}
if (PD_VDO_SVDM(p[0]) && (adev || tcpm_vdm_ams(port) || port->nr_snk_vdo)) {
/*
* Here a SVDM is received (INIT or RSP or unknown). Set the vdm_sm_running in
* advance because we are dropping the lock but may send VDMs soon.
* For the cases of INIT received:
* - If no response to send, it will be cleared later in this function.
* - If there are responses to send, it will be cleared in the state machine.
* For the cases of RSP received:
* - If no further INIT to send, it will be cleared later in this function.
* - Otherwise, it will be cleared in the state machine if timeout or it will go
* back here until no further INIT to send.
* For the cases of unknown type received:
* - We will send NAK and the flag will be cleared in the state machine.
*/
port->vdm_sm_running = true;
rlen = tcpm_pd_svdm(port, adev, p, cnt, response, &adev_action);
} else {
if (port->negotiated_rev >= PD_REV30)
......@@ -1837,6 +1851,8 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port,
if (rlen > 0)
tcpm_queue_vdm(port, response[0], &response[1], rlen - 1);
else
port->vdm_sm_running = false;
}
static void tcpm_send_vdm(struct tcpm_port *port, u32 vid, int cmd,
......@@ -1902,8 +1918,10 @@ static void vdm_run_state_machine(struct tcpm_port *port)
* if there's traffic or we're not in PDO ready state don't send
* a VDM.
*/
if (port->state != SRC_READY && port->state != SNK_READY)
if (port->state != SRC_READY && port->state != SNK_READY) {
port->vdm_sm_running = false;
break;
}
/* TODO: AMS operation for Unstructured VDM */
if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMDT(vdo_hdr) == CMDT_INIT) {
......@@ -2556,10 +2574,6 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
TYPEC_PWR_MODE_PD,
port->pps_data.active,
port->supply_voltage);
/* Set VDM running flag ASAP */
if (port->data_role == TYPEC_HOST &&
port->send_discover)
port->vdm_sm_running = true;
tcpm_set_state(port, SNK_READY, 0);
} else {
/*
......@@ -2597,14 +2611,10 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
switch (port->state) {
case SNK_NEGOTIATE_CAPABILITIES:
/* USB PD specification, Figure 8-43 */
if (port->explicit_contract) {
if (port->explicit_contract)
next_state = SNK_READY;
if (port->data_role == TYPEC_HOST &&
port->send_discover)
port->vdm_sm_running = true;
} else {
else
next_state = SNK_WAIT_CAPABILITIES;
}
/* Threshold was relaxed before sending Request. Restore it back. */
tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
......@@ -2619,10 +2629,6 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
port->pps_status = (type == PD_CTRL_WAIT ?
-EAGAIN : -EOPNOTSUPP);
if (port->data_role == TYPEC_HOST &&
port->send_discover)
port->vdm_sm_running = true;
/* Threshold was relaxed before sending Request. Restore it back. */
tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
port->pps_data.active,
......@@ -2698,10 +2704,6 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
}
break;
case DR_SWAP_SEND:
if (port->data_role == TYPEC_DEVICE &&
port->send_discover)
port->vdm_sm_running = true;
tcpm_set_state(port, DR_SWAP_CHANGE_DR, 0);
break;
case PR_SWAP_SEND:
......@@ -2739,7 +2741,7 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
PD_MSG_CTRL_NOT_SUPP,
NONE_AMS);
} else {
if (port->vdm_sm_running) {
if (port->send_discover) {
tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
break;
}
......@@ -2755,7 +2757,7 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
PD_MSG_CTRL_NOT_SUPP,
NONE_AMS);
} else {
if (port->vdm_sm_running) {
if (port->send_discover) {
tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
break;
}
......@@ -2764,7 +2766,7 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
}
break;
case PD_CTRL_VCONN_SWAP:
if (port->vdm_sm_running) {
if (port->send_discover) {
tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
break;
}
......@@ -4480,18 +4482,20 @@ static void run_state_machine(struct tcpm_port *port)
/* DR_Swap states */
case DR_SWAP_SEND:
tcpm_pd_send_control(port, PD_CTRL_DR_SWAP);
if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20)
port->send_discover = true;
tcpm_set_state_cond(port, DR_SWAP_SEND_TIMEOUT,
PD_T_SENDER_RESPONSE);
break;
case DR_SWAP_ACCEPT:
tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
/* Set VDM state machine running flag ASAP */
if (port->data_role == TYPEC_DEVICE && port->send_discover)
port->vdm_sm_running = true;
if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20)
port->send_discover = true;
tcpm_set_state_cond(port, DR_SWAP_CHANGE_DR, 0);
break;
case DR_SWAP_SEND_TIMEOUT:
tcpm_swap_complete(port, -ETIMEDOUT);
port->send_discover = false;
tcpm_ams_finish(port);
tcpm_set_state(port, ready_state(port), 0);
break;
......@@ -4503,7 +4507,6 @@ static void run_state_machine(struct tcpm_port *port)
} else {
tcpm_set_roles(port, true, port->pwr_role,
TYPEC_HOST);
port->send_discover = true;
}
tcpm_ams_finish(port);
tcpm_set_state(port, ready_state(port), 0);
......@@ -4646,8 +4649,6 @@ static void run_state_machine(struct tcpm_port *port)
break;
case VCONN_SWAP_SEND_TIMEOUT:
tcpm_swap_complete(port, -ETIMEDOUT);
if (port->data_role == TYPEC_HOST && port->send_discover)
port->vdm_sm_running = true;
tcpm_set_state(port, ready_state(port), 0);
break;
case VCONN_SWAP_START:
......@@ -4663,14 +4664,10 @@ static void run_state_machine(struct tcpm_port *port)
case VCONN_SWAP_TURN_ON_VCONN:
tcpm_set_vconn(port, true);
tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
if (port->data_role == TYPEC_HOST && port->send_discover)
port->vdm_sm_running = true;
tcpm_set_state(port, ready_state(port), 0);
break;
case VCONN_SWAP_TURN_OFF_VCONN:
tcpm_set_vconn(port, false);
if (port->data_role == TYPEC_HOST && port->send_discover)
port->vdm_sm_running = true;
tcpm_set_state(port, ready_state(port), 0);
break;
......@@ -4678,8 +4675,6 @@ static void run_state_machine(struct tcpm_port *port)
case PR_SWAP_CANCEL:
case VCONN_SWAP_CANCEL:
tcpm_swap_complete(port, port->swap_status);
if (port->data_role == TYPEC_HOST && port->send_discover)
port->vdm_sm_running = true;
if (port->pwr_role == TYPEC_SOURCE)
tcpm_set_state(port, SRC_READY, 0);
else
......@@ -5029,9 +5024,6 @@ static void _tcpm_pd_vbus_on(struct tcpm_port *port)
switch (port->state) {
case SNK_TRANSITION_SINK_VBUS:
port->explicit_contract = true;
/* Set the VDM flag ASAP */
if (port->data_role == TYPEC_HOST && port->send_discover)
port->vdm_sm_running = true;
tcpm_set_state(port, SNK_READY, 0);
break;
case SNK_DISCOVERY:
......@@ -5426,15 +5418,18 @@ static void tcpm_send_discover_work(struct kthread_work *work)
if (!port->send_discover)
goto unlock;
if (port->data_role == TYPEC_DEVICE && port->negotiated_rev < PD_REV30) {
port->send_discover = false;
goto unlock;
}
/* Retry if the port is not idle */
if ((port->state != SRC_READY && port->state != SNK_READY) || port->vdm_sm_running) {
mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
goto unlock;
}
/* Only send the Message if the port is host for PD rev2.0 */
if (port->data_role == TYPEC_HOST || port->negotiated_rev > PD_REV20)
tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0);
tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0);
unlock:
mutex_unlock(&port->lock);
......
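
After the tcpm hunks above, send_discover tracks whether a Discover_Identity still has to go out, while vdm_sm_running is raised only when the VDM state machine actually has work queued; incoming swap requests are answered with WAIT based on send_discover, and the discover work item retries while the port is busy. The following is a toy sketch of those two gates; the flag names are the driver's, but the helper functions and return strings are invented for illustration.

#include <stdio.h>
#include <stdbool.h>

static bool send_discover;	/* Discover_Identity still pending */
static bool vdm_sm_running;	/* VDM state machine has queued work */
static bool port_ready;		/* SRC_READY or SNK_READY */

/* incoming DR/PR/VCONN swap: answer WAIT while Discover_Identity is pending */
static const char *handle_swap_request(void)
{
	if (send_discover)
		return "queue PD_MSG_CTRL_WAIT";
	return "handle the swap";
}

/* discover work item: send the VDM only once the port is idle */
static const char *send_discover_work(void)
{
	if (!send_discover)
		return "nothing to do";
	if (!port_ready || vdm_sm_running)
		return "retry later";
	return "send CMD_DISCOVER_IDENT";
}

int main(void)
{
	send_discover = true;
	vdm_sm_running = false;
	port_ready = true;

	printf("%s\n", handle_swap_request());	/* -> queue PD_MSG_CTRL_WAIT */
	printf("%s\n", send_discover_work());	/* -> send CMD_DISCOVER_IDENT */
	return 0;
}
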