Commit 4deece6c authored by David S. Miller

Merge branch 'netvsc-bug-fixes-and-cleanups'

Stephen Hemminger says:

====================
netvsc: bug fixes and cleanups

These fix NAPI issues and bugs found during shutdown testing.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 6069f3fb ce12b810
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -686,7 +686,7 @@ struct net_device_context {
 	/* point back to our device context */
 	struct hv_device *device_ctx;
 	/* netvsc_device */
-	struct netvsc_device *nvdev;
+	struct netvsc_device __rcu *nvdev;
 	/* reconfigure work */
 	struct delayed_work dwork;
 	/* last reconfig time */
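
The __rcu annotation makes sparse enforce that this pointer is only touched through the RCU accessors. A minimal sketch of the read side, with a hypothetical helper name that is not part of the patch:

/* Hypothetical helper illustrating how an __rcu pointer such as nvdev
 * is read: rcu_read_lock() opens a read-side critical section, and the
 * object returned by rcu_dereference() stays valid until the matching
 * rcu_read_unlock().
 */
static u32 example_read_num_chn(struct net_device_context *ndc)
{
	struct netvsc_device *nvdev;
	u32 num_chn = 0;

	rcu_read_lock();
	nvdev = rcu_dereference(ndc->nvdev);
	if (nvdev)
		num_chn = nvdev->num_chn;
	rcu_read_unlock();

	return num_chn;
}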
@@ -708,9 +708,6 @@ struct net_device_context {
 	u32 speed;
 	struct netvsc_ethtool_stats eth_stats;
 
-	/* the device is going away */
-	bool start_remove;
-
 	/* State to manage the associated VF interface. */
 	struct net_device __rcu *vf_netdev;
@@ -723,6 +720,7 @@ struct net_device_context {
 /* Per channel data */
 struct netvsc_channel {
 	struct vmbus_channel *channel;
+	const struct vmpacket_descriptor *desc;
 	struct napi_struct napi;
 	struct multi_send_data msd;
 	struct multi_recv_comp mrc;
@@ -763,8 +761,8 @@ struct netvsc_device {
 	u32 max_chn;
 	u32 num_chn;
-	spinlock_t sc_lock; /* Protects num_sc_offered variable */
-	u32 num_sc_offered;
+
+	refcount_t sc_offered;
 
 	/* Holds rndis device info */
 	void *extension;
@@ -779,6 +777,8 @@ struct netvsc_device {
 	atomic_t open_cnt;
 
 	struct netvsc_channel chan_table[VRSS_CHANNEL_MAX];
+
+	struct rcu_head rcu;
 };
 
 static inline struct netvsc_device *

--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -80,8 +80,10 @@ static struct netvsc_device *alloc_net_device(void)
 	return net_device;
 }
 
-static void free_netvsc_device(struct netvsc_device *nvdev)
+static void free_netvsc_device(struct rcu_head *head)
 {
+	struct netvsc_device *nvdev
+		= container_of(head, struct netvsc_device, rcu);
 	int i;
 
 	for (i = 0; i < VRSS_CHANNEL_MAX; i++)
@@ -90,6 +92,10 @@ static void free_netvsc_device(struct netvsc_device *nvdev)
 	kfree(nvdev);
 }
 
+static void free_netvsc_device_rcu(struct netvsc_device *nvdev)
+{
+	call_rcu(&nvdev->rcu, free_netvsc_device);
+}
 
 static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
 {
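
The added free_netvsc_device_rcu() is the standard deferred-free idiom: embed a struct rcu_head in the object and recover the enclosing structure in the callback with container_of(). A generic sketch of the same pattern, with hypothetical names:

struct foo {
	int data;
	struct rcu_head rcu;	/* embedded so call_rcu() needs no allocation */
};

static void foo_free(struct rcu_head *head)
{
	struct foo *f = container_of(head, struct foo, rcu);

	kfree(f);	/* runs only after all pre-existing RCU readers finish */
}

static void foo_release(struct foo *f)
{
	call_rcu(&f->rcu, foo_free);	/* safe even from atomic context */
}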
@@ -551,7 +557,7 @@ void netvsc_device_remove(struct hv_device *device)
 
 	netvsc_disconnect_vsp(device);
 
-	net_device_ctx->nvdev = NULL;
+	RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
 
 	/*
 	 * At this point, no one should be accessing net_device
@@ -566,7 +572,7 @@ void netvsc_device_remove(struct hv_device *device)
 		napi_disable(&net_device->chan_table[i].napi);
 
 	/* Release all resources */
-	free_netvsc_device(net_device);
+	free_netvsc_device_rcu(net_device);
 }
 
 #define RING_AVAIL_PERCENT_HIWATER 20
@@ -599,7 +605,6 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
 {
 	struct sk_buff *skb = (struct sk_buff *)(unsigned long)desc->trans_id;
 	struct net_device *ndev = hv_get_drvdata(device);
-	struct net_device_context *net_device_ctx = netdev_priv(ndev);
 	struct vmbus_channel *channel = device->channel;
 	u16 q_idx = 0;
 	int queue_sends;
@@ -633,7 +638,6 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
 		wake_up(&net_device->wait_drain);
 
 	if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
-	    !net_device_ctx->start_remove &&
 	    (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER ||
 	     queue_sends < 1))
 		netif_tx_wake_queue(netdev_get_tx_queue(ndev, q_idx));
@@ -702,8 +706,7 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
 		packet->page_buf_cnt;
 
 	/* Add padding */
-	if (skb && skb->xmit_more && remain &&
-	    !packet->cp_partial) {
+	if (skb->xmit_more && remain && !packet->cp_partial) {
 		padding = net_device->pkt_align - remain;
 		rndis_msg->msg_len += padding;
 		packet->total_data_buflen += padding;
@@ -861,9 +864,7 @@ int netvsc_send(struct hv_device *device,
 	if (msdp->pkt)
 		msd_len = msdp->pkt->total_data_buflen;
 
-	try_batch = (skb != NULL) && msd_len > 0 && msdp->count <
-		    net_device->max_pkt;
-
+	try_batch = msd_len > 0 && msdp->count < net_device->max_pkt;
 	if (try_batch && msd_len + pktlen + net_device->pkt_align <
 	    net_device->send_section_size) {
 		section_index = msdp->pkt->send_buf_index;
@@ -873,7 +874,7 @@ int netvsc_send(struct hv_device *device,
 		section_index = msdp->pkt->send_buf_index;
 		packet->cp_partial = true;
 
-	} else if ((skb != NULL) && pktlen + net_device->pkt_align <
+	} else if (pktlen + net_device->pkt_align <
 		   net_device->send_section_size) {
 		section_index = netvsc_get_next_send_section(net_device);
 		if (section_index != NETVSC_INVALID_INDEX) {
@@ -1173,7 +1174,6 @@ static int netvsc_process_raw_pkt(struct hv_device *device,
 				  struct vmbus_channel *channel,
 				  struct netvsc_device *net_device,
 				  struct net_device *ndev,
-				  u64 request_id,
 				  const struct vmpacket_descriptor *desc)
 {
 	struct net_device_context *net_device_ctx = netdev_priv(ndev);
@@ -1195,7 +1195,7 @@ static int netvsc_process_raw_pkt(struct hv_device *device,
 	default:
 		netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
-			   desc->type, request_id);
+			   desc->type, desc->trans_id);
 		break;
 	}
@@ -1222,28 +1222,20 @@ int netvsc_poll(struct napi_struct *napi, int budget)
 	u16 q_idx = channel->offermsg.offer.sub_channel_index;
 	struct net_device *ndev = hv_get_drvdata(device);
 	struct netvsc_device *net_device = net_device_to_netvsc_device(ndev);
-	const struct vmpacket_descriptor *desc;
 	int work_done = 0;
 
-	desc = hv_pkt_iter_first(channel);
-	while (desc) {
-		int count;
-
-		count = netvsc_process_raw_pkt(device, channel, net_device,
-					       ndev, desc->trans_id, desc);
-		work_done += count;
-		desc = __hv_pkt_iter_next(channel, desc);
-
-		/* If receive packet budget is exhausted, reschedule */
-		if (work_done >= budget) {
-			work_done = budget;
-			break;
-		}
+	/* If starting a new interval */
+	if (!nvchan->desc)
+		nvchan->desc = hv_pkt_iter_first(channel);
+
+	while (nvchan->desc && work_done < budget) {
+		work_done += netvsc_process_raw_pkt(device, channel, net_device,
+						    ndev, nvchan->desc);
+		nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
 	}
-	hv_pkt_iter_close(channel);
 
-	/* If budget was not exhausted and
-	 * not doing busy poll
+	/* If receive ring was exhausted
+	 * and not doing busy poll
 	 * then re-enable host interrupts
 	 * and reschedule if ring is not empty.
 	 */
@@ -1253,7 +1245,9 @@ int netvsc_poll(struct napi_struct *napi, int budget)
 		napi_reschedule(napi);
 
 	netvsc_chk_recv_comp(net_device, channel, q_idx);
-	return work_done;
+
+	/* Driver may overshoot since multiple packets per descriptor */
+	return min(work_done, budget);
 }
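
The reworked poll routine now follows the standard NAPI contract: process at most `budget` packets per call, remember where the iterator stopped, and never report more work than the budget. A generic sketch of that contract, using hypothetical driver helpers rather than netvsc functions:

/* Generic NAPI poll shape; my_process_one(), my_ring_empty() and
 * my_enable_irq() are hypothetical helpers, not netvsc APIs.
 */
static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_chan *chan = container_of(napi, struct my_chan, napi);
	int work_done = 0;

	while (work_done < budget && !my_ring_empty(chan))
		work_done += my_process_one(chan);

	/* Leaving polling mode is only legal when less than the full
	 * budget was consumed; napi_complete_done() returns true if the
	 * instance was actually descheduled.
	 */
	if (work_done < budget && napi_complete_done(napi, work_done))
		my_enable_irq(chan);

	return work_done;	/* must never exceed budget */
}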
 
 /* Call back when data is available in host ring buffer.
@@ -1263,10 +1257,12 @@ void netvsc_channel_cb(void *context)
 {
 	struct netvsc_channel *nvchan = context;
 
-	/* disable interrupts from host */
-	hv_begin_read(&nvchan->channel->inbound);
+	if (napi_schedule_prep(&nvchan->napi)) {
+		/* disable interrupts from host */
+		hv_begin_read(&nvchan->channel->inbound);
 
-	napi_schedule(&nvchan->napi);
+		__napi_schedule(&nvchan->napi);
+	}
 }
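
Guarding with napi_schedule_prep() makes the callback idempotent: when NAPI is already scheduled, the host-interrupt masking is skipped rather than repeated. The same pattern in a conventional interrupt handler, sketched with hypothetical names:

static irqreturn_t my_intr(int irq, void *data)
{
	struct my_chan *chan = data;

	/* napi_schedule_prep() returns false when polling is already
	 * scheduled, so the mask/schedule pair runs exactly once per
	 * poll interval.
	 */
	if (napi_schedule_prep(&chan->napi)) {
		my_disable_device_irq(chan);	/* hypothetical helper */
		__napi_schedule(&chan->napi);
	}

	return IRQ_HANDLED;
}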
 
 /*
@@ -1325,9 +1321,7 @@ int netvsc_device_add(struct hv_device *device,
 	/* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
 	 * populated.
 	 */
-	wmb();
-
-	net_device_ctx->nvdev = net_device;
+	rcu_assign_pointer(net_device_ctx->nvdev, net_device);
 
 	/* Connect with the NetVsp */
 	ret = netvsc_connect_vsp(device);
@@ -1346,7 +1340,7 @@ int netvsc_device_add(struct hv_device *device,
 	vmbus_close(device->channel);
 
 cleanup:
-	free_netvsc_device(net_device);
+	free_netvsc_device(&net_device->rcu);
 
 	return ret;
 }
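
rcu_assign_pointer() subsumes the explicit wmb() it replaces: it carries the release barrier that orders all initialization stores before the pointer becomes visible to readers. A minimal sketch of publish ordering, reusing the hypothetical struct foo from the earlier sketch:

static struct foo __rcu *gp;	/* globally published pointer */

static int publish_foo(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return -ENOMEM;

	f->data = 42;			/* initialization stores ... */
	rcu_assign_pointer(gp, f);	/* ... ordered before publication */
	return 0;
}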
[This diff is collapsed in the original view.]

--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -819,16 +819,14 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
 {
 	struct rndis_request *request;
 	struct rndis_set_request *set;
-	struct rndis_set_complete *set_complete;
 	int ret;
 
 	request = get_rndis_request(dev, RNDIS_MSG_SET,
 			RNDIS_MESSAGE_SIZE(struct rndis_set_request) +
 			sizeof(u32));
-	if (!request) {
-		ret = -ENOMEM;
-		goto cleanup;
-	}
+	if (!request)
+		return -ENOMEM;
 
 	/* Setup the rndis set */
 	set = &request->request_msg.msg.set_req;
@@ -840,15 +838,11 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
 	       &new_filter, sizeof(u32));
 
 	ret = rndis_filter_send_request(dev, request);
-	if (ret != 0)
-		goto cleanup;
-
-	wait_for_completion(&request->wait_event);
-
-	set_complete = &request->response_msg.msg.set_complete;
-
-cleanup:
-	if (request)
-		put_rndis_request(dev, request);
+	if (ret == 0)
+		wait_for_completion(&request->wait_event);
+
+	put_rndis_request(dev, request);
 
 	return ret;
 }
@@ -926,8 +920,6 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
 	struct rndis_halt_request *halt;
 	struct net_device_context *net_device_ctx = netdev_priv(dev->ndev);
 	struct netvsc_device *nvdev = net_device_ctx->nvdev;
-	struct hv_device *hdev = net_device_ctx->device_ctx;
-	ulong flags;
 
 	/* Attempt to do a rndis device halt */
 	request = get_rndis_request(dev, RNDIS_MSG_HALT,
@@ -945,9 +937,10 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
 	dev->state = RNDIS_DEV_UNINITIALIZED;
 
 cleanup:
-	spin_lock_irqsave(&hdev->channel->inbound_lock, flags);
 	nvdev->destroy = true;
-	spin_unlock_irqrestore(&hdev->channel->inbound_lock, flags);
+
+	/* Force flag to be ordered before waiting */
+	wmb();
 
 	/* Wait for all send completions */
 	wait_event(nvdev->wait_drain, netvsc_device_idle(nvdev));
@@ -997,7 +990,6 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
 	struct netvsc_device *nvscdev = net_device_to_netvsc_device(ndev);
 	u16 chn_index = new_sc->offermsg.offer.sub_channel_index;
 	struct netvsc_channel *nvchan;
-	unsigned long flags;
 	int ret;
 
 	if (chn_index >= nvscdev->num_chn)
@@ -1019,10 +1011,7 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
 
 	napi_enable(&nvchan->napi);
 
-	spin_lock_irqsave(&nvscdev->sc_lock, flags);
-	nvscdev->num_sc_offered--;
-	spin_unlock_irqrestore(&nvscdev->sc_lock, flags);
-
-	if (nvscdev->num_sc_offered == 0)
+	if (refcount_dec_and_test(&nvscdev->sc_offered))
 		complete(&nvscdev->channel_init_wait);
 }
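
refcount_t turns the decrement-and-check into a single atomic operation, so the spinlock that protected the counter can go away entirely. A generic sketch of counting down outstanding offers, with hypothetical names mirroring the sc_offered usage above:

struct my_dev {
	refcount_t pending;		/* outstanding sub-channel offers */
	struct completion all_done;
};

static void my_arm(struct my_dev *dev, unsigned int n)
{
	refcount_set(&dev->pending, n);	/* armed before events can fire */
}

static void my_event_done(struct my_dev *dev)
{
	/* Atomic decrement; true exactly once, when the count hits zero */
	if (refcount_dec_and_test(&dev->pending))
		complete(&dev->all_done);
}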
@@ -1039,12 +1028,9 @@ int rndis_filter_device_add(struct hv_device *dev,
 	struct ndis_recv_scale_cap rsscap;
 	u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
 	unsigned int gso_max_size = GSO_MAX_SIZE;
-	u32 mtu, size;
-	u32 num_rss_qs;
-	u32 sc_delta;
+	u32 mtu, size, num_rss_qs;
 	const struct cpumask *node_cpu_mask;
 	u32 num_possible_rss_qs;
-	unsigned long flags;
 	int i, ret;
 
 	rndis_device = get_rndis_device();
@@ -1067,7 +1053,7 @@ int rndis_filter_device_add(struct hv_device *dev,
 	net_device->max_chn = 1;
 	net_device->num_chn = 1;
 
-	spin_lock_init(&net_device->sc_lock);
+	refcount_set(&net_device->sc_offered, 0);
 
 	net_device->extension = rndis_device;
 	rndis_device->ndev = net;
@@ -1181,34 +1167,30 @@ int rndis_filter_device_add(struct hv_device *dev,
 	if (ret || rsscap.num_recv_que < 2)
 		goto out;
 
-	net_device->max_chn = min_t(u32, VRSS_CHANNEL_MAX, rsscap.num_recv_que);
-
-	num_rss_qs = min(device_info->max_num_vrss_chns, net_device->max_chn);
-
 	/*
 	 * We will limit the VRSS channels to the number CPUs in the NUMA node
 	 * the primary channel is currently bound to.
+	 *
+	 * This also guarantees that num_possible_rss_qs <= num_online_cpus
 	 */
 	node_cpu_mask = cpumask_of_node(cpu_to_node(dev->channel->target_cpu));
-	num_possible_rss_qs = cpumask_weight(node_cpu_mask);
+	num_possible_rss_qs = min_t(u32, cpumask_weight(node_cpu_mask),
+				    rsscap.num_recv_que);
 
-	/* We will use the given number of channels if available. */
-	if (device_info->num_chn && device_info->num_chn < net_device->max_chn)
-		net_device->num_chn = device_info->num_chn;
-	else
-		net_device->num_chn = min(num_possible_rss_qs, num_rss_qs);
+	net_device->max_chn = min_t(u32, VRSS_CHANNEL_MAX, num_possible_rss_qs);
 
-	num_rss_qs = net_device->num_chn - 1;
+	/* We will use the given number of channels if available. */
+	net_device->num_chn = min(net_device->max_chn, device_info->num_chn);
 
 	for (i = 0; i < ITAB_NUM; i++)
 		rndis_device->ind_table[i] = ethtool_rxfh_indir_default(i,
 						net_device->num_chn);
 
-	net_device->num_sc_offered = num_rss_qs;
-
-	if (net_device->num_chn == 1)
-		goto out;
+	num_rss_qs = net_device->num_chn - 1;
+	if (num_rss_qs == 0)
+		return 0;
+
+	refcount_set(&net_device->sc_offered, num_rss_qs);
 
 	vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
 
 	init_packet = &net_device->channel_init_pkt;
@@ -1224,32 +1206,23 @@ int rndis_filter_device_add(struct hv_device *dev,
 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 	if (ret)
 		goto out;
+
+	wait_for_completion(&net_device->channel_init_wait);
 
-	if (init_packet->msg.v5_msg.subchn_comp.status !=
-	    NVSP_STAT_SUCCESS) {
+	if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
 		ret = -ENODEV;
 		goto out;
 	}
-	wait_for_completion(&net_device->channel_init_wait);
 
 	net_device->num_chn = 1 +
 		init_packet->msg.v5_msg.subchn_comp.num_subchannels;
 
-	ret = rndis_filter_set_rss_param(rndis_device, netvsc_hash_key,
-					 net_device->num_chn);
-
-	/*
-	 * Set the number of sub-channels to be received.
-	 */
-	spin_lock_irqsave(&net_device->sc_lock, flags);
-	sc_delta = num_rss_qs - (net_device->num_chn - 1);
-	net_device->num_sc_offered -= sc_delta;
-	spin_unlock_irqrestore(&net_device->sc_lock, flags);
+	/* ignore failures from setting rss parameters, still have channels */
+	rndis_filter_set_rss_param(rndis_device, netvsc_hash_key,
				   net_device->num_chn);
 
 out:
 	if (ret) {
 		net_device->max_chn = 1;
 		net_device->num_chn = 1;
-		net_device->num_sc_offered = 0;
 	}
 
 	return 0; /* return 0 because primary channel can be used alone */
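
Note the reordering above: wait_for_completion() now precedes the status check, since the subchn_comp fields are only filled in by the completion handler. The general rule, sketched with hypothetical names:

static int send_and_check(struct my_dev *dev)
{
	reinit_completion(&dev->done);
	my_send_request(dev);		/* response arrives asynchronously */

	/* The response buffer is only stable once the handler has
	 * signalled completion; reading status before this is a race.
	 */
	wait_for_completion(&dev->done);

	return dev->resp_status == MY_STAT_SUCCESS ? 0 : -ENODEV;
}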
@@ -1264,12 +1237,6 @@ void rndis_filter_device_remove(struct hv_device *dev,
 {
 	struct rndis_device *rndis_dev = net_dev->extension;
 
-	/* If not all subchannel offers are complete, wait for them until
-	 * completion to avoid race.
-	 */
-	if (net_dev->num_sc_offered > 0)
-		wait_for_completion(&net_dev->channel_init_wait);
-
 	/* Halt and release the rndis device */
 	rndis_filter_halt_device(rndis_dev);