Commit 0dc82fa5 authored by Linus Torvalds

Merge tag 'char-misc-4.11-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc

Pull char/misc driver fixes from Greg KH:
 "A smattering of different small fixes for some random driver
  subsystems. Nothing all that major, just resolutions for reported
  issues and bugs.

  All have been in linux-next with no reported issues"

* tag 'char-misc-4.11-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (21 commits)
  extcon: int3496: Set the id pin to direction-input if necessary
  extcon: int3496: Use gpiod_get instead of gpiod_get_index
  extcon: int3496: Add dependency on X86 as it's Intel specific
  extcon: int3496: Add GPIO ACPI mapping table
  extcon: int3496: Rename GPIO pins in accordance with binding
  vmw_vmci: handle the return value from pci_alloc_irq_vectors correctly
  ppdev: fix registering same device name
  parport: fix attempt to write duplicate procfiles
  auxdisplay: img-ascii-lcd: add missing sentinel entry in img_ascii_lcd_matches
  Drivers: hv: vmbus: Don't leak memory when a channel is rescinded
  Drivers: hv: vmbus: Don't leak channel ids
  Drivers: hv: util: don't forget to init host_ts.lock
  Drivers: hv: util: move waiting for release to hv_utils_transport itself
  vmbus: remove hv_event_tasklet_disable/enable
  vmbus: use rcu for per-cpu channel list
  mei: don't wait for os version message reply
  mei: fix deadlock on mei reset
  intel_th: pci: Add Gemini Lake support
  intel_th: pci: Add Denverton SOC support
  intel_th: Don't leak module refcount on failure to activate
  ...
parents 9e54ef9d 5c1724c4
@@ -20,3 +20,8 @@ Index 1: The output gpio for enabling Vbus output from the device to the otg
 Index 2: The output gpio for muxing of the data pins between the USB host and
          the USB peripheral controller, write 1 to mux to the peripheral
          controller
+
+There is a mapping between indices and GPIO connection IDs as follows:
+    id      index 0
+    vbus    index 1
+    mux     index 2
...
@@ -218,6 +218,7 @@ static const struct of_device_id img_ascii_lcd_matches[] = {
     { .compatible = "img,boston-lcd", .data = &boston_config },
     { .compatible = "mti,malta-lcd", .data = &malta_config },
     { .compatible = "mti,sead3-lcd", .data = &sead3_config },
+    { /* sentinel */ }
 };
 
 /**
...
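The auxdisplay fix is the classic missing-sentinel bug: of_match_node() and friends walk an of_device_id table until they reach an all-zero entry, so an unterminated table walks off the end into whatever memory follows. A minimal sketch of the required shape (driver and config names are illustrative, not from this commit):

    #include <linux/module.h>
    #include <linux/of.h>

    struct example_cfg { unsigned int width; };

    static const struct example_cfg example_config = { .width = 16 };

    static const struct of_device_id example_lcd_matches[] = {
        { .compatible = "vendor,example-lcd", .data = &example_config },
        { /* sentinel: the zeroed entry terminates the table walk */ }
    };
    MODULE_DEVICE_TABLE(of, example_lcd_matches);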
@@ -84,11 +84,14 @@ struct pp_struct {
     struct ieee1284_info state;
     struct ieee1284_info saved_state;
     long default_inactivity;
+    int index;
 };
 
 /* should we use PARDEVICE_MAX here? */
 static struct device *devices[PARPORT_MAX];
 
+static DEFINE_IDA(ida_index);
+
 /* pp_struct.flags bitfields */
 #define PP_CLAIMED    (1<<0)
 #define PP_EXCL       (1<<1)
@@ -290,7 +293,7 @@ static int register_device(int minor, struct pp_struct *pp)
     struct pardevice *pdev = NULL;
     char *name;
     struct pardev_cb ppdev_cb;
-    int rc = 0;
+    int rc = 0, index;
 
     name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor);
     if (name == NULL)
@@ -303,20 +306,23 @@ static int register_device(int minor, struct pp_struct *pp)
         goto err;
     }
 
+    index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL);
     memset(&ppdev_cb, 0, sizeof(ppdev_cb));
     ppdev_cb.irq_func = pp_irq;
     ppdev_cb.flags = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0;
     ppdev_cb.private = pp;
-    pdev = parport_register_dev_model(port, name, &ppdev_cb, minor);
+    pdev = parport_register_dev_model(port, name, &ppdev_cb, index);
     parport_put_port(port);
 
     if (!pdev) {
         pr_warn("%s: failed to register device!\n", name);
         rc = -ENXIO;
+        ida_simple_remove(&ida_index, index);
         goto err;
     }
 
     pp->pdev = pdev;
+    pp->index = index;
     dev_dbg(&pdev->dev, "registered pardevice\n");
 err:
     kfree(name);
@@ -755,6 +761,7 @@ static int pp_release(struct inode *inode, struct file *file)
     if (pp->pdev) {
         parport_unregister_device(pp->pdev);
+        ida_simple_remove(&ida_index, pp->index);
         pp->pdev = NULL;
         pr_debug(CHRDEV "%x: unregistered pardevice\n", minor);
     }
...
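For context, the ppdev fix switches the pardevice number from the open()-time minor to an IDA-allocated index, so two opens can no longer race to register the same device name. The IDA idiom it relies on, in a minimal sketch (names illustrative; the ida_simple_* API is current as of 4.11):

    #include <linux/idr.h>

    static DEFINE_IDA(example_ida);

    static int example_register(void)
    {
        /* hand out the lowest free id in [0, INT_MAX) */
        int index = ida_simple_get(&example_ida, 0, 0, GFP_KERNEL);

        if (index < 0)
            return index;   /* -ENOMEM or -ENOSPC */

        /* ... use 'index' as a unique instance number ... */
        return index;
    }

    static void example_unregister(int index)
    {
        ida_simple_remove(&example_ida, index); /* return the id */
    }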
@@ -44,7 +44,7 @@ config EXTCON_GPIO
 
 config EXTCON_INTEL_INT3496
     tristate "Intel INT3496 ACPI device extcon driver"
-    depends on GPIOLIB && ACPI
+    depends on GPIOLIB && ACPI && (X86 || COMPILE_TEST)
     help
       Say Y here to enable extcon support for USB OTG ports controlled by
       an Intel INT3496 ACPI device.
...
@@ -45,6 +45,17 @@ static const unsigned int int3496_cable[] = {
     EXTCON_NONE,
 };
 
+static const struct acpi_gpio_params id_gpios = { INT3496_GPIO_USB_ID, 0, false };
+static const struct acpi_gpio_params vbus_gpios = { INT3496_GPIO_VBUS_EN, 0, false };
+static const struct acpi_gpio_params mux_gpios = { INT3496_GPIO_USB_MUX, 0, false };
+
+static const struct acpi_gpio_mapping acpi_int3496_default_gpios[] = {
+    { "id-gpios", &id_gpios, 1 },
+    { "vbus-gpios", &vbus_gpios, 1 },
+    { "mux-gpios", &mux_gpios, 1 },
+    { },
+};
+
 static void int3496_do_usb_id(struct work_struct *work)
 {
     struct int3496_data *data =
@@ -83,6 +94,13 @@ static int int3496_probe(struct platform_device *pdev)
     struct int3496_data *data;
     int ret;
 
+    ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(dev),
+                                    acpi_int3496_default_gpios);
+    if (ret) {
+        dev_err(dev, "can't add GPIO ACPI mapping\n");
+        return ret;
+    }
+
     data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
     if (!data)
         return -ENOMEM;
@@ -90,30 +108,27 @@ static int int3496_probe(struct platform_device *pdev)
     data->dev = dev;
     INIT_DELAYED_WORK(&data->work, int3496_do_usb_id);
 
-    data->gpio_usb_id = devm_gpiod_get_index(dev, "id",
-                                             INT3496_GPIO_USB_ID,
-                                             GPIOD_IN);
+    data->gpio_usb_id = devm_gpiod_get(dev, "id", GPIOD_IN);
     if (IS_ERR(data->gpio_usb_id)) {
         ret = PTR_ERR(data->gpio_usb_id);
         dev_err(dev, "can't request USB ID GPIO: %d\n", ret);
         return ret;
+    } else if (gpiod_get_direction(data->gpio_usb_id) != GPIOF_DIR_IN) {
+        dev_warn(dev, FW_BUG "USB ID GPIO not in input mode, fixing\n");
+        gpiod_direction_input(data->gpio_usb_id);
     }
 
     data->usb_id_irq = gpiod_to_irq(data->gpio_usb_id);
-    if (data->usb_id_irq <= 0) {
+    if (data->usb_id_irq < 0) {
         dev_err(dev, "can't get USB ID IRQ: %d\n", data->usb_id_irq);
-        return -EINVAL;
+        return data->usb_id_irq;
     }
 
-    data->gpio_vbus_en = devm_gpiod_get_index(dev, "vbus en",
-                                              INT3496_GPIO_VBUS_EN,
-                                              GPIOD_ASIS);
+    data->gpio_vbus_en = devm_gpiod_get(dev, "vbus", GPIOD_ASIS);
     if (IS_ERR(data->gpio_vbus_en))
         dev_info(dev, "can't request VBUS EN GPIO\n");
 
-    data->gpio_usb_mux = devm_gpiod_get_index(dev, "usb mux",
-                                              INT3496_GPIO_USB_MUX,
-                                              GPIOD_ASIS);
+    data->gpio_usb_mux = devm_gpiod_get(dev, "mux", GPIOD_ASIS);
     if (IS_ERR(data->gpio_usb_mux))
         dev_info(dev, "can't request USB MUX GPIO\n");
@@ -154,6 +169,8 @@ static int int3496_remove(struct platform_device *pdev)
     devm_free_irq(&pdev->dev, data->usb_id_irq, data);
     cancel_delayed_work_sync(&data->work);
 
+    acpi_dev_remove_driver_gpios(ACPI_COMPANION(&pdev->dev));
+
     return 0;
 }
...
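The int3496 changes follow the standard ACPI GPIO mapping recipe: register an acpi_gpio_mapping table that ties connection IDs ("id", "vbus", "mux") to _CRS GpioIo resource indices, after which a plain devm_gpiod_get(dev, "id", ...) resolves through the table and the fragile index-based lookups go away. A condensed sketch of the recipe (single pin, error paths trimmed; names illustrative):

    #include <linux/acpi.h>
    #include <linux/gpio/consumer.h>

    /* fields: { crs_entry_index, line_index, active_low } */
    static const struct acpi_gpio_params id_gpio = { 0, 0, false };

    static const struct acpi_gpio_mapping example_gpios[] = {
        { "id-gpios", &id_gpio, 1 },
        { } /* sentinel */
    };

    static int example_probe(struct device *dev)
    {
        struct gpio_desc *id;
        int ret;

        ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(dev), example_gpios);
        if (ret)
            return ret;

        /* "id" now resolves via the mapping registered above */
        id = devm_gpiod_get(dev, "id", GPIOD_IN);
        if (IS_ERR(id)) {
            acpi_dev_remove_driver_gpios(ACPI_COMPANION(dev));
            return PTR_ERR(id);
        }
        return 0;
    }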
@@ -502,12 +502,15 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
     wait_for_completion(&info->waitevent);
 
+    if (channel->rescind) {
+        ret = -ENODEV;
+        goto post_msg_err;
+    }
+
 post_msg_err:
-    /*
-     * If the channel has been rescinded;
-     * we will be awakened by the rescind
-     * handler; set the error code to zero so we don't leak memory.
-     */
-    if (channel->rescind)
-        ret = 0;
-
     spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
     list_del(&info->msglistentry);
     spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
@@ -530,15 +533,13 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
     int ret;
 
     /*
-     * vmbus_on_event(), running in the tasklet, can race
+     * vmbus_on_event(), running in the per-channel tasklet, can race
      * with vmbus_close_internal() in the case of SMP guest, e.g., when
      * the former is accessing channel->inbound.ring_buffer, the latter
-     * could be freeing the ring_buffer pages.
-     *
-     * To resolve the race, we can serialize them by disabling the
-     * tasklet when the latter is running here.
+     * could be freeing the ring_buffer pages, so here we must stop it
+     * first.
      */
-    hv_event_tasklet_disable(channel);
+    tasklet_disable(&channel->callback_event);
 
     /*
      * In case a device driver's probe() fails (e.g.,
@@ -605,8 +606,6 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
         get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
 
 out:
-    hv_event_tasklet_enable(channel);
-
     return ret;
 }
...
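The close-path change leans on tasklet_disable() being a synchronous barrier: it bumps the tasklet's disable count and waits for any running instance to finish, so once it returns the channel callback can no longer be touching the ring buffer that is about to be freed. Shape of the pattern, as a hedged sketch:

    #include <linux/interrupt.h>

    static void example_teardown(struct tasklet_struct *t)
    {
        tasklet_disable(t);  /* waits out a running handler, blocks new runs */

        /*
         * Safe: the handler cannot run concurrently here, so resources
         * it would have read (e.g. ring buffer pages) may be freed.
         */

        tasklet_enable(t);   /* only if the tasklet object stays alive */
    }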
@@ -350,7 +350,8 @@ static struct vmbus_channel *alloc_channel(void)
 static void free_channel(struct vmbus_channel *channel)
 {
     tasklet_kill(&channel->callback_event);
-    kfree(channel);
+
+    kfree_rcu(channel, rcu);
 }
 
 static void percpu_channel_enq(void *arg)
@@ -359,14 +360,14 @@ static void percpu_channel_enq(void *arg)
     struct hv_per_cpu_context *hv_cpu
         = this_cpu_ptr(hv_context.cpu_context);
 
-    list_add_tail(&channel->percpu_list, &hv_cpu->chan_list);
+    list_add_tail_rcu(&channel->percpu_list, &hv_cpu->chan_list);
 }
 
 static void percpu_channel_deq(void *arg)
 {
     struct vmbus_channel *channel = arg;
 
-    list_del(&channel->percpu_list);
+    list_del_rcu(&channel->percpu_list);
 }
 
@@ -381,19 +382,6 @@ static void vmbus_release_relid(u32 relid)
                    true);
 }
 
-void hv_event_tasklet_disable(struct vmbus_channel *channel)
-{
-    tasklet_disable(&channel->callback_event);
-}
-
-void hv_event_tasklet_enable(struct vmbus_channel *channel)
-{
-    tasklet_enable(&channel->callback_event);
-
-    /* In case there is any pending event */
-    tasklet_schedule(&channel->callback_event);
-}
-
 void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
 {
     unsigned long flags;
@@ -402,7 +390,6 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
     BUG_ON(!channel->rescind);
     BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));
 
-    hv_event_tasklet_disable(channel);
     if (channel->target_cpu != get_cpu()) {
         put_cpu();
         smp_call_function_single(channel->target_cpu,
@@ -411,7 +398,6 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
         percpu_channel_deq(channel);
         put_cpu();
     }
-    hv_event_tasklet_enable(channel);
 
     if (channel->primary_channel == NULL) {
         list_del(&channel->listentry);
@@ -505,7 +491,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
 
     init_vp_index(newchannel, dev_type);
 
-    hv_event_tasklet_disable(newchannel);
     if (newchannel->target_cpu != get_cpu()) {
         put_cpu();
         smp_call_function_single(newchannel->target_cpu,
@@ -515,7 +500,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
         percpu_channel_enq(newchannel);
         put_cpu();
     }
-    hv_event_tasklet_enable(newchannel);
 
     /*
      * This state is used to indicate a successful open
@@ -565,7 +549,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
     list_del(&newchannel->listentry);
     mutex_unlock(&vmbus_connection.channel_mutex);
 
-    hv_event_tasklet_disable(newchannel);
     if (newchannel->target_cpu != get_cpu()) {
         put_cpu();
         smp_call_function_single(newchannel->target_cpu,
@@ -574,7 +557,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
         percpu_channel_deq(newchannel);
         put_cpu();
     }
-    hv_event_tasklet_enable(newchannel);
 
     vmbus_release_relid(newchannel->offermsg.child_relid);
@@ -814,6 +796,7 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
     /* Allocate the channel object and save this offer. */
     newchannel = alloc_channel();
     if (!newchannel) {
+        vmbus_release_relid(offer->child_relid);
         pr_err("Unable to allocate channel object\n");
         return;
     }
...
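The channel_mgmt.c side is the writer half of the standard RCU list recipe: unlink with list_del_rcu(), then defer the kfree past a grace period via kfree_rcu() and the rcu_head added to struct vmbus_channel (see the hyperv.h hunk below). Minimal sketch with illustrative names:

    #include <linux/rculist.h>
    #include <linux/slab.h>

    struct example_chan {
        u32 relid;
        struct list_head node;
        struct rcu_head rcu;    /* storage for the deferred free */
    };

    static void example_remove(struct example_chan *ch)
    {
        list_del_rcu(&ch->node);    /* lockless readers may still see ch... */
        kfree_rcu(ch, rcu);         /* ...so free it only after a grace period */
    }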
@@ -71,7 +71,6 @@ static DECLARE_WORK(fcopy_send_work, fcopy_send_data);
 static const char fcopy_devname[] = "vmbus/hv_fcopy";
 static u8 *recv_buffer;
 static struct hvutil_transport *hvt;
-static struct completion release_event;
 /*
  * This state maintains the version number registered by the daemon.
  */
@@ -331,7 +330,6 @@ static void fcopy_on_reset(void)
     if (cancel_delayed_work_sync(&fcopy_timeout_work))
         fcopy_respond_to_host(HV_E_FAIL);
-    complete(&release_event);
 }
 
 int hv_fcopy_init(struct hv_util_service *srv)
@@ -339,7 +337,6 @@ int hv_fcopy_init(struct hv_util_service *srv)
     recv_buffer = srv->recv_buffer;
     fcopy_transaction.recv_channel = srv->channel;
 
-    init_completion(&release_event);
     /*
      * When this driver loads, the user level daemon that
      * processes the host requests may not yet be running.
@@ -361,5 +358,4 @@
     fcopy_transaction.state = HVUTIL_DEVICE_DYING;
     cancel_delayed_work_sync(&fcopy_timeout_work);
     hvutil_transport_destroy(hvt);
-    wait_for_completion(&release_event);
 }
...
@@ -101,7 +101,6 @@ static DECLARE_WORK(kvp_sendkey_work, kvp_send_key);
 static const char kvp_devname[] = "vmbus/hv_kvp";
 static u8 *recv_buffer;
 static struct hvutil_transport *hvt;
-static struct completion release_event;
 /*
  * Register the kernel component with the user-level daemon.
  * As part of this registration, pass the LIC version number.
@@ -714,7 +713,6 @@ static void kvp_on_reset(void)
     if (cancel_delayed_work_sync(&kvp_timeout_work))
         kvp_respond_to_host(NULL, HV_E_FAIL);
     kvp_transaction.state = HVUTIL_DEVICE_INIT;
-    complete(&release_event);
 }
 
 int
@@ -723,7 +721,6 @@ hv_kvp_init(struct hv_util_service *srv)
     recv_buffer = srv->recv_buffer;
     kvp_transaction.recv_channel = srv->channel;
 
-    init_completion(&release_event);
     /*
      * When this driver loads, the user level daemon that
      * processes the host requests may not yet be running.
@@ -747,5 +744,4 @@
     cancel_delayed_work_sync(&kvp_timeout_work);
     cancel_work_sync(&kvp_sendkey_work);
     hvutil_transport_destroy(hvt);
-    wait_for_completion(&release_event);
 }
...
@@ -79,7 +79,6 @@ static int dm_reg_value;
 static const char vss_devname[] = "vmbus/hv_vss";
 static __u8 *recv_buffer;
 static struct hvutil_transport *hvt;
-static struct completion release_event;
 
 static void vss_timeout_func(struct work_struct *dummy);
 static void vss_handle_request(struct work_struct *dummy);
@@ -361,13 +360,11 @@ static void vss_on_reset(void)
     if (cancel_delayed_work_sync(&vss_timeout_work))
         vss_respond_to_host(HV_E_FAIL);
     vss_transaction.state = HVUTIL_DEVICE_INIT;
-    complete(&release_event);
 }
 
 int
 hv_vss_init(struct hv_util_service *srv)
 {
-    init_completion(&release_event);
     if (vmbus_proto_version < VERSION_WIN8_1) {
         pr_warn("Integration service 'Backup (volume snapshot)'"
             " not supported on this host version.\n");
@@ -400,5 +397,4 @@
     cancel_delayed_work_sync(&vss_timeout_work);
     cancel_work_sync(&vss_handle_request_work);
     hvutil_transport_destroy(hvt);
-    wait_for_completion(&release_event);
 }
...
@@ -590,6 +590,8 @@ static int hv_timesync_init(struct hv_util_service *srv)
     if (!hyperv_cs)
         return -ENODEV;
 
+    spin_lock_init(&host_ts.lock);
+
     INIT_WORK(&wrk.work, hv_set_host_time);
 
     /*
...
@@ -182,10 +182,11 @@ static int hvt_op_release(struct inode *inode, struct file *file)
      * connects back.
      */
     hvt_reset(hvt);
-    mutex_unlock(&hvt->lock);
 
     if (mode_old == HVUTIL_TRANSPORT_DESTROY)
-        hvt_transport_free(hvt);
+        complete(&hvt->release);
+
+    mutex_unlock(&hvt->lock);
 
     return 0;
 }
@@ -304,6 +305,7 @@ struct hvutil_transport *hvutil_transport_init(const char *name,
     init_waitqueue_head(&hvt->outmsg_q);
     mutex_init(&hvt->lock);
+    init_completion(&hvt->release);
 
     spin_lock(&hvt_list_lock);
     list_add(&hvt->list, &hvt_list);
@@ -351,6 +353,8 @@ void hvutil_transport_destroy(struct hvutil_transport *hvt)
     if (hvt->cn_id.idx > 0 && hvt->cn_id.val > 0)
         cn_del_callback(&hvt->cn_id);
 
-    if (mode_old != HVUTIL_TRANSPORT_CHARDEV)
-        hvt_transport_free(hvt);
+    if (mode_old == HVUTIL_TRANSPORT_CHARDEV)
+        wait_for_completion(&hvt->release);
+
+    hvt_transport_free(hvt);
 }
...
@@ -41,6 +41,7 @@ struct hvutil_transport {
     int outmsg_len;                     /* its length */
     wait_queue_head_t outmsg_q;         /* poll/read wait queue */
     struct mutex lock;                  /* protects struct members */
+    struct completion release;          /* synchronize with fd release */
 };
 
 struct hvutil_transport *hvutil_transport_init(const char *name,
...
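Net effect of the hv_utils rework: instead of three per-service release_event completions, the transport owns a single one; the char-device release() completes it, and hvutil_transport_destroy() waits on it before freeing, so the daemon's open fd can never outlive the transport object. The handshake, condensed (illustrative names; init_completion() runs when the transport is created):

    #include <linux/completion.h>
    #include <linux/slab.h>

    struct example_transport {
        struct completion release;  /* init_completion() at creation */
    };

    static int example_fd_release(struct example_transport *t, bool destroying)
    {
        if (destroying)
            complete(&t->release);  /* unblock the destroyer */
        return 0;
    }

    static void example_destroy(struct example_transport *t, bool fd_open)
    {
        if (fd_open)
            wait_for_completion(&t->release);   /* wait for release() */
        kfree(t);
    }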
@@ -939,8 +939,10 @@ static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
         if (relid == 0)
             continue;
 
+        rcu_read_lock();
+
         /* Find channel based on relid */
-        list_for_each_entry(channel, &hv_cpu->chan_list, percpu_list) {
+        list_for_each_entry_rcu(channel, &hv_cpu->chan_list, percpu_list) {
             if (channel->offermsg.child_relid != relid)
                 continue;
@@ -956,6 +958,8 @@
                 tasklet_schedule(&channel->callback_event);
             }
         }
+
+        rcu_read_unlock();
     }
 }
...
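And this is the matching reader half: the lockless walk in vmbus_chan_sched() must sit inside rcu_read_lock()/rcu_read_unlock() and use list_for_each_entry_rcu(), which together guarantee that any channel it observes stays allocated until the read-side critical section ends. Sketch, reusing the illustrative example_chan from above:

    #include <linux/rculist.h>

    static void example_sched(struct list_head *chan_list, u32 relid)
    {
        struct example_chan *ch;

        rcu_read_lock();
        list_for_each_entry_rcu(ch, chan_list, node) {
            if (ch->relid != relid)
                continue;
            /* ch cannot be freed before rcu_read_unlock() */
            break;
        }
        rcu_read_unlock();
    }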
@@ -221,8 +221,10 @@ static int intel_th_output_activate(struct intel_th_device *thdev)
     else
         intel_th_trace_enable(thdev);
 
-    if (ret)
+    if (ret) {
         pm_runtime_put(&thdev->dev);
+        module_put(thdrv->driver.owner);
+    }
 
     return ret;
 }
...
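intel_th_output_activate() takes a reference on the output driver's module before activating it; the fix simply balances that reference on the error path. The general pattern, sketched with a hypothetical do_activate():

    #include <linux/module.h>

    static int do_activate(void) { return 0; }  /* stand-in for the real enable */

    static int example_activate(struct module *owner)
    {
        int ret;

        if (!try_module_get(owner))     /* pin the driver module */
            return -ENODEV;

        ret = do_activate();
        if (ret)
            module_put(owner);          /* drop the ref taken above */

        return ret; /* on success the ref is held until deactivate */
    }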
@@ -85,6 +85,16 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
         PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa2a6),
         .driver_data = (kernel_ulong_t)0,
     },
+    {
+        /* Denverton */
+        PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x19e1),
+        .driver_data = (kernel_ulong_t)0,
+    },
+    {
+        /* Gemini Lake */
+        PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e),
+        .driver_data = (kernel_ulong_t)0,
+    },
     { 0 },
 };
...
@@ -112,11 +112,9 @@ struct mkhi_msg {
 
 static int mei_osver(struct mei_cl_device *cldev)
 {
-    int ret;
     const size_t size = sizeof(struct mkhi_msg_hdr) +
                         sizeof(struct mkhi_fwcaps) +
                         sizeof(struct mei_os_ver);
-    size_t length = 8;
     char buf[size];
     struct mkhi_msg *req;
     struct mkhi_fwcaps *fwcaps;
@@ -137,15 +135,7 @@ static int mei_osver(struct mei_cl_device *cldev)
     os_ver = (struct mei_os_ver *)fwcaps->data;
     os_ver->os_type = OSTYPE_LINUX;
 
-    ret = __mei_cl_send(cldev->cl, buf, size, mode);
-    if (ret < 0)
-        return ret;
-
-    ret = __mei_cl_recv(cldev->cl, buf, length, 0);
-    if (ret < 0)
-        return ret;
-
-    return 0;
+    return __mei_cl_send(cldev->cl, buf, size, mode);
 }
 
 static void mei_mkhi_fix(struct mei_cl_device *cldev)
@@ -160,7 +150,7 @@ static void mei_mkhi_fix(struct mei_cl_device *cldev)
         return;
 
     ret = mei_osver(cldev);
-    if (ret)
+    if (ret < 0)
         dev_err(&cldev->dev, "OS version command failed %d\n", ret);
 
     mei_cldev_disable(cldev);
...
@@ -124,8 +124,6 @@ int mei_reset(struct mei_device *dev)
 
     mei_clear_interrupts(dev);
 
-    mei_synchronize_irq(dev);
-
     /* we're already in reset, cancel the init timer
      * if the reset was called due the hbm protocol error
      * we need to call it before hw start
@@ -304,6 +302,9 @@ static void mei_reset_work(struct work_struct *work)
         container_of(work, struct mei_device, reset_work);
     int ret;
 
+    mei_clear_interrupts(dev);
+    mei_synchronize_irq(dev);
+
     mutex_lock(&dev->device_lock);
 
     ret = mei_reset(dev);
@@ -328,6 +329,9 @@ void mei_stop(struct mei_device *dev)
 
     mei_cancel_work(dev);
 
+    mei_clear_interrupts(dev);
+    mei_synchronize_irq(dev);
+
     mutex_lock(&dev->device_lock);
 
     dev->dev_state = MEI_DEV_POWER_DOWN;
...
@@ -566,10 +566,10 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
      */
     error = pci_alloc_irq_vectors(pdev, VMCI_MAX_INTRS, VMCI_MAX_INTRS,
             PCI_IRQ_MSIX);
-    if (error) {
+    if (error < 0) {
         error = pci_alloc_irq_vectors(pdev, 1, 1,
                 PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
-        if (error)
+        if (error < 0)
             goto err_remove_bitmap;
     } else {
         vmci_dev->exclusive_vectors = true;
...
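The vmw_vmci bug is a common misreading of pci_alloc_irq_vectors(): it returns the number of vectors allocated (>= 1) on success and a negative errno on failure, so a bare `if (error)` treats every successful allocation as an error. Correct shape, sketched:

    #include <linux/pci.h>

    static int example_setup_irqs(struct pci_dev *pdev, unsigned int want)
    {
        int nvec;

        /* returns the vector count (>= 1) on success, -errno on failure */
        nvec = pci_alloc_irq_vectors(pdev, 1, want,
                PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
        if (nvec < 0)
            return nvec;

        /* then: request_irq(pci_irq_vector(pdev, 0), ...), etc. */
        return 0;
    }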
@@ -939,8 +939,10 @@ parport_register_dev_model(struct parport *port, const char *name,
      * pardevice fields. -arca
      */
     port->ops->init_state(par_dev, par_dev->state);
-    port->proc_device = par_dev;
-    parport_device_proc_register(par_dev);
+    if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
+        port->proc_device = par_dev;
+        parport_device_proc_register(par_dev);
+    }
     return par_dev;
...
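The parport fix is the usual atomic claim idiom: test_and_set_bit() sets the bit and returns its previous value in one atomic step, so exactly one of any number of concurrent callers wins the right to create the procfs entry. Sketch with illustrative names:

    #include <linux/bitops.h>

    #define EXAMPLE_PROC_REGISTERED  0  /* bit number within 'flags' */

    static unsigned long example_flags;

    static void example_register_proc_once(void)
    {
        /* old value was already 1: somebody else registered first */
        if (test_and_set_bit(EXAMPLE_PROC_REGISTERED, &example_flags))
            return;

        /* reached at most once across all concurrent callers, e.g.
         * create the procfs entry here */
    }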
@@ -845,6 +845,13 @@ struct vmbus_channel {
      * link up channels based on their CPU affinity.
      */
     struct list_head percpu_list;
+
+    /*
+     * Defer freeing channel until after all cpu's have
+     * gone through grace period.
+     */
+    struct rcu_head rcu;
+
     /*
      * For performance critical channels (storage, networking
      * etc,), Hyper-V has a mechanism to enhance the throughput
@@ -1430,9 +1437,6 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
                 const int *srv_version, int srv_vercnt,
                 int *nego_fw_version, int *nego_srv_version);
 
-void hv_event_tasklet_disable(struct vmbus_channel *channel);
-void hv_event_tasklet_enable(struct vmbus_channel *channel);
-
 void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
 
 void vmbus_setevent(struct vmbus_channel *channel);
...