Commit 25bfe4f5 authored by Linus Torvalds

Merge tag 'char-misc-3.15-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc

Pull char/misc driver fixes from Greg KH:
 "Here are a few driver fixes for char/misc drivers that resolve
  reported issues.

  All have been in linux-next successfully for a few days"

* tag 'char-misc-3.15-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc:
  Drivers: hv: vmbus: Negotiate version 3.0 when running on ws2012r2 hosts
  Tools: hv: Handle the case when the target file exists correctly
  vme_tsi148: Utilize to_pci_dev() macro
  vme_tsi148: Fix PCI address mapping assumption
  vme_tsi148: Fix typo in tsi148_slave_get()
  w1: avoid recursive device_add
  w1: fix netlink refcnt leak on error path
  misc: Grammar s/addition/additional/
  drivers: mcb: fix memory leak in chameleon_parse_cells() error path
  mei: ignore client writing state during cb completion
  mei: me: do not load the driver if the FW doesn't support MEI interface
  GenWQE: Increase driver version number
  GenWQE: Fix multithreading problems
  GenWQE: Ensure rc is not returning an uninitialized value
  GenWQE: Add wmb before DDCB is started
  GenWQE: Enable access to VPD flash area
parents 60fbf2bd 03367ef5
......@@ -55,6 +55,9 @@ static __u32 vmbus_get_next_version(__u32 current_version)
case (VERSION_WIN8):
return VERSION_WIN7;
+ case (VERSION_WIN8_1):
+ return VERSION_WIN8;
case (VERSION_WS2008):
default:
return VERSION_INVAL;
......@@ -77,7 +80,7 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
msg->interrupt_page = virt_to_phys(vmbus_connection.int_page);
msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]);
msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]);
- if (version == VERSION_WIN8)
+ if (version == VERSION_WIN8_1)
msg->target_vcpu = hv_context.vp_index[smp_processor_id()];
/*
......
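The two hv hunks above slot Windows Server 2012 R2 (VERSION_WIN8_1) into the version downgrade chain. For context, a minimal sketch of the caller's retry loop, paraphrased from vmbus_connect(); the exact control flow in the source may differ slightly:

    /* Start at the newest protocol and walk the chain down until the host accepts. */
    version = VERSION_CURRENT;              /* VERSION_WIN8_1 after this series */
    do {
        ret = vmbus_negotiate_version(msginfo, version);
        if (ret)
            break;                          /* transport error, give up */
        if (vmbus_connection.conn_state == CONNECTED)
            break;                          /* host accepted this version */
        version = vmbus_get_next_version(version);
    } while (version != VERSION_INVAL);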
......@@ -141,6 +141,7 @@ int chameleon_parse_cells(struct mcb_bus *bus, phys_addr_t mapbase,
default:
pr_err("Invalid chameleon descriptor type 0x%x\n",
dtype);
+ kfree(header);
return -EINVAL;
}
num_cells++;
......
......@@ -300,8 +300,8 @@ config SGI_GRU_DEBUG
depends on SGI_GRU
default n
---help---
- This option enables addition debugging code for the SGI GRU driver. If
- you are unsure, say N.
+ This option enables additional debugging code for the SGI GRU driver.
+ If you are unsure, say N.
config APDS9802ALS
tristate "Medfield Avago APDS9802 ALS Sensor module"
......
......@@ -336,6 +336,44 @@ enum genwqe_requ_state {
GENWQE_REQU_STATE_MAX,
};
+ /**
+ * struct genwqe_sgl - Scatter gather list describing user-space memory
+ * @sgl: scatter gather list, needs to be 128 byte aligned
+ * @sgl_dma_addr: dma address of sgl
+ * @sgl_size: size of area used for sgl
+ * @user_addr: user-space address of memory area
+ * @user_size: size of user-space memory area
+ * @fpage: buffer for a partial first page, if needed
+ * @fpage_dma_addr: dma address of the partial first page buffer
+ * @lpage: buffer for a partial last page, if needed
+ * @lpage_dma_addr: dma address of the partial last page buffer
+ */
+ struct genwqe_sgl {
+ dma_addr_t sgl_dma_addr;
+ struct sg_entry *sgl;
+ size_t sgl_size; /* size of sgl */
+ void __user *user_addr; /* user-space base-address */
+ size_t user_size; /* size of memory area */
+ unsigned long nr_pages;
+ unsigned long fpage_offs;
+ size_t fpage_size;
+ size_t lpage_size;
+ void *fpage;
+ dma_addr_t fpage_dma_addr;
+ void *lpage;
+ dma_addr_t lpage_dma_addr;
+ };
+ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
+ void __user *user_addr, size_t user_size);
+ int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
+ dma_addr_t *dma_list);
+ int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl);
/**
* struct ddcb_requ - Kernel internal representation of the DDCB request
* @cmd: User space representation of the DDCB execution request
......@@ -347,9 +385,7 @@ struct ddcb_requ {
struct ddcb_queue *queue; /* associated queue */
struct dma_mapping dma_mappings[DDCB_FIXUPS];
- struct sg_entry *sgl[DDCB_FIXUPS];
- dma_addr_t sgl_dma_addr[DDCB_FIXUPS];
- size_t sgl_size[DDCB_FIXUPS];
+ struct genwqe_sgl sgls[DDCB_FIXUPS];
/* kernel/user shared content */
struct genwqe_ddcb_cmd cmd; /* ddcb_no for this request */
......@@ -453,22 +489,6 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m,
int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m,
struct ddcb_requ *req);
- struct sg_entry *genwqe_alloc_sgl(struct genwqe_dev *cd, int num_pages,
- dma_addr_t *dma_addr, size_t *sgl_size);
- void genwqe_free_sgl(struct genwqe_dev *cd, struct sg_entry *sg_list,
- dma_addr_t dma_addr, size_t size);
- int genwqe_setup_sgl(struct genwqe_dev *cd,
- unsigned long offs,
- unsigned long size,
- struct sg_entry *sgl, /* genwqe sgl */
- dma_addr_t dma_addr, size_t sgl_size,
- dma_addr_t *dma_list, int page_offs, int num_pages);
- int genwqe_check_sgl(struct genwqe_dev *cd, struct sg_entry *sg_list,
- int size);
static inline bool dma_mapping_used(struct dma_mapping *m)
{
if (!m)
......
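The new prototypes above replace the old genwqe_alloc_sgl()/genwqe_setup_sgl()/genwqe_free_sgl() trio, whose loose sgl, dma_addr and size arguments now live in one struct genwqe_sgl. A minimal sketch of the intended call sequence, mirroring how ddcb_cmd_fixups() and ddcb_cmd_cleanup() use it later in this diff (cd, u_addr, u_size and the pinned dma_list are assumed to exist in the caller):

    struct genwqe_sgl sgl;
    int rc;

    /* allocate the sgl, cache partial first/last pages, copy_from_user() in */
    rc = genwqe_alloc_sync_sgl(cd, &sgl, (void __user *)u_addr, u_size);
    if (rc != 0)
        return rc;

    /* fill the hardware-visible entries from the pinned page list */
    genwqe_setup_sgl(cd, &sgl, dma_list);

    /* ... hand sgl.sgl_dma_addr to the DDCB and run it ... */

    /* copy_to_user() the cached pages back, then free everything */
    rc = genwqe_free_sync_sgl(cd, &sgl);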
......@@ -305,6 +305,8 @@ static int enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_queue *queue,
break;
new = (old | DDCB_NEXT_BE32);
+ wmb();
icrc_hsi_shi = cmpxchg(&prev_ddcb->icrc_hsi_shi_32, old, new);
if (icrc_hsi_shi == old)
......@@ -314,6 +316,8 @@ static int enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_queue *queue,
/* Queue must be re-started by updating QUEUE_OFFSET */
ddcb_mark_tapped(pddcb);
num = (u64)ddcb_no << 8;
+ wmb();
__genwqe_writeq(cd, queue->IO_QUEUE_OFFSET, num); /* start queue */
return RET_DDCB_TAPPED;
......@@ -1306,7 +1310,7 @@ static int queue_wake_up_all(struct genwqe_dev *cd)
*/
int genwqe_finish_queue(struct genwqe_dev *cd)
{
- int i, rc, in_flight;
+ int i, rc = 0, in_flight;
int waitmax = genwqe_ddcb_software_timeout;
struct pci_dev *pci_dev = cd->pci_dev;
struct ddcb_queue *queue = &cd->queue;
......
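Both wmb() insertions above follow the usual publish-then-doorbell rule: every store that fills the DDCB must be globally visible before the ownership bit is flipped via cmpxchg() or the queue offset register is written, otherwise the card can fetch a half-written descriptor. Schematically (illustrative field names, not the driver's exact code):

    ddcb->addr = cpu_to_be64(buf_dma);      /* fill the descriptor */
    ddcb->len  = cpu_to_be32(len);
    wmb();                                  /* order the fills before the handoff */
    __genwqe_writeq(cd, queue->IO_QUEUE_OFFSET, num);   /* ring the doorbell */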
......@@ -531,7 +531,9 @@ static int do_flash_update(struct genwqe_file *cfile,
case '1':
cmdopts = 0x1C;
break; /* download/erase_first/part_1 */
- case 'v': /* cmdopts = 0x0c (VPD) */
+ case 'v':
+ cmdopts = 0x0C;
+ break; /* download/erase_first/vpd */
default:
return -EINVAL;
}
......@@ -665,6 +667,8 @@ static int do_flash_read(struct genwqe_file *cfile,
cmdopts = 0x1A;
break; /* upload/part_1 */
case 'v':
+ cmdopts = 0x0A;
+ break; /* upload/vpd */
default:
return -EINVAL;
}
......@@ -836,15 +840,8 @@ static int ddcb_cmd_cleanup(struct genwqe_file *cfile, struct ddcb_requ *req)
__genwqe_del_mapping(cfile, dma_map);
genwqe_user_vunmap(cd, dma_map, req);
}
- if (req->sgl[i] != NULL) {
- genwqe_free_sgl(cd, req->sgl[i],
- req->sgl_dma_addr[i],
- req->sgl_size[i]);
- req->sgl[i] = NULL;
- req->sgl_dma_addr[i] = 0x0;
- req->sgl_size[i] = 0;
- }
+ if (req->sgls[i].sgl != NULL)
+ genwqe_free_sync_sgl(cd, &req->sgls[i]);
}
return 0;
}
......@@ -913,7 +910,7 @@ static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req)
case ATS_TYPE_SGL_RDWR:
case ATS_TYPE_SGL_RD: {
- int page_offs, nr_pages, offs;
+ int page_offs;
u_addr = be64_to_cpu(*((__be64 *)
&cmd->asiv[asiv_offs]));
......@@ -951,27 +948,18 @@ static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req)
page_offs = 0;
}
- offs = offset_in_page(u_addr);
- nr_pages = DIV_ROUND_UP(offs + u_size, PAGE_SIZE);
/* create genwqe style scatter gather list */
- req->sgl[i] = genwqe_alloc_sgl(cd, m->nr_pages,
- &req->sgl_dma_addr[i],
- &req->sgl_size[i]);
- if (req->sgl[i] == NULL) {
- rc = -ENOMEM;
+ rc = genwqe_alloc_sync_sgl(cd, &req->sgls[i],
+ (void __user *)u_addr,
+ u_size);
+ if (rc != 0)
goto err_out;
- }
- genwqe_setup_sgl(cd, offs, u_size,
- req->sgl[i],
- req->sgl_dma_addr[i],
- req->sgl_size[i],
- m->dma_list,
- page_offs,
- nr_pages);
+ genwqe_setup_sgl(cd, &req->sgls[i],
+ &m->dma_list[page_offs]);
*((__be64 *)&cmd->asiv[asiv_offs]) =
- cpu_to_be64(req->sgl_dma_addr[i]);
+ cpu_to_be64(req->sgls[i].sgl_dma_addr);
break;
}
......
......@@ -275,67 +275,107 @@ static int genwqe_sgl_size(int num_pages)
return roundup(len, PAGE_SIZE);
}
- struct sg_entry *genwqe_alloc_sgl(struct genwqe_dev *cd, int num_pages,
- dma_addr_t *dma_addr, size_t *sgl_size)
+ /**
+ * genwqe_alloc_sync_sgl() - Allocate memory for sgl and overlapping pages
+ *
+ * Allocates memory for sgl and overlapping pages. Pages which might
+ * overlap other user-space memory blocks are being cached for DMAs,
+ * such that we do not run into synchronization issues. Data is copied
+ * from user-space into the cached pages.
+ */
+ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
+ void __user *user_addr, size_t user_size)
{
+ int rc;
struct pci_dev *pci_dev = cd->pci_dev;
- struct sg_entry *sgl;
- *sgl_size = genwqe_sgl_size(num_pages);
- if (get_order(*sgl_size) > MAX_ORDER) {
+ sgl->fpage_offs = offset_in_page((unsigned long)user_addr);
+ sgl->fpage_size = min_t(size_t, PAGE_SIZE-sgl->fpage_offs, user_size);
+ sgl->nr_pages = DIV_ROUND_UP(sgl->fpage_offs + user_size, PAGE_SIZE);
+ sgl->lpage_size = (user_size - sgl->fpage_size) % PAGE_SIZE;
+ dev_dbg(&pci_dev->dev, "[%s] uaddr=%p usize=%8ld nr_pages=%ld "
+ "fpage_offs=%lx fpage_size=%ld lpage_size=%ld\n",
+ __func__, user_addr, user_size, sgl->nr_pages,
+ sgl->fpage_offs, sgl->fpage_size, sgl->lpage_size);
+ sgl->user_addr = user_addr;
+ sgl->user_size = user_size;
+ sgl->sgl_size = genwqe_sgl_size(sgl->nr_pages);
+ if (get_order(sgl->sgl_size) > MAX_ORDER) {
dev_err(&pci_dev->dev,
"[%s] err: too much memory requested!\n", __func__);
- return NULL;
+ return -ENOMEM;
}
- sgl = __genwqe_alloc_consistent(cd, *sgl_size, dma_addr);
- if (sgl == NULL) {
+ sgl->sgl = __genwqe_alloc_consistent(cd, sgl->sgl_size,
+ &sgl->sgl_dma_addr);
+ if (sgl->sgl == NULL) {
dev_err(&pci_dev->dev,
"[%s] err: no memory available!\n", __func__);
- return NULL;
+ return -ENOMEM;
}
- return sgl;
+ /* Only use buffering on incomplete pages */
+ if ((sgl->fpage_size != 0) && (sgl->fpage_size != PAGE_SIZE)) {
+ sgl->fpage = __genwqe_alloc_consistent(cd, PAGE_SIZE,
+ &sgl->fpage_dma_addr);
+ if (sgl->fpage == NULL)
+ goto err_out;
+ /* Sync with user memory */
+ if (copy_from_user(sgl->fpage + sgl->fpage_offs,
+ user_addr, sgl->fpage_size)) {
+ rc = -EFAULT;
+ goto err_out;
+ }
+ }
+ if (sgl->lpage_size != 0) {
+ sgl->lpage = __genwqe_alloc_consistent(cd, PAGE_SIZE,
+ &sgl->lpage_dma_addr);
+ if (sgl->lpage == NULL)
+ goto err_out1;
+ /* Sync with user memory */
+ if (copy_from_user(sgl->lpage, user_addr + user_size -
+ sgl->lpage_size, sgl->lpage_size)) {
+ rc = -EFAULT;
+ goto err_out1;
+ }
+ }
+ return 0;
+ err_out1:
+ __genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
+ sgl->fpage_dma_addr);
+ err_out:
+ __genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl,
+ sgl->sgl_dma_addr);
+ return -ENOMEM;
}
- int genwqe_setup_sgl(struct genwqe_dev *cd,
- unsigned long offs,
- unsigned long size,
- struct sg_entry *sgl,
- dma_addr_t dma_addr, size_t sgl_size,
- dma_addr_t *dma_list, int page_offs, int num_pages)
+ int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
+ dma_addr_t *dma_list)
{
int i = 0, j = 0, p;
unsigned long dma_offs, map_offs;
- struct pci_dev *pci_dev = cd->pci_dev;
dma_addr_t prev_daddr = 0;
struct sg_entry *s, *last_s = NULL;
- /* sanity checks */
- if (offs > PAGE_SIZE) {
- dev_err(&pci_dev->dev,
- "[%s] too large start offs %08lx\n", __func__, offs);
- return -EFAULT;
- }
- if (sgl_size < genwqe_sgl_size(num_pages)) {
- dev_err(&pci_dev->dev,
- "[%s] sgl_size too small %08lx for %d pages\n",
- __func__, sgl_size, num_pages);
- return -EFAULT;
- }
+ size_t size = sgl->user_size;
dma_offs = 128; /* next block if needed/dma_offset */
- map_offs = offs; /* offset in first page */
+ map_offs = sgl->fpage_offs; /* offset in first page */
- s = &sgl[0]; /* first set of 8 entries */
+ s = &sgl->sgl[0]; /* first set of 8 entries */
p = 0; /* page */
- while (p < num_pages) {
+ while (p < sgl->nr_pages) {
dma_addr_t daddr;
unsigned int size_to_map;
/* always write the chaining entry, cleanup is done later */
j = 0;
- s[j].target_addr = cpu_to_be64(dma_addr + dma_offs);
+ s[j].target_addr = cpu_to_be64(sgl->sgl_dma_addr + dma_offs);
s[j].len = cpu_to_be32(128);
s[j].flags = cpu_to_be32(SG_CHAINED);
j++;
......@@ -343,7 +383,17 @@ int genwqe_setup_sgl(struct genwqe_dev *cd,
while (j < 8) {
/* DMA mapping for requested page, offs, size */
size_to_map = min(size, PAGE_SIZE - map_offs);
- daddr = dma_list[page_offs + p] + map_offs;
+ if ((p == 0) && (sgl->fpage != NULL)) {
+ daddr = sgl->fpage_dma_addr + map_offs;
+ } else if ((p == sgl->nr_pages - 1) &&
+ (sgl->lpage != NULL)) {
+ daddr = sgl->lpage_dma_addr;
+ } else {
+ daddr = dma_list[p] + map_offs;
+ }
size -= size_to_map;
map_offs = 0;
......@@ -358,7 +408,7 @@ int genwqe_setup_sgl(struct genwqe_dev *cd,
size_to_map);
p++; /* process next page */
- if (p == num_pages)
+ if (p == sgl->nr_pages)
goto fixup; /* nothing to do */
prev_daddr = daddr + size_to_map;
......@@ -374,7 +424,7 @@ int genwqe_setup_sgl(struct genwqe_dev *cd,
j++;
p++; /* process next page */
- if (p == num_pages)
+ if (p == sgl->nr_pages)
goto fixup; /* nothing to do */
}
dma_offs += 128;
......@@ -395,10 +445,50 @@ int genwqe_setup_sgl(struct genwqe_dev *cd,
return 0;
}
- void genwqe_free_sgl(struct genwqe_dev *cd, struct sg_entry *sg_list,
- dma_addr_t dma_addr, size_t size)
+ /**
+ * genwqe_free_sync_sgl() - Free memory for sgl and overlapping pages
+ *
+ * After the DMA transfer has been completed we free the memory for
+ * the sgl and the cached pages. Data is transferred from the cached
+ * pages into the user-space buffers.
+ */
+ int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl)
{
- __genwqe_free_consistent(cd, size, sg_list, dma_addr);
+ int rc = 0; /* stays 0 unless a copy_to_user() below fails */
+ struct pci_dev *pci_dev = cd->pci_dev;
+ if (sgl->fpage) {
+ if (copy_to_user(sgl->user_addr, sgl->fpage + sgl->fpage_offs,
+ sgl->fpage_size)) {
+ dev_err(&pci_dev->dev, "[%s] err: copying fpage!\n",
+ __func__);
+ rc = -EFAULT;
+ }
+ __genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
+ sgl->fpage_dma_addr);
+ sgl->fpage = NULL;
+ sgl->fpage_dma_addr = 0;
+ }
+ if (sgl->lpage) {
+ if (copy_to_user(sgl->user_addr + sgl->user_size -
+ sgl->lpage_size, sgl->lpage,
+ sgl->lpage_size)) {
+ dev_err(&pci_dev->dev, "[%s] err: copying lpage!\n",
+ __func__);
+ rc = -EFAULT;
+ }
+ __genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
+ sgl->lpage_dma_addr);
+ sgl->lpage = NULL;
+ sgl->lpage_dma_addr = 0;
+ }
+ __genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl,
+ sgl->sgl_dma_addr);
+ sgl->sgl = NULL;
+ sgl->sgl_dma_addr = 0x0;
+ sgl->sgl_size = 0;
+ return rc;
}
/**
......
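The first/last-page bookkeeping in genwqe_alloc_sync_sgl() is easiest to verify with numbers. Assuming PAGE_SIZE = 0x1000 and a hypothetical buffer at page offset 0x300 with user_size = 0x2000:

    fpage_offs = 0x300;                                 /* offset_in_page(user_addr) */
    fpage_size = min(0x1000 - 0x300, 0x2000);           /* = 0xd00, partial first page */
    nr_pages   = DIV_ROUND_UP(0x300 + 0x2000, 0x1000);  /* = 3 pages spanned */
    lpage_size = (0x2000 - 0xd00) % 0x1000;             /* = 0x300, partial last page */
    /* 0xd00 + 0x1000 + 0x300 == 0x2000: first + middle + last cover the buffer */

Only the partial first and last pages are bounced through cached copies; full middle pages are DMA-mapped directly from the pinned user pages.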
......@@ -36,7 +36,7 @@
#include <asm/byteorder.h>
#include <linux/genwqe/genwqe_card.h>
#define DRV_VERS_STRING "2.0.0"
#define DRV_VERS_STRING "2.0.15"
/*
* Static minor number assignment, until we decide/implement
......
......@@ -115,6 +115,11 @@
#define MEI_DEV_ID_LPT_HR 0x8CBA /* Lynx Point H Refresh */
#define MEI_DEV_ID_WPT_LP 0x9CBA /* Wildcat Point LP */
+ /* Host Firmware Status Registers in PCI Config Space */
+ #define PCI_CFG_HFS_1 0x40
+ #define PCI_CFG_HFS_2 0x48
/*
* MEI HW Section
*/
......
......@@ -455,8 +455,7 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
cl->status = 0;
list_del(&cb->list);
- if (MEI_WRITING == cl->writing_state &&
- cb->fop_type == MEI_FOP_WRITE &&
+ if (cb->fop_type == MEI_FOP_WRITE &&
cl != &dev->iamthif_cl) {
cl_dbg(dev, cl, "MEI WRITE COMPLETE\n");
cl->writing_state = MEI_WRITE_COMPLETE;
......
......@@ -644,8 +644,7 @@ static unsigned int mei_poll(struct file *file, poll_table *wait)
goto out;
}
- if (MEI_WRITE_COMPLETE == cl->writing_state)
- mask |= (POLLIN | POLLRDNORM);
+ mask |= (POLLIN | POLLRDNORM);
out:
mutex_unlock(&dev->device_lock);
......
......@@ -97,15 +97,31 @@ static bool mei_me_quirk_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
u32 reg;
- if (ent->device == MEI_DEV_ID_PBG_1) {
- pci_read_config_dword(pdev, 0x48, &reg);
- /* make sure that bit 9 is up and bit 10 is down */
- if ((reg & 0x600) == 0x200) {
- dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
- return false;
- }
+ /* Cougar Point || Patsburg */
+ if (ent->device == MEI_DEV_ID_CPT_1 ||
+ ent->device == MEI_DEV_ID_PBG_1) {
+ pci_read_config_dword(pdev, PCI_CFG_HFS_2, &reg);
+ /* make sure that bit 9 (NM) is up and bit 10 (DM) is down */
+ if ((reg & 0x600) == 0x200)
+ goto no_mei;
+ }
+ /* Lynx Point */
+ if (ent->device == MEI_DEV_ID_LPT_H ||
+ ent->device == MEI_DEV_ID_LPT_W ||
+ ent->device == MEI_DEV_ID_LPT_HR) {
+ /* Read ME FW Status check for SPS Firmware */
+ pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
+ /* if bits [19:16] = 15, running SPS Firmware */
+ if ((reg & 0xf0000) == 0xf0000)
+ goto no_mei;
+ }
return true;
+ no_mei:
+ dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
+ return false;
}
/**
* mei_probe - Device Initialization Routine
......
......@@ -320,7 +320,7 @@ static int tsi148_irq_init(struct vme_bridge *tsi148_bridge)
struct pci_dev *pdev;
struct tsi148_driver *bridge;
- pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);
+ pdev = to_pci_dev(tsi148_bridge->parent);
bridge = tsi148_bridge->driver_priv;
......@@ -433,9 +433,7 @@ static void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
if (sync != 0) {
- pdev = container_of(tsi148_bridge->parent,
- struct pci_dev, dev);
+ pdev = to_pci_dev(tsi148_bridge->parent);
synchronize_irq(pdev->irq);
}
} else {
......@@ -741,7 +739,7 @@ static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
reg_join(vme_bound_high, vme_bound_low, &vme_bound);
reg_join(pci_offset_high, pci_offset_low, &pci_offset);
- *pci_base = (dma_addr_t)vme_base + pci_offset;
+ *pci_base = (dma_addr_t)(*vme_base + pci_offset);
*enabled = 0;
*aspace = 0;
......@@ -814,7 +812,7 @@ static int tsi148_alloc_resource(struct vme_master_resource *image,
tsi148_bridge = image->parent;
- pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);
+ pdev = to_pci_dev(tsi148_bridge->parent);
existing_size = (unsigned long long)(image->bus_resource.end -
image->bus_resource.start);
......@@ -910,11 +908,15 @@ static int tsi148_master_set(struct vme_master_resource *image, int enabled,
unsigned long long pci_bound, vme_offset, pci_base;
struct vme_bridge *tsi148_bridge;
struct tsi148_driver *bridge;
+ struct pci_bus_region region;
+ struct pci_dev *pdev;
tsi148_bridge = image->parent;
bridge = tsi148_bridge->driver_priv;
+ pdev = to_pci_dev(tsi148_bridge->parent);
/* Verify input data */
if (vme_base & 0xFFFF) {
dev_err(tsi148_bridge->parent, "Invalid VME Window "
......@@ -949,7 +951,9 @@ static int tsi148_master_set(struct vme_master_resource *image, int enabled,
pci_bound = 0;
vme_offset = 0;
} else {
- pci_base = (unsigned long long)image->bus_resource.start;
+ pcibios_resource_to_bus(pdev->bus, &region,
+ &image->bus_resource);
+ pci_base = region.start;
/*
* Bound address is a valid address for the window, adjust
......@@ -2232,7 +2236,7 @@ static void *tsi148_alloc_consistent(struct device *parent, size_t size,
struct pci_dev *pdev;
/* Find pci_dev container of dev */
- pdev = container_of(parent, struct pci_dev, dev);
+ pdev = to_pci_dev(parent);
return pci_alloc_consistent(pdev, size, dma);
}
......@@ -2243,7 +2247,7 @@ static void tsi148_free_consistent(struct device *parent, size_t size,
struct pci_dev *pdev;
/* Find pci_dev container of dev */
- pdev = container_of(parent, struct pci_dev, dev);
+ pdev = to_pci_dev(parent);
pci_free_consistent(pdev, size, vaddr, dma);
}
......
......@@ -614,27 +614,11 @@ static int w1_uevent(struct device *dev, struct kobj_uevent_env *env)
return err;
}
- /*
- * Handle sysfs file creation and removal here, before userspace is told that
- * the device is added / removed from the system
- */
- static int w1_bus_notify(struct notifier_block *nb, unsigned long action,
- void *data)
+ static int w1_family_notify(unsigned long action, struct w1_slave *sl)
{
- struct device *dev = data;
- struct w1_slave *sl;
struct w1_family_ops *fops;
int err;
- /*
- * Only care about slave devices at the moment. Yes, we should use a
- * separate "type" for this, but for now, look at the release function
- * to know which type it is...
- */
- if (dev->release != w1_slave_release)
- return 0;
- sl = dev_to_w1_slave(dev);
fops = sl->family->fops;
if (!fops)
......@@ -673,10 +657,6 @@ static int w1_bus_notify(struct notifier_block *nb, unsigned long action,
return 0;
}
- static struct notifier_block w1_bus_nb = {
- .notifier_call = w1_bus_notify,
- };
static int __w1_attach_slave_device(struct w1_slave *sl)
{
int err;
......@@ -698,6 +678,9 @@ static int __w1_attach_slave_device(struct w1_slave *sl)
dev_dbg(&sl->dev, "%s: registering %s as %p.\n", __func__,
dev_name(&sl->dev), sl);
+ /* suppress for w1_family_notify before sending KOBJ_ADD */
+ dev_set_uevent_suppress(&sl->dev, true);
err = device_register(&sl->dev);
if (err < 0) {
dev_err(&sl->dev,
......@@ -705,7 +688,7 @@ static int __w1_attach_slave_device(struct w1_slave *sl)
dev_name(&sl->dev), err);
return err;
}
+ w1_family_notify(BUS_NOTIFY_ADD_DEVICE, sl);
+ dev_set_uevent_suppress(&sl->dev, false);
+ kobject_uevent(&sl->dev.kobj, KOBJ_ADD);
......@@ -799,6 +782,7 @@ int w1_unref_slave(struct w1_slave *sl)
msg.type = W1_SLAVE_REMOVE;
w1_netlink_send(sl->master, &msg);
+ w1_family_notify(BUS_NOTIFY_DEL_DEVICE, sl);
device_unregister(&sl->dev);
#ifdef DEBUG
memset(sl, 0, sizeof(*sl));
......@@ -1186,10 +1170,6 @@ static int __init w1_init(void)
goto err_out_exit_init;
}
- retval = bus_register_notifier(&w1_bus_type, &w1_bus_nb);
- if (retval)
- goto err_out_bus_unregister;
retval = driver_register(&w1_master_driver);
if (retval) {
printk(KERN_ERR
......
......@@ -300,12 +300,6 @@ static int w1_process_command_root(struct cn_msg *msg,
struct w1_netlink_msg *w;
u32 *id;
- if (mcmd->type != W1_LIST_MASTERS) {
- printk(KERN_NOTICE "%s: msg: %x.%x, wrong type: %u, len: %u.\n",
- __func__, msg->id.idx, msg->id.val, mcmd->type, mcmd->len);
- return -EPROTO;
- }
cn = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!cn)
return -ENOMEM;
......@@ -441,6 +435,9 @@ static void w1_process_cb(struct w1_master *dev, struct w1_async_cmd *async_cmd)
w1_netlink_send_error(&node->block->msg, node->m, cmd,
node->block->portid, err);
+ /* ref taken in w1_search_slave or w1_search_master_id when building
+ * the block
+ */
if (sl)
w1_unref_slave(sl);
else
......@@ -503,30 +500,42 @@ static void w1_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
msg_len = msg->len;
while (msg_len && !err) {
- struct w1_reg_num id;
- u16 mlen = m->len;
dev = NULL;
sl = NULL;
- memcpy(&id, m->id.id, sizeof(id));
- #if 0
- printk("%s: %02x.%012llx.%02x: type=%02x, len=%u.\n",
- __func__, id.family, (unsigned long long)id.id, id.crc, m->type, m->len);
- #endif
if (m->len + sizeof(struct w1_netlink_msg) > msg_len) {
err = -E2BIG;
break;
}
+ /* execute on this thread, no need to process later */
+ if (m->type == W1_LIST_MASTERS) {
+ err = w1_process_command_root(msg, m, nsp->portid);
+ goto out_cont;
+ }
+ /* All following message types require additional data,
+ * check here before references are taken.
+ */
+ if (!m->len) {
+ err = -EPROTO;
+ goto out_cont;
+ }
+ /* both search calls take reference counts */
if (m->type == W1_MASTER_CMD) {
dev = w1_search_master_id(m->id.mst.id);
} else if (m->type == W1_SLAVE_CMD) {
- sl = w1_search_slave(&id);
+ sl = w1_search_slave((struct w1_reg_num *)m->id.id);
if (sl)
dev = sl->master;
} else {
- err = w1_process_command_root(msg, m, nsp->portid);
+ printk(KERN_NOTICE
+ "%s: msg: %x.%x, wrong type: %u, len: %u.\n",
+ __func__, msg->id.idx, msg->id.val,
+ m->type, m->len);
+ err = -EPROTO;
goto out_cont;
}
......@@ -536,8 +545,6 @@ static void w1_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
}
err = 0;
- if (!mlen)
- goto out_cont;
atomic_inc(&block->refcnt);
node->async.cb = w1_process_cb;
......@@ -557,7 +564,8 @@ static void w1_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
if (err)
w1_netlink_send_error(msg, m, NULL, nsp->portid, err);
msg_len -= sizeof(struct w1_netlink_msg) + m->len;
- m = (struct w1_netlink_msg *)(((u8 *)m) + sizeof(struct w1_netlink_msg) + m->len);
+ m = (struct w1_netlink_msg *)(((u8 *)m) +
+ sizeof(struct w1_netlink_msg) + m->len);
/*
* Let's allow requests for nonexisting devices.
......
......@@ -147,15 +147,17 @@ hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
* 0 . 13 (Windows Server 2008)
* 1 . 1 (Windows 7)
* 2 . 4 (Windows 8)
+ * 3 . 0 (Windows 8 R2)
*/
#define VERSION_WS2008 ((0 << 16) | (13))
#define VERSION_WIN7 ((1 << 16) | (1))
#define VERSION_WIN8 ((2 << 16) | (4))
+ #define VERSION_WIN8_1 ((3 << 16) | (0))
#define VERSION_INVAL -1
- #define VERSION_CURRENT VERSION_WIN8
+ #define VERSION_CURRENT VERSION_WIN8_1
/* Make maximum size of pipe payload of 16K */
#define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384)
......
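The version constants pack the major revision into the high 16 bits and the minor into the low 16, so negotiated versions compare as plain integers:

    /* VERSION_WS2008 = (0 << 16) | 13 = 0x0000000d  (0.13) */
    /* VERSION_WIN8   = (2 << 16) |  4 = 0x00020004  (2.4)  */
    /* VERSION_WIN8_1 = (3 << 16) |  0 = 0x00030000  (3.0)  */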
......@@ -305,6 +305,7 @@ enum hv_kvp_exchg_pool {
#define HV_ERROR_DEVICE_NOT_CONNECTED 0x8007048F
#define HV_INVALIDARG 0x80070057
#define HV_GUID_NOTFOUND 0x80041002
+ #define HV_ERROR_ALREADY_EXISTS 0x80070050
#define ADDR_FAMILY_NONE 0x00
#define ADDR_FAMILY_IPV4 0x01
......
......@@ -82,8 +82,10 @@ static int hv_start_fcopy(struct hv_start_fcopy *smsg)
if (!access(target_fname, F_OK)) {
syslog(LOG_INFO, "File: %s exists", target_fname);
- if (!smsg->copy_flags & OVER_WRITE)
+ if (!(smsg->copy_flags & OVER_WRITE)) {
+ error = HV_ERROR_ALREADY_EXISTS;
goto done;
+ }
}
target_fd = open(target_fname, O_RDWR | O_CREAT | O_CLOEXEC, 0744);
......
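The fcopy fix above is an operator-precedence repair: in C, ! binds tighter than &, so the old test parsed as (!smsg->copy_flags) & OVER_WRITE, which evaluates to 0 whenever any flag bit is set; an existing file could then be overwritten without OVER_WRITE being requested, and when the copy was refused no error code was reported. A standalone illustration with hypothetical bit values:

    unsigned int copy_flags = 0x4;  /* some other flag set, OVER_WRITE (here 0x1) clear */
    /* old: (!copy_flags) & 0x1  ->  0 & 0x1  ->  0, file overwritten anyway      */
    /* new: !(copy_flags & 0x1)  ->  !0       ->  1, correctly refuses overwrite  */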