Commit 4a9bf88a authored by Dan Williams

Merge branch 'pmem-api' into libnvdimm-for-next

parents a06a7576 67a3e8fe
@@ -397,7 +397,8 @@ prototypes:
 	int (*release) (struct gendisk *, fmode_t);
 	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
 	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
-	int (*direct_access) (struct block_device *, sector_t, void **, unsigned long *);
+	int (*direct_access) (struct block_device *, sector_t, void __pmem **,
+				unsigned long *);
 	int (*media_changed) (struct gendisk *);
 	void (*unlock_native_capacity) (struct gendisk *);
 	int (*revalidate_disk) (struct gendisk *);
......
@@ -6161,6 +6161,7 @@ Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
 S:	Supported
 F:	drivers/nvdimm/pmem.c
 F:	include/linux/pmem.h
+F:	arch/*/include/asm/pmem.h

 LINUX FOR IBM pSERIES (RS/6000)
 M:	Paul Mackerras <paulus@au.ibm.com>
......
@@ -95,7 +95,7 @@ static struct physmap_flash_data cdb89712_bootrom_pdata __initdata = {
 static struct resource cdb89712_bootrom_resources[] __initdata = {
 	DEFINE_RES_NAMED(CS7_PHYS_BASE, SZ_128, "BOOTROM", IORESOURCE_MEM |
-			 IORESOURCE_CACHEABLE | IORESOURCE_READONLY),
+			 IORESOURCE_READONLY),
 };

 static struct platform_device cdb89712_bootrom_pdev __initdata = {
......
@@ -12,7 +12,7 @@
 #include <linux/err.h>
 #include <linux/mm.h>
 #include <linux/spinlock.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include "pm-rcar.h"

 /* SYSC */
......
@@ -435,6 +435,7 @@ static inline void __iomem * ioremap_cache (unsigned long phys_addr, unsigned lo
 {
 	return ioremap(phys_addr, size);
 }
+#define ioremap_cache ioremap_cache

 /*
......
@@ -4,7 +4,7 @@
 #include <linux/errno.h>
 #include <linux/timex.h>
 #include <linux/clocksource.h>
-#include <asm/io.h>
+#include <linux/io.h>

 /* IBM Summit (EXA) Cyclone counter code*/
 #define CYCLONE_CBAR_ADDR	0xFEB00CD0
......
@@ -102,7 +102,7 @@ static void of_pci_parse_addrs(struct device_node *node, struct pci_dev *dev)
 			res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
 		} else if (i == dev->rom_base_reg) {
 			res = &dev->resource[PCI_ROM_RESOURCE];
-			flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE;
+			flags |= IORESOURCE_READONLY;
 		} else {
 			printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i);
 			continue;
......
@@ -141,13 +141,14 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
  */
 static long
 axon_ram_direct_access(struct block_device *device, sector_t sector,
-		       void **kaddr, unsigned long *pfn, long size)
+		       void __pmem **kaddr, unsigned long *pfn, long size)
 {
 	struct axon_ram_bank *bank = device->bd_disk->private_data;
 	loff_t offset = (loff_t)sector << AXON_RAM_SECTOR_SHIFT;
+	void *addr = (void *)(bank->ph_addr + offset);

-	*kaddr = (void *)(bank->ph_addr + offset);
-	*pfn = virt_to_phys(*kaddr) >> PAGE_SHIFT;
+	*kaddr = (void __pmem *)addr;
+	*pfn = virt_to_phys(addr) >> PAGE_SHIFT;
 	return bank->size - offset;
 }
......
@@ -342,6 +342,7 @@ ioremap_cache(phys_addr_t offset, unsigned long size)
 {
 	return __ioremap_mode(offset, size, PAGE_KERNEL);
 }
+#define ioremap_cache ioremap_cache

 #ifdef CONFIG_HAVE_IOREMAP_PROT
 static inline void __iomem *
......
@@ -231,8 +231,7 @@ static void pci_parse_of_addrs(struct platform_device *op,
 			res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
 		} else if (i == dev->rom_base_reg) {
 			res = &dev->resource[PCI_ROM_RESOURCE];
-			flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE
-				| IORESOURCE_SIZEALIGN;
+			flags |= IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
 		} else {
 			printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i);
 			continue;
......
@@ -28,6 +28,7 @@ config X86
 	select ARCH_HAS_FAST_MULTIPLIER
 	select ARCH_HAS_GCOV_PROFILE_ALL
 	select ARCH_HAS_PMEM_API
+	select ARCH_HAS_MMIO_FLUSH
 	select ARCH_HAS_SG_CHAIN
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select ARCH_MIGHT_HAVE_ACPI_PDC		if ACPI
......
@@ -89,6 +89,8 @@ int set_pages_rw(struct page *page, int numpages);

 void clflush_cache_range(void *addr, unsigned int size);

+#define mmio_flush_range(addr, size) clflush_cache_range(addr, size)
+
 #ifdef CONFIG_DEBUG_RODATA
 void mark_rodata_ro(void);
 extern const int rodata_test_data;
@@ -109,75 +111,4 @@ static inline int rodata_test(void)
 }
 #endif

-#ifdef ARCH_HAS_NOCACHE_UACCESS
-
-/**
- * arch_memcpy_to_pmem - copy data to persistent memory
- * @dst: destination buffer for the copy
- * @src: source buffer for the copy
- * @n: length of the copy in bytes
- *
- * Copy data to persistent memory media via non-temporal stores so that
- * a subsequent arch_wmb_pmem() can flush cpu and memory controller
- * write buffers to guarantee durability.
- */
-static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
-		size_t n)
-{
-	int unwritten;
-
-	/*
-	 * We are copying between two kernel buffers, if
-	 * __copy_from_user_inatomic_nocache() returns an error (page
-	 * fault) we would have already reported a general protection fault
-	 * before the WARN+BUG.
-	 */
-	unwritten = __copy_from_user_inatomic_nocache((void __force *) dst,
-			(void __user *) src, n);
-	if (WARN(unwritten, "%s: fault copying %p <- %p unwritten: %d\n",
-				__func__, dst, src, unwritten))
-		BUG();
-}
-
-/**
- * arch_wmb_pmem - synchronize writes to persistent memory
- *
- * After a series of arch_memcpy_to_pmem() operations this drains data
- * from cpu write buffers and any platform (memory controller) buffers
- * to ensure that written data is durable on persistent memory media.
- */
-static inline void arch_wmb_pmem(void)
-{
-	/*
-	 * wmb() to 'sfence' all previous writes such that they are
-	 * architecturally visible to 'pcommit'.  Note, that we've
-	 * already arranged for pmem writes to avoid the cache via
-	 * arch_memcpy_to_pmem().
-	 */
-	wmb();
-	pcommit_sfence();
-}
-
-static inline bool __arch_has_wmb_pmem(void)
-{
-#ifdef CONFIG_X86_64
-	/*
-	 * We require that wmb() be an 'sfence', that is only guaranteed on
-	 * 64-bit builds
-	 */
-	return static_cpu_has(X86_FEATURE_PCOMMIT);
-#else
-	return false;
-#endif
-}
-
-#else /* ARCH_HAS_NOCACHE_UACCESS i.e. ARCH=um */
-
-extern void arch_memcpy_to_pmem(void __pmem *dst, const void *src, size_t n);
-extern void arch_wmb_pmem(void);
-
-static inline bool __arch_has_wmb_pmem(void)
-{
-	return false;
-}
-#endif
-
 #endif /* _ASM_X86_CACHEFLUSH_H */
@@ -248,12 +248,6 @@ static inline void flush_write_buffers(void)
 #endif
 }

-static inline void __pmem *arch_memremap_pmem(resource_size_t offset,
-	unsigned long size)
-{
-	return (void __force __pmem *) ioremap_cache(offset, size);
-}
-
 #endif /* __KERNEL__ */

 extern void native_io_delay(void);
......
/*
* Copyright(c) 2015 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#ifndef __ASM_X86_PMEM_H__
#define __ASM_X86_PMEM_H__
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#define ARCH_MEMREMAP_PMEM MEMREMAP_WB
#ifdef CONFIG_ARCH_HAS_PMEM_API
/**
* arch_memcpy_to_pmem - copy data to persistent memory
* @dst: destination buffer for the copy
* @src: source buffer for the copy
* @n: length of the copy in bytes
*
* Copy data to persistent memory media via non-temporal stores so that
* a subsequent arch_wmb_pmem() can flush cpu and memory controller
* write buffers to guarantee durability.
*/
static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
size_t n)
{
int unwritten;
/*
* We are copying between two kernel buffers, if
* __copy_from_user_inatomic_nocache() returns an error (page
* fault) we would have already reported a general protection fault
* before the WARN+BUG.
*/
unwritten = __copy_from_user_inatomic_nocache((void __force *) dst,
(void __user *) src, n);
if (WARN(unwritten, "%s: fault copying %p <- %p unwritten: %d\n",
__func__, dst, src, unwritten))
BUG();
}
/**
* arch_wmb_pmem - synchronize writes to persistent memory
*
* After a series of arch_memcpy_to_pmem() operations this drains data
* from cpu write buffers and any platform (memory controller) buffers
* to ensure that written data is durable on persistent memory media.
*/
static inline void arch_wmb_pmem(void)
{
/*
* wmb() to 'sfence' all previous writes such that they are
* architecturally visible to 'pcommit'. Note, that we've
* already arranged for pmem writes to avoid the cache via
* arch_memcpy_to_pmem().
*/
wmb();
pcommit_sfence();
}
/**
* __arch_wb_cache_pmem - write back a cache range with CLWB
* @vaddr: virtual start address
* @size: number of bytes to write back
*
* Write back a cache range using the CLWB (cache line write back)
* instruction. This function requires explicit ordering with an
* arch_wmb_pmem() call. This API is internal to the x86 PMEM implementation.
*/
static inline void __arch_wb_cache_pmem(void *vaddr, size_t size)
{
u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
unsigned long clflush_mask = x86_clflush_size - 1;
void *vend = vaddr + size;
void *p;
for (p = (void *)((unsigned long)vaddr & ~clflush_mask);
p < vend; p += x86_clflush_size)
clwb(p);
}
/*
* copy_from_iter_nocache() on x86 only uses non-temporal stores for iovec
* iterators, so for other types (bvec & kvec) we must do a cache write-back.
*/
static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
{
return iter_is_iovec(i) == false;
}
/**
* arch_copy_from_iter_pmem - copy data from an iterator to PMEM
* @addr: PMEM destination address
* @bytes: number of bytes to copy
* @i: iterator with source data
*
* Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
* This function requires explicit ordering with an arch_wmb_pmem() call.
*/
static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
struct iov_iter *i)
{
void *vaddr = (void __force *)addr;
size_t len;
/* TODO: skip the write-back by always using non-temporal stores */
len = copy_from_iter_nocache(vaddr, bytes, i);
if (__iter_needs_pmem_wb(i))
__arch_wb_cache_pmem(vaddr, bytes);
return len;
}
/**
* arch_clear_pmem - zero a PMEM memory range
* @addr: virtual start address
* @size: number of bytes to zero
*
* Write zeros into the memory range starting at 'addr' for 'size' bytes.
* This function requires explicit ordering with an arch_wmb_pmem() call.
*/
static inline void arch_clear_pmem(void __pmem *addr, size_t size)
{
void *vaddr = (void __force *)addr;
/* TODO: implement the zeroing via non-temporal writes */
if (size == PAGE_SIZE && ((unsigned long)vaddr & ~PAGE_MASK) == 0)
clear_page(vaddr);
else
memset(vaddr, 0, size);
__arch_wb_cache_pmem(vaddr, size);
}
static inline bool arch_has_wmb_pmem(void)
{
#ifdef CONFIG_X86_64
/*
* We require that wmb() be an 'sfence', that is only guaranteed on
* 64-bit builds
*/
return static_cpu_has(X86_FEATURE_PCOMMIT);
#else
return false;
#endif
}
#endif /* CONFIG_ARCH_HAS_PMEM_API */
#endif /* __ASM_X86_PMEM_H__ */
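The arch_* helpers in the new header above are not called directly by drivers; they are reached through the memcpy_to_pmem()/wmb_pmem() wrappers in include/linux/pmem.h. A minimal sketch of the intended calling pattern, assuming a __pmem mapping has already been obtained (log_append and its arguments are hypothetical):

	#include <linux/pmem.h>

	/* Hypothetical: copy one record into persistent memory and make it durable. */
	static void log_append(void __pmem *slot, const void *rec, size_t len)
	{
		memcpy_to_pmem(slot, rec, len);	/* non-temporal copy toward media */
		wmb_pmem();			/* drain CPU/memory-controller write buffers */
	}

Batching several memcpy_to_pmem() calls under a single wmb_pmem() is the pattern the fs/dax.c changes later in this diff rely on.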
@@ -57,6 +57,7 @@ static inline void __iomem *ioremap_cache(unsigned long offset,
 	else
 		BUG();
 }
+#define ioremap_cache ioremap_cache

 #define ioremap_wc ioremap_nocache
 #define ioremap_wt ioremap_nocache
......
@@ -410,6 +410,7 @@ config ACPI_NFIT
 	tristate "ACPI NVDIMM Firmware Interface Table (NFIT)"
 	depends on PHYS_ADDR_T_64BIT
 	depends on BLK_DEV
+	depends on ARCH_HAS_MMIO_FLUSH
 	select LIBNVDIMM
 	help
 	  Infrastructure to probe ACPI 6 compliant platforms for
......
@@ -1017,7 +1017,7 @@ static u64 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
 	if (mmio->num_lines)
 		offset = to_interleave_offset(offset, mmio);

-	return readq(mmio->base + offset);
+	return readq(mmio->addr.base + offset);
 }

 static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
@@ -1042,11 +1042,11 @@ static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
 	if (mmio->num_lines)
 		offset = to_interleave_offset(offset, mmio);

-	writeq(cmd, mmio->base + offset);
+	writeq(cmd, mmio->addr.base + offset);
 	wmb_blk(nfit_blk);

 	if (nfit_blk->dimm_flags & ND_BLK_DCR_LATCH)
-		readq(mmio->base + offset);
+		readq(mmio->addr.base + offset);
 }

 static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
@@ -1078,11 +1078,16 @@ static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
 		}

 		if (rw)
-			memcpy_to_pmem(mmio->aperture + offset,
+			memcpy_to_pmem(mmio->addr.aperture + offset,
 					iobuf + copied, c);
-		else
+		else {
+			if (nfit_blk->dimm_flags & ND_BLK_READ_FLUSH)
+				mmio_flush_range((void __force *)
+					mmio->addr.aperture + offset, c);
+
 			memcpy_from_pmem(iobuf + copied,
-					mmio->aperture + offset, c);
+					mmio->addr.aperture + offset, c);
+		}

 		copied += c;
 		len -= c;
@@ -1129,7 +1134,10 @@ static void nfit_spa_mapping_release(struct kref *kref)
 	WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
 	dev_dbg(acpi_desc->dev, "%s: SPA%d\n", __func__, spa->range_index);
-	iounmap(spa_map->iomem);
+	if (spa_map->type == SPA_MAP_APERTURE)
+		memunmap((void __force *)spa_map->addr.aperture);
+	else
+		iounmap(spa_map->addr.base);
 	release_mem_region(spa->address, spa->length);
 	list_del(&spa_map->list);
 	kfree(spa_map);
@@ -1175,7 +1183,7 @@ static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
 	spa_map = find_spa_mapping(acpi_desc, spa);
 	if (spa_map) {
 		kref_get(&spa_map->kref);
-		return spa_map->iomem;
+		return spa_map->addr.base;
 	}

 	spa_map = kzalloc(sizeof(*spa_map), GFP_KERNEL);
@@ -1191,20 +1199,19 @@ static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
 	if (!res)
 		goto err_mem;

-	if (type == SPA_MAP_APERTURE) {
-		/*
-		 * TODO: memremap_pmem() support, but that requires cache
-		 * flushing when the aperture is moved.
-		 */
-		spa_map->iomem = ioremap_wc(start, n);
-	} else
-		spa_map->iomem = ioremap_nocache(start, n);
+	spa_map->type = type;
+	if (type == SPA_MAP_APERTURE)
+		spa_map->addr.aperture = (void __pmem *)memremap(start, n,
+							ARCH_MEMREMAP_PMEM);
+	else
+		spa_map->addr.base = ioremap_nocache(start, n);

-	if (!spa_map->iomem)
+	if (!spa_map->addr.base)
 		goto err_map;

 	list_add_tail(&spa_map->list, &acpi_desc->spa_maps);
-	return spa_map->iomem;
+	return spa_map->addr.base;

  err_map:
 	release_mem_region(start, n);
@@ -1267,7 +1274,7 @@ static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
 		nfit_blk->dimm_flags = flags.flags;
 	else if (rc == -ENOTTY) {
 		/* fall back to a conservative default */
-		nfit_blk->dimm_flags = ND_BLK_DCR_LATCH;
+		nfit_blk->dimm_flags = ND_BLK_DCR_LATCH | ND_BLK_READ_FLUSH;
 		rc = 0;
 	} else
 		rc = -ENXIO;
@@ -1307,9 +1314,9 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
 	/* map block aperture memory */
 	nfit_blk->bdw_offset = nfit_mem->bdw->offset;
 	mmio = &nfit_blk->mmio[BDW];
-	mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw,
+	mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw,
 			SPA_MAP_APERTURE);
-	if (!mmio->base) {
+	if (!mmio->addr.base) {
 		dev_dbg(dev, "%s: %s failed to map bdw\n", __func__,
 				nvdimm_name(nvdimm));
 		return -ENOMEM;
@@ -1330,9 +1337,9 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
 	nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
 	nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
 	mmio = &nfit_blk->mmio[DCR];
-	mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr,
+	mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr,
 			SPA_MAP_CONTROL);
-	if (!mmio->base) {
+	if (!mmio->addr.base) {
 		dev_dbg(dev, "%s: %s failed to map dcr\n", __func__,
 				nvdimm_name(nvdimm));
 		return -ENOMEM;
@@ -1399,7 +1406,7 @@ static void acpi_nfit_blk_region_disable(struct nvdimm_bus *nvdimm_bus,
 	for (i = 0; i < 2; i++) {
 		struct nfit_blk_mmio *mmio = &nfit_blk->mmio[i];

-		if (mmio->base)
+		if (mmio->addr.base)
 			nfit_spa_unmap(acpi_desc, mmio->spa);
 	}
 	nd_blk_region_set_provider_data(ndbr, NULL);
......
@@ -41,6 +41,7 @@ enum nfit_uuids {
 };

 enum {
+	ND_BLK_READ_FLUSH = 1,
 	ND_BLK_DCR_LATCH = 2,
 };

@@ -117,12 +118,16 @@ enum nd_blk_mmio_selector {
 	DCR,
 };

+struct nd_blk_addr {
+	union {
+		void __iomem *base;
+		void __pmem *aperture;
+	};
+};
+
 struct nfit_blk {
 	struct nfit_blk_mmio {
-		union {
-			void __iomem *base;
-			void __pmem *aperture;
-		};
+		struct nd_blk_addr addr;
 		u64 size;
 		u64 base_offset;
 		u32 line_size;
@@ -149,7 +154,8 @@ struct nfit_spa_mapping {
 	struct acpi_nfit_system_address *spa;
 	struct list_head list;
 	struct kref kref;
-	void __iomem *iomem;
+	enum spa_map_type type;
+	struct nd_blk_addr addr;
 };

 static inline struct nfit_spa_mapping *to_spa_map(struct kref *kref)
......
@@ -371,7 +371,7 @@ static int brd_rw_page(struct block_device *bdev, sector_t sector,

 #ifdef CONFIG_BLK_DEV_RAM_DAX
 static long brd_direct_access(struct block_device *bdev, sector_t sector,
-			void **kaddr, unsigned long *pfn, long size)
+			void __pmem **kaddr, unsigned long *pfn, long size)
 {
 	struct brd_device *brd = bdev->bd_disk->private_data;
 	struct page *page;
@@ -381,7 +381,7 @@ static long brd_direct_access(struct block_device *bdev, sector_t sector,
 	page = brd_insert_page(brd, sector);
 	if (!page)
 		return -ENOSPC;
-	*kaddr = page_address(page);
+	*kaddr = (void __pmem *)page_address(page);
 	*pfn = page_to_pfn(page);

 	/*
......
@@ -38,7 +38,7 @@ typedef struct icn_cdef {
 #include <linux/errno.h>
 #include <linux/fs.h>
 #include <linux/major.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/signal.h>
 #include <linux/slab.h>
......
@@ -41,7 +41,7 @@
 #include <linux/fs.h>
 #include <linux/ioctl.h>
 #include <linux/init.h>
-#include <asm/io.h>
+#include <linux/io.h>

 #include <linux/mtd/mtd.h>
......
@@ -24,7 +24,7 @@
 #include <linux/rslib.h>
 #include <linux/moduleparam.h>
 #include <linux/slab.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/nand.h>
......
@@ -18,7 +18,7 @@
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/onenand.h>
 #include <linux/mtd/partitions.h>
-#include <asm/io.h>
+#include <linux/io.h>

 /*
  * Note: Driver name and platform data format have been updated!
......
@@ -92,7 +92,7 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
 }

 static long pmem_direct_access(struct block_device *bdev, sector_t sector,
-		      void **kaddr, unsigned long *pfn, long size)
+		      void __pmem **kaddr, unsigned long *pfn, long size)
 {
 	struct pmem_device *pmem = bdev->bd_disk->private_data;
 	size_t offset = sector << 9;
@@ -101,7 +101,7 @@ static long pmem_direct_access(struct block_device *bdev, sector_t sector,
 		return -ENODEV;

 	/* FIXME convert DAX to comprehend that this mapping has a lifetime */
-	*kaddr = (void __force *) pmem->virt_addr + offset;
+	*kaddr = pmem->virt_addr + offset;
 	*pfn = (pmem->phys_addr + offset) >> PAGE_SHIFT;

 	return pmem->size - offset;
@@ -119,7 +119,7 @@ static struct pmem_device *pmem_alloc(struct device *dev,
 {
 	struct pmem_device *pmem;

-	pmem = kzalloc(sizeof(*pmem), GFP_KERNEL);
+	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
 	if (!pmem)
 		return ERR_PTR(-ENOMEM);

@@ -128,19 +128,16 @@ static struct pmem_device *pmem_alloc(struct device *dev,
 	if (!arch_has_pmem_api())
 		dev_warn(dev, "unable to guarantee persistence of writes\n");

-	if (!request_mem_region(pmem->phys_addr, pmem->size, dev_name(dev))) {
+	if (!devm_request_mem_region(dev, pmem->phys_addr, pmem->size,
+			dev_name(dev))) {
 		dev_warn(dev, "could not reserve region [0x%pa:0x%zx]\n",
 				&pmem->phys_addr, pmem->size);
-		kfree(pmem);
 		return ERR_PTR(-EBUSY);
 	}

-	pmem->virt_addr = memremap_pmem(pmem->phys_addr, pmem->size);
-	if (!pmem->virt_addr) {
-		release_mem_region(pmem->phys_addr, pmem->size);
-		kfree(pmem);
+	pmem->virt_addr = memremap_pmem(dev, pmem->phys_addr, pmem->size);
+	if (!pmem->virt_addr)
 		return ERR_PTR(-ENXIO);
-	}

 	return pmem;
 }
@@ -210,20 +207,12 @@ static int pmem_rw_bytes(struct nd_namespace_common *ndns,
 	return 0;
 }

-static void pmem_free(struct pmem_device *pmem)
-{
-	memunmap_pmem(pmem->virt_addr);
-	release_mem_region(pmem->phys_addr, pmem->size);
-	kfree(pmem);
-}
-
 static int nd_pmem_probe(struct device *dev)
 {
 	struct nd_region *nd_region = to_nd_region(dev->parent);
 	struct nd_namespace_common *ndns;
 	struct nd_namespace_io *nsio;
 	struct pmem_device *pmem;
-	int rc;

 	ndns = nvdimm_namespace_common_probe(dev);
 	if (IS_ERR(ndns))
@@ -236,16 +225,14 @@ static int nd_pmem_probe(struct device *dev)
 	dev_set_drvdata(dev, pmem);
 	ndns->rw_bytes = pmem_rw_bytes;
 	if (is_nd_btt(dev))
-		rc = nvdimm_namespace_attach_btt(ndns);
-	else if (nd_btt_probe(ndns, pmem) == 0) {
+		return nvdimm_namespace_attach_btt(ndns);
+
+	if (nd_btt_probe(ndns, pmem) == 0)
 		/* we'll come back as btt-pmem */
-		rc = -ENXIO;
-	} else
-		rc = pmem_attach_disk(ndns, pmem);
-	if (rc)
-		pmem_free(pmem);
-	return rc;
+		return -ENXIO;
+
+	return pmem_attach_disk(ndns, pmem);
 }

 static int nd_pmem_remove(struct device *dev)
@@ -256,7 +243,6 @@ static int nd_pmem_remove(struct device *dev)
 		nvdimm_namespace_detach_btt(to_nd_btt(dev)->ndns);
 	else
 		pmem_detach_disk(pmem);
-	pmem_free(pmem);

 	return 0;
 }
......
@@ -326,8 +326,7 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
 		struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
 		dev->rom_base_reg = rom;
 		res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
-				IORESOURCE_READONLY | IORESOURCE_CACHEABLE |
-				IORESOURCE_SIZEALIGN;
+				IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
 		__pci_read_base(dev, pci_bar_mem32, res, rom);
 	}
 }
......
@@ -97,8 +97,6 @@ static int pnp_assign_mem(struct pnp_dev *dev, struct pnp_mem *rule, int idx)
 	/* ??? rule->flags restricted to 8 bits, all tests bogus ??? */
 	if (!(rule->flags & IORESOURCE_MEM_WRITEABLE))
 		res->flags |= IORESOURCE_READONLY;
-	if (rule->flags & IORESOURCE_MEM_CACHEABLE)
-		res->flags |= IORESOURCE_CACHEABLE;
 	if (rule->flags & IORESOURCE_MEM_RANGELENGTH)
 		res->flags |= IORESOURCE_RANGELENGTH;
 	if (rule->flags & IORESOURCE_MEM_SHADOWABLE)
......
@@ -29,7 +29,7 @@ static int dcssblk_open(struct block_device *bdev, fmode_t mode);
 static void dcssblk_release(struct gendisk *disk, fmode_t mode);
 static void dcssblk_make_request(struct request_queue *q, struct bio *bio);
 static long dcssblk_direct_access(struct block_device *bdev, sector_t secnum,
-				 void **kaddr, unsigned long *pfn, long size);
+				 void __pmem **kaddr, unsigned long *pfn, long size);

 static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0";

@@ -879,18 +879,20 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
 static long
 dcssblk_direct_access (struct block_device *bdev, sector_t secnum,
-			void **kaddr, unsigned long *pfn, long size)
+			void __pmem **kaddr, unsigned long *pfn, long size)
 {
 	struct dcssblk_dev_info *dev_info;
 	unsigned long offset, dev_sz;
+	void *addr;

 	dev_info = bdev->bd_disk->private_data;
 	if (!dev_info)
 		return -ENODEV;
 	dev_sz = dev_info->end - dev_info->start;
 	offset = secnum * 512;
-	*kaddr = (void *) (dev_info->start + offset);
-	*pfn = virt_to_phys(*kaddr) >> PAGE_SHIFT;
+	addr = (void *) (dev_info->start + offset);
+	*pfn = virt_to_phys(addr) >> PAGE_SHIFT;
+	*kaddr = (void __pmem *) addr;

 	return dev_sz - offset;
 }
......
@@ -100,12 +100,7 @@ static int asd_map_memio(struct asd_ha_struct *asd_ha)
 				   pci_name(asd_ha->pcidev));
 			goto Err;
 		}
-		if (io_handle->flags & IORESOURCE_CACHEABLE)
-			io_handle->addr = ioremap(io_handle->start,
-						  io_handle->len);
-		else
-			io_handle->addr = ioremap_nocache(io_handle->start,
-							  io_handle->len);
+		io_handle->addr = ioremap(io_handle->start, io_handle->len);
 		if (!io_handle->addr) {
 			asd_printk("couldn't map MBAR%d of %s\n", i==0?0:1,
 				   pci_name(asd_ha->pcidev));
......
@@ -259,10 +259,7 @@ static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
 		addr = (unsigned long)pci_resource_start(pdev, 0);
 		range = pci_resource_len(pdev, 0);
 		flags = pci_resource_flags(pdev, 0);
-		if (flags & IORESOURCE_CACHEABLE)
-			mem_base0 = ioremap(addr, range);
-		else
-			mem_base0 = ioremap_nocache(addr, range);
+		mem_base0 = ioremap(addr, range);
 		if (!mem_base0) {
 			pr_notice("arcmsr%d: memory mapping region fail\n",
 				acb->host->host_no);
......
@@ -324,13 +324,9 @@ int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex)
 			goto err_out;

 		res_flag_ex = pci_resource_flags(pdev, bar_ex);
-		if (res_flag_ex & IORESOURCE_MEM) {
-			if (res_flag_ex & IORESOURCE_CACHEABLE)
-				mvi->regs_ex = ioremap(res_start, res_len);
-			else
-				mvi->regs_ex = ioremap_nocache(res_start,
-						res_len);
-		} else
+		if (res_flag_ex & IORESOURCE_MEM)
+			mvi->regs_ex = ioremap(res_start, res_len);
+		else
 			mvi->regs_ex = (void *)res_start;
 		if (!mvi->regs_ex)
 			goto err_out;
@@ -342,10 +338,7 @@ int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex)
 		goto err_out;

 	res_flag = pci_resource_flags(pdev, bar);
-	if (res_flag & IORESOURCE_CACHEABLE)
-		mvi->regs = ioremap(res_start, res_len);
-	else
-		mvi->regs = ioremap_nocache(res_start, res_len);
+	mvi->regs = ioremap(res_start, res_len);

 	if (!mvi->regs) {
 		if (mvi->regs_ex && (res_flag_ex & IORESOURCE_MEM))
......
@@ -12,9 +12,9 @@
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
 #include <linux/interrupt.h>
+#include <linux/io.h>

 #include <asm/sun3x.h>
-#include <asm/io.h>
 #include <asm/dma.h>
 #include <asm/dvma.h>
......
@@ -28,6 +28,7 @@
  */

 #include <linux/module.h>
+#include <linux/io.h>
 #include "../comedidev.h"

 /*
......
@@ -21,6 +21,7 @@
  */

 #include <linux/uuid.h>
+#include <linux/io.h>

 #include "version.h"
 #include "visorbus.h"
@@ -36,7 +37,7 @@ static const uuid_le spar_video_guid = SPAR_CONSOLEVIDEO_CHANNEL_PROTOCOL_GUID;
 struct visorchannel {
 	u64 physaddr;
 	ulong nbytes;
-	void __iomem *mapped;
+	void *mapped;
 	bool requested;
 	struct channel_header chan_hdr;
 	uuid_le guid;
@@ -93,7 +94,7 @@ visorchannel_create_guts(u64 physaddr, unsigned long channel_bytes,
 		}
 	}

-	channel->mapped = ioremap_cache(physaddr, size);
+	channel->mapped = memremap(physaddr, size, MEMREMAP_WB);
 	if (!channel->mapped) {
 		release_mem_region(physaddr, size);
 		goto cleanup;
@@ -113,7 +114,7 @@ visorchannel_create_guts(u64 physaddr, unsigned long channel_bytes,
 	if (uuid_le_cmp(guid, NULL_UUID_LE) == 0)
 		guid = channel->chan_hdr.chtype;

-	iounmap(channel->mapped);
+	memunmap(channel->mapped);
 	if (channel->requested)
 		release_mem_region(channel->physaddr, channel->nbytes);
 	channel->mapped = NULL;
@@ -126,7 +127,8 @@ visorchannel_create_guts(u64 physaddr, unsigned long channel_bytes,
 		}
 	}

-	channel->mapped = ioremap_cache(channel->physaddr, channel_bytes);
+	channel->mapped = memremap(channel->physaddr, channel_bytes,
+			MEMREMAP_WB);
 	if (!channel->mapped) {
 		release_mem_region(channel->physaddr, channel_bytes);
 		goto cleanup;
@@ -167,7 +169,7 @@ visorchannel_destroy(struct visorchannel *channel)
 	if (!channel)
 		return;
 	if (channel->mapped) {
-		iounmap(channel->mapped);
+		memunmap(channel->mapped);
 		if (channel->requested)
 			release_mem_region(channel->physaddr, channel->nbytes);
 	}
@@ -241,7 +243,7 @@ visorchannel_read(struct visorchannel *channel, ulong offset,
 	if (offset + nbytes > channel->nbytes)
 		return -EIO;

-	memcpy_fromio(local, channel->mapped + offset, nbytes);
+	memcpy(local, channel->mapped + offset, nbytes);

 	return 0;
 }
@@ -262,7 +264,7 @@ visorchannel_write(struct visorchannel *channel, ulong offset,
 		memcpy(&channel->chan_hdr + offset, local, copy_size);
 	}

-	memcpy_toio(channel->mapped + offset, local, nbytes);
+	memcpy(channel->mapped + offset, local, nbytes);

 	return 0;
 }
......
@@ -119,7 +119,7 @@ static struct visorchannel *controlvm_channel;

 /* Manages the request payload in the controlvm channel */
 struct visor_controlvm_payload_info {
-	u8 __iomem *ptr;	/* pointer to base address of payload pool */
+	u8 *ptr;		/* pointer to base address of payload pool */
 	u64 offset;		/* offset from beginning of controlvm
 				 * channel to beginning of payload * pool */
 	u32 bytes;		/* number of bytes in payload pool */
@@ -401,21 +401,22 @@ parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
 		p = __va((unsigned long) (addr));
 		memcpy(ctx->data, p, bytes);
 	} else {
-		void __iomem *mapping;
+		void *mapping;

 		if (!request_mem_region(addr, bytes, "visorchipset")) {
 			rc = NULL;
 			goto cleanup;
 		}

-		mapping = ioremap_cache(addr, bytes);
+		mapping = memremap(addr, bytes, MEMREMAP_WB);
 		if (!mapping) {
 			release_mem_region(addr, bytes);
 			rc = NULL;
 			goto cleanup;
 		}
-		memcpy_fromio(ctx->data, mapping, bytes);
+		memcpy(ctx->data, mapping, bytes);
 		release_mem_region(addr, bytes);
+		memunmap(mapping);
 	}

 	ctx->byte_stream = true;
@@ -1327,7 +1328,7 @@ static int
 initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
 				  struct visor_controlvm_payload_info *info)
 {
-	u8 __iomem *payload = NULL;
+	u8 *payload = NULL;
 	int rc = CONTROLVM_RESP_SUCCESS;

 	if (!info) {
@@ -1339,7 +1340,7 @@ initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
 		rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
 		goto cleanup;
 	}
-	payload = ioremap_cache(phys_addr + offset, bytes);
+	payload = memremap(phys_addr + offset, bytes, MEMREMAP_WB);
 	if (!payload) {
 		rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
 		goto cleanup;
@@ -1352,7 +1353,7 @@ initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
 cleanup:
 	if (rc < 0) {
 		if (payload) {
-			iounmap(payload);
+			memunmap(payload);
 			payload = NULL;
 		}
 	}
@@ -1363,7 +1364,7 @@ static void
 destroy_controlvm_payload_info(struct visor_controlvm_payload_info *info)
 {
 	if (info->ptr) {
-		iounmap(info->ptr);
+		memunmap(info->ptr);
 		info->ptr = NULL;
 	}
 	memset(info, 0, sizeof(struct visor_controlvm_payload_info));
......
@@ -38,11 +38,11 @@
 #include <linux/slab.h>
 #include <linux/uaccess.h>
 #include <linux/pm_runtime.h>
+#include <linux/io.h>
 #ifdef CONFIG_SPARC
 #include <linux/sunserialcore.h>
 #endif

-#include <asm/io.h>
 #include <asm/irq.h>

 #include "8250.h"
......
@@ -325,7 +325,6 @@ static int ocfb_probe(struct platform_device *pdev)
 		dev_err(&pdev->dev, "I/O resource request failed\n");
 		return -ENXIO;
 	}
-	res->flags &= ~IORESOURCE_CACHEABLE;
 	fbdev->regs = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(fbdev->regs))
 		return PTR_ERR(fbdev->regs);
......
@@ -32,8 +32,7 @@
 #include <linux/spinlock_types.h>
 #include <linux/spinlock.h>
 #include <linux/slab.h>
-
-#include <asm/io.h>
+#include <linux/io.h>

 #include <video/s1d13xxxfb.h>
......
@@ -64,6 +64,7 @@
 #include <linux/fb.h>
 #include <linux/init.h>
 #include <linux/ioport.h>
+#include <linux/io.h>

 #include <asm/grfioctl.h>	/* for HP-UX compatibility */
 #include <asm/uaccess.h>
......
@@ -441,7 +441,7 @@ EXPORT_SYMBOL_GPL(bdev_write_page);
  *  accessible at this address.
  */
 long bdev_direct_access(struct block_device *bdev, sector_t sector,
-			void **addr, unsigned long *pfn, long size)
+			void __pmem **addr, unsigned long *pfn, long size)
 {
 	long avail;
 	const struct block_device_operations *ops = bdev->bd_disk->fops;
......
@@ -23,6 +23,7 @@
 #include <linux/memcontrol.h>
 #include <linux/mm.h>
 #include <linux/mutex.h>
+#include <linux/pmem.h>
 #include <linux/sched.h>
 #include <linux/uio.h>
 #include <linux/vmstat.h>
@@ -34,7 +35,7 @@ int dax_clear_blocks(struct inode *inode, sector_t block, long size)
 	might_sleep();
 	do {
-		void *addr;
+		void __pmem *addr;
 		unsigned long pfn;
 		long count;
@@ -46,10 +47,7 @@ int dax_clear_blocks(struct inode *inode, sector_t block, long size)
 			unsigned pgsz = PAGE_SIZE - offset_in_page(addr);
 			if (pgsz > count)
 				pgsz = count;
-			if (pgsz < PAGE_SIZE)
-				memset(addr, 0, pgsz);
-			else
-				clear_page(addr);
+			clear_pmem(addr, pgsz);
 			addr += pgsz;
 			size -= pgsz;
 			count -= pgsz;
@@ -59,26 +57,29 @@ int dax_clear_blocks(struct inode *inode, sector_t block, long size)
 		}
 	} while (size);

+	wmb_pmem();
 	return 0;
 }
 EXPORT_SYMBOL_GPL(dax_clear_blocks);

-static long dax_get_addr(struct buffer_head *bh, void **addr, unsigned blkbits)
+static long dax_get_addr(struct buffer_head *bh, void __pmem **addr,
+		unsigned blkbits)
 {
 	unsigned long pfn;
 	sector_t sector = bh->b_blocknr << (blkbits - 9);
 	return bdev_direct_access(bh->b_bdev, sector, addr, &pfn, bh->b_size);
 }

-static void dax_new_buf(void *addr, unsigned size, unsigned first, loff_t pos,
-			loff_t end)
+/* the clear_pmem() calls are ordered by a wmb_pmem() in the caller */
+static void dax_new_buf(void __pmem *addr, unsigned size, unsigned first,
+		loff_t pos, loff_t end)
 {
 	loff_t final = end - pos + first; /* The final byte of the buffer */

 	if (first > 0)
-		memset(addr, 0, first);
+		clear_pmem(addr, first);
 	if (final < size)
-		memset(addr + final, 0, size - final);
+		clear_pmem(addr + final, size - final);
 }

 static bool buffer_written(struct buffer_head *bh)
@@ -106,14 +107,15 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
 	loff_t pos = start;
 	loff_t max = start;
 	loff_t bh_max = start;
-	void *addr;
+	void __pmem *addr;
 	bool hole = false;
+	bool need_wmb = false;

 	if (iov_iter_rw(iter) != WRITE)
 		end = min(end, i_size_read(inode));

 	while (pos < end) {
-		unsigned len;
+		size_t len;
 		if (pos == max) {
 			unsigned blkbits = inode->i_blkbits;
 			sector_t block = pos >> blkbits;
@@ -145,19 +147,23 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
 				retval = dax_get_addr(bh, &addr, blkbits);
 				if (retval < 0)
 					break;
-				if (buffer_unwritten(bh) || buffer_new(bh))
+				if (buffer_unwritten(bh) || buffer_new(bh)) {
 					dax_new_buf(addr, retval, first, pos,
 									end);
+					need_wmb = true;
+				}
 				addr += first;
 				size = retval - first;
 			}
 			max = min(pos + size, end);
 		}

-		if (iov_iter_rw(iter) == WRITE)
-			len = copy_from_iter_nocache(addr, max - pos, iter);
-		else if (!hole)
-			len = copy_to_iter(addr, max - pos, iter);
+		if (iov_iter_rw(iter) == WRITE) {
+			len = copy_from_iter_pmem(addr, max - pos, iter);
+			need_wmb = true;
+		} else if (!hole)
+			len = copy_to_iter((void __force *)addr, max - pos,
+					iter);
 		else
 			len = iov_iter_zero(max - pos, iter);

@@ -168,6 +174,9 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
 		addr += len;
 	}

+	if (need_wmb)
+		wmb_pmem();
+
 	return (pos == start) ? retval : pos - start;
 }
@@ -260,11 +269,13 @@ static int dax_load_hole(struct address_space *mapping, struct page *page,
 static int copy_user_bh(struct page *to, struct buffer_head *bh,
 			unsigned blkbits, unsigned long vaddr)
 {
-	void *vfrom, *vto;
+	void __pmem *vfrom;
+	void *vto;

 	if (dax_get_addr(bh, &vfrom, blkbits) < 0)
 		return -EIO;
 	vto = kmap_atomic(to);
-	copy_user_page(vto, vfrom, vaddr, to);
+	copy_user_page(vto, (void __force *)vfrom, vaddr, to);
 	kunmap_atomic(vto);
 	return 0;
 }
@@ -275,7 +286,7 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
 	struct address_space *mapping = inode->i_mapping;
 	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);
 	unsigned long vaddr = (unsigned long)vmf->virtual_address;
-	void *addr;
+	void __pmem *addr;
 	unsigned long pfn;
 	pgoff_t size;
 	int error;
@@ -303,8 +314,10 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
 		goto out;
 	}

-	if (buffer_unwritten(bh) || buffer_new(bh))
-		clear_page(addr);
+	if (buffer_unwritten(bh) || buffer_new(bh)) {
+		clear_pmem(addr, PAGE_SIZE);
+		wmb_pmem();
+	}

 	error = vm_insert_mixed(vma, vaddr, pfn);
@@ -538,11 +551,12 @@ int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
 		if (err < 0)
 			return err;
 		if (buffer_written(&bh)) {
-			void *addr;
+			void __pmem *addr;
 			err = dax_get_addr(&bh, &addr, inode->i_blkbits);
 			if (err < 0)
 				return err;
-			memset(addr + offset, 0, length);
+			clear_pmem(addr + offset, length);
+			wmb_pmem();
 		}

 		return 0;
......
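The dax_io() rework above shows the intended batching contract: any number of clear_pmem()/copy_from_iter_pmem() stores may be issued, followed by a single wmb_pmem() that makes them all durable. A hedged, self-contained illustration of that contract (fill_extent and its arguments are hypothetical):

	#include <linux/pmem.h>
	#include <linux/uio.h>

	/* Hypothetical: zero a leading hole, copy user data after it, then fence once. */
	static size_t fill_extent(void __pmem *dst, size_t hole, size_t len,
			struct iov_iter *iter)
	{
		size_t copied;

		clear_pmem(dst, hole);
		copied = copy_from_iter_pmem(dst + hole, len, iter);
		wmb_pmem();		/* one fence orders both groups of stores */
		return copied;
	}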
@@ -1555,8 +1555,8 @@ struct block_device_operations {
 	int (*rw_page)(struct block_device *, sector_t, struct page *, int rw);
 	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
 	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
-	long (*direct_access)(struct block_device *, sector_t,
-					void **, unsigned long *pfn, long size);
+	long (*direct_access)(struct block_device *, sector_t, void __pmem **,
+			unsigned long *pfn, long size);
 	unsigned int (*check_events) (struct gendisk *disk,
 				      unsigned int clearing);
 	/* ->media_changed() is DEPRECATED, use ->check_events() instead */
@@ -1574,8 +1574,8 @@ extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
 extern int bdev_read_page(struct block_device *, sector_t, struct page *);
 extern int bdev_write_page(struct block_device *, sector_t, struct page *,
 						struct writeback_control *);
-extern long bdev_direct_access(struct block_device *, sector_t, void **addr,
-						unsigned long *pfn, long size);
+extern long bdev_direct_access(struct block_device *, sector_t,
+		void __pmem **addr, unsigned long *pfn, long size);
 #else /* CONFIG_BLOCK */
 struct block_device;
......
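With the prototypes above, ->direct_access() now hands back a __pmem-annotated kernel address plus a pfn. A minimal sketch of a conforming implementation for a linearly mapped device (struct foo_dev and its fields are hypothetical; compare pmem_direct_access() earlier in this diff):

	struct foo_dev {			/* hypothetical driver state */
		phys_addr_t	phys_addr;
		void __pmem	*virt_addr;
		size_t		size;
	};

	static long foo_direct_access(struct block_device *bdev, sector_t sector,
			void __pmem **kaddr, unsigned long *pfn, long size)
	{
		struct foo_dev *foo = bdev->bd_disk->private_data;
		size_t offset = sector << 9;

		if (offset >= foo->size)
			return -ERANGE;
		*kaddr = foo->virt_addr + offset;
		*pfn = (foo->phys_addr + offset) >> PAGE_SHIFT;
		return foo->size - offset;	/* bytes available at *kaddr */
	}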
@@ -21,7 +21,7 @@
 #include <linux/types.h>
 #include <linux/slab.h>
 #include <linux/bug.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <asm/page.h>

 /*
......
@@ -80,6 +80,10 @@ int check_signature(const volatile void __iomem *io_addr,
 			const unsigned char *signature, int length);
 void devm_ioremap_release(struct device *dev, void *res);

+void *devm_memremap(struct device *dev, resource_size_t offset,
+		size_t size, unsigned long flags);
+void devm_memunmap(struct device *dev, void *addr);
+
 /*
  * Some systems do not have legacy ISA devices.
  * /dev/port is not a valid interface on these systems.
@@ -121,4 +125,13 @@ static inline int arch_phys_wc_index(int handle)
 #endif
 #endif

+enum {
+	/* See memremap() kernel-doc for usage description... */
+	MEMREMAP_WB = 1 << 0,
+	MEMREMAP_WT = 1 << 1,
+};
+
+void *memremap(resource_size_t offset, size_t size, unsigned long flags);
+void memunmap(void *addr);
+
 #endif /* _LINUX_IO_H */
...@@ -369,7 +369,14 @@ static inline int put_page_unless_one(struct page *page) ...@@ -369,7 +369,14 @@ static inline int put_page_unless_one(struct page *page)
} }
extern int page_is_ram(unsigned long pfn); extern int page_is_ram(unsigned long pfn);
extern int region_is_ram(resource_size_t phys_addr, unsigned long size);
enum {
REGION_INTERSECTS,
REGION_DISJOINT,
REGION_MIXED,
};
int region_intersects(resource_size_t offset, size_t size, const char *type);
/* Support for virtually mapped pages */ /* Support for virtually mapped pages */
struct page *vmalloc_to_page(const void *addr); struct page *vmalloc_to_page(const void *addr);
......
...@@ -27,9 +27,9 @@ ...@@ -27,9 +27,9 @@
#include <linux/string.h> #include <linux/string.h>
#include <linux/bug.h> #include <linux/bug.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/io.h>
#include <asm/unaligned.h> #include <asm/unaligned.h>
#include <asm/io.h>
#include <asm/barrier.h> #include <asm/barrier.h>
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_1 #ifdef CONFIG_MTD_MAP_BANK_WIDTH_1
......
...@@ -14,28 +14,35 @@ ...@@ -14,28 +14,35 @@
#define __PMEM_H__ #define __PMEM_H__
#include <linux/io.h> #include <linux/io.h>
#include <linux/uio.h>
#ifdef CONFIG_ARCH_HAS_PMEM_API #ifdef CONFIG_ARCH_HAS_PMEM_API
#include <asm/cacheflush.h> #include <asm/pmem.h>
#else #else
static inline void arch_wmb_pmem(void) static inline void arch_wmb_pmem(void)
{ {
BUG(); BUG();
} }
static inline bool __arch_has_wmb_pmem(void) static inline bool arch_has_wmb_pmem(void)
{ {
return false; return false;
} }
static inline void __pmem *arch_memremap_pmem(resource_size_t offset, static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
unsigned long size) size_t n)
{ {
return NULL; BUG();
} }
static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src, static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
size_t n) struct iov_iter *i)
{
BUG();
return 0;
}
static inline void arch_clear_pmem(void __pmem *addr, size_t size)
{ {
BUG(); BUG();
} }
...@@ -43,8 +50,8 @@ static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src, ...@@ -43,8 +50,8 @@ static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
/* /*
* Architectures that define ARCH_HAS_PMEM_API must provide * Architectures that define ARCH_HAS_PMEM_API must provide
* implementations for arch_memremap_pmem(), arch_memcpy_to_pmem(), * implementations for arch_memcpy_to_pmem(), arch_wmb_pmem(),
* arch_wmb_pmem(), and __arch_has_wmb_pmem(). * arch_copy_from_iter_pmem(), arch_clear_pmem() and arch_has_wmb_pmem().
*/ */
static inline void memcpy_from_pmem(void *dst, void __pmem const *src, size_t size) static inline void memcpy_from_pmem(void *dst, void __pmem const *src, size_t size)
...@@ -52,13 +59,13 @@ static inline void memcpy_from_pmem(void *dst, void __pmem const *src, size_t si ...@@ -52,13 +59,13 @@ static inline void memcpy_from_pmem(void *dst, void __pmem const *src, size_t si
memcpy(dst, (void __force const *) src, size); memcpy(dst, (void __force const *) src, size);
} }
static inline void memunmap_pmem(void __pmem *addr) static inline void memunmap_pmem(struct device *dev, void __pmem *addr)
{ {
iounmap((void __force __iomem *) addr); devm_memunmap(dev, (void __force *) addr);
} }
/** /**
* arch_has_wmb_pmem - true if wmb_pmem() ensures durability * arch_has_pmem_api - true if wmb_pmem() ensures durability
* *
* For a given cpu implementation within an architecture it is possible * For a given cpu implementation within an architecture it is possible
* that wmb_pmem() resolves to a nop. In the case this returns * that wmb_pmem() resolves to a nop. In the case this returns
...@@ -66,13 +73,6 @@ static inline void memunmap_pmem(void __pmem *addr) ...@@ -66,13 +73,6 @@ static inline void memunmap_pmem(void __pmem *addr)
* fall back to a different data consistency model, or otherwise notify * fall back to a different data consistency model, or otherwise notify
* the user. * the user.
*/ */
static inline bool arch_has_wmb_pmem(void)
{
if (IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
return __arch_has_wmb_pmem();
return false;
}
static inline bool arch_has_pmem_api(void) static inline bool arch_has_pmem_api(void)
{ {
return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && arch_has_wmb_pmem(); return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && arch_has_wmb_pmem();
...@@ -85,16 +85,24 @@ static inline bool arch_has_pmem_api(void) ...@@ -85,16 +85,24 @@ static inline bool arch_has_pmem_api(void)
* default_memremap_pmem + default_memcpy_to_pmem is sufficient for * default_memremap_pmem + default_memcpy_to_pmem is sufficient for
* making data durable relative to i/o completion. * making data durable relative to i/o completion.
*/ */
static void default_memcpy_to_pmem(void __pmem *dst, const void *src, static inline void default_memcpy_to_pmem(void __pmem *dst, const void *src,
size_t size) size_t size)
{ {
memcpy((void __force *) dst, src, size); memcpy((void __force *) dst, src, size);
} }
static void __pmem *default_memremap_pmem(resource_size_t offset, static inline size_t default_copy_from_iter_pmem(void __pmem *addr,
unsigned long size) size_t bytes, struct iov_iter *i)
{ {
return (void __pmem __force *)ioremap_wt(offset, size); return copy_from_iter_nocache((void __force *)addr, bytes, i);
}
static inline void default_clear_pmem(void __pmem *addr, size_t size)
{
if (size == PAGE_SIZE && ((unsigned long)addr & ~PAGE_MASK) == 0)
clear_page((void __force *)addr);
else
memset((void __force *)addr, 0, size);
} }
/** /**
...@@ -109,12 +117,16 @@ static void __pmem *default_memremap_pmem(resource_size_t offset, ...@@ -109,12 +117,16 @@ static void __pmem *default_memremap_pmem(resource_size_t offset,
* wmb_pmem() arrange for the data to be written through the * wmb_pmem() arrange for the data to be written through the
* cache to persistent media. * cache to persistent media.
*/ */
static inline void __pmem *memremap_pmem(resource_size_t offset, static inline void __pmem *memremap_pmem(struct device *dev,
unsigned long size) resource_size_t offset, unsigned long size)
{ {
if (arch_has_pmem_api()) #ifdef ARCH_MEMREMAP_PMEM
return arch_memremap_pmem(offset, size); return (void __pmem *) devm_memremap(dev, offset, size,
return default_memremap_pmem(offset, size); ARCH_MEMREMAP_PMEM);
#else
return (void __pmem *) devm_memremap(dev, offset, size,
MEMREMAP_WT);
#endif
} }
/** /**
...@@ -149,4 +161,37 @@ static inline void wmb_pmem(void) ...@@ -149,4 +161,37 @@ static inline void wmb_pmem(void)
if (arch_has_pmem_api()) if (arch_has_pmem_api())
arch_wmb_pmem(); arch_wmb_pmem();
} }
/**
* copy_from_iter_pmem - copy data from an iterator to PMEM
* @addr: PMEM destination address
* @bytes: number of bytes to copy
* @i: iterator with source data
*
* Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
* This function requires explicit ordering with a wmb_pmem() call.
*/
static inline size_t copy_from_iter_pmem(void __pmem *addr, size_t bytes,
struct iov_iter *i)
{
if (arch_has_pmem_api())
return arch_copy_from_iter_pmem(addr, bytes, i);
return default_copy_from_iter_pmem(addr, bytes, i);
}
/**
* clear_pmem - zero a PMEM memory range
* @addr: virtual start address
* @size: number of bytes to zero
*
* Write zeros into the memory range starting at 'addr' for 'size' bytes.
* This function requires explicit ordering with a wmb_pmem() call.
*/
static inline void clear_pmem(void __pmem *addr, size_t size)
{
if (arch_has_pmem_api())
arch_clear_pmem(addr, size);
else
default_clear_pmem(addr, size);
}
#endif /* __PMEM_H__ */ #endif /* __PMEM_H__ */
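As with memcpy_to_pmem(), neither copy_from_iter_pmem() nor clear_pmem() orders or flushes writes on its own; the caller batches its updates and then issues a single wmb_pmem(). A hedged sketch of that pattern (the function and variable names are illustrative, not taken from this patch):

#include <linux/pmem.h>
#include <linux/uio.h>

/* Sketch: fill a pmem buffer from an iov_iter and zero any shortfall. */
static size_t example_pmem_fill(void __pmem *addr, size_t len,
		struct iov_iter *iter)
{
	size_t copied = copy_from_iter_pmem(addr, len, iter);

	if (copied < len)
		clear_pmem(addr + copied, len - copied);
	wmb_pmem();	/* make the writes above durable */
	return copied;
}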
...@@ -18,7 +18,7 @@ ...@@ -18,7 +18,7 @@
#define __linux_video_vga_h__ #define __linux_video_vga_h__
#include <linux/types.h> #include <linux/types.h>
#include <asm/io.h> #include <linux/io.h>
#include <asm/vga.h> #include <asm/vga.h>
#include <asm/byteorder.h> #include <asm/byteorder.h>
......
...@@ -99,6 +99,8 @@ obj-$(CONFIG_JUMP_LABEL) += jump_label.o ...@@ -99,6 +99,8 @@ obj-$(CONFIG_JUMP_LABEL) += jump_label.o
obj-$(CONFIG_CONTEXT_TRACKING) += context_tracking.o obj-$(CONFIG_CONTEXT_TRACKING) += context_tracking.o
obj-$(CONFIG_TORTURE_TEST) += torture.o obj-$(CONFIG_TORTURE_TEST) += torture.o
obj-$(CONFIG_HAS_IOMEM) += memremap.o
$(obj)/configs.o: $(obj)/config_data.h $(obj)/configs.o: $(obj)/config_data.h
# config_data.h contains the same information as ikconfig.h but gzipped. # config_data.h contains the same information as ikconfig.h but gzipped.
......
/*
* Copyright(c) 2015 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/device.h>
#include <linux/types.h>
#include <linux/io.h>
#include <linux/mm.h>
#ifndef ioremap_cache
/* temporary while we convert existing ioremap_cache users to memremap */
__weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
{
return ioremap(offset, size);
}
#endif
/**
* memremap() - remap an iomem_resource as cacheable memory
* @offset: iomem resource start address
* @size: size of remap
* @flags: either MEMREMAP_WB or MEMREMAP_WT
*
* memremap() is "ioremap" for cases where it is known that the resource
* being mapped does not have i/o side effects and the __iomem
* annotation is not applicable.
*
* MEMREMAP_WB - matches the default mapping for "System RAM" on
* the architecture. This is usually a read-allocate write-back cache.
* Moreover, if MEMREMAP_WB is specified and the requested remap region is RAM,
* memremap() will bypass establishing a new mapping and instead return
* a pointer into the direct map.
*
* MEMREMAP_WT - establish a mapping whereby writes either bypass the
* cache or are written through to memory and never exist in a
* cache-dirty state with respect to program visibility. Attempts to
* map "System RAM" with this mapping type will fail.
*/
void *memremap(resource_size_t offset, size_t size, unsigned long flags)
{
int is_ram = region_intersects(offset, size, "System RAM");
void *addr = NULL;
if (is_ram == REGION_MIXED) {
WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
&offset, (unsigned long) size);
return NULL;
}
/* Try all mapping types requested until one returns non-NULL */
if (flags & MEMREMAP_WB) {
flags &= ~MEMREMAP_WB;
/*
* MEMREMAP_WB is special in that it can be satisfied
* from the direct map. Some archs depend on the
* capability of memremap() to autodetect cases where
* the requested range is potentially in "System RAM"
*/
if (is_ram == REGION_INTERSECTS)
addr = __va(offset);
else
addr = ioremap_cache(offset, size);
}
/*
* If we don't have a mapping yet and more request flags are
* pending then we will be attempting to establish a new virtual
* address mapping. Enforce that this mapping is not aliasing
* "System RAM"
*/
if (!addr && is_ram == REGION_INTERSECTS && flags) {
WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
&offset, (unsigned long) size);
return NULL;
}
if (!addr && (flags & MEMREMAP_WT)) {
flags &= ~MEMREMAP_WT;
addr = ioremap_wt(offset, size);
}
return addr;
}
EXPORT_SYMBOL(memremap);
void memunmap(void *addr)
{
if (is_vmalloc_addr(addr))
iounmap((void __iomem *) addr);
}
EXPORT_SYMBOL(memunmap);
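For callers converting from ioremap_cache()/ioremap_wt(), usage is the same minus the __iomem annotation. An illustrative fragment, assuming a struct resource *res describing a side-effect-free memory range:

	void *base = memremap(res->start, resource_size(res), MEMREMAP_WB);

	if (!base)
		return -ENOMEM;
	/* plain loads/stores and memcpy() are valid on 'base' */
	memunmap(base);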
static void devm_memremap_release(struct device *dev, void *res)
{
memunmap(res);
}
static int devm_memremap_match(struct device *dev, void *res, void *match_data)
{
return *(void **)res == match_data;
}
void *devm_memremap(struct device *dev, resource_size_t offset,
size_t size, unsigned long flags)
{
void **ptr, *addr;
ptr = devres_alloc(devm_memremap_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return NULL;
addr = memremap(offset, size, flags);
if (addr) {
*ptr = addr;
devres_add(dev, ptr);
} else
devres_free(ptr);
return addr;
}
EXPORT_SYMBOL(devm_memremap);
void devm_memunmap(struct device *dev, void *addr)
{
WARN_ON(devres_destroy(dev, devm_memremap_release, devm_memremap_match,
addr));
memunmap(addr);
}
EXPORT_SYMBOL(devm_memunmap);
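Because the mapping is tied to the device through devres, a driver can drop its explicit unmap calls in the error and remove paths. A sketch of a hypothetical probe() (names assumed, not from this patch):

#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	void *base;

	if (!res)
		return -ENODEV;
	/* released automatically by devres on driver detach */
	base = devm_memremap(&pdev->dev, res->start, resource_size(res),
			MEMREMAP_WB);
	if (!base)
		return -ENOMEM;
	return 0;
}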
...@@ -492,40 +492,51 @@ int __weak page_is_ram(unsigned long pfn) ...@@ -492,40 +492,51 @@ int __weak page_is_ram(unsigned long pfn)
} }
EXPORT_SYMBOL_GPL(page_is_ram); EXPORT_SYMBOL_GPL(page_is_ram);
/* /**
* Search for a resource entry that fully contains the specified region. * region_intersects() - determine intersection of region with known resources
* If found, return 1 if it is RAM, 0 if not. * @start: region start address
* If not found, or region is not fully contained, return -1 * @size: size of region
* @name: name of resource (in iomem_resource)
* *
* Used by the ioremap functions to ensure the user is not remapping RAM and is * Check if the specified region partially overlaps or fully eclipses a
* a vast speed up over walking through the resource table page by page. * resource identified by @name. Return REGION_DISJOINT if the region
* does not overlap @name, return REGION_MIXED if the region overlaps
* @name and another resource, and return REGION_INTERSECTS if the
* region overlaps @name and no other defined resource. Note that
* REGION_INTERSECTS is also returned in the case when the specified
* region overlaps RAM and undefined memory holes.
*
* region_intersects() is used by memory remapping functions to ensure
* the user is not remapping RAM and is a vast speed up over walking
* through the resource table page by page.
*/ */
int region_is_ram(resource_size_t start, unsigned long size) int region_intersects(resource_size_t start, size_t size, const char *name)
{ {
struct resource *p;
resource_size_t end = start + size - 1;
unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY; unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;
const char *name = "System RAM"; resource_size_t end = start + size - 1;
int ret = -1; int type = 0; int other = 0;
struct resource *p;
read_lock(&resource_lock); read_lock(&resource_lock);
for (p = iomem_resource.child; p ; p = p->sibling) { for (p = iomem_resource.child; p ; p = p->sibling) {
if (p->end < start) bool is_type = strcmp(p->name, name) == 0 && p->flags == flags;
continue;
if (start >= p->start && start <= p->end)
if (p->start <= start && end <= p->end) { is_type ? type++ : other++;
/* resource fully contains region */ if (end >= p->start && end <= p->end)
if ((p->flags != flags) || strcmp(p->name, name)) is_type ? type++ : other++;
ret = 0; if (p->start >= start && p->end <= end)
else is_type ? type++ : other++;
ret = 1;
break;
}
if (end < p->start)
break; /* not found */
} }
read_unlock(&resource_lock); read_unlock(&resource_lock);
return ret;
if (other == 0)
return type ? REGION_INTERSECTS : REGION_DISJOINT;
if (type)
return REGION_MIXED;
return REGION_DISJOINT;
} }
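The tri-state result lets a mapping helper distinguish "all RAM", "no RAM" and "partially RAM" before deciding how (or whether) to map a range, as memremap() above does. An illustrative fragment, assumed to live inside a hypothetical mapping helper:

	switch (region_intersects(offset, size, "System RAM")) {
	case REGION_DISJOINT:
		/* no overlap with RAM: a new device mapping is fine */
		break;
	case REGION_INTERSECTS:
		/* entirely RAM: reuse the kernel direct map */
		break;
	case REGION_MIXED:
		/* straddles RAM and non-RAM: refuse the request */
		return NULL;
	}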
void __weak arch_remove_reservations(struct resource *avail) void __weak arch_remove_reservations(struct resource *avail)
......
...@@ -531,4 +531,7 @@ config ARCH_HAS_SG_CHAIN ...@@ -531,4 +531,7 @@ config ARCH_HAS_SG_CHAIN
config ARCH_HAS_PMEM_API config ARCH_HAS_PMEM_API
bool bool
config ARCH_HAS_MMIO_FLUSH
bool
endmenu endmenu
...@@ -119,10 +119,9 @@ EXPORT_SYMBOL(devm_iounmap); ...@@ -119,10 +119,9 @@ EXPORT_SYMBOL(devm_iounmap);
* @dev: generic device to handle the resource for * @dev: generic device to handle the resource for
* @res: resource to be handled * @res: resource to be handled
* *
* Checks that a resource is a valid memory region, requests the memory region * Checks that a resource is a valid memory region, requests the memory
* and ioremaps it either as cacheable or as non-cacheable memory depending on * region and ioremaps it. All operations are managed and will be undone
* the resource's flags. All operations are managed and will be undone on * on driver detach.
* driver detach.
* *
* Returns a pointer to the remapped memory or an ERR_PTR() encoded error code * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
* on failure. Usage example: * on failure. Usage example:
...@@ -153,11 +152,7 @@ void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res) ...@@ -153,11 +152,7 @@ void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
return IOMEM_ERR_PTR(-EBUSY); return IOMEM_ERR_PTR(-EBUSY);
} }
if (res->flags & IORESOURCE_CACHEABLE) dest_ptr = devm_ioremap(dev, res->start, size);
dest_ptr = devm_ioremap(dev, res->start, size);
else
dest_ptr = devm_ioremap_nocache(dev, res->start, size);
if (!dest_ptr) { if (!dest_ptr) {
dev_err(dev, "ioremap failed for resource %pR\n", res); dev_err(dev, "ioremap failed for resource %pR\n", res);
devm_release_mem_region(dev, res->start, size); devm_release_mem_region(dev, res->start, size);
......
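The only behavioural change here is that IORESOURCE_CACHEABLE no longer selects a cacheable mapping; the calling convention is unchanged. The usual pattern, consistent with the function's kernel-doc usage example, is:

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);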
...@@ -41,11 +41,8 @@ void __iomem *pci_iomap_range(struct pci_dev *dev, ...@@ -41,11 +41,8 @@ void __iomem *pci_iomap_range(struct pci_dev *dev,
len = maxlen; len = maxlen;
if (flags & IORESOURCE_IO) if (flags & IORESOURCE_IO)
return __pci_ioport_map(dev, start, len); return __pci_ioport_map(dev, start, len);
if (flags & IORESOURCE_MEM) { if (flags & IORESOURCE_MEM)
if (flags & IORESOURCE_CACHEABLE) return ioremap(start, len);
return ioremap(start, len);
return ioremap_nocache(start, len);
}
/* What? */ /* What? */
return NULL; return NULL;
} }
......
ldflags-y += --wrap=ioremap_wt
ldflags-y += --wrap=ioremap_wc ldflags-y += --wrap=ioremap_wc
ldflags-y += --wrap=memremap
ldflags-y += --wrap=devm_ioremap_nocache ldflags-y += --wrap=devm_ioremap_nocache
ldflags-y += --wrap=ioremap_cache ldflags-y += --wrap=devm_memremap
ldflags-y += --wrap=ioremap_nocache ldflags-y += --wrap=ioremap_nocache
ldflags-y += --wrap=iounmap ldflags-y += --wrap=iounmap
ldflags-y += --wrap=memunmap
ldflags-y += --wrap=__devm_request_region
ldflags-y += --wrap=__request_region ldflags-y += --wrap=__request_region
ldflags-y += --wrap=__release_region ldflags-y += --wrap=__release_region
......
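For context: "ldflags-y += --wrap=SYMBOL" makes the linker redirect references to SYMBOL in the wrapped objects to __wrap_SYMBOL, which is how the nfit test interposes on memremap(), devm_memremap() and friends. A hypothetical wrapper shape, mirroring the pattern in the hunk below (offset_is_simulated() and simulated_buffer() are made-up helpers):

void *__wrap_memremap(resource_size_t offset, size_t size, unsigned long flags)
{
	/* serve simulated NFIT resources from test-allocated buffers */
	if (offset_is_simulated(offset))
		return simulated_buffer(offset);
	/* otherwise defer to the real implementation */
	return memremap(offset, size, flags);
}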
...@@ -80,23 +80,39 @@ void __iomem *__wrap_devm_ioremap_nocache(struct device *dev, ...@@ -80,23 +80,39 @@ void __iomem *__wrap_devm_ioremap_nocache(struct device *dev,
} }
EXPORT_SYMBOL(__wrap_devm_ioremap_nocache); EXPORT_SYMBOL(__wrap_devm_ioremap_nocache);
void __iomem *__wrap_ioremap_cache(resource_size_t offset, unsigned long size) void *__wrap_devm_memremap(struct device *dev, resource_size_t offset,
size_t size, unsigned long flags)
{ {
return __nfit_test_ioremap(offset, size, ioremap_cache); struct nfit_test_resource *nfit_res;
rcu_read_lock();
nfit_res = get_nfit_res(offset);
rcu_read_unlock();
if (nfit_res)
return nfit_res->buf + offset - nfit_res->res->start;
return devm_memremap(dev, offset, size, flags);
} }
EXPORT_SYMBOL(__wrap_ioremap_cache); EXPORT_SYMBOL(__wrap_devm_memremap);
void __iomem *__wrap_ioremap_nocache(resource_size_t offset, unsigned long size) void *__wrap_memremap(resource_size_t offset, size_t size,
unsigned long flags)
{ {
return __nfit_test_ioremap(offset, size, ioremap_nocache); struct nfit_test_resource *nfit_res;
rcu_read_lock();
nfit_res = get_nfit_res(offset);
rcu_read_unlock();
if (nfit_res)
return nfit_res->buf + offset - nfit_res->res->start;
return memremap(offset, size, flags);
} }
EXPORT_SYMBOL(__wrap_ioremap_nocache); EXPORT_SYMBOL(__wrap_memremap);
void __iomem *__wrap_ioremap_wt(resource_size_t offset, unsigned long size) void __iomem *__wrap_ioremap_nocache(resource_size_t offset, unsigned long size)
{ {
return __nfit_test_ioremap(offset, size, ioremap_wt); return __nfit_test_ioremap(offset, size, ioremap_nocache);
} }
EXPORT_SYMBOL(__wrap_ioremap_wt); EXPORT_SYMBOL(__wrap_ioremap_nocache);
void __iomem *__wrap_ioremap_wc(resource_size_t offset, unsigned long size) void __iomem *__wrap_ioremap_wc(resource_size_t offset, unsigned long size)
{ {
...@@ -117,9 +133,22 @@ void __wrap_iounmap(volatile void __iomem *addr) ...@@ -117,9 +133,22 @@ void __wrap_iounmap(volatile void __iomem *addr)
} }
EXPORT_SYMBOL(__wrap_iounmap); EXPORT_SYMBOL(__wrap_iounmap);
struct resource *__wrap___request_region(struct resource *parent, void __wrap_memunmap(void *addr)
resource_size_t start, resource_size_t n, const char *name, {
int flags) struct nfit_test_resource *nfit_res;
rcu_read_lock();
nfit_res = get_nfit_res((unsigned long) addr);
rcu_read_unlock();
if (nfit_res)
return;
return memunmap(addr);
}
EXPORT_SYMBOL(__wrap_memunmap);
static struct resource *nfit_test_request_region(struct device *dev,
struct resource *parent, resource_size_t start,
resource_size_t n, const char *name, int flags)
{ {
struct nfit_test_resource *nfit_res; struct nfit_test_resource *nfit_res;
...@@ -147,10 +176,29 @@ struct resource *__wrap___request_region(struct resource *parent, ...@@ -147,10 +176,29 @@ struct resource *__wrap___request_region(struct resource *parent,
return res; return res;
} }
} }
if (dev)
return __devm_request_region(dev, parent, start, n, name);
return __request_region(parent, start, n, name, flags); return __request_region(parent, start, n, name, flags);
} }
struct resource *__wrap___request_region(struct resource *parent,
resource_size_t start, resource_size_t n, const char *name,
int flags)
{
return nfit_test_request_region(NULL, parent, start, n, name, flags);
}
EXPORT_SYMBOL(__wrap___request_region); EXPORT_SYMBOL(__wrap___request_region);
struct resource *__wrap___devm_request_region(struct device *dev,
struct resource *parent, resource_size_t start,
resource_size_t n, const char *name)
{
if (!dev)
return NULL;
return nfit_test_request_region(dev, parent, start, n, name, 0);
}
EXPORT_SYMBOL(__wrap___devm_request_region);
void __wrap___release_region(struct resource *parent, resource_size_t start, void __wrap___release_region(struct resource *parent, resource_size_t start,
resource_size_t n) resource_size_t n)
{ {
......
...@@ -1029,9 +1029,13 @@ static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa, ...@@ -1029,9 +1029,13 @@ static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa,
lane = nd_region_acquire_lane(nd_region); lane = nd_region_acquire_lane(nd_region);
if (rw) if (rw)
memcpy(mmio->base + dpa, iobuf, len); memcpy(mmio->addr.base + dpa, iobuf, len);
else else {
memcpy(iobuf, mmio->base + dpa, len); memcpy(iobuf, mmio->addr.base + dpa, len);
/* give us some coverage of the mmio_flush_range() API */
mmio_flush_range(mmio->addr.base + dpa, len);
}
nd_region_release_lane(nd_region, lane); nd_region_release_lane(nd_region, lane);
return 0; return 0;
......