Commit da4a22cb authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'io-mappings-for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'io-mappings-for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  io mapping: clean up #ifdefs
  io mapping: improve documentation
  i915: use io-mapping interfaces instead of a variety of mapping kludges
  resources: add io-mapping functions to dynamically map large device apertures
  x86: add iomap_atomic*()/iounmap_atomic() on 32-bit using fixmaps
parents 20ebc007 e5beae16
The io_mapping functions in linux/io-mapping.h provide an abstraction for
efficiently mapping small regions of an I/O device to the CPU. The initial
usage is to support the large graphics aperture on 32-bit processors where
ioremap_wc cannot be used to statically map the entire aperture to the CPU
as it would consume too much of the kernel address space.
A mapping object is created during driver initialization using
struct io_mapping *io_mapping_create_wc(unsigned long base,
unsigned long size)
'base' is the bus address of the region to be made
mappable, while 'size' indicates how large a mapping region to
enable. Both are in bytes.
This _wc variant provides a mapping which may only be used with
io_mapping_map_atomic_wc or io_mapping_map_wc.
With this mapping object, individual pages can be mapped either atomically
or not, depending on the necessary scheduling environment. Of course, atomic
maps are more efficient:
void *io_mapping_map_atomic_wc(struct io_mapping *mapping,
unsigned long offset)
'offset' is the offset within the defined mapping region.
Accessing addresses beyond the region specified in the
creation function yields undefined results. Using an offset
which is not page aligned yields an undefined result. The
return value points to a single page in CPU address space.
This _wc variant returns a write-combining map to the
page and may only be used with mappings created by
io_mapping_create_wc.
Note that the task may not sleep while holding this page
mapped.
void io_mapping_unmap_atomic(void *vaddr)
'vaddr' must be the value returned by the last
io_mapping_map_atomic_wc call. This unmaps the specified
page and allows the task to sleep once again.
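For illustration, here is a minimal sketch of an atomic map/write/unmap
cycle (the helper name and the 'mapping', 'off', and 'val' parameters are
assumptions for the example, not part of this commit):

	/* 'mapping' came from io_mapping_create_wc(); 'off' is page aligned */
	static void poke_aperture_word(struct io_mapping *mapping,
				       unsigned long off, u32 val)
	{
		void *vaddr;

		vaddr = io_mapping_map_atomic_wc(mapping, off);
		writel(val, vaddr);	/* must not sleep until the unmap */
		io_mapping_unmap_atomic(vaddr);
	}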
If you need to sleep while holding a mapping, you can use the non-atomic
variant, although it may be significantly slower.
void *io_mapping_map_wc(struct io_mapping *mapping,
unsigned long offset)
This works like io_mapping_map_atomic_wc except it allows
the task to sleep while holding the page mapped.
void io_mapping_unmap(void *vaddr)
This works like io_mapping_unmap_atomic, except it is used
for pages mapped with io_mapping_map_wc.
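A sketch of the sleeping path, patterned on the slow_user_write helper
this commit adds to i915 (the function name and parameters here are
illustrative assumptions):

	static int copy_user_to_aperture(struct io_mapping *mapping,
					 unsigned long off,
					 const char __user *user_data,
					 int len)
	{
		void *vaddr;

		vaddr = io_mapping_map_wc(mapping, off);
		if (vaddr == NULL)
			return -EFAULT;
		/* the user copy may fault, and hence sleep, while mapped */
		if (copy_from_user(vaddr, user_data, len)) {
			io_mapping_unmap(vaddr);
			return -EFAULT;
		}
		io_mapping_unmap(vaddr);
		return 0;
	}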
At driver close time, the io_mapping object must be freed:
void io_mapping_free(struct io_mapping *mapping)
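Putting the lifecycle together, mirroring how this commit uses the
interface from i915_gem_entervt_ioctl and i915_gem_leavevt_ioctl (the
driver and variable names below are assumptions for the sketch):

	static struct io_mapping *aperture;

	static int my_driver_start(unsigned long base, unsigned long size)
	{
		aperture = io_mapping_create_wc(base, size);
		if (aperture == NULL)
			return -EIO;
		return 0;
	}

	static void my_driver_stop(void)
	{
		io_mapping_free(aperture);
	}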
Current Implementation:
The initial implementation of these functions uses existing mapping
mechanisms and so provides only an abstraction layer and no new
functionality.
On 64-bit processors, io_mapping_create_wc calls ioremap_wc for the whole
range, creating a permanent kernel-visible mapping to the resource. The
map_atomic and map functions add the requested offset to the base of the
virtual address returned by ioremap_wc.
On 32-bit processors with HIGHMEM defined, io_mapping_map_atomic_wc uses
kmap_atomic_pfn to map the specified page in an atomic fashion;
kmap_atomic_pfn isn't really supposed to be used with device pages, but it
provides an efficient mapping for this usage.
On 32-bit processors without HIGHMEM defined, io_mapping_map_atomic_wc and
io_mapping_map_wc both use ioremap_wc, a terribly inefficient function which
performs an IPI to inform all processors about the new mapping. This results
in a significant performance penalty.
@@ -1894,6 +1894,10 @@ config SYSVIPC_COMPAT
endmenu
config HAVE_ATOMIC_IOMAP
def_bool y
depends on X86_32
source "net/Kconfig"
source "drivers/Kconfig"
@@ -9,6 +9,10 @@
extern int fixmaps_set;
extern pte_t *kmap_pte;
extern pgprot_t kmap_prot;
extern pte_t *pkmap_page_table;
void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
void native_set_fixmap(enum fixed_addresses idx,
unsigned long phys, pgprot_t flags);
@@ -28,10 +28,8 @@ extern unsigned long __FIXADDR_TOP;
#include <asm/acpi.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#ifdef CONFIG_HIGHMEM
#include <linux/threads.h>
#include <asm/kmap_types.h>
#endif
/*
* Here we define all the compile-time 'special' virtual
@@ -75,10 +73,8 @@ enum fixed_addresses {
#ifdef CONFIG_X86_CYCLONE_TIMER
FIX_CYCLONE_TIMER, /*cyclone timer register*/
#endif
#ifdef CONFIG_HIGHMEM
FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
#endif
#ifdef CONFIG_PCI_MMCONFIG
FIX_PCIE_MCFG,
#endif
@@ -25,14 +25,11 @@
#include <asm/kmap_types.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/fixmap.h>
/* declarations for highmem.c */
extern unsigned long highstart_pfn, highend_pfn;
extern pte_t *kmap_pte;
extern pgprot_t kmap_prot;
extern pte_t *pkmap_page_table;
/*
* Right now we initialize only a single pte table. It can be extended
* easily, subsequent pte tables have to be allocated in one physical
......
obj-y := init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
pat.o pgtable.o gup.o
obj-$(CONFIG_X86_32) += pgtable_32.o
obj-$(CONFIG_X86_32) += pgtable_32.o iomap_32.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_X86_PTDUMP) += dump_pagetables.o
@@ -334,7 +334,6 @@ int devmem_is_allowed(unsigned long pagenr)
return 0;
}
#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;
@@ -357,6 +356,7 @@ static void __init kmap_init(void)
kmap_prot = PAGE_KERNEL;
}
#ifdef CONFIG_HIGHMEM
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
unsigned long vaddr;
@@ -436,7 +436,6 @@ static void __init set_highmem_pages_init(void)
#endif /* !CONFIG_NUMA */
#else
# define kmap_init() do { } while (0)
# define permanent_kmaps_init(pgd_base) do { } while (0)
# define set_highmem_pages_init() do { } while (0)
#endif /* CONFIG_HIGHMEM */
......
/*
* Copyright © 2008 Ingo Molnar
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
*/
#include <asm/iomap.h>
#include <linux/module.h>
/*
 * Map 'pfn' using fixed map 'type' and protections 'prot'
 */
void *
iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
{
enum fixed_addresses idx;
unsigned long vaddr;
pagefault_disable();
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
set_pte(kmap_pte-idx, pfn_pte(pfn, prot));
arch_flush_lazy_mmu_mode();
return (void*) vaddr;
}
EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
void
iounmap_atomic(void *kvaddr, enum km_type type)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
/*
 * Force other mappings to Oops if they try to access this pte without
 * first remapping it. Keeping stale mappings around is also a bad idea,
 * in case the page changes cacheability attributes or becomes
 * a protected page in a hypervisor.
 */
if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
kpte_clear_flush(kmap_pte-idx, vaddr);
arch_flush_lazy_mmu_mode();
pagefault_enable();
}
EXPORT_SYMBOL_GPL(iounmap_atomic);
@@ -31,6 +31,7 @@
#define _I915_DRV_H_
#include "i915_reg.h"
#include <linux/io-mapping.h>
/* General customization:
*/
@@ -246,6 +247,8 @@ typedef struct drm_i915_private {
struct {
struct drm_mm gtt_space;
struct io_mapping *gtt_mapping;
/**
* List of objects currently involved in rendering from the
* ringbuffer.
@@ -193,35 +193,50 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
return 0;
}
/*
 * Try to write quickly with an atomic kmap. Return true on success.
 *
 * If this fails (which includes a partial write), we'll redo the whole
 * thing with the slow version.
 *
 * This is a workaround for the low performance of iounmap (approximately
 * 10% CPU cost on normal 3D workloads). kmap_atomic on HIGHMEM kernels
 * happens to let us map card memory without taking IPIs. When the vmap
 * rework lands we should be able to dump this hack.
 */
/* This is the fast write path which cannot handle
 * page faults in the source data
 */
static inline int fast_user_write(unsigned long pfn, char __user *user_data,
int l, int o)
static inline int
fast_user_write(struct io_mapping *mapping,
loff_t page_base, int page_offset,
char __user *user_data,
int length)
{
#ifdef CONFIG_HIGHMEM
char *vaddr_atomic;
unsigned long unwritten;
vaddr_atomic = kmap_atomic_pfn(pfn, KM_USER0);
#if WATCH_PWRITE
DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n",
i, o, l, pfn, vaddr_atomic);
#endif
unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + o, user_data, l);
kunmap_atomic(vaddr_atomic, KM_USER0);
return !unwritten;
#else
vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
user_data, length);
io_mapping_unmap_atomic(vaddr_atomic);
if (unwritten)
return -EFAULT;
return 0;
}
/* Here's the write path which can sleep for
* page faults
*/
static inline int
slow_user_write(struct io_mapping *mapping,
loff_t page_base, int page_offset,
char __user *user_data,
int length)
{
char __iomem *vaddr;
unsigned long unwritten;
vaddr = io_mapping_map_wc(mapping, page_base);
if (vaddr == NULL)
return -EFAULT;
unwritten = __copy_from_user(vaddr + page_offset,
user_data, length);
io_mapping_unmap(vaddr);
if (unwritten)
return -EFAULT;
return 0;
#endif
}
static int
@@ -230,10 +245,12 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_file *file_priv)
{
struct drm_i915_gem_object *obj_priv = obj->driver_private;
drm_i915_private_t *dev_priv = dev->dev_private;
ssize_t remain;
loff_t offset;
loff_t offset, page_base;
char __user *user_data;
int ret = 0;
int page_offset, page_length;
int ret;
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
@@ -257,57 +274,37 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
obj_priv->dirty = 1;
while (remain > 0) {
unsigned long pfn;
int i, o, l;
/* Operation in this page
*
* i = page number
* o = offset within page
* l = bytes to copy
* page_base = page offset within aperture
* page_offset = offset within page
* page_length = bytes to copy for this page
*/
i = offset >> PAGE_SHIFT;
o = offset & (PAGE_SIZE-1);
l = remain;
if ((o + l) > PAGE_SIZE)
l = PAGE_SIZE - o;
pfn = (dev->agp->base >> PAGE_SHIFT) + i;
if (!fast_user_write(pfn, user_data, l, o)) {
unsigned long unwritten;
char __iomem *vaddr;
vaddr = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);
#if WATCH_PWRITE
DRM_INFO("pwrite slow i %d o %d l %d "
"pfn %ld vaddr %p\n",
i, o, l, pfn, vaddr);
#endif
if (vaddr == NULL) {
ret = -EFAULT;
goto fail;
}
unwritten = __copy_from_user(vaddr + o, user_data, l);
#if WATCH_PWRITE
DRM_INFO("unwritten %ld\n", unwritten);
#endif
iounmap(vaddr);
if (unwritten) {
ret = -EFAULT;
page_base = (offset & ~(PAGE_SIZE-1));
page_offset = offset & (PAGE_SIZE-1);
page_length = remain;
if ((page_offset + remain) > PAGE_SIZE)
page_length = PAGE_SIZE - page_offset;
ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base,
page_offset, user_data, page_length);
/* If we get a fault while copying data, then (presumably) our
* source page isn't available. In this case, use the
* non-atomic function
*/
if (ret) {
ret = slow_user_write (dev_priv->mm.gtt_mapping,
page_base, page_offset,
user_data, page_length);
if (ret)
goto fail;
}
}
remain -= l;
user_data += l;
offset += l;
remain -= page_length;
user_data += page_length;
offset += page_length;
}
#if WATCH_PWRITE && 1
i915_gem_clflush_object(obj);
i915_gem_dump_object(obj, args->offset + args->size, __func__, ~0);
i915_gem_clflush_object(obj);
#endif
fail:
i915_gem_object_unpin(obj);
@@ -1525,12 +1522,12 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
struct drm_i915_gem_exec_object *entry)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_relocation_entry reloc;
struct drm_i915_gem_relocation_entry __user *relocs;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int i, ret;
uint32_t last_reloc_offset = -1;
void __iomem *reloc_page = NULL;
void __iomem *reloc_page;
/* Choose the GTT offset for our buffer and put it there. */
ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
@@ -1653,26 +1650,11 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
* perform.
*/
reloc_offset = obj_priv->gtt_offset + reloc.offset;
if (reloc_page == NULL ||
(last_reloc_offset & ~(PAGE_SIZE - 1)) !=
(reloc_offset & ~(PAGE_SIZE - 1))) {
if (reloc_page != NULL)
iounmap(reloc_page);
reloc_page = ioremap_wc(dev->agp->base +
(reloc_offset &
~(PAGE_SIZE - 1)),
PAGE_SIZE);
last_reloc_offset = reloc_offset;
if (reloc_page == NULL) {
drm_gem_object_unreference(target_obj);
i915_gem_object_unpin(obj);
return -ENOMEM;
}
}
reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
(reloc_offset &
~(PAGE_SIZE - 1)));
reloc_entry = (uint32_t __iomem *)(reloc_page +
(reloc_offset & (PAGE_SIZE - 1)));
reloc_val = target_obj_priv->gtt_offset + reloc.delta;
#if WATCH_BUF
@@ -1681,6 +1663,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
readl(reloc_entry), reloc_val);
#endif
writel(reloc_val, reloc_entry);
io_mapping_unmap_atomic(reloc_page);
/* Write the updated presumed offset for this entry back out
* to the user.
@@ -1696,9 +1679,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
drm_gem_object_unreference(target_obj);
}
if (reloc_page != NULL)
iounmap(reloc_page);
#if WATCH_BUF
if (0)
i915_gem_dump_object(obj, 128, __func__, ~0);
@@ -2540,6 +2520,10 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
if (ret != 0)
return ret;
dev_priv->mm.gtt_mapping = io_mapping_create_wc(dev->agp->base,
dev->agp->agp_info.aper_size
* 1024 * 1024);
mutex_lock(&dev->struct_mutex);
BUG_ON(!list_empty(&dev_priv->mm.active_list));
BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
@@ -2557,11 +2541,13 @@ int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
ret = i915_gem_idle(dev);
drm_irq_uninstall(dev);
io_mapping_free(dev_priv->mm.gtt_mapping);
return ret;
}
......
/*
* Copyright © 2008 Ingo Molnar
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
*/
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
void *
iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
void
iounmap_atomic(void *kvaddr, enum km_type type);
/*
* Copyright © 2008 Keith Packard <keithp@keithp.com>
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef _LINUX_IO_MAPPING_H
#define _LINUX_IO_MAPPING_H
#include <linux/types.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/iomap.h>
/*
* The io_mapping mechanism provides an abstraction for mapping
* individual pages from an io device to the CPU in an efficient fashion.
*
* See Documentation/io_mapping.txt
*/
/* this struct isn't actually defined anywhere */
struct io_mapping;
#ifdef CONFIG_HAVE_ATOMIC_IOMAP
/*
* For small address space machines, mapping large objects
* into the kernel virtual space isn't practical. Where
* available, use fixmap support to dynamically map pages
* of the object at run time.
*/
static inline struct io_mapping *
io_mapping_create_wc(unsigned long base, unsigned long size)
{
return (struct io_mapping *) base;
}
static inline void
io_mapping_free(struct io_mapping *mapping)
{
}
/* Atomic map/unmap */
static inline void *
io_mapping_map_atomic_wc(struct io_mapping *mapping, unsigned long offset)
{
offset += (unsigned long) mapping;
return iomap_atomic_prot_pfn(offset >> PAGE_SHIFT, KM_USER0,
__pgprot(__PAGE_KERNEL_WC));
}
static inline void
io_mapping_unmap_atomic(void *vaddr)
{
iounmap_atomic(vaddr, KM_USER0);
}
static inline void *
io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
{
offset += (unsigned long) mapping;
return ioremap_wc(offset, PAGE_SIZE);
}
static inline void
io_mapping_unmap(void *vaddr)
{
iounmap(vaddr);
}
#else
/* Create the io_mapping object */
static inline struct io_mapping *
io_mapping_create_wc(unsigned long base, unsigned long size)
{
return (struct io_mapping *) ioremap_wc(base, size);
}
static inline void
io_mapping_free(struct io_mapping *mapping)
{
iounmap(mapping);
}
/* Atomic map/unmap */
static inline void *
io_mapping_map_atomic_wc(struct io_mapping *mapping, unsigned long offset)
{
return ((char *) mapping) + offset;
}
static inline void
io_mapping_unmap_atomic(void *vaddr)
{
}
/* Non-atomic map/unmap */
static inline void *
io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
{
return ((char *) mapping) + offset;
}
static inline void
io_mapping_unmap(void *vaddr)
{
}
#endif /* HAVE_ATOMIC_IOMAP */
#endif /* _LINUX_IO_MAPPING_H */