Commit bfc1de0c authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6: (24 commits)
  [SPARC]: Add solaris/sunos binary support to feature removal schedule.
  [SPARC]: Merge asm-sparc{,64}/a.out.h
  [SPARC]: Merge asm-sparc{,64}/fb.h
  [SPARC]: Merge asm-sparc{,64}/errno.h
  [SPARC]: Merge asm-sparc{,64}/emergency-restart.h
  [SPARC]: Merge asm-sparc{,64}/div64.h
  [SPARC]: Merge asm-sparc{,64}/device.h
  [SPARC]: Merge asm-sparc{,64}/current.h
  [SPARC]: Merge asm-sparc{,64}/cputime.h
  [SPARC]: Merge asm-sparc{,64}/cache.h
  [SPARC]: Merge asm-sparc{,64}/byteorder.h
  [SPARC]: Merge asm-sparc{,64}/bugs.h
  [SPARC]: Merge asm-sparc{,64}/bug.h
  [SPARC]: Kill BSD errno translation table and header files.
  [SPARC]: Merge asm-sparc{,64}/bpp.h
  [SPARC]: Merge include/asm-sparc{,64}/auxvec.h
  [SPARC]: Merge include/asm-sparc{,64}/of_device.h
  [SPARC]: Merge include/asm-sparc{,64}/prom.h
  [SPARC]: Remove of_platform_device_create
  [SPARC64]: Add kretprobe support.
  ...
parents 1712a699 e88bb415
@@ -304,3 +304,14 @@ Why: The support code for the old firmware hurts code readability/maintainability
	and slightly hurts runtime performance.  Bugfixes for the old firmware
	are not provided by Broadcom anymore.
Who:	Michael Buesch <mb@bu3sch.de>
---------------------------
What: Solaris/SunOS syscall and binary support on Sparc
When: 2.6.26
Why: Largely unmaintained and almost entirely unused. File system
layering used to divert library and dynamic linker searches to
/usr/gnemul is extremely buggy and unfixable. Making it work
is largely pointless as without a lot of work only the most
trivial of Solaris binaries can work with the emulation code.
Who: David S. Miller <davem@davemloft.net>
/* errtbls.c: Error number conversion tables.
 *
 * Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net)
 *
 * Based upon preliminary work which is:
 *
 *    Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
 */

#include <asm/solerrno.h>	/* Solaris errnos */

/* Here is the table which converts between Linux error number values
 * to the equivalent under Solaris.  Note that since the Linux ones
 * have been set up to match exactly those of SunOS, no translation
 * table is needed for that OS.
 */

int solaris_errno[] = {
@@ -145,132 +142,3 @@ int solaris_errno[] = {
	SOL_ELIBMAX,
	SOL_ELIBSCN,
};
int netbsd_errno[] = {
0,
BSD_EPERM,
BSD_ENOENT,
BSD_ESRCH,
BSD_EINTR,
BSD_EIO,
BSD_ENXIO,
BSD_E2BIG,
BSD_ENOEXEC,
BSD_EBADF,
BSD_ECHILD,
BSD_EAGAIN,
BSD_ENOMEM,
BSD_EACCES,
BSD_EFAULT,
BSD_NOTBLK,
BSD_EBUSY,
BSD_EEXIST,
BSD_EXDEV,
BSD_ENODEV,
BSD_ENOTDIR,
BSD_EISDIR,
BSD_EINVAL,
BSD_ENFILE,
BSD_EMFILE,
BSD_ENOTTY,
BSD_ETXTBSY,
BSD_EFBIG,
BSD_ENOSPC,
BSD_ESPIPE,
BSD_EROFS,
BSD_EMLINK,
BSD_EPIPE,
BSD_EDOM,
BSD_ERANGE,
BSD_EWOULDBLOCK,
BSD_EINPROGRESS,
BSD_EALREADY,
BSD_ENOTSOCK,
BSD_EDESTADDRREQ,
BSD_EMSGSIZE,
BSD_EPROTOTYPE,
BSD_ENOPROTOOPT,
BSD_EPROTONOSUPPORT,
BSD_ESOCKTNOSUPPORT,
BSD_EOPNOTSUPP,
BSD_EPFNOSUPPORT,
BSD_EAFNOSUPPORT,
BSD_EADDRINUSE,
BSD_EADDRNOTAVAIL,
BSD_ENETDOWN,
BSD_ENETUNREACH,
BSD_ENETRESET,
BSD_ECONNABORTED,
BSD_ECONNRESET,
BSD_ENOBUFS,
BSD_EISCONN,
BSD_ENOTONN,
BSD_ESHUTDOWN,
BSD_ETOOMANYREFS,
BSD_ETIMEDOUT,
BSD_ECONNREFUSED,
BSD_ELOOP,
BSD_ENAMETOOLONG,
BSD_EHOSTDOWN,
BSD_EHOSTUNREACH,
BSD_ENOTEMPTY,
BSD_EPROCLIM,
BSD_EUSERS,
BSD_EDQUOT,
BSD_ESTALE,
BSD_EREMOTE,
BSD_ENOSTR,
BSD_ETIME,
BSD_ENOSR,
BSD_ENOMSG,
BSD_EBADMSG,
BSD_IDRM,
BSD_EDEADLK,
BSD_ENOLCK,
BSD_ENONET,
BSD_ERREMOTE,
BSD_ENOLINK,
BSD_EADV,
BSD_ESRMNT,
BSD_ECOMM,
BSD_EPROTO,
BSD_EMULTIHOP,
BSD_EINVAL, /* EDOTDOT XXX??? */
BSD_REMCHG,
BSD_NOSYS,
BSD_STRPIPE,
BSD_EOVERFLOW,
BSD_EBADFD,
BSD_ECHRNG,
BSD_EL2NSYNC,
BSD_EL3HLT,
BSD_EL3RST,
BSD_NRNG,
BSD_EUNATCH,
BSD_ENOCSI,
BSD_EL2HLT,
BSD_EBADE,
BSD_EBADR,
BSD_EXFULL,
BSD_ENOANO,
BSD_EBADRQC,
BSD_EBADSLT,
BSD_EDEADLOCK,
BSD_EBFONT,
BSD_ELIBEXEC,
BSD_ENODATA,
BSD_ELIBBAD,
BSD_ENOPKG,
BSD_ELIBACC,
BSD_ENOTUNIQ,
BSD_ERESTART,
BSD_EUCLEAN,
BSD_ENOTNAM,
BSD_ENAVAIL,
BSD_EISNAM,
BSD_EREMOTEIO,
BSD_EILSEQ,
BSD_ELIBMAX,
BSD_ELIBSCN,
};
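As the comment at the top of errtbls.c says, the remaining solaris_errno[] table maps native Linux error numbers to their Solaris equivalents. A helper sitting next to the table would typically consume it along these lines; this is a minimal sketch for illustration only (the helper name and the SOL_EIO fallback are assumptions, not part of this commit):

#include <linux/kernel.h>	/* ARRAY_SIZE() */

/* Hypothetical helper, assumed to live next to the table above: map a
 * native Linux errno to its Solaris value.  Values outside the table
 * fall back to a generic I/O error (an assumption for illustration).
 */
static int linux_to_solaris_errno(int err)
{
	unsigned int idx = (err < 0) ? -err : err;

	if (idx < ARRAY_SIZE(solaris_errno))
		return solaris_errno[idx];
	return SOL_EIO;
}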
@@ -584,30 +584,3 @@ static int __init of_debug(char *str)
}

__setup("of_debug=", of_debug);
struct of_device* of_platform_device_create(struct device_node *np,
const char *bus_id,
struct device *parent,
struct bus_type *bus)
{
struct of_device *dev;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return NULL;
dev->dev.parent = parent;
dev->dev.bus = bus;
dev->dev.release = of_release_dev;
strlcpy(dev->dev.bus_id, bus_id, BUS_ID_SIZE);
if (of_device_register(dev) != 0) {
kfree(dev);
return NULL;
}
return dev;
}
EXPORT_SYMBOL(of_platform_device_create);
@@ -89,6 +89,10 @@ SECTIONS
	.data.cacheline_aligned : {
		*(.data.cacheline_aligned)
	}
. = ALIGN(32);
.data.read_mostly : {
*(.data.read_mostly)
}
	__bss_start = .;
	.sbss : {
...
@@ -41,6 +41,10 @@ config MMU
	bool
	default y
config IOMMU_HELPER
bool
default y
config QUICKLIST
	bool
	default y
...
/* iommu.c: Generic sparc64 IOMMU support.
 *
 * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
 */
@@ -10,6 +10,7 @@
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/iommu-helper.h>
#ifdef CONFIG_PCI
#include <linux/pci.h>
@@ -41,7 +42,7 @@
			     "i" (ASI_PHYS_BYPASS_EC_E))

/* Must be invoked under the IOMMU lock. */
static void iommu_flushall(struct iommu *iommu)
{
	if (iommu->iommu_flushinv) {
		iommu_write(iommu->iommu_flushinv, ~(u64)0);
@@ -83,54 +84,91 @@ static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
	iopte_val(*iopte) = val;
}
/* Based almost entirely upon the ppc64 iommu allocator.  If you use the 'handle'
 * facility it must all be done in one pass while under the iommu lock.
 *
 * On sun4u platforms, we only flush the IOMMU once every time we've passed
 * over the entire page table doing allocations.  Therefore we only ever advance
 * the hint and cannot backtrack it.
 */
unsigned long iommu_range_alloc(struct device *dev,
				struct iommu *iommu,
				unsigned long npages,
				unsigned long *handle)
{
	unsigned long n, end, start, limit, boundary_size;
	struct iommu_arena *arena = &iommu->arena;
	int pass = 0;

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (handle && *handle)
		start = *handle;
	else
		start = arena->hint;

	limit = arena->limit;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the beginning and flush.
	 */
	if (start >= limit) {
		start = 0;
		if (iommu->flush_all)
			iommu->flush_all(iommu);
	}

 again:
	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << IO_PAGE_SHIFT);
	else
		boundary_size = ALIGN(1UL << 32, 1 << IO_PAGE_SHIFT);

	n = iommu_area_alloc(arena->map, limit, start, npages, 0,
			     boundary_size >> IO_PAGE_SHIFT, 0);
	if (n == -1) {
		if (likely(pass < 1)) {
			/* First failure, rescan from the beginning.  */
			start = 0;
			if (iommu->flush_all)
				iommu->flush_all(iommu);
			pass++;
			goto again;
		} else {
			/* Second failure, give up */
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;

	arena->hint = end;

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	return n;
}

void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long npages)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long entry;

	entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;

	iommu_area_free(arena->map, entry, npages);
}
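The 'handle' facility described in the comment above iommu_range_alloc() is easiest to see from the caller's side: a scatterlist mapping path takes the IOMMU lock once, threads the same handle through every allocation so the hint only moves forward, and unwinds with iommu_range_free() on failure. A minimal sketch of that calling pattern (the function below is illustrative and not part of the patch):

/* Illustrative only: allocate IOMMU ranges for a list of segment sizes
 * in a single pass while holding iommu->lock, threading one handle
 * through the calls as the scatterlist mapping paths in this patch do.
 */
static void example_sg_alloc_pass(struct device *dev, struct iommu *iommu,
				  unsigned long *seg_npages, int nsegs)
{
	unsigned long flags, handle = 0, entry;
	int i;

	spin_lock_irqsave(&iommu->lock, flags);
	for (i = 0; i < nsegs; i++) {
		entry = iommu_range_alloc(dev, iommu, seg_npages[i], &handle);
		if (entry == DMA_ERROR_CODE)
			break;	/* a real caller unwinds with iommu_range_free() */
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}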
int iommu_table_init(struct iommu *iommu, int tsbsize,
@@ -156,6 +194,9 @@ int iommu_table_init(struct iommu *iommu, int tsbsize,
	}
	iommu->arena.limit = num_tsb_entries;
if (tlb_type != hypervisor)
iommu->flush_all = iommu_flushall;
	/* Allocate and initialize the dummy page which we
	 * set inactive IO PTEs to point to.
	 */
@@ -192,22 +233,18 @@ int iommu_table_init(struct iommu *iommu, int tsbsize,
	return -ENOMEM;
}
static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu,
				    unsigned long npages)
{
	unsigned long entry;

	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	if (unlikely(entry == DMA_ERROR_CODE))
		return NULL;

	return iommu->page_table + entry;
}
static inline void free_npages(struct iommu *iommu, dma_addr_t base, unsigned long npages)
{
arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages);
}
static int iommu_alloc_ctx(struct iommu *iommu)
{
	int lowest = iommu->ctx_lowest_free;
@@ -258,7 +295,7 @@ static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(iopte == NULL)) {
@@ -296,7 +333,7 @@ static void dma_4u_free_coherent(struct device *dev, size_t size,
	spin_lock_irqsave(&iommu->lock, flags);
	iommu_range_free(iommu, dvma, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);
@@ -327,7 +364,7 @@ static dma_addr_t dma_4u_map_single(struct device *dev, void *ptr, size_t sz,
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	base = alloc_npages(dev, iommu, npages);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);
@@ -465,7 +502,7 @@ static void dma_4u_unmap_single(struct device *dev, dma_addr_t bus_addr,
	for (i = 0; i < npages; i++)
		iopte_make_dummy(iommu, base + i);

	iommu_range_free(iommu, bus_addr, npages);

	iommu_free_ctx(iommu, ctx);
@@ -475,124 +512,209 @@ static void dma_4u_unmap_single(struct device *dev, dma_addr_t bus_addr,
static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction)
{
unsigned long flags, ctx, i, npages, iopte_protection; struct scatterlist *s, *outs, *segstart;
struct scatterlist *sg; unsigned long flags, handle, prot, ctx;
dma_addr_t dma_next = 0, dma_addr;
unsigned int max_seg_size;
int outcount, incount, i;
struct strbuf *strbuf; struct strbuf *strbuf;
struct iommu *iommu; struct iommu *iommu;
iopte_t *base;
u32 dma_base; BUG_ON(direction == DMA_NONE);
/* Fast path single entry scatterlists. */
if (nelems == 1) {
sglist->dma_address =
dma_4u_map_single(dev, sg_virt(sglist),
sglist->length, direction);
if (unlikely(sglist->dma_address == DMA_ERROR_CODE))
return 0;
sglist->dma_length = sglist->length;
return 1;
}
iommu = dev->archdata.iommu; iommu = dev->archdata.iommu;
strbuf = dev->archdata.stc; strbuf = dev->archdata.stc;
if (nelems == 0 || !iommu)
if (unlikely(direction == DMA_NONE)) return 0;
goto bad_no_ctx;
npages = calc_npages(sglist, nelems);
spin_lock_irqsave(&iommu->lock, flags); spin_lock_irqsave(&iommu->lock, flags);
base = alloc_npages(iommu, npages);
ctx = 0; ctx = 0;
if (iommu->iommu_ctxflush) if (iommu->iommu_ctxflush)
ctx = iommu_alloc_ctx(iommu); ctx = iommu_alloc_ctx(iommu);
spin_unlock_irqrestore(&iommu->lock, flags);
if (base == NULL)
goto bad;
dma_base = iommu->page_table_map_base +
((base - iommu->page_table) << IO_PAGE_SHIFT);
if (strbuf->strbuf_enabled) if (strbuf->strbuf_enabled)
iopte_protection = IOPTE_STREAMING(ctx); prot = IOPTE_STREAMING(ctx);
else else
iopte_protection = IOPTE_CONSISTENT(ctx); prot = IOPTE_CONSISTENT(ctx);
if (direction != DMA_TO_DEVICE) if (direction != DMA_TO_DEVICE)
iopte_protection |= IOPTE_WRITE; prot |= IOPTE_WRITE;
for_each_sg(sglist, sg, nelems, i) { outs = s = segstart = &sglist[0];
unsigned long paddr = SG_ENT_PHYS_ADDRESS(sg); outcount = 1;
unsigned long slen = sg->length; incount = nelems;
unsigned long this_npages; handle = 0;
this_npages = iommu_num_pages(paddr, slen); /* Init first segment length for backout at failure */
outs->dma_length = 0;
sg->dma_address = dma_base | (paddr & ~IO_PAGE_MASK); max_seg_size = dma_get_max_seg_size(dev);
sg->dma_length = slen; for_each_sg(sglist, s, nelems, i) {
unsigned long paddr, npages, entry, slen;
iopte_t *base;
paddr &= IO_PAGE_MASK; slen = s->length;
while (this_npages--) { /* Sanity check */
iopte_val(*base) = iopte_protection | paddr; if (slen == 0) {
dma_next = 0;
continue;
}
/* Allocate iommu entries for that segment */
paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
npages = iommu_num_pages(paddr, slen);
entry = iommu_range_alloc(dev, iommu, npages, &handle);
/* Handle failure */
if (unlikely(entry == DMA_ERROR_CODE)) {
if (printk_ratelimit())
printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
" npages %lx\n", iommu, paddr, npages);
goto iommu_map_failed;
}
base = iommu->page_table + entry;
/* Convert entry to a dma_addr_t */
dma_addr = iommu->page_table_map_base +
(entry << IO_PAGE_SHIFT);
dma_addr |= (s->offset & ~IO_PAGE_MASK);
/* Insert into HW table */
paddr &= IO_PAGE_MASK;
while (npages--) {
iopte_val(*base) = prot | paddr;
base++; base++;
paddr += IO_PAGE_SIZE; paddr += IO_PAGE_SIZE;
dma_base += IO_PAGE_SIZE; }
/* If we are in an open segment, try merging */
if (segstart != s) {
/* We cannot merge if:
* - allocated dma_addr isn't contiguous to previous allocation
*/
if ((dma_addr != dma_next) ||
(outs->dma_length + s->length > max_seg_size)) {
/* Can't merge: create a new segment */
segstart = s;
outcount++;
outs = sg_next(outs);
} else {
outs->dma_length += s->length;
} }
} }
return nelems; if (segstart == s) {
/* This is a new segment, fill entries */
outs->dma_address = dma_addr;
outs->dma_length = slen;
}
/* Calculate next page pointer for contiguous check */
dma_next = dma_addr + slen;
}
spin_unlock_irqrestore(&iommu->lock, flags);
if (outcount < incount) {
outs = sg_next(outs);
outs->dma_address = DMA_ERROR_CODE;
outs->dma_length = 0;
}
return outcount;
iommu_map_failed:
for_each_sg(sglist, s, nelems, i) {
if (s->dma_length != 0) {
unsigned long vaddr, npages, entry, i;
iopte_t *base;
vaddr = s->dma_address & IO_PAGE_MASK;
npages = iommu_num_pages(s->dma_address, s->dma_length);
iommu_range_free(iommu, vaddr, npages);
entry = (vaddr - iommu->page_table_map_base)
>> IO_PAGE_SHIFT;
base = iommu->page_table + entry;
for (i = 0; i < npages; i++)
iopte_make_dummy(iommu, base + i);
s->dma_address = DMA_ERROR_CODE;
s->dma_length = 0;
}
if (s == outs)
break;
}
spin_unlock_irqrestore(&iommu->lock, flags);
bad:
iommu_free_ctx(iommu, ctx);
bad_no_ctx:
if (printk_ratelimit())
WARN_ON(1);
return 0; return 0;
} }
/* If contexts are being used, they are the same in all of the mappings
* we make for a particular SG.
*/
static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
{
unsigned long ctx = 0;
if (iommu->iommu_ctxflush) {
iopte_t *base;
u32 bus_addr;
bus_addr = sg->dma_address & IO_PAGE_MASK;
base = iommu->page_table +
((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
}
return ctx;
}
static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction)
{
unsigned long flags, ctx, i, npages; unsigned long flags, ctx;
struct scatterlist *sg;
struct strbuf *strbuf; struct strbuf *strbuf;
struct iommu *iommu; struct iommu *iommu;
iopte_t *base;
u32 bus_addr;
if (unlikely(direction == DMA_NONE)) { BUG_ON(direction == DMA_NONE);
if (printk_ratelimit())
WARN_ON(1);
}
iommu = dev->archdata.iommu; iommu = dev->archdata.iommu;
strbuf = dev->archdata.stc; strbuf = dev->archdata.stc;
bus_addr = sglist->dma_address & IO_PAGE_MASK; ctx = fetch_sg_ctx(iommu, sglist);
npages = calc_npages(sglist, nelems); spin_lock_irqsave(&iommu->lock, flags);
base = iommu->page_table + sg = sglist;
((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT); while (nelems--) {
dma_addr_t dma_handle = sg->dma_address;
unsigned int len = sg->dma_length;
unsigned long npages, entry;
iopte_t *base;
int i;
spin_lock_irqsave(&iommu->lock, flags); if (!len)
break;
npages = iommu_num_pages(dma_handle, len);
iommu_range_free(iommu, dma_handle, npages);
/* Record the context, if any. */ entry = ((dma_handle - iommu->page_table_map_base)
ctx = 0; >> IO_PAGE_SHIFT);
if (iommu->iommu_ctxflush) base = iommu->page_table + entry;
ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
/* Step 1: Kick data out of streaming buffers if necessary. */ dma_handle &= IO_PAGE_MASK;
if (strbuf->strbuf_enabled) if (strbuf->strbuf_enabled)
strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction); strbuf_flush(strbuf, iommu, dma_handle, ctx,
npages, direction);
/* Step 2: Clear out the TSB entries. */
for (i = 0; i < npages; i++) for (i = 0; i < npages; i++)
iopte_make_dummy(iommu, base + i); iopte_make_dummy(iommu, base + i);
free_npages(iommu, bus_addr - iommu->page_table_map_base, npages); sg = sg_next(sg);
}
iommu_free_ctx(iommu, ctx); iommu_free_ctx(iommu, ctx);
...
/* iommu_common.h: UltraSparc SBUS/PCI common iommu declarations.
 *
 * Copyright (C) 1999, 2008 David S. Miller (davem@davemloft.net)
 */

#ifndef _IOMMU_COMMON_H
#define _IOMMU_COMMON_H

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sched.h>
@@ -56,21 +58,12 @@ static inline unsigned long calc_npages(struct scatterlist *sglist, int nelems)
	return npages;
}
extern unsigned long iommu_range_alloc(struct device *dev,
				       struct iommu *iommu,
				       unsigned long npages,
				       unsigned long *handle);
extern void iommu_range_free(struct iommu *iommu,
			     dma_addr_t dma_addr,
			     unsigned long npages);

#endif /* _IOMMU_COMMON_H */
@@ -480,8 +480,117 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
	return 0;
}
/* Called with kretprobe_lock held.  The value stored in the return
 * address register is actually 2 instructions before where the
* callee will return to. Sequences usually look something like this
*
* call some_function <--- return register points here
* nop <--- call delay slot
* whatever <--- where callee returns to
*
* To keep trampoline_probe_handler logic simpler, we normalize the
* value kept in ri->ret_addr so we don't need to keep adjusting it
* back and forth.
*/
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
ri->ret_addr = (kprobe_opcode_t *)(regs->u_regs[UREG_RETPC] + 8);
/* Replace the return addr with trampoline addr */
regs->u_regs[UREG_RETPC] =
((unsigned long)kretprobe_trampoline) - 8;
}
/*
* Called when the probe at kretprobe trampoline is hit
*/
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
struct kretprobe_instance *ri = NULL;
struct hlist_head *head, empty_rp;
struct hlist_node *node, *tmp;
unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
INIT_HLIST_HEAD(&empty_rp);
spin_lock_irqsave(&kretprobe_lock, flags);
head = kretprobe_inst_table_head(current);
/*
* It is possible to have multiple instances associated with a given
 * task either because multiple functions in the call path
 * have a return probe installed on them, and/or more than one
* return probe was registered for a target function.
*
* We can handle this because:
* - instances are always inserted at the head of the list
* - when multiple return probes are registered for the same
* function, the first instance's ret_addr will point to the
* real return address, and all the rest will point to
* kretprobe_trampoline
*/
hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
if (ri->rp && ri->rp->handler)
ri->rp->handler(ri, regs);
orig_ret_address = (unsigned long)ri->ret_addr;
recycle_rp_inst(ri, &empty_rp);
if (orig_ret_address != trampoline_address)
/*
* This is the real return address. Any other
* instances associated with this task are for
* other calls deeper on the call stack
*/
break;
}
kretprobe_assert(ri, orig_ret_address, trampoline_address);
regs->tpc = orig_ret_address;
regs->tnpc = orig_ret_address + 4;
reset_current_kprobe();
spin_unlock_irqrestore(&kretprobe_lock, flags);
preempt_enable_no_resched();
hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
kfree(ri);
}
/*
* By returning a non-zero value, we are telling
* kprobe_handler() that we don't want the post_handler
* to run (and have re-enabled preemption)
*/
return 1;
}
void kretprobe_trampoline_holder(void)
{
asm volatile(".global kretprobe_trampoline\n"
"kretprobe_trampoline:\n"
"\tnop\n"
"\tnop\n");
}
static struct kprobe trampoline_p = {
.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
.pre_handler = trampoline_probe_handler
};
int __init arch_init_kprobes(void)
{
return register_kprobe(&trampoline_p);
}
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
return 1;
	return 0;
}
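With the trampoline probe registered, the new sparc64 kretprobe support is used through the ordinary kprobes module API. A minimal sketch of a module that hooks a function return follows; the target symbol do_fork and the handler body are illustrative assumptions, not part of this commit:

#include <linux/module.h>
#include <linux/kprobes.h>

/* Illustrative handler: runs when the probed function returns. */
static int example_ret_handler(struct kretprobe_instance *ri,
			       struct pt_regs *regs)
{
	printk(KERN_INFO "%s returned, normalized ret_addr=%p\n",
	       ri->rp->kp.symbol_name, ri->ret_addr);
	return 0;
}

static struct kretprobe example_kretprobe = {
	.handler	= example_ret_handler,
	.kp.symbol_name	= "do_fork",	/* assumed example target */
	.maxactive	= 20,
};

static int __init example_init(void)
{
	return register_kretprobe(&example_kretprobe);
}

static void __exit example_exit(void)
{
	unregister_kretprobe(&example_kretprobe);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");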
@@ -868,29 +868,3 @@ static int __init of_debug(char *str)
}

__setup("of_debug=", of_debug);
struct of_device* of_platform_device_create(struct device_node *np,
const char *bus_id,
struct device *parent,
struct bus_type *bus)
{
struct of_device *dev;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return NULL;
dev->dev.parent = parent;
dev->dev.bus = bus;
dev->dev.release = of_release_dev;
strlcpy(dev->dev.bus_id, bus_id, BUS_ID_SIZE);
if (of_device_register(dev) != 0) {
kfree(dev);
return NULL;
}
return dev;
}
EXPORT_SYMBOL(of_platform_device_create);
/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
@@ -89,6 +89,17 @@ static long iommu_batch_flush(struct iommu_batch *p)
	return 0;
}
static inline void iommu_batch_new_entry(unsigned long entry)
{
struct iommu_batch *p = &__get_cpu_var(iommu_batch);
if (p->entry + p->npages == entry)
return;
if (p->entry != ~0UL)
iommu_batch_flush(p);
p->entry = entry;
}
/* Interrupts must be disabled.  */
static inline long iommu_batch_add(u64 phys_page)
{
@@ -113,54 +124,6 @@ static inline long iommu_batch_end(void)
	return iommu_batch_flush(p);
}
static long arena_alloc(struct iommu_arena *arena, unsigned long npages)
{
unsigned long n, i, start, end, limit;
int pass;
limit = arena->limit;
start = arena->hint;
pass = 0;
again:
n = find_next_zero_bit(arena->map, limit, start);
end = n + npages;
if (unlikely(end >= limit)) {
if (likely(pass < 1)) {
limit = start;
start = 0;
pass++;
goto again;
} else {
/* Scanned the whole thing, give up. */
return -1;
}
}
for (i = n; i < end; i++) {
if (test_bit(i, arena->map)) {
start = i + 1;
goto again;
}
}
for (i = n; i < end; i++)
__set_bit(i, arena->map);
arena->hint = end;
return n;
}
static void arena_free(struct iommu_arena *arena, unsigned long base,
unsigned long npages)
{
unsigned long i;
for (i = base; i < (base + npages); i++)
__clear_bit(i, arena->map);
}
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp)
{
@@ -185,11 +148,11 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry == DMA_ERROR_CODE))
		goto range_alloc_fail;

	*dma_addrp = (iommu->page_table_map_base +
		      (entry << IO_PAGE_SHIFT));
@@ -219,10 +182,10 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	iommu_range_free(iommu, *dma_addrp, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

range_alloc_fail:
	free_pages(first_page, order);
	return NULL;
}
@@ -243,7 +206,7 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
	spin_lock_irqsave(&iommu->lock, flags);
	iommu_range_free(iommu, dvma, npages);

	do {
		unsigned long num;
@@ -281,10 +244,10 @@ static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz,
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry == DMA_ERROR_CODE))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
@@ -319,7 +282,7 @@ static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz,
iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	iommu_range_free(iommu, bus_addr, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return DMA_ERROR_CODE;
@@ -350,9 +313,9 @@ static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
	spin_lock_irqsave(&iommu->lock, flags);
	iommu_range_free(iommu, bus_addr, npages);
	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	do {
		unsigned long num;
@@ -368,88 +331,131 @@ static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction)
{
unsigned long flags, npages, i, prot; struct scatterlist *s, *outs, *segstart;
struct scatterlist *sg; unsigned long flags, handle, prot;
dma_addr_t dma_next = 0, dma_addr;
unsigned int max_seg_size;
int outcount, incount, i;
struct iommu *iommu; struct iommu *iommu;
long entry, err; long err;
u32 dma_base;
BUG_ON(direction == DMA_NONE);
/* Fast path single entry scatterlists. */
if (nelems == 1) {
sglist->dma_address =
dma_4v_map_single(dev, sg_virt(sglist),
sglist->length, direction);
if (unlikely(sglist->dma_address == DMA_ERROR_CODE))
return 0;
sglist->dma_length = sglist->length;
return 1;
}
iommu = dev->archdata.iommu; iommu = dev->archdata.iommu;
if (nelems == 0 || !iommu)
return 0;
if (unlikely(direction == DMA_NONE)) prot = HV_PCI_MAP_ATTR_READ;
goto bad; if (direction != DMA_TO_DEVICE)
prot |= HV_PCI_MAP_ATTR_WRITE;
outs = s = segstart = &sglist[0];
outcount = 1;
incount = nelems;
handle = 0;
npages = calc_npages(sglist, nelems); /* Init first segment length for backout at failure */
outs->dma_length = 0;
spin_lock_irqsave(&iommu->lock, flags); spin_lock_irqsave(&iommu->lock, flags);
entry = arena_alloc(&iommu->arena, npages);
spin_unlock_irqrestore(&iommu->lock, flags);
if (unlikely(entry < 0L)) iommu_batch_start(dev, prot, ~0UL);
goto bad;
dma_base = iommu->page_table_map_base + max_seg_size = dma_get_max_seg_size(dev);
(entry << IO_PAGE_SHIFT); for_each_sg(sglist, s, nelems, i) {
unsigned long paddr, npages, entry, slen;
prot = HV_PCI_MAP_ATTR_READ; slen = s->length;
if (direction != DMA_TO_DEVICE) /* Sanity check */
prot |= HV_PCI_MAP_ATTR_WRITE; if (slen == 0) {
dma_next = 0;
local_irq_save(flags); continue;
}
iommu_batch_start(dev, prot, entry); /* Allocate iommu entries for that segment */
paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
npages = iommu_num_pages(paddr, slen);
entry = iommu_range_alloc(dev, iommu, npages, &handle);
for_each_sg(sglist, sg, nelems, i) { /* Handle failure */
unsigned long paddr = SG_ENT_PHYS_ADDRESS(sg); if (unlikely(entry == DMA_ERROR_CODE)) {
unsigned long slen = sg->length; if (printk_ratelimit())
unsigned long this_npages; printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
" npages %lx\n", iommu, paddr, npages);
goto iommu_map_failed;
}
this_npages = iommu_num_pages(paddr, slen); iommu_batch_new_entry(entry);
sg->dma_address = dma_base | (paddr & ~IO_PAGE_MASK); /* Convert entry to a dma_addr_t */
sg->dma_length = slen; dma_addr = iommu->page_table_map_base +
(entry << IO_PAGE_SHIFT);
dma_addr |= (s->offset & ~IO_PAGE_MASK);
/* Insert into HW table */
paddr &= IO_PAGE_MASK; paddr &= IO_PAGE_MASK;
while (this_npages--) { while (npages--) {
err = iommu_batch_add(paddr); err = iommu_batch_add(paddr);
if (unlikely(err < 0L)) { if (unlikely(err < 0L))
local_irq_restore(flags);
goto iommu_map_failed; goto iommu_map_failed;
paddr += IO_PAGE_SIZE;
} }
paddr += IO_PAGE_SIZE; /* If we are in an open segment, try merging */
dma_base += IO_PAGE_SIZE; if (segstart != s) {
/* We cannot merge if:
* - allocated dma_addr isn't contiguous to previous allocation
*/
if ((dma_addr != dma_next) ||
(outs->dma_length + s->length > max_seg_size)) {
/* Can't merge: create a new segment */
segstart = s;
outcount++;
outs = sg_next(outs);
} else {
outs->dma_length += s->length;
} }
} }
err = iommu_batch_end(); if (segstart == s) {
/* This is a new segment, fill entries */
outs->dma_address = dma_addr;
outs->dma_length = slen;
}
local_irq_restore(flags); /* Calculate next page pointer for contiguous check */
dma_next = dma_addr + slen;
}
err = iommu_batch_end();
if (unlikely(err < 0L)) if (unlikely(err < 0L))
goto iommu_map_failed; goto iommu_map_failed;
return nelems; spin_unlock_irqrestore(&iommu->lock, flags);
bad: if (outcount < incount) {
if (printk_ratelimit()) outs = sg_next(outs);
WARN_ON(1); outs->dma_address = DMA_ERROR_CODE;
return 0; outs->dma_length = 0;
}
return outcount;
iommu_map_failed: iommu_map_failed:
spin_lock_irqsave(&iommu->lock, flags); for_each_sg(sglist, s, nelems, i) {
arena_free(&iommu->arena, entry, npages); if (s->dma_length != 0) {
unsigned long vaddr, npages;
vaddr = s->dma_address & IO_PAGE_MASK;
npages = iommu_num_pages(s->dma_address, s->dma_length);
iommu_range_free(iommu, vaddr, npages);
/* XXX demap? XXX */
s->dma_address = DMA_ERROR_CODE;
s->dma_length = 0;
}
if (s == outs)
break;
}
spin_unlock_irqrestore(&iommu->lock, flags); spin_unlock_irqrestore(&iommu->lock, flags);
return 0; return 0;
@@ -458,39 +464,43 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction)
{
unsigned long flags, npages;
struct pci_pbm_info *pbm; struct pci_pbm_info *pbm;
u32 devhandle, bus_addr; struct scatterlist *sg;
struct iommu *iommu; struct iommu *iommu;
long entry; unsigned long flags;
u32 devhandle;
if (unlikely(direction == DMA_NONE)) { BUG_ON(direction == DMA_NONE);
if (printk_ratelimit())
WARN_ON(1);
}
iommu = dev->archdata.iommu; iommu = dev->archdata.iommu;
pbm = dev->archdata.host_controller; pbm = dev->archdata.host_controller;
devhandle = pbm->devhandle; devhandle = pbm->devhandle;
bus_addr = sglist->dma_address & IO_PAGE_MASK;
npages = calc_npages(sglist, nelems);
entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
spin_lock_irqsave(&iommu->lock, flags); spin_lock_irqsave(&iommu->lock, flags);
arena_free(&iommu->arena, entry, npages); sg = sglist;
while (nelems--) {
dma_addr_t dma_handle = sg->dma_address;
unsigned int len = sg->dma_length;
unsigned long npages, entry;
do { if (!len)
break;
npages = iommu_num_pages(dma_handle, len);
iommu_range_free(iommu, dma_handle, npages);
entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
while (npages) {
unsigned long num; unsigned long num;
num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry), num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
npages); npages);
entry += num; entry += num;
npages -= num; npages -= num;
} while (npages != 0); }
sg = sg_next(sg);
}
spin_unlock_irqrestore(&iommu->lock, flags); spin_unlock_irqrestore(&iommu->lock, flags);
} }
...
/* $Id: a.out.h,v 1.13 2000/01/09 10:46:53 anton Exp $ */
#ifndef __SPARC_A_OUT_H__
#define __SPARC_A_OUT_H__

#define SPARC_PGSIZE    0x2000        /* Thanks to the sun4 architecture... */
#define SEGMENT_SIZE    SPARC_PGSIZE  /* whee... */

#ifndef __ASSEMBLY__

struct exec {
	unsigned char a_dynamic:1;      /* A __DYNAMIC is in this image */
	unsigned char a_toolversion:7;
	unsigned char a_machtype;
	unsigned short a_info;
	unsigned int a_text;		/* length of text, in bytes */
	unsigned int a_data;		/* length of data, in bytes */
	unsigned int a_bss;		/* length of bss, in bytes */
	unsigned int a_syms;		/* length of symbol table, in bytes */
	unsigned int a_entry;		/* where program begins */
	unsigned int a_trsize;
	unsigned int a_drsize;
};

#endif /* !__ASSEMBLY__ */

/* Where in the file does the text information begin? */
#define N_TXTOFF(x)     (N_MAGIC(x) == ZMAGIC ? 0 : sizeof (struct exec))
@@ -28,19 +31,21 @@ struct exec {
			(x).a_drsize)

/* Where does text segment go in memory after being loaded? */
#define N_TXTADDR(x) (unsigned long)(((N_MAGIC(x) == ZMAGIC) && \
		       ((x).a_entry < SPARC_PGSIZE)) ? \
		        0 : SPARC_PGSIZE)

/* And same for the data segment.. */
#define N_DATADDR(x) (N_MAGIC(x)==OMAGIC ? \
		       (N_TXTADDR(x) + (x).a_text) \
		        : (unsigned long) (_N_SEGMENT_ROUND (_N_TXTENDADDR(x))))

#define N_TRSIZE(a)	((a).a_trsize)
#define N_DRSIZE(a)	((a).a_drsize)
#define N_SYMSIZE(a)	((a).a_syms)

#ifndef __ASSEMBLY__

/*
 * Sparc relocation types
 */
@@ -77,14 +82,16 @@
 */
struct relocation_info /* used when header.a_machtype == M_SPARC */
{
	unsigned int	r_address;	/* relocation addr */
	unsigned int	r_index:24;	/* segment index or symbol index */
	unsigned int	r_extern:1;	/* if F, r_index==SEG#; if T, SYM idx */
	unsigned int	r_pad:2;	/* <unused> */
	enum reloc_type r_type:5;	/* type of relocation to perform */
	int		r_addend;	/* addend for relocation value */
};

#define N_RELOCATION_INFO_DECLARED 1

#endif /* !(__ASSEMBLY__) */

#endif /* __SPARC_A_OUT_H__ */
@@ -17,7 +17,7 @@
 * with compliant or compatible devices.  It will use whatever features
 * the device supports, prefering those that are typically faster.
 *
 * When the device is opened, it is left in COMPATIBILITY mode, and
 * writes work like any printer device.  The driver only attempt to
 * negotiate 1284 modes when needed so that plugs can be pulled,
 * switch boxes switched, etc., without disrupting things.  It will
...
/* $Id: bsderrno.h,v 1.3 1996/04/25 06:12:47 davem Exp $
* bsderrno.h: Error numbers for NetBSD binary compatibility
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
#ifndef _SPARC_BSDERRNO_H
#define _SPARC_BSDERRNO_H
#define BSD_EPERM 1 /* Operation not permitted */
#define BSD_ENOENT 2 /* No such file or directory */
#define BSD_ESRCH 3 /* No such process */
#define BSD_EINTR 4 /* Interrupted system call */
#define BSD_EIO 5 /* Input/output error */
#define BSD_ENXIO 6 /* Device not configured */
#define BSD_E2BIG 7 /* Argument list too long */
#define BSD_ENOEXEC 8 /* Exec format error */
#define BSD_EBADF 9 /* Bad file descriptor */
#define BSD_ECHILD 10 /* No child processes */
#define BSD_EDEADLK 11 /* Resource deadlock avoided */
#define BSD_ENOMEM 12 /* Cannot allocate memory */
#define BSD_EACCES 13 /* Permission denied */
#define BSD_EFAULT 14 /* Bad address */
#define BSD_ENOTBLK 15 /* Block device required */
#define BSD_EBUSY 16 /* Device busy */
#define BSD_EEXIST 17 /* File exists */
#define BSD_EXDEV 18 /* Cross-device link */
#define BSD_ENODEV 19 /* Operation not supported by device */
#define BSD_ENOTDIR 20 /* Not a directory */
#define BSD_EISDIR 21 /* Is a directory */
#define BSD_EINVAL 22 /* Invalid argument */
#define BSD_ENFILE 23 /* Too many open files in system */
#define BSD_EMFILE 24 /* Too many open files */
#define BSD_ENOTTY 25 /* Inappropriate ioctl for device */
#define BSD_ETXTBSY 26 /* Text file busy */
#define BSD_EFBIG 27 /* File too large */
#define BSD_ENOSPC 28 /* No space left on device */
#define BSD_ESPIPE 29 /* Illegal seek */
#define BSD_EROFS 30 /* Read-only file system */
#define BSD_EMLINK 31 /* Too many links */
#define BSD_EPIPE 32 /* Broken pipe */
#define BSD_EDOM 33 /* Numerical argument out of domain */
#define BSD_ERANGE 34 /* Result too large */
#define BSD_EAGAIN 35 /* Resource temporarily unavailable */
#define BSD_EWOULDBLOCK EAGAIN /* Operation would block */
#define BSD_EINPROGRESS 36 /* Operation now in progress */
#define BSD_EALREADY 37 /* Operation already in progress */
#define BSD_ENOTSOCK 38 /* Socket operation on non-socket */
#define BSD_EDESTADDRREQ 39 /* Destination address required */
#define BSD_EMSGSIZE 40 /* Message too long */
#define BSD_EPROTOTYPE 41 /* Protocol wrong type for socket */
#define BSD_ENOPROTOOPT 42 /* Protocol not available */
#define BSD_EPROTONOSUPPORT 43 /* Protocol not supported */
#define BSD_ESOCKTNOSUPPORT 44 /* Socket type not supported */
#define BSD_EOPNOTSUPP 45 /* Operation not supported */
#define BSD_EPFNOSUPPORT 46 /* Protocol family not supported */
#define BSD_EAFNOSUPPORT 47 /* Address family not supported by protocol family */
#define BSD_EADDRINUSE 48 /* Address already in use */
#define BSD_EADDRNOTAVAIL 49 /* Can't assign requested address */
#define BSD_ENETDOWN 50 /* Network is down */
#define BSD_ENETUNREACH 51 /* Network is unreachable */
#define BSD_ENETRESET 52 /* Network dropped connection on reset */
#define BSD_ECONNABORTED 53 /* Software caused connection abort */
#define BSD_ECONNRESET 54 /* Connection reset by peer */
#define BSD_ENOBUFS 55 /* No buffer space available */
#define BSD_EISCONN 56 /* Socket is already connected */
#define BSD_ENOTCONN 57 /* Socket is not connected */
#define BSD_ESHUTDOWN 58 /* Can't send after socket shutdown */
#define BSD_ETOOMANYREFS 59 /* Too many references: can't splice */
#define BSD_ETIMEDOUT 60 /* Operation timed out */
#define BSD_ECONNREFUSED 61 /* Connection refused */
#define BSD_ELOOP 62 /* Too many levels of symbolic links */
#define BSD_ENAMETOOLONG 63 /* File name too long */
#define BSD_EHOSTDOWN 64 /* Host is down */
#define BSD_EHOSTUNREACH 65 /* No route to host */
#define BSD_ENOTEMPTY 66 /* Directory not empty */
#define BSD_EPROCLIM 67 /* Too many processes */
#define BSD_EUSERS 68 /* Too many users */
#define BSD_EDQUOT 69 /* Disc quota exceeded */
#define BSD_ESTALE 70 /* Stale NFS file handle */
#define BSD_EREMOTE 71 /* Too many levels of remote in path */
#define BSD_EBADRPC 72 /* RPC struct is bad */
#define BSD_ERPCMISMATCH 73 /* RPC version wrong */
#define BSD_EPROGUNAVAIL 74 /* RPC prog. not avail */
#define BSD_EPROGMISMATCH 75 /* Program version wrong */
#define BSD_EPROCUNAVAIL 76 /* Bad procedure for program */
#define BSD_ENOLCK 77 /* No locks available */
#define BSD_ENOSYS 78 /* Function not implemented */
#define BSD_EFTYPE 79 /* Inappropriate file type or format */
#define BSD_EAUTH 80 /* Authentication error */
#define BSD_ENEEDAUTH 81 /* Need authenticator */
#define BSD_ELAST 81 /* Must be equal largest errno */
#endif /* !(_SPARC_BSDERRNO_H) */
@@ -2,28 +2,16 @@
#define _SPARC_BUG_H

#ifdef CONFIG_BUG
#include <linux/compiler.h>

/* Only use the inline asm until a gcc release that can handle __builtin_trap
* -rob 2003-06-25
*
* gcc-3.3.1 and later will be OK -DaveM
*/
#if (__GNUC__ > 3) || \
(__GNUC__ == 3 && __GNUC_MINOR__ > 3) || \
(__GNUC__ == 3 && __GNUC_MINOR__ == 3 && __GNUC_PATCHLEVEL__ >= 4)
#define __bug_trap() __builtin_trap()
#else
#define __bug_trap() \
__asm__ __volatile__ ("t 0x5\n\t" : : )
#endif
#ifdef CONFIG_DEBUG_BUGVERBOSE
extern void do_BUG(const char *file, int line);
#define BUG() do {					\
	do_BUG(__FILE__, __LINE__);			\
	__builtin_trap();				\
} while (0)
#else
#define BUG()		__builtin_trap()
#endif

#define HAVE_ARCH_BUG
...
/* include/asm-sparc/bugs.h:  Sparc probes for various bugs.
 *
 * Copyright (C) 1996, 2007 David S. Miller (davem@davemloft.net)
 */

#ifdef CONFIG_SPARC32
#include <asm/cpudata.h>
#endif

#ifdef CONFIG_SPARC64
#include <asm/sstate.h>
#endif

extern unsigned long loops_per_jiffy;

static void __init check_bugs(void)
{
#if defined(CONFIG_SPARC32) && !defined(CONFIG_SMP)
	cpu_data(0).udelay_val = loops_per_jiffy;
#endif
#ifdef CONFIG_SPARC64
	sstate_running();
#endif
}
/* $Id: byteorder.h,v 1.15 1997/12/16 19:20:44 davem Exp $ */
#ifndef _SPARC_BYTEORDER_H
#define _SPARC_BYTEORDER_H

#include <asm/types.h>
#include <asm/asi.h>
#ifdef __GNUC__
#ifdef CONFIG_SPARC32
#define __SWAB_64_THRU_32__
#endif
#ifdef CONFIG_SPARC64
static inline __u16 ___arch__swab16p(const __u16 *addr)
{
__u16 ret;
__asm__ __volatile__ ("lduha [%1] %2, %0"
: "=r" (ret)
: "r" (addr), "i" (ASI_PL));
return ret;
}
static inline __u32 ___arch__swab32p(const __u32 *addr)
{
__u32 ret;
__asm__ __volatile__ ("lduwa [%1] %2, %0"
: "=r" (ret)
: "r" (addr), "i" (ASI_PL));
return ret;
}
static inline __u64 ___arch__swab64p(const __u64 *addr)
{
__u64 ret;
__asm__ __volatile__ ("ldxa [%1] %2, %0"
: "=r" (ret)
: "r" (addr), "i" (ASI_PL));
return ret;
}
#define __arch__swab16p(x) ___arch__swab16p(x)
#define __arch__swab32p(x) ___arch__swab32p(x)
#define __arch__swab64p(x) ___arch__swab64p(x)
#endif /* CONFIG_SPARC64 */
#define __BYTEORDER_HAS_U64__
#if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__)
# define __BYTEORDER_HAS_U64__
# define __SWAB_64_THRU_32__
#endif

#include <linux/byteorder/big_endian.h>
...
/* cache.h:  Cache specific code for the Sparc.  These include flushing
 * and direct tag/data line access.
 *
 * Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net)
 */

#ifndef _SPARC_CACHE_H
#define _SPARC_CACHE_H

#define L1_CACHE_SHIFT 5
#define L1_CACHE_BYTES 32
#define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))

#ifdef CONFIG_SPARC32
#define SMP_CACHE_BYTES_SHIFT 5
#else
#define SMP_CACHE_BYTES_SHIFT 6
#endif
#define SMP_CACHE_BYTES (1 << SMP_CACHE_BYTES_SHIFT)
#define __read_mostly __attribute__((__section__(".data.read_mostly")))
#ifdef CONFIG_SPARC32
#include <asm/asi.h>
/* Direct access to the instruction cache is provided through and
 * alternate address space.  The IDC bit must be off in the ICCR on
@@ -125,5 +133,6 @@ static inline void flush_ei_user(unsigned int addr)
			     "r" (addr), "i" (ASI_M_FLUSH_USER) :
			     "memory");
}
#endif /* CONFIG_SPARC32 */
#endif /* !(_SPARC_CACHE_H) */
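The __read_mostly annotation introduced above places a variable into the .data.read_mostly output section that this merge also adds to the sparc32 linker script, keeping rarely written data away from cache lines that are frequently dirtied. A minimal usage sketch (the variable is illustrative, not from this commit):

#include <linux/cache.h>

/* Set once during boot and then only read in hot paths: a good candidate
 * for .data.read_mostly, so unrelated writes do not bounce its cache line.
 */
static int example_feature_enabled __read_mostly = 1;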
/* include/asm-sparc/current.h
 *
 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 * Copyright (C) 2002 Pete Zaitcev (zaitcev@yahoo.com)
 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
 *
 * Derived from "include/asm-s390/current.h" by
 * Martin Schwidefsky (schwidefsky@de.ibm.com)
 * Derived from "include/asm-i386/current.h"
 */
#ifndef _SPARC_CURRENT_H
#define _SPARC_CURRENT_H

#include <linux/thread_info.h>

#ifdef CONFIG_SPARC64
register struct task_struct *current asm("g4");
#endif

#ifdef CONFIG_SPARC32
/* We might want to consider using %g4 like sparc64 to shave a few cycles.
 *
 * Two stage process (inline + #define) for type-checking.
 * We also obfuscate get_current() to check if anyone used that by mistake.
 */
struct task_struct;
static inline struct task_struct *__get_current(void)
{
	return current_thread_info()->task;
}
#define current __get_current()
#endif

#endif /* !(_SPARC_CURRENT_H) */
@@ -19,5 +19,3 @@ struct dev_archdata {
};

#endif /* _ASM_SPARC_DEVICE_H */
/* $Id: errno.h,v 1.6 1997/04/15 09:03:38 davem Exp $ */
#ifndef _SPARC_ERRNO_H
#define _SPARC_ERRNO_H
...
#ifndef _SPARC_FB_H_
#define _SPARC_FB_H_
#include <linux/fb.h>
#include <linux/fs.h>
#include <asm/page.h>
#include <asm/prom.h>

static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
unsigned long off)
{
#ifdef CONFIG_SPARC64
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
}
static inline int fb_is_primary_device(struct fb_info *info)
{
@@ -18,4 +26,4 @@ static inline int fb_is_primary_device(struct fb_info *info)
	return 0;
}

#endif /* _SPARC_FB_H_ */
@@ -21,9 +21,4 @@ extern struct bus_type sbus_bus_type;
#define of_bus_type	of_platform_bus_type	/* for compatibility */
extern struct of_device *of_platform_device_create(struct device_node *np,
const char *bus_id,
struct device *parent,
struct bus_type *bus);
#endif /* _ASM_SPARC_OF_PLATFORM_H */ #endif /* _ASM_SPARC_OF_PLATFORM_H */
...@@ -9,7 +9,7 @@ ...@@ -9,7 +9,7 @@
* Copyright (C) 1996-2005 Paul Mackerras. * Copyright (C) 1996-2005 Paul Mackerras.
* *
* Updates for PPC64 by Peter Bergner & David Engebretsen, IBM Corp. * Updates for PPC64 by Peter Bergner & David Engebretsen, IBM Corp.
* Updates for SPARC32 by David S. Miller * Updates for SPARC by David S. Miller
* *
* This program is free software; you can redistribute it and/or * This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License * modify it under the terms of the GNU General Public License
...@@ -39,6 +39,7 @@ struct property { ...@@ -39,6 +39,7 @@ struct property {
unsigned int unique_id; unsigned int unique_id;
}; };
struct of_irq_controller;
struct device_node { struct device_node {
const char *name; const char *name;
const char *type; const char *type;
...@@ -58,11 +59,19 @@ struct device_node { ...@@ -58,11 +59,19 @@ struct device_node {
unsigned long _flags; unsigned long _flags;
void *data; void *data;
unsigned int unique_id; unsigned int unique_id;
struct of_irq_controller *irq_trans;
};
struct of_irq_controller {
unsigned int (*irq_build)(struct device_node *, unsigned int, void *);
void *data;
}; };
#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags) #define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags)
#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags) #define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags)
extern struct device_node *of_find_node_by_cpuid(int cpuid);
extern int of_set_property(struct device_node *node, const char *name, void *val, int len); extern int of_set_property(struct device_node *node, const char *name, void *val, int len);
extern int of_getintprop_default(struct device_node *np, extern int of_getintprop_default(struct device_node *np,
const char *name, const char *name,
......
/* $Id: a.out.h,v 1.8 2002/02/09 19:49:31 davem Exp $ */ #include <asm-sparc/a.out.h>
#ifndef __SPARC64_A_OUT_H__
#define __SPARC64_A_OUT_H__
#define SPARC_PGSIZE 0x2000 /* Thanks to the sun4 architecture... */
#define SEGMENT_SIZE SPARC_PGSIZE /* whee... */
#ifndef __ASSEMBLY__
struct exec {
unsigned char a_dynamic:1; /* A __DYNAMIC is in this image */
unsigned char a_toolversion:7;
unsigned char a_machtype;
unsigned short a_info;
unsigned int a_text; /* length of text, in bytes */
unsigned int a_data; /* length of data, in bytes */
unsigned int a_bss; /* length of bss, in bytes */
unsigned int a_syms; /* length of symbol table, in bytes */
unsigned int a_entry; /* where program begins */
unsigned int a_trsize;
unsigned int a_drsize;
};
#endif /* !__ASSEMBLY__ */
/* Where in the file does the text information begin? */
#define N_TXTOFF(x) (N_MAGIC(x) == ZMAGIC ? 0 : sizeof (struct exec))
/* Where do the Symbols start? */
#define N_SYMOFF(x) (N_TXTOFF(x) + (x).a_text + \
(x).a_data + (x).a_trsize + \
(x).a_drsize)
/* Where does text segment go in memory after being loaded? */
#define N_TXTADDR(x) (unsigned long)(((N_MAGIC(x) == ZMAGIC) && \
((x).a_entry < SPARC_PGSIZE)) ? \
0 : SPARC_PGSIZE)
/* And same for the data segment.. */
#define N_DATADDR(x) (N_MAGIC(x)==OMAGIC ? \
(N_TXTADDR(x) + (x).a_text) \
: (unsigned long)(_N_SEGMENT_ROUND (_N_TXTENDADDR(x))))
#define N_TRSIZE(a) ((a).a_trsize)
#define N_DRSIZE(a) ((a).a_drsize)
#define N_SYMSIZE(a) ((a).a_syms)
#ifndef __ASSEMBLY__
/*
* Sparc relocation types
*/
enum reloc_type
{
RELOC_8,
RELOC_16,
RELOC_32, /* simplest relocs */
RELOC_DISP8,
RELOC_DISP16,
RELOC_DISP32, /* Disp's (pc-rel) */
RELOC_WDISP30,
RELOC_WDISP22, /* SR word disp's */
RELOC_HI22,
RELOC_22, /* SR 22-bit relocs */
RELOC_13,
RELOC_LO10, /* SR 13&10-bit relocs */
RELOC_SFA_BASE,
RELOC_SFA_OFF13, /* SR S.F.A. relocs */
RELOC_BASE10,
RELOC_BASE13,
RELOC_BASE22, /* base_relative pic */
RELOC_PC10,
RELOC_PC22, /* special pc-rel pic */
RELOC_JMP_TBL, /* jmp_tbl_rel in pic */
RELOC_SEGOFF16, /* ShLib offset-in-seg */
RELOC_GLOB_DAT,
RELOC_JMP_SLOT,
RELOC_RELATIVE /* rtld relocs */
};
/*
* Format of a relocation datum.
*/
struct relocation_info /* used when header.a_machtype == M_SPARC */
{
unsigned int r_address; /* relocation addr */
unsigned int r_index:24; /* segment index or symbol index */
unsigned int r_extern:1; /* if F, r_index==SEG#; if T, SYM idx */
unsigned int r_pad:2; /* <unused> */
enum reloc_type r_type:5; /* type of relocation to perform */
int r_addend; /* addend for relocation value */
};
#define N_RELOCATION_INFO_DECLARED 1
#endif /* !(__ASSEMBLY__) */
#endif /* !(__SPARC64_A_OUT_H__) */
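Worked example (editorial, with made-up numbers) of how the layout macros above resolve for a ZMAGIC image:

/* Assume a_entry = 0x20, a_text = 0x4000, a_data = 0x1000, a_trsize = a_drsize = 0:
 *
 *   N_TXTOFF(x)  == 0        ZMAGIC text starts at file offset 0
 *   N_TXTADDR(x) == 0        a_entry < SPARC_PGSIZE, so text loads at address 0
 *   N_SYMOFF(x)  == 0 + 0x4000 + 0x1000 + 0 + 0 == 0x5000
 *   N_DATADDR(x) == _N_SEGMENT_ROUND(_N_TXTENDADDR(x))   (non-OMAGIC case)
 */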
#ifndef __ASM_SPARC64_AUXVEC_H #include <asm-sparc/auxvec.h>
#define __ASM_SPARC64_AUXVEC_H
#endif /* !(__ASM_SPARC64_AUXVEC_H) */
#ifndef _SPARC64_BPP_H #include <asm-sparc/bpp.h>
#define _SPARC64_BPP_H
/*
* Copyright (c) 1995 Picture Elements
* Stephen Williams
* Gus Baldauf
*
* Linux/SPARC port by Peter Zaitcev.
* Integration into SPARC tree by Tom Dyas.
*/
#include <linux/ioctl.h>
/*
* This is a driver that supports IEEE Std 1284-1994 communications
* with compliant or compatible devices. It will use whatever features
 * the device supports, preferring those that are typically faster.
*
* When the device is opened, it is left in COMPATIBILITY mode, and
 * writes work like any printer device. The driver only attempts to
* negotiate 1284 modes when needed so that plugs can be pulled,
* switch boxes switched, etc., without disrupting things. It will
* also leave the device in compatibility mode when closed.
*/
/*
* This driver also supplies ioctls to manually manipulate the
* pins. This is great for testing devices, or writing code to deal
* with bizzarro-mode of the ACME Special TurboThingy Plus.
*
 * NOTE: These ioctls currently do not interact well with
* read/write. Caveat emptor.
*
* PUT_PINS allows us to assign the sense of all the pins, including
* the data pins if being driven by the host. The GET_PINS returns the
* pins that the peripheral drives, including data if appropriate.
*/
# define BPP_PUT_PINS _IOW('B', 1, int)
# define BPP_GET_PINS _IOR('B', 2, char) /* that's bogus - should've been _IO */
# define BPP_PUT_DATA _IOW('B', 3, int)
# define BPP_GET_DATA _IOR('B', 4, char) /* ditto */
/*
 * Set the data bus to input mode. Disengage the data pin driver and
* be prepared to read values from the peripheral. If the arg is 0,
* then revert the bus to output mode.
*/
# define BPP_SET_INPUT _IOW('B', 5, int)
/*
* These bits apply to the PUT operation...
*/
# define BPP_PP_nStrobe 0x0001
# define BPP_PP_nAutoFd 0x0002
# define BPP_PP_nInit 0x0004
# define BPP_PP_nSelectIn 0x0008
/*
* These apply to the GET operation, which also reads the current value
* of the previously put values. A bit mask of these will be returned
* as a bit mask in the return code of the ioctl().
*/
# define BPP_GP_nAck 0x0100
# define BPP_GP_Busy 0x0200
# define BPP_GP_PError 0x0400
# define BPP_GP_Select 0x0800
# define BPP_GP_nFault 0x1000
#endif
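A hedged user-space sketch of the pin ioctls described above; the /dev/bpp0 device path and the exact argument convention for BPP_PUT_PINS are assumptions for illustration only:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/bpp.h>			/* BPP_* ioctls and pin masks */

int main(void)
{
	int pins;
	int fd = open("/dev/bpp0", O_RDWR);	/* device node name is an assumption */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Drive nStrobe and nInit on the interface. */
	if (ioctl(fd, BPP_PUT_PINS, BPP_PP_nStrobe | BPP_PP_nInit) < 0)
		perror("BPP_PUT_PINS");

	/* Per the header comment, GET_PINS reports the peripheral's pins
	 * in the ioctl() return code. */
	pins = ioctl(fd, BPP_GET_PINS, 0);
	if (pins >= 0 && (pins & BPP_GP_Busy))
		printf("peripheral is busy\n");

	close(fd);
	return 0;
}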
/* $Id: bsderrno.h,v 1.1 1996/12/26 13:25:21 davem Exp $
* bsderrno.h: Error numbers for NetBSD binary compatibility
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
#ifndef _SPARC64_BSDERRNO_H
#define _SPARC64_BSDERRNO_H
#define BSD_EPERM 1 /* Operation not permitted */
#define BSD_ENOENT 2 /* No such file or directory */
#define BSD_ESRCH 3 /* No such process */
#define BSD_EINTR 4 /* Interrupted system call */
#define BSD_EIO 5 /* Input/output error */
#define BSD_ENXIO 6 /* Device not configured */
#define BSD_E2BIG 7 /* Argument list too long */
#define BSD_ENOEXEC 8 /* Exec format error */
#define BSD_EBADF 9 /* Bad file descriptor */
#define BSD_ECHILD 10 /* No child processes */
#define BSD_EDEADLK 11 /* Resource deadlock avoided */
#define BSD_ENOMEM 12 /* Cannot allocate memory */
#define BSD_EACCES 13 /* Permission denied */
#define BSD_EFAULT 14 /* Bad address */
#define BSD_ENOTBLK 15 /* Block device required */
#define BSD_EBUSY 16 /* Device busy */
#define BSD_EEXIST 17 /* File exists */
#define BSD_EXDEV 18 /* Cross-device link */
#define BSD_ENODEV 19 /* Operation not supported by device */
#define BSD_ENOTDIR 20 /* Not a directory */
#define BSD_EISDIR 21 /* Is a directory */
#define BSD_EINVAL 22 /* Invalid argument */
#define BSD_ENFILE 23 /* Too many open files in system */
#define BSD_EMFILE 24 /* Too many open files */
#define BSD_ENOTTY 25 /* Inappropriate ioctl for device */
#define BSD_ETXTBSY 26 /* Text file busy */
#define BSD_EFBIG 27 /* File too large */
#define BSD_ENOSPC 28 /* No space left on device */
#define BSD_ESPIPE 29 /* Illegal seek */
#define BSD_EROFS 30 /* Read-only file system */
#define BSD_EMLINK 31 /* Too many links */
#define BSD_EPIPE 32 /* Broken pipe */
#define BSD_EDOM 33 /* Numerical argument out of domain */
#define BSD_ERANGE 34 /* Result too large */
#define BSD_EAGAIN 35 /* Resource temporarily unavailable */
#define BSD_EWOULDBLOCK EAGAIN /* Operation would block */
#define BSD_EINPROGRESS 36 /* Operation now in progress */
#define BSD_EALREADY 37 /* Operation already in progress */
#define BSD_ENOTSOCK 38 /* Socket operation on non-socket */
#define BSD_EDESTADDRREQ 39 /* Destination address required */
#define BSD_EMSGSIZE 40 /* Message too long */
#define BSD_EPROTOTYPE 41 /* Protocol wrong type for socket */
#define BSD_ENOPROTOOPT 42 /* Protocol not available */
#define BSD_EPROTONOSUPPORT 43 /* Protocol not supported */
#define BSD_ESOCKTNOSUPPORT 44 /* Socket type not supported */
#define BSD_EOPNOTSUPP 45 /* Operation not supported */
#define BSD_EPFNOSUPPORT 46 /* Protocol family not supported */
#define BSD_EAFNOSUPPORT 47 /* Address family not supported by protocol family */
#define BSD_EADDRINUSE 48 /* Address already in use */
#define BSD_EADDRNOTAVAIL 49 /* Can't assign requested address */
#define BSD_ENETDOWN 50 /* Network is down */
#define BSD_ENETUNREACH 51 /* Network is unreachable */
#define BSD_ENETRESET 52 /* Network dropped connection on reset */
#define BSD_ECONNABORTED 53 /* Software caused connection abort */
#define BSD_ECONNRESET 54 /* Connection reset by peer */
#define BSD_ENOBUFS 55 /* No buffer space available */
#define BSD_EISCONN 56 /* Socket is already connected */
#define BSD_ENOTCONN 57 /* Socket is not connected */
#define BSD_ESHUTDOWN 58 /* Can't send after socket shutdown */
#define BSD_ETOOMANYREFS 59 /* Too many references: can't splice */
#define BSD_ETIMEDOUT 60 /* Operation timed out */
#define BSD_ECONNREFUSED 61 /* Connection refused */
#define BSD_ELOOP 62 /* Too many levels of symbolic links */
#define BSD_ENAMETOOLONG 63 /* File name too long */
#define BSD_EHOSTDOWN 64 /* Host is down */
#define BSD_EHOSTUNREACH 65 /* No route to host */
#define BSD_ENOTEMPTY 66 /* Directory not empty */
#define BSD_EPROCLIM 67 /* Too many processes */
#define BSD_EUSERS 68 /* Too many users */
#define BSD_EDQUOT 69 /* Disc quota exceeded */
#define BSD_ESTALE 70 /* Stale NFS file handle */
#define BSD_EREMOTE 71 /* Too many levels of remote in path */
#define BSD_EBADRPC 72 /* RPC struct is bad */
#define BSD_ERPCMISMATCH 73 /* RPC version wrong */
#define BSD_EPROGUNAVAIL 74 /* RPC prog. not avail */
#define BSD_EPROGMISMATCH 75 /* Program version wrong */
#define BSD_EPROCUNAVAIL 76 /* Bad procedure for program */
#define BSD_ENOLCK 77 /* No locks available */
#define BSD_ENOSYS 78 /* Function not implemented */
#define BSD_EFTYPE 79 /* Inappropriate file type or format */
#define BSD_EAUTH 80 /* Authentication error */
#define BSD_ENEEDAUTH 81 /* Need authenticator */
#define BSD_ELAST 81 /* Must be equal largest errno */
#endif /* !(_SPARC64_BSDERRNO_H) */
#ifndef _SPARC64_BUG_H #include <asm-sparc/bug.h>
#define _SPARC64_BUG_H
#ifdef CONFIG_BUG
#include <linux/compiler.h>
#ifdef CONFIG_DEBUG_BUGVERBOSE
extern void do_BUG(const char *file, int line);
#define BUG() do { \
do_BUG(__FILE__, __LINE__); \
__builtin_trap(); \
} while (0)
#else
#define BUG() __builtin_trap()
#endif
#define HAVE_ARCH_BUG
#endif
#include <asm-generic/bug.h>
#endif
/* bugs.h: Sparc64 probes for various bugs. #include <asm-sparc/bugs.h>
*
* Copyright (C) 1996, 2007 David S. Miller (davem@davemloft.net)
*/
#include <asm/sstate.h>
static void __init check_bugs(void)
{
sstate_running();
}
#ifndef _SPARC64_BYTEORDER_H #include <asm-sparc/byteorder.h>
#define _SPARC64_BYTEORDER_H
#include <asm/types.h>
#include <asm/asi.h>
#ifdef __GNUC__
static inline __u16 ___arch__swab16p(const __u16 *addr)
{
__u16 ret;
__asm__ __volatile__ ("lduha [%1] %2, %0"
: "=r" (ret)
: "r" (addr), "i" (ASI_PL));
return ret;
}
static inline __u32 ___arch__swab32p(const __u32 *addr)
{
__u32 ret;
__asm__ __volatile__ ("lduwa [%1] %2, %0"
: "=r" (ret)
: "r" (addr), "i" (ASI_PL));
return ret;
}
static inline __u64 ___arch__swab64p(const __u64 *addr)
{
__u64 ret;
__asm__ __volatile__ ("ldxa [%1] %2, %0"
: "=r" (ret)
: "r" (addr), "i" (ASI_PL));
return ret;
}
#define __arch__swab16p(x) ___arch__swab16p(x)
#define __arch__swab32p(x) ___arch__swab32p(x)
#define __arch__swab64p(x) ___arch__swab64p(x)
#define __BYTEORDER_HAS_U64__
#endif /* __GNUC__ */
#include <linux/byteorder/big_endian.h>
#endif /* _SPARC64_BYTEORDER_H */
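Explanatory note (editorial): ASI_PL is the primary address space in little-endian mode, so each helper above byte-swaps with a single lduha/lduwa/ldxa load, and <linux/byteorder/big_endian.h> layers the generic conversion helpers on top of them. A small sketch of what that looks like to a caller:

/* Editorial sketch: reading a little-endian 32-bit field through the
 * helpers built on ___arch__swab32p(); on sparc64 this becomes one
 * "lduwa [p] ASI_PL" load (requires <asm/byteorder.h>).
 */
static inline u32 read_le32_field(const __le32 *p)
{
	return le32_to_cpup(p);
}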
/* #include <asm-sparc/cache.h>
* include/asm-sparc64/cache.h
*/
#ifndef __ARCH_SPARC64_CACHE_H
#define __ARCH_SPARC64_CACHE_H
/* bytes per L1 cache line */
#define L1_CACHE_SHIFT 5
#define L1_CACHE_BYTES 32 /* Two 16-byte sub-blocks per line. */
#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
#define SMP_CACHE_BYTES_SHIFT 6
#define SMP_CACHE_BYTES (1 << SMP_CACHE_BYTES_SHIFT) /* L2 cache line size. */
#define __read_mostly __attribute__((__section__(".data.read_mostly")))
#endif
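Quick arithmetic check (editorial) of the alignment macro with the 32-byte line size defined above:

/* L1_CACHE_ALIGN(x) rounds x up to the next L1 line boundary:
 *   L1_CACHE_ALIGN(1)  == 32
 *   L1_CACHE_ALIGN(32) == 32
 *   L1_CACHE_ALIGN(33) == 64
 */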
#ifndef __SPARC64_CPUTIME_H #include <asm-sparc/cputime.h>
#define __SPARC64_CPUTIME_H
#include <asm-generic/cputime.h>
#endif /* __SPARC64_CPUTIME_H */
#ifndef _SPARC64_CURRENT_H #include <asm-sparc/current.h>
#define _SPARC64_CURRENT_H
#include <linux/thread_info.h>
register struct task_struct *current asm("g4");
#endif /* !(_SPARC64_CURRENT_H) */
/* #include <asm-sparc/device.h>
* Arch specific extensions to struct device
*
* This file is released under the GPLv2
*/
#ifndef _ASM_SPARC64_DEVICE_H
#define _ASM_SPARC64_DEVICE_H
struct device_node;
struct of_device;
struct dev_archdata {
void *iommu;
void *stc;
void *host_controller;
struct device_node *prom_node;
struct of_device *op;
};
#endif /* _ASM_SPARC64_DEVICE_H */
#include <asm-generic/div64.h> #include <asm-sparc/div64.h>
#ifndef _ASM_EMERGENCY_RESTART_H #include <asm-sparc/emergency-restart.h>
#define _ASM_EMERGENCY_RESTART_H
#include <asm-generic/emergency-restart.h>
#endif /* _ASM_EMERGENCY_RESTART_H */
/* $Id: errno.h,v 1.2 1997/04/15 12:46:11 jj Exp $ */ #include <asm-sparc/errno.h>
#ifndef _SPARC64_ERRNO_H
#define _SPARC64_ERRNO_H
/* These match the SunOS error numbering scheme. */
#include <asm-generic/errno-base.h>
#define EWOULDBLOCK EAGAIN /* Operation would block */
#define EINPROGRESS 36 /* Operation now in progress */
#define EALREADY 37 /* Operation already in progress */
#define ENOTSOCK 38 /* Socket operation on non-socket */
#define EDESTADDRREQ 39 /* Destination address required */
#define EMSGSIZE 40 /* Message too long */
#define EPROTOTYPE 41 /* Protocol wrong type for socket */
#define ENOPROTOOPT 42 /* Protocol not available */
#define EPROTONOSUPPORT 43 /* Protocol not supported */
#define ESOCKTNOSUPPORT 44 /* Socket type not supported */
#define EOPNOTSUPP 45 /* Op not supported on transport endpoint */
#define EPFNOSUPPORT 46 /* Protocol family not supported */
#define EAFNOSUPPORT 47 /* Address family not supported by protocol */
#define EADDRINUSE 48 /* Address already in use */
#define EADDRNOTAVAIL 49 /* Cannot assign requested address */
#define ENETDOWN 50 /* Network is down */
#define ENETUNREACH 51 /* Network is unreachable */
#define ENETRESET 52 /* Net dropped connection because of reset */
#define ECONNABORTED 53 /* Software caused connection abort */
#define ECONNRESET 54 /* Connection reset by peer */
#define ENOBUFS 55 /* No buffer space available */
#define EISCONN 56 /* Transport endpoint is already connected */
#define ENOTCONN 57 /* Transport endpoint is not connected */
#define ESHUTDOWN 58 /* No send after transport endpoint shutdown */
#define ETOOMANYREFS 59 /* Too many references: cannot splice */
#define ETIMEDOUT 60 /* Connection timed out */
#define ECONNREFUSED 61 /* Connection refused */
#define ELOOP 62 /* Too many symbolic links encountered */
#define ENAMETOOLONG 63 /* File name too long */
#define EHOSTDOWN 64 /* Host is down */
#define EHOSTUNREACH 65 /* No route to host */
#define ENOTEMPTY 66 /* Directory not empty */
#define EPROCLIM 67 /* SUNOS: Too many processes */
#define EUSERS 68 /* Too many users */
#define EDQUOT 69 /* Quota exceeded */
#define ESTALE 70 /* Stale NFS file handle */
#define EREMOTE 71 /* Object is remote */
#define ENOSTR 72 /* Device not a stream */
#define ETIME 73 /* Timer expired */
#define ENOSR 74 /* Out of streams resources */
#define ENOMSG 75 /* No message of desired type */
#define EBADMSG 76 /* Not a data message */
#define EIDRM 77 /* Identifier removed */
#define EDEADLK 78 /* Resource deadlock would occur */
#define ENOLCK 79 /* No record locks available */
#define ENONET 80 /* Machine is not on the network */
#define ERREMOTE 81 /* SunOS: Too many lvls of remote in path */
#define ENOLINK 82 /* Link has been severed */
#define EADV 83 /* Advertise error */
#define ESRMNT 84 /* Srmount error */
#define ECOMM 85 /* Communication error on send */
#define EPROTO 86 /* Protocol error */
#define EMULTIHOP 87 /* Multihop attempted */
#define EDOTDOT 88 /* RFS specific error */
#define EREMCHG 89 /* Remote address changed */
#define ENOSYS 90 /* Function not implemented */
/* The rest have no SunOS equivalent. */
#define ESTRPIPE 91 /* Streams pipe error */
#define EOVERFLOW 92 /* Value too large for defined data type */
#define EBADFD 93 /* File descriptor in bad state */
#define ECHRNG 94 /* Channel number out of range */
#define EL2NSYNC 95 /* Level 2 not synchronized */
#define EL3HLT 96 /* Level 3 halted */
#define EL3RST 97 /* Level 3 reset */
#define ELNRNG 98 /* Link number out of range */
#define EUNATCH 99 /* Protocol driver not attached */
#define ENOCSI 100 /* No CSI structure available */
#define EL2HLT 101 /* Level 2 halted */
#define EBADE 102 /* Invalid exchange */
#define EBADR 103 /* Invalid request descriptor */
#define EXFULL 104 /* Exchange full */
#define ENOANO 105 /* No anode */
#define EBADRQC 106 /* Invalid request code */
#define EBADSLT 107 /* Invalid slot */
#define EDEADLOCK 108 /* File locking deadlock error */
#define EBFONT 109 /* Bad font file format */
#define ELIBEXEC 110 /* Cannot exec a shared library directly */
#define ENODATA 111 /* No data available */
#define ELIBBAD 112 /* Accessing a corrupted shared library */
#define ENOPKG 113 /* Package not installed */
#define ELIBACC 114 /* Can not access a needed shared library */
#define ENOTUNIQ 115 /* Name not unique on network */
#define ERESTART 116 /* Interrupted syscall should be restarted */
#define EUCLEAN 117 /* Structure needs cleaning */
#define ENOTNAM 118 /* Not a XENIX named type file */
#define ENAVAIL 119 /* No XENIX semaphores available */
#define EISNAM 120 /* Is a named type file */
#define EREMOTEIO 121 /* Remote I/O error */
#define EILSEQ 122 /* Illegal byte sequence */
#define ELIBMAX 123 /* Atmpt to link in too many shared libs */
#define ELIBSCN 124 /* .lib section in a.out corrupted */
#define ENOMEDIUM 125 /* No medium found */
#define EMEDIUMTYPE 126 /* Wrong medium type */
#define ECANCELED 127 /* Operation Cancelled */
#define ENOKEY 128 /* Required key not available */
#define EKEYEXPIRED 129 /* Key has expired */
#define EKEYREVOKED 130 /* Key has been revoked */
#define EKEYREJECTED 131 /* Key was rejected by service */
/* for robust mutexes */
#define EOWNERDEAD 132 /* Owner died */
#define ENOTRECOVERABLE 133 /* State not recoverable */
#endif /* !(_SPARC64_ERRNO_H) */
#ifndef _ASM_FB_H_ #include <asm-sparc/fb.h>
#define _ASM_FB_H_
#include <linux/fb.h>
#include <linux/fs.h>
#include <asm/page.h>
#include <asm/prom.h>
static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
unsigned long off)
{
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
}
static inline int fb_is_primary_device(struct fb_info *info)
{
struct device *dev = info->device;
struct device_node *node;
node = dev->archdata.prom_node;
if (node &&
node == of_console_device)
return 1;
return 0;
}
#endif /* _ASM_FB_H_ */
...@@ -16,7 +16,7 @@ ...@@ -16,7 +16,7 @@
/* BIO layer definitions. */ /* BIO layer definitions. */
extern unsigned long kern_base, kern_size; extern unsigned long kern_base, kern_size;
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
#define BIO_VMERGE_BOUNDARY 0 #define BIO_VMERGE_BOUNDARY 8192
static inline u8 _inb(unsigned long addr) static inline u8 _inb(unsigned long addr)
{ {
......
...@@ -26,6 +26,7 @@ struct iommu_arena { ...@@ -26,6 +26,7 @@ struct iommu_arena {
struct iommu { struct iommu {
spinlock_t lock; spinlock_t lock;
struct iommu_arena arena; struct iommu_arena arena;
void (*flush_all)(struct iommu *);
iopte_t *page_table; iopte_t *page_table;
u32 page_table_map_base; u32 page_table_map_base;
unsigned long iommu_control; unsigned long iommu_control;
......
...@@ -14,11 +14,15 @@ typedef u32 kprobe_opcode_t; ...@@ -14,11 +14,15 @@ typedef u32 kprobe_opcode_t;
#define arch_remove_kprobe(p) do {} while (0) #define arch_remove_kprobe(p) do {} while (0)
#define ARCH_SUPPORTS_KRETPROBES
#define flush_insn_slot(p) \ #define flush_insn_slot(p) \
do { flushi(&(p)->ainsn.insn[0]); \ do { flushi(&(p)->ainsn.insn[0]); \
flushi(&(p)->ainsn.insn[1]); \ flushi(&(p)->ainsn.insn[1]); \
} while (0) } while (0)
void kretprobe_trampoline(void);
/* Architecture specific copy of original instruction*/ /* Architecture specific copy of original instruction*/
struct arch_specific_insn { struct arch_specific_insn {
/* copy of the original instruction */ /* copy of the original instruction */
......
#ifndef _ASM_SPARC64_OF_DEVICE_H #include <asm-sparc/of_device.h>
#define _ASM_SPARC64_OF_DEVICE_H
#ifdef __KERNEL__
#include <linux/device.h>
#include <linux/of.h>
#include <linux/mod_devicetable.h>
#include <asm/openprom.h>
/*
* The of_device is a kind of "base class" that is a superset of
* struct device for use by devices attached to an OF node and
* probed using OF properties.
*/
struct of_device
{
struct device_node *node;
struct device dev;
struct resource resource[PROMREG_MAX];
unsigned int irqs[PROMINTR_MAX];
int num_irqs;
void *sysdata;
int slot;
int portid;
int clock_freq;
};
extern void __iomem *of_ioremap(struct resource *res, unsigned long offset, unsigned long size, char *name);
extern void of_iounmap(struct resource *res, void __iomem *base, unsigned long size);
/* These are just here during the transition */
#include <linux/of_device.h>
#include <linux/of_platform.h>
#endif /* __KERNEL__ */
#endif /* _ASM_SPARC64_OF_DEVICE_H */
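A hedged sketch of how an of_platform driver typically consumes this structure; the driver name, the use of resource[0], and the probe wiring are illustrative assumptions, not part of the patch:

static int __devinit mydev_probe(struct of_device *op,
				 const struct of_device_id *match)
{
	struct resource *res = &op->resource[0];
	unsigned long size = res->end - res->start + 1;
	void __iomem *regs;

	/* Map the first "reg" entry that the OF layer already decoded. */
	regs = of_ioremap(res, 0, size, "mydev");
	if (!regs)
		return -ENOMEM;

	/* ... program the device, request op->irqs[0], etc. ... */

	of_iounmap(res, regs, size);
	return 0;
}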
...@@ -22,9 +22,4 @@ extern struct bus_type sbus_bus_type; ...@@ -22,9 +22,4 @@ extern struct bus_type sbus_bus_type;
#define of_bus_type of_platform_bus_type /* for compatibility */ #define of_bus_type of_platform_bus_type /* for compatibility */
extern struct of_device *of_platform_device_create(struct device_node *np,
const char *bus_id,
struct device *parent,
struct bus_type *bus);
#endif /* _ASM_SPARC64_OF_PLATFORM_H */ #endif /* _ASM_SPARC64_OF_PLATFORM_H */
#ifndef _SPARC64_PROM_H #include <asm-sparc/prom.h>
#define _SPARC64_PROM_H
#ifdef __KERNEL__
/*
* Definitions for talking to the Open Firmware PROM on
* Power Macintosh computers.
*
* Copyright (C) 1996-2005 Paul Mackerras.
*
* Updates for PPC64 by Peter Bergner & David Engebretsen, IBM Corp.
* Updates for SPARC64 by David S. Miller
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/types.h>
#include <linux/proc_fs.h>
#include <asm/atomic.h>
#define OF_ROOT_NODE_ADDR_CELLS_DEFAULT 2
#define OF_ROOT_NODE_SIZE_CELLS_DEFAULT 1
#define of_compat_cmp(s1, s2, l) strncmp((s1), (s2), (l))
#define of_prop_cmp(s1, s2) strcasecmp((s1), (s2))
#define of_node_cmp(s1, s2) strcmp((s1), (s2))
typedef u32 phandle;
typedef u32 ihandle;
struct property {
char *name;
int length;
void *value;
struct property *next;
unsigned long _flags;
unsigned int unique_id;
};
struct of_irq_controller;
struct device_node {
const char *name;
const char *type;
phandle node;
char *path_component_name;
char *full_name;
struct property *properties;
struct property *deadprops; /* removed properties */
struct device_node *parent;
struct device_node *child;
struct device_node *sibling;
struct device_node *next; /* next device of same type */
struct device_node *allnext; /* next in list of all nodes */
struct proc_dir_entry *pde; /* this node's proc directory */
struct kref kref;
unsigned long _flags;
void *data;
unsigned int unique_id;
struct of_irq_controller *irq_trans;
};
struct of_irq_controller {
unsigned int (*irq_build)(struct device_node *, unsigned int, void *);
void *data;
};
#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags)
#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags)
extern struct device_node *of_find_node_by_cpuid(int cpuid);
extern int of_set_property(struct device_node *node, const char *name, void *val, int len);
extern int of_getintprop_default(struct device_node *np,
const char *name,
int def);
extern int of_find_in_proplist(const char *list, const char *match, int len);
extern void prom_build_devicetree(void);
/* Dummy ref counting routines - to be implemented later */
static inline struct device_node *of_node_get(struct device_node *node)
{
return node;
}
static inline void of_node_put(struct device_node *node)
{
}
/*
* NB: This is here while we transition from using asm/prom.h
* to linux/of.h
*/
#include <linux/of.h>
extern struct device_node *of_console_device;
extern char *of_console_path;
extern char *of_console_options;
#endif /* __KERNEL__ */
#endif /* _SPARC64_PROM_H */
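A small, hedged example of walking the PROM device tree with the helpers declared above; the property name and the reporting loop are illustrative, not from the patch:

#include <linux/kernel.h>
#include <asm/prom.h>

/* Editorial sketch: report each CPU node's "clock-frequency" property,
 * defaulting to 0 when it is absent.
 */
static void report_cpu_clocks(int ncpus)
{
	int cpuid;

	for (cpuid = 0; cpuid < ncpus; cpuid++) {
		struct device_node *dp = of_find_node_by_cpuid(cpuid);

		if (!dp)
			continue;

		printk(KERN_INFO "%s: cpu %d at %d Hz\n",
		       dp->full_name, cpuid,
		       of_getintprop_default(dp, "clock-frequency", 0));
		of_node_put(dp);
	}
}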