Commit 5de1ccbe authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/gerg/m68knommu

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/gerg/m68knommu: (41 commits)
  m68knommu: improve compile arch switch settings
  m68knommu: fix 5407 ColdFire UART vector setup
  m68knommu: fix 5307 ColdFire UART vector setup
  m68knommu: fix 5249 ColdFire UART vector setup
  m68knommu: fix 5249 ColdFire UART setup
  m68knommu: fix end of uart table marker
  m68knommu: switch to using generic_handle_irq()
  m68k: merge the mmu and non-mmu versions of tlbflush.h
  m68knommu: introduce basic clk infrastructure
  m68k: merge the mmu and non-mmu versions of module.h
  m68knommu: add missing interrupt line definition for UART 2
  m68k: merge the mmu and non-mmu versions of mmu_context.h
  m68k: merge the mmu and non-mmu versions of current.h
  m68k: merge the mmu and non-mmu versions of div64.h
  m68k: merge the mmu and non-mmu versions of bugs.h
  m68k: merge the mmu and non-mmu versions of bug.h
  m68k: use the mmu version of cache.h for m68knommu as well
  m68k: use the mmu version of bootinfo.h for m68knommu as well
  m68k: merge the mmu and non-mmu versions of fb.h
  m68k: merge the mmu and non-mmu versions of segment.h
  ...
parents 223cdea4 b4d63e8e
#ifdef __uClinux__
#include "bug_no.h"
#ifndef _M68K_BUG_H
#define _M68K_BUG_H
#ifdef CONFIG_MMU
#ifdef CONFIG_BUG
#ifdef CONFIG_DEBUG_BUGVERBOSE
#ifndef CONFIG_SUN3
#define BUG() do { \
printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
__builtin_trap(); \
} while (0)
#else
#include "bug_mm.h"
#define BUG() do { \
printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
panic("BUG!"); \
} while (0)
#endif
#else
#define BUG() do { \
__builtin_trap(); \
} while (0)
#endif
#define HAVE_ARCH_BUG
#endif
#endif /* CONFIG_MMU */
#include <asm-generic/bug.h>
#endif
#ifndef _M68K_BUG_H
#define _M68K_BUG_H
#ifdef CONFIG_BUG
#ifdef CONFIG_DEBUG_BUGVERBOSE
#ifndef CONFIG_SUN3
#define BUG() do { \
printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
__builtin_trap(); \
} while (0)
#else
#define BUG() do { \
printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
panic("BUG!"); \
} while (0)
#endif
#else
#define BUG() do { \
__builtin_trap(); \
} while (0)
#endif
#define HAVE_ARCH_BUG
#endif
#include <asm-generic/bug.h>
#endif
#ifndef _M68KNOMMU_BUG_H
#define _M68KNOMMU_BUG_H
#include <asm-generic/bug.h>
#endif
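All three variants give callers the same BUG() entry point; what differs is whether it prints the file and line and traps, panics (Sun3), or just traps. A minimal, hypothetical caller sketch (function and argument names are illustrative):

#include <linux/kernel.h>
#include <asm/bug.h>

/* Hypothetical sanity check: an "impossible" state trips BUG(). */
static void check_ring(void *ring)
{
	if (ring == NULL)
		BUG();	/* verbose printk + trap, panic on Sun3, bare trap otherwise */
}
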
#ifdef __uClinux__
#include "bugs_no.h"
/*
* include/asm-m68k/bugs.h
*
* Copyright (C) 1994 Linus Torvalds
*/
/*
* This is included by init/main.c to check for architecture-dependent bugs.
*
* Needs:
* void check_bugs(void);
*/
#ifdef CONFIG_MMU
extern void check_bugs(void); /* in arch/m68k/kernel/setup.c */
#else
#include "bugs_mm.h"
static void check_bugs(void)
{
}
#endif
/*
* include/asm-m68k/bugs.h
*
* Copyright (C) 1994 Linus Torvalds
*/
/*
* This is included by init/main.c to check for architecture-dependent bugs.
*
* Needs:
* void check_bugs(void);
*/
extern void check_bugs(void); /* in arch/m68k/kernel/setup.c */
/*
* include/asm-m68k/bugs.h
*
* Copyright (C) 1994 Linus Torvalds
*/
/*
* This is included by init/main.c to check for architecture-dependent bugs.
*
* Needs:
* void check_bugs(void);
*/
static void check_bugs(void)
{
}
#ifdef __uClinux__
#include "cache_no.h"
#else
#include "cache_mm.h"
/*
* include/asm-m68k/cache.h
*/
#ifndef __ARCH_M68K_CACHE_H
#define __ARCH_M68K_CACHE_H
/* bytes per L1 cache line */
#define L1_CACHE_SHIFT 4
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#endif
/*
* include/asm-m68k/cache.h
*/
#ifndef __ARCH_M68K_CACHE_H
#define __ARCH_M68K_CACHE_H
/* bytes per L1 cache line */
#define L1_CACHE_SHIFT 4
#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
#endif
#ifndef __ARCH_M68KNOMMU_CACHE_H
#define __ARCH_M68KNOMMU_CACHE_H
/* bytes per L1 cache line */
#define L1_CACHE_BYTES 16 /* this needs to be at least 1 */
/* m68k-elf-gcc 2.95.2 doesn't like these */
#define __cacheline_aligned
#define ____cacheline_aligned
#endif
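Both flavours of cache.h feed the generic <linux/cache.h> alignment helpers; a hedged illustration with a made-up structure:

#include <linux/cache.h>

/*
 * Illustrative only: keep a hypothetical hot statistics block on its own
 * cache line.  On m68knommu the attribute is intentionally empty, so this
 * compiles to a plain struct there.
 */
struct example_stats {
	unsigned long rx_packets;
	unsigned long tx_packets;
} ____cacheline_aligned;
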
#ifdef __uClinux__
#include "current_no.h"
#ifndef _M68K_CURRENT_H
#define _M68K_CURRENT_H
#ifdef CONFIG_MMU
register struct task_struct *current __asm__("%a2");
#else
#include "current_mm.h"
#endif
/*
* Rather than dedicate a register (as the m68k source does), we just
* keep a global; we should probably change it all to use current and
* lose _current_task.
*/
#include <linux/thread_info.h>
struct task_struct;
static inline struct task_struct *get_current(void)
{
return(current_thread_info()->task);
}
#define current get_current()
#endif /* CONFIG_MMU */
#endif /* !(_M68K_CURRENT_H) */
#ifndef _M68K_CURRENT_H
#define _M68K_CURRENT_H
register struct task_struct *current __asm__("%a2");
#endif /* !(_M68K_CURRENT_H) */
#ifndef _M68KNOMMU_CURRENT_H
#define _M68KNOMMU_CURRENT_H
/*
* current.h
* (C) Copyright 2000, Lineo, David McCullough <davidm@uclinux.org>
* (C) Copyright 2002, Greg Ungerer (gerg@snapgear.com)
*
* Rather than dedicate a register (as the m68k source does), we just
* keep a global; we should probably change it all to use current and
* lose _current_task.
*/
#include <linux/thread_info.h>
struct task_struct;
static inline struct task_struct *get_current(void)
{
return(current_thread_info()->task);
}
#define current get_current()
#endif /* _M68KNOMMU_CURRENT_H */
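Whichever variant is in use, consumers only ever see the `current` macro: an address register on MMU builds, a current_thread_info() lookup on non-MMU builds. A small illustrative use (the helper is hypothetical):

#include <linux/kernel.h>
#include <linux/sched.h>

/* Illustrative helper: report which task reached this point. */
static void report_caller(void)
{
	printk(KERN_DEBUG "called from %s (pid %d)\n",
	       current->comm, current->pid);
}
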
#ifdef __uClinux__
#include "div64_no.h"
#ifndef _M68K_DIV64_H
#define _M68K_DIV64_H
#ifdef CONFIG_MMU
#include <linux/types.h>
/* n = n / base; return rem; */
#define do_div(n, base) ({ \
union { \
unsigned long n32[2]; \
unsigned long long n64; \
} __n; \
unsigned long __rem, __upper; \
\
__n.n64 = (n); \
if ((__upper = __n.n32[0])) { \
asm ("divul.l %2,%1:%0" \
: "=d" (__n.n32[0]), "=d" (__upper) \
: "d" (base), "0" (__n.n32[0])); \
} \
asm ("divu.l %2,%1:%0" \
: "=d" (__n.n32[1]), "=d" (__rem) \
: "d" (base), "1" (__upper), "0" (__n.n32[1])); \
(n) = __n.n64; \
__rem; \
})
#else
#include "div64_mm.h"
#endif
#include <asm-generic/div64.h>
#endif /* CONFIG_MMU */
#endif /* _M68K_DIV64_H */
#ifndef _M68K_DIV64_H
#define _M68K_DIV64_H
#include <linux/types.h>
/* n = n / base; return rem; */
#define do_div(n, base) ({ \
union { \
unsigned long n32[2]; \
unsigned long long n64; \
} __n; \
unsigned long __rem, __upper; \
\
__n.n64 = (n); \
if ((__upper = __n.n32[0])) { \
asm ("divul.l %2,%1:%0" \
: "=d" (__n.n32[0]), "=d" (__upper) \
: "d" (base), "0" (__n.n32[0])); \
} \
asm ("divu.l %2,%1:%0" \
: "=d" (__n.n32[1]), "=d" (__rem) \
: "d" (base), "1" (__upper), "0" (__n.n32[1])); \
(n) = __n.n64; \
__rem; \
})
#endif /* _M68K_DIV64_H */
#include <asm-generic/div64.h>
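The macro keeps the usual kernel do_div() contract: the 64-bit lvalue is divided in place and the expression evaluates to the 32-bit remainder. A short, hypothetical usage sketch:

#include <linux/types.h>
#include <asm/div64.h>

/* Illustrative: split a nanosecond count into ms and leftover ns. */
static u32 example_ns_to_ms(u64 ns, u32 *rem_ns)
{
	*rem_ns = do_div(ns, 1000000);	/* divides ns in place */
	return (u32)ns;			/* now holds the quotient */
}
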
#ifdef __uClinux__
#include "dma-mapping_no.h"
#ifndef _M68K_DMA_MAPPING_H
#define _M68K_DMA_MAPPING_H
#include <asm/cache.h>
struct scatterlist;
#ifndef CONFIG_MMU_SUN3
static inline int dma_supported(struct device *dev, u64 mask)
{
return 1;
}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
return 0;
}
static inline int dma_get_cache_alignment(void)
{
return 1 << L1_CACHE_SHIFT;
}
static inline int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
return 0;
}
extern void *dma_alloc_coherent(struct device *, size_t,
dma_addr_t *, gfp_t);
extern void dma_free_coherent(struct device *, size_t,
void *, dma_addr_t);
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t flag)
{
return dma_alloc_coherent(dev, size, handle, flag);
}
static inline void dma_free_noncoherent(struct device *dev, size_t size,
void *addr, dma_addr_t handle)
{
dma_free_coherent(dev, size, addr, handle);
}
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction dir)
{
/* we use coherent allocation, so not much to do here. */
}
extern dma_addr_t dma_map_single(struct device *, void *, size_t,
enum dma_data_direction);
static inline void dma_unmap_single(struct device *dev, dma_addr_t addr,
size_t size, enum dma_data_direction dir)
{
}
extern dma_addr_t dma_map_page(struct device *, struct page *,
unsigned long, size_t size,
enum dma_data_direction);
static inline void dma_unmap_page(struct device *dev, dma_addr_t address,
size_t size, enum dma_data_direction dir)
{
}
extern int dma_map_sg(struct device *, struct scatterlist *, int,
enum dma_data_direction);
static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
int nhwentries, enum dma_data_direction dir)
{
}
extern void dma_sync_single_for_device(struct device *, dma_addr_t, size_t,
enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
enum dma_data_direction);
static inline void dma_sync_single_range_for_device(struct device *dev,
dma_addr_t dma_handle, unsigned long offset, size_t size,
enum dma_data_direction direction)
{
/* just sync everything for now */
dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir)
{
}
static inline void dma_sync_single_range_for_cpu(struct device *dev,
dma_addr_t dma_handle, unsigned long offset, size_t size,
enum dma_data_direction direction)
{
/* just sync everything for now */
dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t handle)
{
return 0;
}
#else
#include "dma-mapping_mm.h"
#include <asm-generic/dma-mapping-broken.h>
#endif
#endif /* _M68K_DMA_MAPPING_H */
#ifndef _M68K_DMA_MAPPING_H
#define _M68K_DMA_MAPPING_H
#include <asm/cache.h>
struct scatterlist;
#ifndef CONFIG_MMU_SUN3
static inline int dma_supported(struct device *dev, u64 mask)
{
return 1;
}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
return 0;
}
static inline int dma_get_cache_alignment(void)
{
return 1 << L1_CACHE_SHIFT;
}
static inline int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
return 0;
}
extern void *dma_alloc_coherent(struct device *, size_t,
dma_addr_t *, gfp_t);
extern void dma_free_coherent(struct device *, size_t,
void *, dma_addr_t);
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t flag)
{
return dma_alloc_coherent(dev, size, handle, flag);
}
static inline void dma_free_noncoherent(struct device *dev, size_t size,
void *addr, dma_addr_t handle)
{
dma_free_coherent(dev, size, addr, handle);
}
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction dir)
{
/* we use coherent allocation, so not much to do here. */
}
extern dma_addr_t dma_map_single(struct device *, void *, size_t,
enum dma_data_direction);
static inline void dma_unmap_single(struct device *dev, dma_addr_t addr,
size_t size, enum dma_data_direction dir)
{
}
extern dma_addr_t dma_map_page(struct device *, struct page *,
unsigned long, size_t size,
enum dma_data_direction);
static inline void dma_unmap_page(struct device *dev, dma_addr_t address,
size_t size, enum dma_data_direction dir)
{
}
extern int dma_map_sg(struct device *, struct scatterlist *, int,
enum dma_data_direction);
static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
int nhwentries, enum dma_data_direction dir)
{
}
extern void dma_sync_single_for_device(struct device *, dma_addr_t, size_t,
enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
enum dma_data_direction);
static inline void dma_sync_single_range_for_device(struct device *dev,
dma_addr_t dma_handle, unsigned long offset, size_t size,
enum dma_data_direction direction)
{
/* just sync everything for now */
dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir)
{
}
static inline void dma_sync_single_range_for_cpu(struct device *dev,
dma_addr_t dma_handle, unsigned long offset, size_t size,
enum dma_data_direction direction)
{
/* just sync everything for now */
dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t handle)
{
return 0;
}
#else
#include <asm-generic/dma-mapping-broken.h>
#endif
#endif /* _M68K_DMA_MAPPING_H */
#ifndef _M68KNOMMU_DMA_MAPPING_H
#define _M68KNOMMU_DMA_MAPPING_H
#include <asm-generic/dma-mapping-broken.h>
#endif /* _M68KNOMMU_DMA_MAPPING_H */
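On the non-Sun3 implementations above, DMA is coherent and most of the sync hooks are no-ops, so dma_alloc_coherent() is the primary entry point. A hedged sketch of a caller, with hypothetical names and no teardown shown:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Hypothetical descriptor-ring setup relying on coherent allocations. */
static int example_setup_ring(struct device *dev, size_t size,
			      void **ring, dma_addr_t *ring_dma)
{
	*ring = dma_alloc_coherent(dev, size, ring_dma, GFP_KERNEL);
	if (*ring == NULL)
		return -ENOMEM;
	return 0;
}
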
#ifdef __uClinux__
#include "elf_no.h"
#ifndef __ASMm68k_ELF_H
#define __ASMm68k_ELF_H
/*
* ELF register definitions..
*/
#include <asm/ptrace.h>
#include <asm/user.h>
/*
* 68k ELF relocation types
*/
#define R_68K_NONE 0
#define R_68K_32 1
#define R_68K_16 2
#define R_68K_8 3
#define R_68K_PC32 4
#define R_68K_PC16 5
#define R_68K_PC8 6
#define R_68K_GOT32 7
#define R_68K_GOT16 8
#define R_68K_GOT8 9
#define R_68K_GOT32O 10
#define R_68K_GOT16O 11
#define R_68K_GOT8O 12
#define R_68K_PLT32 13
#define R_68K_PLT16 14
#define R_68K_PLT8 15
#define R_68K_PLT32O 16
#define R_68K_PLT16O 17
#define R_68K_PLT8O 18
#define R_68K_COPY 19
#define R_68K_GLOB_DAT 20
#define R_68K_JMP_SLOT 21
#define R_68K_RELATIVE 22
typedef unsigned long elf_greg_t;
#define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t))
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef struct user_m68kfp_struct elf_fpregset_t;
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch(x) ((x)->e_machine == EM_68K)
/*
* These are used to set parameters in the core dumps.
*/
#define ELF_CLASS ELFCLASS32
#define ELF_DATA ELFDATA2MSB
#define ELF_ARCH EM_68K
/* For SVR4/m68k the function pointer to be registered with `atexit' is
passed in %a1. Although my copy of the ABI has no such statement, it
is actually used on ASV. */
#define ELF_PLAT_INIT(_r, load_addr) _r->a1 = 0
#define USE_ELF_CORE_DUMP
#ifndef CONFIG_SUN3
#define ELF_EXEC_PAGESIZE 4096
#else
#include "elf_mm.h"
#define ELF_EXEC_PAGESIZE 8192
#endif
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
use of this is to invoke "./ld.so someprog" to test out a new version of
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
#ifndef CONFIG_SUN3
#define ELF_ET_DYN_BASE 0xD0000000UL
#else
#define ELF_ET_DYN_BASE 0x0D800000UL
#endif
#define ELF_CORE_COPY_REGS(pr_reg, regs) \
/* Bleech. */ \
pr_reg[0] = regs->d1; \
pr_reg[1] = regs->d2; \
pr_reg[2] = regs->d3; \
pr_reg[3] = regs->d4; \
pr_reg[4] = regs->d5; \
pr_reg[7] = regs->a0; \
pr_reg[8] = regs->a1; \
pr_reg[9] = regs->a2; \
pr_reg[14] = regs->d0; \
pr_reg[15] = rdusp(); \
pr_reg[16] = regs->orig_d0; \
pr_reg[17] = regs->sr; \
pr_reg[18] = regs->pc; \
pr_reg[19] = (regs->format << 12) | regs->vector; \
{ \
struct switch_stack *sw = ((struct switch_stack *)regs) - 1; \
pr_reg[5] = sw->d6; \
pr_reg[6] = sw->d7; \
pr_reg[10] = sw->a3; \
pr_reg[11] = sw->a4; \
pr_reg[12] = sw->a5; \
pr_reg[13] = sw->a6; \
}
/* This yields a mask that user programs can use to figure out what
instruction set this cpu supports. */
#define ELF_HWCAP (0)
/* This yields a string that ld.so will use to load implementation
specific libraries for optimization. This is more specific in
intent than poking at uname or /proc/cpuinfo. */
#define ELF_PLATFORM (NULL)
#define SET_PERSONALITY(ex) set_personality(PER_LINUX)
#endif
#ifndef __ASMm68k_ELF_H
#define __ASMm68k_ELF_H
/*
* ELF register definitions..
*/
#include <asm/ptrace.h>
#include <asm/user.h>
/*
* 68k ELF relocation types
*/
#define R_68K_NONE 0
#define R_68K_32 1
#define R_68K_16 2
#define R_68K_8 3
#define R_68K_PC32 4
#define R_68K_PC16 5
#define R_68K_PC8 6
#define R_68K_GOT32 7
#define R_68K_GOT16 8
#define R_68K_GOT8 9
#define R_68K_GOT32O 10
#define R_68K_GOT16O 11
#define R_68K_GOT8O 12
#define R_68K_PLT32 13
#define R_68K_PLT16 14
#define R_68K_PLT8 15
#define R_68K_PLT32O 16
#define R_68K_PLT16O 17
#define R_68K_PLT8O 18
#define R_68K_COPY 19
#define R_68K_GLOB_DAT 20
#define R_68K_JMP_SLOT 21
#define R_68K_RELATIVE 22
typedef unsigned long elf_greg_t;
#define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t))
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef struct user_m68kfp_struct elf_fpregset_t;
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch(x) ((x)->e_machine == EM_68K)
/*
* These are used to set parameters in the core dumps.
*/
#define ELF_CLASS ELFCLASS32
#define ELF_DATA ELFDATA2MSB
#define ELF_ARCH EM_68K
/* For SVR4/m68k the function pointer to be registered with `atexit' is
passed in %a1. Although my copy of the ABI has no such statement, it
is actually used on ASV. */
#define ELF_PLAT_INIT(_r, load_addr) _r->a1 = 0
#define USE_ELF_CORE_DUMP
#ifndef CONFIG_SUN3
#define ELF_EXEC_PAGESIZE 4096
#else
#define ELF_EXEC_PAGESIZE 8192
#endif
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
use of this is to invoke "./ld.so someprog" to test out a new version of
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
#ifndef CONFIG_SUN3
#define ELF_ET_DYN_BASE 0xD0000000UL
#else
#define ELF_ET_DYN_BASE 0x0D800000UL
#endif
#define ELF_CORE_COPY_REGS(pr_reg, regs) \
/* Bleech. */ \
pr_reg[0] = regs->d1; \
pr_reg[1] = regs->d2; \
pr_reg[2] = regs->d3; \
pr_reg[3] = regs->d4; \
pr_reg[4] = regs->d5; \
pr_reg[7] = regs->a0; \
pr_reg[8] = regs->a1; \
pr_reg[9] = regs->a2; \
pr_reg[14] = regs->d0; \
pr_reg[15] = rdusp(); \
pr_reg[16] = regs->orig_d0; \
pr_reg[17] = regs->sr; \
pr_reg[18] = regs->pc; \
pr_reg[19] = (regs->format << 12) | regs->vector; \
{ \
struct switch_stack *sw = ((struct switch_stack *)regs) - 1; \
pr_reg[5] = sw->d6; \
pr_reg[6] = sw->d7; \
pr_reg[10] = sw->a3; \
pr_reg[11] = sw->a4; \
pr_reg[12] = sw->a5; \
pr_reg[13] = sw->a6; \
}
/* This yields a mask that user programs can use to figure out what
instruction set this cpu supports. */
#define ELF_HWCAP (0)
/* This yields a string that ld.so will use to load implementation
specific libraries for optimization. This is more specific in
intent than poking at uname or /proc/cpuinfo. */
#define ELF_PLATFORM (NULL)
#define SET_PERSONALITY(ex) set_personality(PER_LINUX)
#endif
#ifndef __ASMm68k_ELF_H
#define __ASMm68k_ELF_H
/*
* ELF register definitions..
*/
#include <asm/ptrace.h>
#include <asm/user.h>
/*
* 68k ELF relocation types
*/
#define R_68K_NONE 0
#define R_68K_32 1
#define R_68K_16 2
#define R_68K_8 3
#define R_68K_PC32 4
#define R_68K_PC16 5
#define R_68K_PC8 6
#define R_68K_GOT32 7
#define R_68K_GOT16 8
#define R_68K_GOT8 9
#define R_68K_GOT32O 10
#define R_68K_GOT16O 11
#define R_68K_GOT8O 12
#define R_68K_PLT32 13
#define R_68K_PLT16 14
#define R_68K_PLT8 15
#define R_68K_PLT32O 16
#define R_68K_PLT16O 17
#define R_68K_PLT8O 18
#define R_68K_COPY 19
#define R_68K_GLOB_DAT 20
#define R_68K_JMP_SLOT 21
#define R_68K_RELATIVE 22
typedef unsigned long elf_greg_t;
#define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t))
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef struct user_m68kfp_struct elf_fpregset_t;
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch(x) ((x)->e_machine == EM_68K)
/*
* These are used to set parameters in the core dumps.
*/
#define ELF_CLASS ELFCLASS32
#define ELF_DATA ELFDATA2MSB
#define ELF_ARCH EM_68K
/* For SVR4/m68k the function pointer to be registered with `atexit' is
passed in %a1. Although my copy of the ABI has no such statement, it
is actually used on ASV. */
#define ELF_PLAT_INIT(_r, load_addr) _r->a1 = 0
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
use of this is to invoke "./ld.so someprog" to test out a new version of
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
#define ELF_ET_DYN_BASE 0xD0000000UL
#define ELF_CORE_COPY_REGS(pr_reg, regs) \
/* Bleech. */ \
pr_reg[0] = regs->d1; \
pr_reg[1] = regs->d2; \
pr_reg[2] = regs->d3; \
pr_reg[3] = regs->d4; \
pr_reg[4] = regs->d5; \
pr_reg[7] = regs->a0; \
pr_reg[8] = regs->a1; \
pr_reg[14] = regs->d0; \
pr_reg[15] = rdusp(); \
pr_reg[16] = 0 /* regs->orig_d0 */; \
pr_reg[17] = regs->sr; \
pr_reg[18] = regs->pc; \
/* pr_reg[19] = (regs->format << 12) | regs->vector; */ \
{ \
struct switch_stack *sw = ((struct switch_stack *)regs) - 1; \
pr_reg[5] = sw->d6; \
pr_reg[6] = sw->d7; \
pr_reg[10] = sw->a3; \
pr_reg[11] = sw->a4; \
pr_reg[12] = sw->a5; \
pr_reg[13] = sw->a6; \
}
/* This yields a mask that user programs can use to figure out what
instruction set this cpu supports. */
#define ELF_HWCAP (0)
/* This yields a string that ld.so will use to load implementation
specific libraries for optimization. This is more specific in
intent than poking at uname or /proc/cpuinfo. */
#define ELF_PLATFORM (NULL)
#define SET_PERSONALITY(ex) set_personality(PER_LINUX)
#endif
#ifdef __uClinux__
#include "fb_no.h"
#ifndef _ASM_FB_H_
#define _ASM_FB_H_
#include <linux/fb.h>
#include <linux/fs.h>
#include <asm/page.h>
#include <asm/setup.h>
#ifdef CONFIG_MMU
#ifdef CONFIG_SUN3
static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
unsigned long off)
{
pgprot_val(vma->vm_page_prot) |= SUN3_PAGE_NOCACHE;
}
#else
#include "fb_mm.h"
#endif
static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
unsigned long off)
{
if (CPU_IS_020_OR_030)
pgprot_val(vma->vm_page_prot) |= _PAGE_NOCACHE030;
if (CPU_IS_040_OR_060) {
pgprot_val(vma->vm_page_prot) &= _CACHEMASK040;
/* Use no-cache mode, serialized */
pgprot_val(vma->vm_page_prot) |= _PAGE_NOCACHE_S;
}
}
#endif /* CONFIG_SUN3 */
#else
#define fb_pgprotect(...) do {} while (0)
#endif /* CONFIG_MMU */
static inline int fb_is_primary_device(struct fb_info *info)
{
return 0;
}
#endif /* _ASM_FB_H_ */
#ifndef _ASM_FB_H_
#define _ASM_FB_H_
#include <linux/fb.h>
#include <linux/fs.h>
#include <asm/page.h>
#include <asm/setup.h>
#ifdef CONFIG_SUN3
static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
unsigned long off)
{
pgprot_val(vma->vm_page_prot) |= SUN3_PAGE_NOCACHE;
}
#else
static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
unsigned long off)
{
if (CPU_IS_020_OR_030)
pgprot_val(vma->vm_page_prot) |= _PAGE_NOCACHE030;
if (CPU_IS_040_OR_060) {
pgprot_val(vma->vm_page_prot) &= _CACHEMASK040;
/* Use no-cache mode, serialized */
pgprot_val(vma->vm_page_prot) |= _PAGE_NOCACHE_S;
}
}
#endif /* CONFIG_SUN3 */
static inline int fb_is_primary_device(struct fb_info *info)
{
return 0;
}
#endif /* _ASM_FB_H_ */
#ifndef _ASM_FB_H_
#define _ASM_FB_H_
#include <linux/fb.h>
#define fb_pgprotect(...) do {} while (0)
static inline int fb_is_primary_device(struct fb_info *info)
{
return 0;
}
#endif /* _ASM_FB_H_ */
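fb_pgprotect() is the per-arch hook the fbdev mmap path calls to pick a no-cache page protection; the non-MMU variant expands to nothing. A hedged sketch of the call-site shape (not the actual fbmem.c code, names are illustrative, and it assumes an MMU build where io_remap_pfn_range() is available):

#include <linux/fb.h>
#include <linux/mm.h>
#include <asm/fb.h>

/* Illustrative mmap helper: apply the cache policy, then map the buffer. */
static int example_fb_mmap(struct fb_info *info, struct file *file,
			   struct vm_area_struct *vma, unsigned long off)
{
	fb_pgprotect(file, vma, off);
	return io_remap_pfn_range(vma, vma->vm_start,
				  (info->fix.smem_start + off) >> PAGE_SHIFT,
				  vma->vm_end - vma->vm_start,
				  vma->vm_page_prot);
}
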
#ifdef __uClinux__
#include "fpu_no.h"
#ifndef __M68K_FPU_H
#define __M68K_FPU_H
/*
* MAX floating point unit state size (FSAVE/FRESTORE)
*/
#if defined(CONFIG_M68020) || defined(CONFIG_M68030)
#define FPSTATESIZE (216)
#elif defined(CONFIG_M68040)
#define FPSTATESIZE (96)
#elif defined(CONFIG_M68KFPU_EMU)
#define FPSTATESIZE (28)
#elif defined(CONFIG_M68060)
#define FPSTATESIZE (12)
#else
#include "fpu_mm.h"
#define FPSTATESIZE (0)
#endif
#endif /* __M68K_FPU_H */
#ifndef __M68K_FPU_H
#define __M68K_FPU_H
/*
* MAX floating point unit state size (FSAVE/FRESTORE)
*/
#if defined(CONFIG_M68020) || defined(CONFIG_M68030)
#define FPSTATESIZE (216)
#elif defined(CONFIG_M68040)
#define FPSTATESIZE (96)
#elif defined(CONFIG_M68KFPU_EMU)
#define FPSTATESIZE (28)
#elif defined(CONFIG_M68060)
#define FPSTATESIZE (12)
#else
#define FPSTATESIZE (0)
#endif
#endif /* __M68K_FPU_H */
#ifndef __M68KNOMMU_FPU_H
#define __M68KNOMMU_FPU_H
/*
* MAX floating point unit state size (FSAVE/FRESTORE)
*/
#if defined(CONFIG_M68020) || defined(CONFIG_M68030)
#define FPSTATESIZE (216/sizeof(unsigned char))
#elif defined(CONFIG_M68040)
#define FPSTATESIZE (96/sizeof(unsigned char))
#elif defined(CONFIG_M68KFPU_EMU)
#define FPSTATESIZE (28/sizeof(unsigned char))
#elif defined(CONFIG_M68060)
#define FPSTATESIZE (12/sizeof(unsigned char))
#else
/* Assume no FP unit present then... */
#define FPSTATESIZE (2) /* dummy size */
#endif
#endif /* __M68KNOMMU_FPU_H */
#ifdef __uClinux__
#include "hw_irq_no.h"
#else
#include "hw_irq_mm.h"
#ifndef __ASM_M68K_HW_IRQ_H
#define __ASM_M68K_HW_IRQ_H
/* Dummy include. */
#endif
#ifndef __ASM_M68K_HW_IRQ_H
#define __ASM_M68K_HW_IRQ_H
/* Dummy include. */
#endif
#ifndef __M68KNOMMU_HW_IRQ_H__
#define __M68KNOMMU_HW_IRQ_H__
#endif /* __M68KNOMMU_HW_IRQ_H__ */
#ifdef __uClinux__
#include "kmap_types_no.h"
#else
#include "kmap_types_mm.h"
#endif
#ifndef __ASM_M68K_KMAP_TYPES_H
#define __ASM_M68K_KMAP_TYPES_H
enum km_type {
KM_BOUNCE_READ,
KM_SKB_SUNRPC_DATA,
KM_SKB_DATA_SOFTIRQ,
KM_USER0,
KM_USER1,
KM_BIO_SRC_IRQ,
KM_BIO_DST_IRQ,
KM_PTE0,
KM_PTE1,
KM_IRQ0,
KM_IRQ1,
KM_SOFTIRQ0,
KM_SOFTIRQ1,
KM_TYPE_NR
};
#endif /* __ASM_M68K_KMAP_TYPES_H */
#ifndef __ASM_M68K_KMAP_TYPES_H
#define __ASM_M68K_KMAP_TYPES_H
enum km_type {
KM_BOUNCE_READ,
KM_SKB_SUNRPC_DATA,
KM_SKB_DATA_SOFTIRQ,
KM_USER0,
KM_USER1,
KM_BIO_SRC_IRQ,
KM_BIO_DST_IRQ,
KM_PTE0,
KM_PTE1,
KM_IRQ0,
KM_IRQ1,
KM_SOFTIRQ0,
KM_SOFTIRQ1,
KM_TYPE_NR
};
#endif /* __ASM_M68K_KMAP_TYPES_H */
#ifndef __ASM_M68K_KMAP_TYPES_H
#define __ASM_M68K_KMAP_TYPES_H
enum km_type {
KM_BOUNCE_READ,
KM_SKB_SUNRPC_DATA,
KM_SKB_DATA_SOFTIRQ,
KM_USER0,
KM_USER1,
KM_BIO_SRC_IRQ,
KM_BIO_DST_IRQ,
KM_PTE0,
KM_PTE1,
KM_IRQ0,
KM_IRQ1,
KM_SOFTIRQ0,
KM_SOFTIRQ1,
KM_TYPE_NR
};
#endif
@@ -16,6 +16,7 @@
#define MCFINT_VECBASE 64
#define MCFINT_UART0 26 /* Interrupt number for UART0 */
#define MCFINT_UART1 27 /* Interrupt number for UART1 */
#define MCFINT_UART2 28 /* Interrupt number for UART2 */
#define MCF_WTM_WCR MCF_REG16(0xFC098000)
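The added MCFINT_UART2 line gives UART2 the same treatment as UART0/UART1 in this hunk; a hedged note on how such source numbers are typically consumed:

/*
 * Illustrative arithmetic only (the macro name is hypothetical): ColdFire
 * platform code usually derives the UART vector/IRQ by offsetting the
 * peripheral source number from the vector base, so UART2 lands on
 * 64 + 28 = 92 here.
 */
#define EXAMPLE_IRQ_UART2	(MCFINT_VECBASE + MCFINT_UART2)
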
#ifdef __uClinux__
#include "mc146818rtc_no.h"
#else
#include "mc146818rtc_mm.h"
#endif
/*
* Machine dependent access functions for RTC registers.
*/
#ifndef _ASM_MC146818RTC_H
#define _ASM_MC146818RTC_H
#ifdef CONFIG_ATARI
/* RTC in Atari machines */
#include <asm/atarihw.h>
#define RTC_PORT(x) (TT_RTC_BAS + 2*(x))
#define RTC_ALWAYS_BCD 0
#define CMOS_READ(addr) ({ \
atari_outb_p((addr),RTC_PORT(0)); \
atari_inb_p(RTC_PORT(1)); \
})
#define CMOS_WRITE(val, addr) ({ \
atari_outb_p((addr),RTC_PORT(0)); \
atari_outb_p((val),RTC_PORT(1)); \
})
#endif /* CONFIG_ATARI */
#endif /* _ASM_MC146818RTC_H */
/*
* Machine dependent access functions for RTC registers.
*/
#ifndef _ASM_MC146818RTC_H
#define _ASM_MC146818RTC_H
#ifdef CONFIG_ATARI
/* RTC in Atari machines */
#include <asm/atarihw.h>
#define RTC_PORT(x) (TT_RTC_BAS + 2*(x))
#define RTC_ALWAYS_BCD 0
#define CMOS_READ(addr) ({ \
atari_outb_p((addr),RTC_PORT(0)); \
atari_inb_p(RTC_PORT(1)); \
})
#define CMOS_WRITE(val, addr) ({ \
atari_outb_p((addr),RTC_PORT(0)); \
atari_outb_p((val),RTC_PORT(1)); \
})
#endif /* CONFIG_ATARI */
#endif /* _ASM_MC146818RTC_H */
/*
* Machine dependent access functions for RTC registers.
*/
#ifndef _M68KNOMMU_MC146818RTC_H
#define _M68KNOMMU_MC146818RTC_H
/* empty include file to satisfy the include in genrtc.c/ide-geometry.c */
#endif /* _M68KNOMMU_MC146818RTC_H */
/****************************************************************************/
/*
* mcfpci.h -- PCI bridge on ColdFire eval boards.
*
* (C) Copyright 2000, Greg Ungerer (gerg@snapgear.com)
* (C) Copyright 2000, Lineo Inc. (www.lineo.com)
*/
/****************************************************************************/
#ifndef mcfpci_h
#define mcfpci_h
/****************************************************************************/
#ifdef CONFIG_PCI
/*
* Address regions in the PCI address space are not mapped into the
* normal memory space of the ColdFire. They must be accessed via
* handler routines. This is easy for I/O space (inb/outb/etc) but
* needs some code changes to support ordinary memory. Interrupts
* also need to be vectored through the PCI handler first, then it
* will call the actual driver sub-handlers.
*/
/*
* Un-define all the standard I/O access routines.
*/
#undef inb
#undef inw
#undef inl
#undef inb_p
#undef inw_p
#undef insb
#undef insw
#undef insl
#undef outb
#undef outw
#undef outl
#undef outb_p
#undef outw_p
#undef outsb
#undef outsw
#undef outsl
#undef request_irq
#undef free_irq
#undef bus_to_virt
#undef virt_to_bus
/*
* Redirect all I/O memory access functions to PCI-specific ones.
*/
#define inb pci_inb
#define inw pci_inw
#define inl pci_inl
#define inb_p pci_inb
#define inw_p pci_inw
#define insb pci_insb
#define insw pci_insw
#define insl pci_insl
#define outb pci_outb
#define outw pci_outw
#define outl pci_outl
#define outb_p pci_outb
#define outw_p pci_outw
#define outsb pci_outsb
#define outsw pci_outsw
#define outsl pci_outsl
#define request_irq pci_request_irq
#define free_irq pci_free_irq
#define virt_to_bus pci_virt_to_bus
#define bus_to_virt pci_bus_to_virt
#define CONFIG_COMEMPCI 1
/*
* Prototypes of the real PCI functions (defined in bios32.c).
*/
unsigned char pci_inb(unsigned int addr);
unsigned short pci_inw(unsigned int addr);
unsigned int pci_inl(unsigned int addr);
void pci_insb(void *addr, void *buf, int len);
void pci_insw(void *addr, void *buf, int len);
void pci_insl(void *addr, void *buf, int len);
void pci_outb(unsigned char val, unsigned int addr);
void pci_outw(unsigned short val, unsigned int addr);
void pci_outl(unsigned int val, unsigned int addr);
void pci_outsb(void *addr, void *buf, int len);
void pci_outsw(void *addr, void *buf, int len);
void pci_outsl(void *addr, void *buf, int len);
int pci_request_irq(unsigned int irq,
void (*handler)(int, void *, struct pt_regs *),
unsigned long flags,
const char *device,
void *dev_id);
void pci_free_irq(unsigned int irq, void *dev_id);
void *pci_bmalloc(int size);
void pci_bmfree(void *bmp, int len);
void pci_copytoshmem(unsigned long bmp, void *src, int size);
void pci_copyfromshmem(void *dst, unsigned long bmp, int size);
unsigned long pci_virt_to_bus(volatile void *address);
void *pci_bus_to_virt(unsigned long address);
void pci_bmcpyto(void *dst, void *src, int len);
void pci_bmcpyfrom(void *dst, void *src, int len);
#endif /* CONFIG_PCI */
/****************************************************************************/
#endif /* mcfpci_h */
#ifdef __uClinux__
#include "mmu_no.h"
#ifndef __MMU_H
#define __MMU_H
#ifdef CONFIG_MMU
/* Default "unsigned long" context */
typedef unsigned long mm_context_t;
#else
#include "mmu_mm.h"
typedef struct {
unsigned long end_brk;
} mm_context_t;
#endif
#endif
#ifdef __uClinux__
#include "mmu_context_no.h"
#ifndef __M68K_MMU_CONTEXT_H
#define __M68K_MMU_CONTEXT_H
#include <asm-generic/mm_hooks.h>
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
#ifdef CONFIG_MMU
#ifndef CONFIG_SUN3
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
mm->context = virt_to_phys(mm->pgd);
return 0;
}
#define destroy_context(mm) do { } while(0)
static inline void switch_mm_0230(struct mm_struct *mm)
{
unsigned long crp[2] = {
0x80000000 | _PAGE_TABLE, mm->context
};
unsigned long tmp;
asm volatile (".chip 68030");
/* flush MC68030/MC68020 caches (they are virtually addressed) */
asm volatile (
"movec %%cacr,%0;"
"orw %1,%0; "
"movec %0,%%cacr"
: "=d" (tmp) : "di" (FLUSH_I_AND_D));
/* Switch the root pointer. For a 030-only kernel,
* avoid flushing the whole ATC, we only need to
* flush the user entries. The 68851 does this by
* itself. Avoid a runtime check here.
*/
asm volatile (
#ifdef CPU_M68030_ONLY
"pmovefd %0,%%crp; "
"pflush #0,#4"
#else
#include "mmu_context_mm.h"
"pmove %0,%%crp"
#endif
: : "m" (crp[0]));
asm volatile (".chip 68k");
}
static inline void switch_mm_0460(struct mm_struct *mm)
{
asm volatile (".chip 68040");
/* flush address translation cache (user entries) */
asm volatile ("pflushan");
/* switch the root pointer */
asm volatile ("movec %0,%%urp" : : "r" (mm->context));
if (CPU_IS_060) {
unsigned long tmp;
/* clear user entries in the branch cache */
asm volatile (
"movec %%cacr,%0; "
"orl %1,%0; "
"movec %0,%%cacr"
: "=d" (tmp): "di" (0x00200000));
}
asm volatile (".chip 68k");
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
{
if (prev != next) {
if (CPU_IS_020_OR_030)
switch_mm_0230(next);
else
switch_mm_0460(next);
}
}
#define deactivate_mm(tsk,mm) do { } while (0)
static inline void activate_mm(struct mm_struct *prev_mm,
struct mm_struct *next_mm)
{
next_mm->context = virt_to_phys(next_mm->pgd);
if (CPU_IS_020_OR_030)
switch_mm_0230(next_mm);
else
switch_mm_0460(next_mm);
}
#else /* CONFIG_SUN3 */
#include <asm/sun3mmu.h>
#include <linux/sched.h>
extern unsigned long get_free_context(struct mm_struct *mm);
extern void clear_context(unsigned long context);
/* set the context for a new task to unmapped */
static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
mm->context = SUN3_INVALID_CONTEXT;
return 0;
}
/* find the context given to this process, and if it hasn't already
got one, go get one for it. */
static inline void get_mmu_context(struct mm_struct *mm)
{
if(mm->context == SUN3_INVALID_CONTEXT)
mm->context = get_free_context(mm);
}
/* flush context if allocated... */
static inline void destroy_context(struct mm_struct *mm)
{
if(mm->context != SUN3_INVALID_CONTEXT)
clear_context(mm->context);
}
static inline void activate_context(struct mm_struct *mm)
{
get_mmu_context(mm);
sun3_put_context(mm->context);
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
{
activate_context(tsk->mm);
}
#define deactivate_mm(tsk,mm) do { } while (0)
static inline void activate_mm(struct mm_struct *prev_mm,
struct mm_struct *next_mm)
{
activate_context(next_mm);
}
#endif
#else /* !CONFIG_MMU */
static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
return 0;
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
{
}
#define destroy_context(mm) do { } while (0)
#define deactivate_mm(tsk,mm) do { } while (0)
static inline void activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
{
}
#endif /* CONFIG_MMU */
#endif /* __M68K_MMU_CONTEXT_H */
#ifndef __M68K_MMU_CONTEXT_H
#define __M68K_MMU_CONTEXT_H
#include <asm-generic/mm_hooks.h>
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
#ifndef CONFIG_SUN3
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
mm->context = virt_to_phys(mm->pgd);
return 0;
}
#define destroy_context(mm) do { } while(0)
static inline void switch_mm_0230(struct mm_struct *mm)
{
unsigned long crp[2] = {
0x80000000 | _PAGE_TABLE, mm->context
};
unsigned long tmp;
asm volatile (".chip 68030");
/* flush MC68030/MC68020 caches (they are virtually addressed) */
asm volatile (
"movec %%cacr,%0;"
"orw %1,%0; "
"movec %0,%%cacr"
: "=d" (tmp) : "di" (FLUSH_I_AND_D));
/* Switch the root pointer. For a 030-only kernel,
* avoid flushing the whole ATC, we only need to
* flush the user entries. The 68851 does this by
* itself. Avoid a runtime check here.
*/
asm volatile (
#ifdef CPU_M68030_ONLY
"pmovefd %0,%%crp; "
"pflush #0,#4"
#else
"pmove %0,%%crp"
#endif
: : "m" (crp[0]));
asm volatile (".chip 68k");
}
static inline void switch_mm_0460(struct mm_struct *mm)
{
asm volatile (".chip 68040");
/* flush address translation cache (user entries) */
asm volatile ("pflushan");
/* switch the root pointer */
asm volatile ("movec %0,%%urp" : : "r" (mm->context));
if (CPU_IS_060) {
unsigned long tmp;
/* clear user entries in the branch cache */
asm volatile (
"movec %%cacr,%0; "
"orl %1,%0; "
"movec %0,%%cacr"
: "=d" (tmp): "di" (0x00200000));
}
asm volatile (".chip 68k");
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
{
if (prev != next) {
if (CPU_IS_020_OR_030)
switch_mm_0230(next);
else
switch_mm_0460(next);
}
}
#define deactivate_mm(tsk,mm) do { } while (0)
static inline void activate_mm(struct mm_struct *prev_mm,
struct mm_struct *next_mm)
{
next_mm->context = virt_to_phys(next_mm->pgd);
if (CPU_IS_020_OR_030)
switch_mm_0230(next_mm);
else
switch_mm_0460(next_mm);
}
#else /* CONFIG_SUN3 */
#include <asm/sun3mmu.h>
#include <linux/sched.h>
extern unsigned long get_free_context(struct mm_struct *mm);
extern void clear_context(unsigned long context);
/* set the context for a new task to unmapped */
static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
mm->context = SUN3_INVALID_CONTEXT;
return 0;
}
/* find the context given to this process, and if it hasn't already
got one, go get one for it. */
static inline void get_mmu_context(struct mm_struct *mm)
{
if(mm->context == SUN3_INVALID_CONTEXT)
mm->context = get_free_context(mm);
}
/* flush context if allocated... */
static inline void destroy_context(struct mm_struct *mm)
{
if(mm->context != SUN3_INVALID_CONTEXT)
clear_context(mm->context);
}
static inline void activate_context(struct mm_struct *mm)
{
get_mmu_context(mm);
sun3_put_context(mm->context);
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
{
activate_context(tsk->mm);
}
#define deactivate_mm(tsk,mm) do { } while (0)
static inline void activate_mm(struct mm_struct *prev_mm,
struct mm_struct *next_mm)
{
activate_context(next_mm);
}
#endif
#endif
#ifndef __M68KNOMMU_MMU_CONTEXT_H
#define __M68KNOMMU_MMU_CONTEXT_H
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm-generic/mm_hooks.h>
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
// mm->context = virt_to_phys(mm->pgd);
return(0);
}
#define destroy_context(mm) do { } while(0)
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
{
}
#define deactivate_mm(tsk,mm) do { } while (0)
static inline void activate_mm(struct mm_struct *prev_mm,
struct mm_struct *next_mm)
{
}
#endif
#ifndef __MMU_H
#define __MMU_H
/* Default "unsigned long" context */
typedef unsigned long mm_context_t;
#endif
#ifndef __M68KNOMMU_MMU_H
#define __M68KNOMMU_MMU_H
/* Copyright (C) 2002, David McCullough <davidm@snapgear.com> */
typedef struct {
unsigned long end_brk;
} mm_context_t;
#endif /* __M68KNOMMU_MMU_H */
#ifdef __uClinux__
#include "module_no.h"
#ifndef _ASM_M68K_MODULE_H
#define _ASM_M68K_MODULE_H
#ifdef CONFIG_MMU
struct mod_arch_specific {
struct m68k_fixup_info *fixup_start, *fixup_end;
};
#define MODULE_ARCH_INIT { \
.fixup_start = __start_fixup, \
.fixup_end = __stop_fixup, \
}
enum m68k_fixup_type {
m68k_fixup_memoffset,
m68k_fixup_vnode_shift,
};
struct m68k_fixup_info {
enum m68k_fixup_type type;
void *addr;
};
#define m68k_fixup(type, addr) \
" .section \".m68k_fixup\",\"aw\"\n" \
" .long " #type "," #addr "\n" \
" .previous\n"
extern struct m68k_fixup_info __start_fixup[], __stop_fixup[];
struct module;
extern void module_fixup(struct module *mod, struct m68k_fixup_info *start,
struct m68k_fixup_info *end);
#else
#include "module_mm.h"
#endif
struct mod_arch_specific {
};
#endif /* CONFIG_MMU */
#define Elf_Shdr Elf32_Shdr
#define Elf_Sym Elf32_Sym
#define Elf_Ehdr Elf32_Ehdr
#endif /* _ASM_M68K_MODULE_H */
#ifndef _ASM_M68K_MODULE_H
#define _ASM_M68K_MODULE_H
struct mod_arch_specific {
struct m68k_fixup_info *fixup_start, *fixup_end;
};
#define MODULE_ARCH_INIT { \
.fixup_start = __start_fixup, \
.fixup_end = __stop_fixup, \
}
#define Elf_Shdr Elf32_Shdr
#define Elf_Sym Elf32_Sym
#define Elf_Ehdr Elf32_Ehdr
enum m68k_fixup_type {
m68k_fixup_memoffset,
m68k_fixup_vnode_shift,
};
struct m68k_fixup_info {
enum m68k_fixup_type type;
void *addr;
};
#define m68k_fixup(type, addr) \
" .section \".m68k_fixup\",\"aw\"\n" \
" .long " #type "," #addr "\n" \
" .previous\n"
extern struct m68k_fixup_info __start_fixup[], __stop_fixup[];
struct module;
extern void module_fixup(struct module *mod, struct m68k_fixup_info *start,
struct m68k_fixup_info *end);
#endif /* _ASM_M68K_MODULE_H */
#ifndef ASM_M68KNOMMU_MODULE_H
#define ASM_M68KNOMMU_MODULE_H
struct mod_arch_specific {
};
#define Elf_Shdr Elf32_Shdr
#define Elf_Sym Elf32_Sym
#define Elf_Ehdr Elf32_Ehdr
#endif /* ASM_M68KNOMMU_MODULE_H */
#ifdef __uClinux__
#include "page_offset_no.h"
/* This handles the memory map.. */
#ifdef CONFIG_MMU
#ifndef CONFIG_SUN3
#define PAGE_OFFSET_RAW 0x00000000
#else
#include "page_offset_mm.h"
#define PAGE_OFFSET_RAW 0x0E000000
#endif
#else
#define PAGE_OFFSET_RAW CONFIG_RAMBASE
#endif
/* This handles the memory map.. */
#ifndef CONFIG_SUN3
#define PAGE_OFFSET_RAW 0x00000000
#else
#define PAGE_OFFSET_RAW 0x0E000000
#endif
/* This handles the memory map.. */
#define PAGE_OFFSET_RAW CONFIG_RAMBASE
#ifdef __uClinux__
#include "pci_no.h"
#else
#include "pci_mm.h"
#endif
#ifndef _ASM_M68K_PCI_H
#define _ASM_M68K_PCI_H
#include <asm-generic/pci-dma-compat.h>
/* The PCI address space does equal the physical memory
* address space. The networking and block device layers use
* this boolean for bounce buffer decisions.
*/
#define PCI_DMA_BUS_IS_PHYS (1)
#endif /* _ASM_M68K_PCI_H */
#ifndef _ASM_M68K_PCI_H
#define _ASM_M68K_PCI_H
#include <asm-generic/pci-dma-compat.h>
/* The PCI address space does equal the physical memory
* address space. The networking and block device layers use
* this boolean for bounce buffer decisions.
*/
#define PCI_DMA_BUS_IS_PHYS (1)
#endif /* _ASM_M68K_PCI_H */
#ifndef M68KNOMMU_PCI_H
#define M68KNOMMU_PCI_H
#include <asm/pci_mm.h>
#ifdef CONFIG_COMEMPCI
/*
* These are pretty much arbitrary with the CoMEM implementation.
* We have the whole address space to ourselves.
*/
#define PCIBIOS_MIN_IO 0x100
#define PCIBIOS_MIN_MEM 0x00010000
#define pcibios_scan_all_fns(a, b) 0
/*
* Return whether the given PCI device DMA address mask can
* be supported properly. For example, if your device can
* only drive the low 24-bits during PCI bus mastering, then
* you would pass 0x00ffffff as the mask to this function.
*/
static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
{
return 1;
}
#endif /* CONFIG_COMEMPCI */
#endif /* M68KNOMMU_PCI_H */
#ifdef __uClinux__
#include "pgalloc_no.h"
#ifndef M68K_PGALLOC_H
#define M68K_PGALLOC_H
#include <linux/mm.h>
#include <linux/highmem.h>
#include <asm/setup.h>
#ifdef CONFIG_MMU
#include <asm/virtconvert.h>
#ifdef CONFIG_SUN3
#include <asm/sun3_pgalloc.h>
#else
#include "pgalloc_mm.h"
#include <asm/motorola_pgalloc.h>
#endif
extern void m68k_setup_node(int node);
#endif
#endif /* M68K_PGALLOC_H */
#ifndef M68K_PGALLOC_H
#define M68K_PGALLOC_H
#include <linux/mm.h>
#include <linux/highmem.h>
#include <asm/setup.h>
#include <asm/virtconvert.h>
#ifdef CONFIG_SUN3
#include <asm/sun3_pgalloc.h>
#else
#include <asm/motorola_pgalloc.h>
#endif
extern void m68k_setup_node(int node);
#endif /* M68K_PGALLOC_H */
#ifndef _M68KNOMMU_PGALLOC_H
#define _M68KNOMMU_PGALLOC_H
#include <asm/setup.h>
#define check_pgt_cache() do { } while (0)
#endif /* _M68KNOMMU_PGALLOC_H */
@@ -67,4 +67,6 @@ extern unsigned int kobjsize(const void *objp);
#include <asm-generic/pgtable.h>
#define check_pgt_cache() do { } while (0)
#endif /* _M68KNOMMU_PGTABLE_H */
#ifdef __uClinux__
#include "scatterlist_no.h"
#else
#include "scatterlist_mm.h"
#ifndef _M68K_SCATTERLIST_H
#define _M68K_SCATTERLIST_H
#include <linux/types.h>
struct scatterlist {
#ifdef CONFIG_DEBUG_SG
unsigned long sg_magic;
#endif
unsigned long page_link;
unsigned int offset;
unsigned int length;
dma_addr_t dma_address; /* A place to hang host-specific addresses at. */
};
/* This is bogus and should go away. */
#define ISA_DMA_THRESHOLD (0x00ffffff)
#define sg_dma_address(sg) ((sg)->dma_address)
#define sg_dma_len(sg) ((sg)->length)
#endif /* !(_M68K_SCATTERLIST_H) */
#ifndef _M68K_SCATTERLIST_H
#define _M68K_SCATTERLIST_H
#include <linux/types.h>
struct scatterlist {
#ifdef CONFIG_DEBUG_SG
unsigned long sg_magic;
#endif
unsigned long page_link;
unsigned int offset;
unsigned int length;
__u32 dma_address; /* A place to hang host-specific addresses at. */
};
/* This is bogus and should go away. */
#define ISA_DMA_THRESHOLD (0x00ffffff)
#define sg_dma_address(sg) ((sg)->dma_address)
#define sg_dma_len(sg) ((sg)->length)
#endif /* !(_M68K_SCATTERLIST_H) */
#ifndef _M68KNOMMU_SCATTERLIST_H
#define _M68KNOMMU_SCATTERLIST_H
#include <linux/mm.h>
#include <asm/types.h>
struct scatterlist {
#ifdef CONFIG_DEBUG_SG
unsigned long sg_magic;
#endif
unsigned long page_link;
unsigned int offset;
dma_addr_t dma_address;
unsigned int length;
};
#define sg_dma_address(sg) ((sg)->dma_address)
#define sg_dma_len(sg) ((sg)->length)
#define ISA_DMA_THRESHOLD (0xffffffff)
#endif /* !(_M68KNOMMU_SCATTERLIST_H) */
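All three layouts expose the same accessors once a list has been DMA-mapped; a hedged sketch of walking the entries (the helper name is illustrative):

#include <linux/kernel.h>
#include <linux/scatterlist.h>

/* Illustrative: log a scatterlist that has already been DMA-mapped. */
static void example_dump_sg(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		pr_debug("seg %d: bus %#lx len %u\n", i,
			 (unsigned long)sg_dma_address(sg), sg_dma_len(sg));
}
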
#ifdef __uClinux__
#include "segment_no.h"
#ifndef _M68K_SEGMENT_H
#define _M68K_SEGMENT_H
/* define constants */
/* Address spaces (FC0-FC2) */
#define USER_DATA (1)
#ifndef __USER_DS
#define __USER_DS (USER_DATA)
#endif
#define USER_PROGRAM (2)
#define SUPER_DATA (5)
#ifndef __KERNEL_DS
#define __KERNEL_DS (SUPER_DATA)
#endif
#define SUPER_PROGRAM (6)
#define CPU_SPACE (7)
#ifndef __ASSEMBLY__
typedef struct {
unsigned long seg;
} mm_segment_t;
#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
#define USER_DS MAKE_MM_SEG(__USER_DS)
#define KERNEL_DS MAKE_MM_SEG(__KERNEL_DS)
/*
* Get/set the SFC/DFC registers for MOVES instructions
*/
static inline mm_segment_t get_fs(void)
{
#ifdef CONFIG_MMU
mm_segment_t _v;
__asm__ ("movec %/dfc,%0":"=r" (_v.seg):);
return _v;
#else
#include "segment_mm.h"
return USER_DS;
#endif
}
static inline mm_segment_t get_ds(void)
{
/* return the supervisor data space code */
return KERNEL_DS;
}
static inline void set_fs(mm_segment_t val)
{
#ifdef CONFIG_MMU
__asm__ __volatile__ ("movec %0,%/sfc\n\t"
"movec %0,%/dfc\n\t"
: /* no outputs */ : "r" (val.seg) : "memory");
#endif
}
#define segment_eq(a,b) ((a).seg == (b).seg)
#endif /* __ASSEMBLY__ */
#endif /* _M68K_SEGMENT_H */
#ifndef _M68K_SEGMENT_H
#define _M68K_SEGMENT_H
/* define constants */
/* Address spaces (FC0-FC2) */
#define USER_DATA (1)
#ifndef __USER_DS
#define __USER_DS (USER_DATA)
#endif
#define USER_PROGRAM (2)
#define SUPER_DATA (5)
#ifndef __KERNEL_DS
#define __KERNEL_DS (SUPER_DATA)
#endif
#define SUPER_PROGRAM (6)
#define CPU_SPACE (7)
#ifndef __ASSEMBLY__
typedef struct {
unsigned long seg;
} mm_segment_t;
#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
#define USER_DS MAKE_MM_SEG(__USER_DS)
#define KERNEL_DS MAKE_MM_SEG(__KERNEL_DS)
/*
* Get/set the SFC/DFC registers for MOVES instructions
*/
static inline mm_segment_t get_fs(void)
{
mm_segment_t _v;
__asm__ ("movec %/dfc,%0":"=r" (_v.seg):);
return _v;
}
static inline mm_segment_t get_ds(void)
{
/* return the supervisor data space code */
return KERNEL_DS;
}
static inline void set_fs(mm_segment_t val)
{
__asm__ __volatile__ ("movec %0,%/sfc\n\t"
"movec %0,%/dfc\n\t"
: /* no outputs */ : "r" (val.seg) : "memory");
}
#define segment_eq(a,b) ((a).seg == (b).seg)
#endif /* __ASSEMBLY__ */
#endif /* _M68K_SEGMENT_H */
#ifndef _M68K_SEGMENT_H
#define _M68K_SEGMENT_H
/* define constants */
/* Address spaces (FC0-FC2) */
#define USER_DATA (1)
#ifndef __USER_DS
#define __USER_DS (USER_DATA)
#endif
#define USER_PROGRAM (2)
#define SUPER_DATA (5)
#ifndef __KERNEL_DS
#define __KERNEL_DS (SUPER_DATA)
#endif
#define SUPER_PROGRAM (6)
#define CPU_SPACE (7)
#ifndef __ASSEMBLY__
typedef struct {
unsigned long seg;
} mm_segment_t;
#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
#define USER_DS MAKE_MM_SEG(__USER_DS)
#define KERNEL_DS MAKE_MM_SEG(__KERNEL_DS)
/*
* Get/set the SFC/DFC registers for MOVES instructions
*/
static inline mm_segment_t get_fs(void)
{
return USER_DS;
}
static inline mm_segment_t get_ds(void)
{
/* return the supervisor data space code */
return KERNEL_DS;
}
static inline void set_fs(mm_segment_t val)
{
}
#define segment_eq(a,b) ((a).seg == (b).seg)
#endif /* __ASSEMBLY__ */
#endif /* _M68K_SEGMENT_H */
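All three variants preserve the usual get_fs()/set_fs() contract (the non-MMU one simply makes it a no-op). A hedged sketch of the classic pattern built on it (the wrapper is illustrative, not from this tree):

#include <linux/fs.h>
#include <linux/uaccess.h>

/*
 * Illustrative only: the get_fs()/set_fs() dance that lets a kernel
 * buffer pass through an interface declared with __user pointers.
 */
static ssize_t example_kernel_read(struct file *file, char *buf,
				   size_t count, loff_t *pos)
{
	mm_segment_t old_fs = get_fs();
	ssize_t ret;

	set_fs(KERNEL_DS);
	ret = vfs_read(file, (char __user *)buf, count, pos);
	set_fs(old_fs);
	return ret;
}
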
#ifdef __uClinux__
#include "timex_no.h"
#else
#include "timex_mm.h"
/*
* linux/include/asm-m68k/timex.h
*
* m68k architecture timex specifications
*/
#ifndef _ASMm68k_TIMEX_H
#define _ASMm68k_TIMEX_H
#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
typedef unsigned long cycles_t;
static inline cycles_t get_cycles(void)
{
return 0;
}
#endif
/*
* linux/include/asm-m68k/timex.h
*
* m68k architecture timex specifications
*/
#ifndef _ASMm68k_TIMEX_H
#define _ASMm68k_TIMEX_H
#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
typedef unsigned long cycles_t;
static inline cycles_t get_cycles(void)
{
return 0;
}
#endif
/*
* linux/include/asm-m68knommu/timex.h
*
* m68knommu architecture timex specifications
*/
#ifndef _ASM_M68KNOMMU_TIMEX_H
#define _ASM_M68KNOMMU_TIMEX_H
#ifdef CONFIG_COLDFIRE
#include <asm/coldfire.h>
#define CLOCK_TICK_RATE MCF_CLK
#else
#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
#endif
typedef unsigned long cycles_t;
static inline cycles_t get_cycles(void)
{
return 0;
}
#endif
#ifdef __uClinux__
#include "tlbflush_no.h"
#ifndef _M68K_TLBFLUSH_H
#define _M68K_TLBFLUSH_H
#ifdef CONFIG_MMU
#ifndef CONFIG_SUN3
#include <asm/current.h>
static inline void flush_tlb_kernel_page(void *addr)
{
if (CPU_IS_040_OR_060) {
mm_segment_t old_fs = get_fs();
set_fs(KERNEL_DS);
__asm__ __volatile__(".chip 68040\n\t"
"pflush (%0)\n\t"
".chip 68k"
: : "a" (addr));
set_fs(old_fs);
} else if (CPU_IS_020_OR_030)
__asm__ __volatile__("pflush #4,#4,(%0)" : : "a" (addr));
}
/*
* flush all user-space atc entries.
*/
static inline void __flush_tlb(void)
{
if (CPU_IS_040_OR_060)
__asm__ __volatile__(".chip 68040\n\t"
"pflushan\n\t"
".chip 68k");
else if (CPU_IS_020_OR_030)
__asm__ __volatile__("pflush #0,#4");
}
static inline void __flush_tlb040_one(unsigned long addr)
{
__asm__ __volatile__(".chip 68040\n\t"
"pflush (%0)\n\t"
".chip 68k"
: : "a" (addr));
}
static inline void __flush_tlb_one(unsigned long addr)
{
if (CPU_IS_040_OR_060)
__flush_tlb040_one(addr);
else if (CPU_IS_020_OR_030)
__asm__ __volatile__("pflush #0,#4,(%0)" : : "a" (addr));
}
#define flush_tlb() __flush_tlb()
/*
* flush all atc entries (both kernel and user-space entries).
*/
static inline void flush_tlb_all(void)
{
if (CPU_IS_040_OR_060)
__asm__ __volatile__(".chip 68040\n\t"
"pflusha\n\t"
".chip 68k");
else if (CPU_IS_020_OR_030)
__asm__ __volatile__("pflusha");
}
static inline void flush_tlb_mm(struct mm_struct *mm)
{
if (mm == current->active_mm)
__flush_tlb();
}
static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
if (vma->vm_mm == current->active_mm) {
mm_segment_t old_fs = get_fs();
set_fs(USER_DS);
__flush_tlb_one(addr);
set_fs(old_fs);
}
}
static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
if (vma->vm_mm == current->active_mm)
__flush_tlb();
}
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
flush_tlb_all();
}
#else
#include "tlbflush_mm.h"
/* Reserved PMEGs. */
extern char sun3_reserved_pmeg[SUN3_PMEGS_NUM];
extern unsigned long pmeg_vaddr[SUN3_PMEGS_NUM];
extern unsigned char pmeg_alloc[SUN3_PMEGS_NUM];
extern unsigned char pmeg_ctx[SUN3_PMEGS_NUM];
/* Flush all userspace mappings one by one... (why no flush command,
sun?) */
static inline void flush_tlb_all(void)
{
unsigned long addr;
unsigned char ctx, oldctx;
oldctx = sun3_get_context();
for(addr = 0x00000000; addr < TASK_SIZE; addr += SUN3_PMEG_SIZE) {
for(ctx = 0; ctx < 8; ctx++) {
sun3_put_context(ctx);
sun3_put_segmap(addr, SUN3_INVALID_PMEG);
}
}
sun3_put_context(oldctx);
/* erase all of the userspace pmeg maps, we've clobbered them
all anyway */
for(addr = 0; addr < SUN3_INVALID_PMEG; addr++) {
if(pmeg_alloc[addr] == 1) {
pmeg_alloc[addr] = 0;
pmeg_ctx[addr] = 0;
pmeg_vaddr[addr] = 0;
}
}
}
/* Clear user TLB entries within the context named in mm */
static inline void flush_tlb_mm (struct mm_struct *mm)
{
unsigned char oldctx;
unsigned char seg;
unsigned long i;
oldctx = sun3_get_context();
sun3_put_context(mm->context);
for(i = 0; i < TASK_SIZE; i += SUN3_PMEG_SIZE) {
seg = sun3_get_segmap(i);
if(seg == SUN3_INVALID_PMEG)
continue;
sun3_put_segmap(i, SUN3_INVALID_PMEG);
pmeg_alloc[seg] = 0;
pmeg_ctx[seg] = 0;
pmeg_vaddr[seg] = 0;
}
sun3_put_context(oldctx);
}
/* Flush a single TLB page. In this case, we're limited to flushing a
single PMEG */
static inline void flush_tlb_page (struct vm_area_struct *vma,
unsigned long addr)
{
unsigned char oldctx;
unsigned char i;
oldctx = sun3_get_context();
sun3_put_context(vma->vm_mm->context);
addr &= ~SUN3_PMEG_MASK;
if((i = sun3_get_segmap(addr)) != SUN3_INVALID_PMEG)
{
pmeg_alloc[i] = 0;
pmeg_ctx[i] = 0;
pmeg_vaddr[i] = 0;
sun3_put_segmap (addr, SUN3_INVALID_PMEG);
}
sun3_put_context(oldctx);
}
/* Flush a range of pages from TLB. */
static inline void flush_tlb_range (struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
struct mm_struct *mm = vma->vm_mm;
unsigned char seg, oldctx;
start &= ~SUN3_PMEG_MASK;
oldctx = sun3_get_context();
sun3_put_context(mm->context);
while(start < end)
{
if((seg = sun3_get_segmap(start)) == SUN3_INVALID_PMEG)
goto next;
if(pmeg_ctx[seg] == mm->context) {
pmeg_alloc[seg] = 0;
pmeg_ctx[seg] = 0;
pmeg_vaddr[seg] = 0;
}
sun3_put_segmap(start, SUN3_INVALID_PMEG);
next:
start += SUN3_PMEG_SIZE;
}
}
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
flush_tlb_all();
}
/* Flush kernel page from TLB. */
static inline void flush_tlb_kernel_page (unsigned long addr)
{
sun3_put_segmap (addr & ~(SUN3_PMEG_SIZE - 1), SUN3_INVALID_PMEG);
}
#endif
#else /* !CONFIG_MMU */
/*
* flush all user-space atc entries.
*/
static inline void __flush_tlb(void)
{
BUG();
}
static inline void __flush_tlb_one(unsigned long addr)
{
BUG();
}
#define flush_tlb() __flush_tlb()
/*
* flush all atc entries (both kernel and user-space entries).
*/
static inline void flush_tlb_all(void)
{
BUG();
}
static inline void flush_tlb_mm(struct mm_struct *mm)
{
BUG();
}
static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
BUG();
}
static inline void flush_tlb_range(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
BUG();
}
static inline void flush_tlb_kernel_page(unsigned long addr)
{
BUG();
}
#endif /* CONFIG_MMU */
#endif /* _M68K_TLBFLUSH_H */
#ifndef _M68K_TLBFLUSH_H
#define _M68K_TLBFLUSH_H
#ifndef CONFIG_SUN3
#include <asm/current.h>
static inline void flush_tlb_kernel_page(void *addr)
{
if (CPU_IS_040_OR_060) {
mm_segment_t old_fs = get_fs();
set_fs(KERNEL_DS);
__asm__ __volatile__(".chip 68040\n\t"
"pflush (%0)\n\t"
".chip 68k"
: : "a" (addr));
set_fs(old_fs);
} else if (CPU_IS_020_OR_030)
__asm__ __volatile__("pflush #4,#4,(%0)" : : "a" (addr));
}
/*
* flush all user-space atc entries.
*/
static inline void __flush_tlb(void)
{
if (CPU_IS_040_OR_060)
__asm__ __volatile__(".chip 68040\n\t"
"pflushan\n\t"
".chip 68k");
else if (CPU_IS_020_OR_030)
__asm__ __volatile__("pflush #0,#4");
}
static inline void __flush_tlb040_one(unsigned long addr)
{
__asm__ __volatile__(".chip 68040\n\t"
"pflush (%0)\n\t"
".chip 68k"
: : "a" (addr));
}
static inline void __flush_tlb_one(unsigned long addr)
{
if (CPU_IS_040_OR_060)
__flush_tlb040_one(addr);
else if (CPU_IS_020_OR_030)
__asm__ __volatile__("pflush #0,#4,(%0)" : : "a" (addr));
}
#define flush_tlb() __flush_tlb()
/*
* flush all atc entries (both kernel and user-space entries).
*/
static inline void flush_tlb_all(void)
{
if (CPU_IS_040_OR_060)
__asm__ __volatile__(".chip 68040\n\t"
"pflusha\n\t"
".chip 68k");
else if (CPU_IS_020_OR_030)
__asm__ __volatile__("pflusha");
}
static inline void flush_tlb_mm(struct mm_struct *mm)
{
if (mm == current->active_mm)
__flush_tlb();
}
static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
if (vma->vm_mm == current->active_mm) {
mm_segment_t old_fs = get_fs();
set_fs(USER_DS);
__flush_tlb_one(addr);
set_fs(old_fs);
}
}
static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
if (vma->vm_mm == current->active_mm)
__flush_tlb();
}
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
flush_tlb_all();
}
#else
/* Reserved PMEGs. */
extern char sun3_reserved_pmeg[SUN3_PMEGS_NUM];
extern unsigned long pmeg_vaddr[SUN3_PMEGS_NUM];
extern unsigned char pmeg_alloc[SUN3_PMEGS_NUM];
extern unsigned char pmeg_ctx[SUN3_PMEGS_NUM];
/* Flush all userspace mappings one by one... (why no flush command,
sun?) */
static inline void flush_tlb_all(void)
{
unsigned long addr;
unsigned char ctx, oldctx;
oldctx = sun3_get_context();
for(addr = 0x00000000; addr < TASK_SIZE; addr += SUN3_PMEG_SIZE) {
for(ctx = 0; ctx < 8; ctx++) {
sun3_put_context(ctx);
sun3_put_segmap(addr, SUN3_INVALID_PMEG);
}
}
sun3_put_context(oldctx);
/* erase all of the userspace pmeg maps, we've clobbered them
all anyway */
for(addr = 0; addr < SUN3_INVALID_PMEG; addr++) {
if(pmeg_alloc[addr] == 1) {
pmeg_alloc[addr] = 0;
pmeg_ctx[addr] = 0;
pmeg_vaddr[addr] = 0;
}
}
}
/* Clear user TLB entries within the context named in mm */
static inline void flush_tlb_mm (struct mm_struct *mm)
{
unsigned char oldctx;
unsigned char seg;
unsigned long i;
oldctx = sun3_get_context();
sun3_put_context(mm->context);
for(i = 0; i < TASK_SIZE; i += SUN3_PMEG_SIZE) {
seg = sun3_get_segmap(i);
if(seg == SUN3_INVALID_PMEG)
continue;
sun3_put_segmap(i, SUN3_INVALID_PMEG);
pmeg_alloc[seg] = 0;
pmeg_ctx[seg] = 0;
pmeg_vaddr[seg] = 0;
}
sun3_put_context(oldctx);
}
/* Flush a single TLB page. In this case, we're limited to flushing a
single PMEG */
static inline void flush_tlb_page (struct vm_area_struct *vma,
unsigned long addr)
{
unsigned char oldctx;
unsigned char i;
oldctx = sun3_get_context();
sun3_put_context(vma->vm_mm->context);
addr &= ~SUN3_PMEG_MASK;
if((i = sun3_get_segmap(addr)) != SUN3_INVALID_PMEG)
{
pmeg_alloc[i] = 0;
pmeg_ctx[i] = 0;
pmeg_vaddr[i] = 0;
sun3_put_segmap (addr, SUN3_INVALID_PMEG);
}
sun3_put_context(oldctx);
}
/* Flush a range of pages from TLB. */
static inline void flush_tlb_range (struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
struct mm_struct *mm = vma->vm_mm;
unsigned char seg, oldctx;
start &= ~SUN3_PMEG_MASK;
oldctx = sun3_get_context();
sun3_put_context(mm->context);
while(start < end)
{
if((seg = sun3_get_segmap(start)) == SUN3_INVALID_PMEG)
goto next;
if(pmeg_ctx[seg] == mm->context) {
pmeg_alloc[seg] = 0;
pmeg_ctx[seg] = 0;
pmeg_vaddr[seg] = 0;
}
sun3_put_segmap(start, SUN3_INVALID_PMEG);
next:
start += SUN3_PMEG_SIZE;
}
}
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
flush_tlb_all();
}
/* Flush kernel page from TLB. */
static inline void flush_tlb_kernel_page (unsigned long addr)
{
sun3_put_segmap (addr & ~(SUN3_PMEG_SIZE - 1), SUN3_INVALID_PMEG);
}
#endif
#endif /* _M68K_TLBFLUSH_H */
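For a single user page the per-page helper is enough. A hedged sketch (hypothetical helper name) of the pattern generic code follows after updating one PTE:
/*
 * Hypothetical illustration: invalidate the ATC entry for one user page.
 * flush_tlb_page() above issues the matching pflush when the vma's mm is
 * currently active, or clears the owning PMEG on Sun3.
 */
#include <linux/mm.h>
#include <asm/tlbflush.h>

static void example_pte_updated(struct vm_area_struct *vma, unsigned long addr)
{
	flush_tlb_page(vma, addr & PAGE_MASK);
}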
#ifndef _M68KNOMMU_TLBFLUSH_H
#define _M68KNOMMU_TLBFLUSH_H
/*
* Copyright (C) 2000 Lineo, David McCullough <davidm@uclinux.org>
* Copyright (C) 2000-2002, Greg Ungerer <gerg@snapgear.com>
*/
#include <asm/setup.h>
/*
* flush all user-space atc entries.
*/
static inline void __flush_tlb(void)
{
BUG();
}
static inline void __flush_tlb_one(unsigned long addr)
{
BUG();
}
#define flush_tlb() __flush_tlb()
/*
* flush all atc entries (both kernel and user-space entries).
*/
static inline void flush_tlb_all(void)
{
BUG();
}
static inline void flush_tlb_mm(struct mm_struct *mm)
{
BUG();
}
static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
BUG();
}
static inline void flush_tlb_range(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
BUG();
}
static inline void flush_tlb_kernel_page(unsigned long addr)
{
BUG();
}
#endif /* _M68KNOMMU_TLBFLUSH_H */
#ifdef __uClinux__
#include "ucontext_no.h"
#else
#include "ucontext_mm.h"
#ifndef _M68K_UCONTEXT_H
#define _M68K_UCONTEXT_H
typedef int greg_t;
#define NGREG 18
typedef greg_t gregset_t[NGREG];
typedef struct fpregset {
int f_fpcntl[3];
int f_fpregs[8*3];
} fpregset_t;
struct mcontext {
int version;
gregset_t gregs;
fpregset_t fpregs;
};
#define MCONTEXT_VERSION 2
struct ucontext {
unsigned long uc_flags;
struct ucontext *uc_link;
stack_t uc_stack;
struct mcontext uc_mcontext;
unsigned long uc_filler[80];
sigset_t uc_sigmask; /* mask last for extensibility */
};
#endif
#ifndef _M68K_UCONTEXT_H
#define _M68K_UCONTEXT_H
typedef int greg_t;
#define NGREG 18
typedef greg_t gregset_t[NGREG];
typedef struct fpregset {
int f_fpcntl[3];
int f_fpregs[8*3];
} fpregset_t;
struct mcontext {
int version;
gregset_t gregs;
fpregset_t fpregs;
};
#define MCONTEXT_VERSION 2
struct ucontext {
unsigned long uc_flags;
struct ucontext *uc_link;
stack_t uc_stack;
struct mcontext uc_mcontext;
unsigned long uc_filler[80];
sigset_t uc_sigmask; /* mask last for extensibility */
};
#endif
#ifndef _M68KNOMMU_UCONTEXT_H
#define _M68KNOMMU_UCONTEXT_H
typedef int greg_t;
#define NGREG 18
typedef greg_t gregset_t[NGREG];
typedef struct fpregset {
int f_pcr;
int f_psr;
int f_fpiaddr;
int f_fpregs[8][3];
} fpregset_t;
struct mcontext {
int version;
gregset_t gregs;
fpregset_t fpregs;
};
#define MCONTEXT_VERSION 2
struct ucontext {
unsigned long uc_flags;
struct ucontext *uc_link;
stack_t uc_stack;
struct mcontext uc_mcontext;
unsigned long uc_filler[80];
sigset_t uc_sigmask; /* mask last for extensibility */
};
#endif
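These layouts describe what the kernel places on the signal stack. A hedged userspace sketch, assuming a program that uses the kernel's asm/ucontext.h definitions directly (handler and flag names are illustrative only):
/*
 * Hypothetical illustration: an SA_SIGINFO handler receives a pointer to
 * the ucontext the kernel built; checking version against
 * MCONTEXT_VERSION before trusting the gregs/fpregs layout.
 */
#include <signal.h>
#include <asm/ucontext.h>

static volatile sig_atomic_t ctx_ok;

static void example_handler(int sig, siginfo_t *info, void *uc_void)
{
	struct ucontext *uc = uc_void;

	ctx_ok = (uc->uc_mcontext.version == MCONTEXT_VERSION);
}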
#ifdef __uClinux__
#include "unaligned_no.h"
#ifndef _ASM_M68K_UNALIGNED_H
#define _ASM_M68K_UNALIGNED_H
#ifdef CONFIG_COLDFIRE
#include <linux/unaligned/be_struct.h>
#include <linux/unaligned/le_byteshift.h>
#include <linux/unaligned/generic.h>
#define get_unaligned __get_unaligned_be
#define put_unaligned __put_unaligned_be
#else
#include "unaligned_mm.h"
/*
* The m68k can do unaligned accesses itself.
*/
#include <linux/unaligned/access_ok.h>
#include <linux/unaligned/generic.h>
#define get_unaligned __get_unaligned_be
#define put_unaligned __put_unaligned_be
#endif
#endif /* _ASM_M68K_UNALIGNED_H */
#ifndef _ASM_M68K_UNALIGNED_H
#define _ASM_M68K_UNALIGNED_H
/*
* The m68k can do unaligned accesses itself.
*/
#include <linux/unaligned/access_ok.h>
#include <linux/unaligned/generic.h>
#define get_unaligned __get_unaligned_be
#define put_unaligned __put_unaligned_be
#endif /* _ASM_M68K_UNALIGNED_H */
#ifndef _ASM_M68KNOMMU_UNALIGNED_H
#define _ASM_M68KNOMMU_UNALIGNED_H
#ifdef CONFIG_COLDFIRE
#include <linux/unaligned/be_struct.h>
#include <linux/unaligned/le_byteshift.h>
#include <linux/unaligned/generic.h>
#define get_unaligned __get_unaligned_be
#define put_unaligned __put_unaligned_be
#else
/*
* The m68k can do unaligned accesses itself.
*/
#include <linux/unaligned/access_ok.h>
#include <linux/unaligned/generic.h>
#define get_unaligned __get_unaligned_be
#define put_unaligned __put_unaligned_be
#endif
#endif /* _ASM_M68KNOMMU_UNALIGNED_H */
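Both variants export the same type-generic get_unaligned()/put_unaligned() interface, so callers do not need to know whether the CPU (ColdFire) faults on misaligned loads. A minimal sketch with hypothetical names:
/*
 * Hypothetical illustration: pull a 32-bit big-endian field out of a
 * packed buffer at an arbitrary byte offset. On classic m68k this is a
 * plain load; on ColdFire it goes through the packed-struct helpers so
 * the compiler avoids a faulting misaligned access.
 */
#include <linux/kernel.h>
#include <asm/unaligned.h>

static u32 example_read_len(const u8 *buf, unsigned int offset)
{
	return get_unaligned((const u32 *)(buf + offset));
}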
......@@ -88,18 +88,18 @@ export PLATFORM BOARD MODEL CPUCLASS
#
# Some CFLAG additions based on specific CPU type.
#
cflags-$(CONFIG_M5206) := -m5200
cflags-$(CONFIG_M5206e) := -m5200
cflags-$(CONFIG_M520x) := -m5307
cflags-$(CONFIG_M5206) := $(call cc-option,-mcpu=5206,-m5200)
cflags-$(CONFIG_M5206e) := $(call cc-option,-m5206e,-m5200)
cflags-$(CONFIG_M520x) := $(call cc-option,-mcpu=5208,-m5200)
cflags-$(CONFIG_M523x) := $(call cc-option,-mcpu=523x,-m5307)
cflags-$(CONFIG_M5249) := -m5200
cflags-$(CONFIG_M5249) := $(call cc-option,-mcpu=5249,-m5200)
cflags-$(CONFIG_M5271) := $(call cc-option,-mcpu=5271,-m5307)
cflags-$(CONFIG_M5272) := -m5307
cflags-$(CONFIG_M5272) := $(call cc-option,-mcpu=5271,-m5200)
cflags-$(CONFIG_M5275) := $(call cc-option,-mcpu=5275,-m5307)
cflags-$(CONFIG_M528x) := $(call cc-option,-m528x,-m5307)
cflags-$(CONFIG_M5307) := -m5307
cflags-$(CONFIG_M5307) := $(call cc-option,-m5307,-m5200)
cflags-$(CONFIG_M532x) := $(call cc-option,-mcpu=532x,-m5307)
cflags-$(CONFIG_M5407) := -m5200
cflags-$(CONFIG_M5407) := $(call cc-option,-m5407,-m5200)
cflags-$(CONFIG_M68328) := -m68000
cflags-$(CONFIG_M68EZ328) := -m68000
cflags-$(CONFIG_M68VZ328) := -m68000
......
......@@ -9,10 +9,11 @@
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>
void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, int gfp)
dma_addr_t *dma_handle, gfp_t gfp)
{
void *ret;
/* ignore region specifiers */
......@@ -34,3 +35,8 @@ void dma_free_coherent(struct device *dev, size_t size,
{
free_pages((unsigned long)vaddr, get_order(size));
}
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
}
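The two coherent-allocation entry points above (plus the new no-op sync hook) are all a simple ColdFire driver needs. A hedged sketch with hypothetical names:
/*
 * Hypothetical illustration: allocate a buffer visible to both the CPU
 * and a DMA engine; *dma receives the bus address to program into the
 * controller, and the allocation flag now carries the proper gfp_t type.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static void *example_alloc_ring(struct device *dev, size_t size, dma_addr_t *dma)
{
	return dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
}

static void example_free_ring(struct device *dev, size_t size,
			      void *ring, dma_addr_t dma)
{
	dma_free_coherent(dev, size, ring, dma);
}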
......@@ -23,7 +23,7 @@ asmlinkage void do_IRQ(int irq, struct pt_regs *regs)
struct pt_regs *oldregs = set_irq_regs(regs);
irq_enter();
__do_IRQ(irq);
generic_handle_irq(irq);
irq_exit();
set_irq_regs(oldregs);
......
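With do_IRQ() now routing through the generic IRQ layer, drivers register handlers the usual way and generic_handle_irq() dispatches to them via the flow handler. A hedged sketch (the vector number and names are only illustrative):
/*
 * Hypothetical illustration: a handler hooked with request_irq() is what
 * generic_handle_irq() ultimately calls for that vector.
 */
#include <linux/init.h>
#include <linux/interrupt.h>

static irqreturn_t example_isr(int irq, void *dev_id)
{
	/* acknowledge and service the device here */
	return IRQ_HANDLED;
}

static int __init example_hook_irq(void)
{
	return request_irq(73, example_isr, IRQF_DISABLED, "example", NULL);
}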
......@@ -111,11 +111,7 @@ void __init paging_init(void)
{
unsigned long zones_size[MAX_NR_ZONES] = {0, };
zones_size[ZONE_DMA] = 0 >> PAGE_SHIFT;
zones_size[ZONE_NORMAL] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
zones_size[ZONE_HIGHMEM] = 0;
#endif
zones_size[ZONE_DMA] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT;
free_area_init(zones_size);
}
}
......
......@@ -32,7 +32,8 @@ static struct mcf_platform_uart m5249_uart_platform[] = {
{
.mapbase = MCF_MBAR + MCFUART_BASE2,
.irq = 74,
}
},
{ },
};
static struct platform_device m5249_uart = {
......@@ -50,12 +51,12 @@ static struct platform_device *m5249_devices[] __initdata = {
static void __init m5249_uart_init_line(int line, int irq)
{
if (line == 0) {
writel(MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI1, MCF_MBAR + MCFSIM_UART1ICR);
writeb(irq, MCFUART_BASE1 + MCFUART_UIVR);
writeb(MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI1, MCF_MBAR + MCFSIM_UART1ICR);
writeb(irq, MCF_MBAR + MCFUART_BASE1 + MCFUART_UIVR);
mcf_setimr(mcf_getimr() & ~MCFSIM_IMR_UART1);
} else if (line == 1) {
writel(MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI2, MCF_MBAR + MCFSIM_UART2ICR);
writeb(irq, MCFUART_BASE2 + MCFUART_UIVR);
writeb(MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI2, MCF_MBAR + MCFSIM_UART2ICR);
writeb(irq, MCF_MBAR + MCFUART_BASE2 + MCFUART_UIVR);
mcf_setimr(mcf_getimr() & ~MCFSIM_IMR_UART2);
}
}
......
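The extra `{ },` entry gives the UART platform table a proper terminator, which is what table walkers key off. A hedged sketch of that idiom (the helper name is hypothetical):
/*
 * Hypothetical illustration: stop at the first zeroed entry instead of
 * running off the end of the array.
 */
#include <linux/kernel.h>
#include <asm/mcfuart.h>

static void example_list_uarts(struct mcf_platform_uart *table)
{
	struct mcf_platform_uart *p;

	for (p = table; p->mapbase; p++)
		printk(KERN_INFO "UART at 0x%lx, irq %u\n", p->mapbase, p->irq);
}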
......@@ -65,12 +65,12 @@ static struct platform_device *m5307_devices[] __initdata = {
static void __init m5307_uart_init_line(int line, int irq)
{
if (line == 0) {
writel(MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI1, MCF_MBAR + MCFSIM_UART1ICR);
writeb(irq, MCFUART_BASE1 + MCFUART_UIVR);
writeb(MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI1, MCF_MBAR + MCFSIM_UART1ICR);
writeb(irq, MCF_MBAR + MCFUART_BASE1 + MCFUART_UIVR);
mcf_setimr(mcf_getimr() & ~MCFSIM_IMR_UART1);
} else if (line == 1) {
writel(MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI2, MCF_MBAR + MCFSIM_UART2ICR);
writeb(irq, MCFUART_BASE2 + MCFUART_UIVR);
writeb(MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI2, MCF_MBAR + MCFSIM_UART2ICR);
writeb(irq, MCF_MBAR + MCFUART_BASE2 + MCFUART_UIVR);
mcf_setimr(mcf_getimr() & ~MCFSIM_IMR_UART2);
}
}
......
......@@ -56,12 +56,12 @@ static struct platform_device *m5407_devices[] __initdata = {
static void __init m5407_uart_init_line(int line, int irq)
{
if (line == 0) {
writel(MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI1, MCF_MBAR + MCFSIM_UART1ICR);
writeb(irq, MCFUART_BASE1 + MCFUART_UIVR);
writeb(MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI1, MCF_MBAR + MCFSIM_UART1ICR);
writeb(irq, MCF_MBAR + MCFUART_BASE1 + MCFUART_UIVR);
mcf_setimr(mcf_getimr() & ~MCFSIM_IMR_UART1);
} else if (line == 1) {
writel(MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI2, MCF_MBAR + MCFSIM_UART2ICR);
writeb(irq, MCFUART_BASE2 + MCFUART_UIVR);
writeb(MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI2, MCF_MBAR + MCFSIM_UART2ICR);
writeb(irq, MCF_MBAR + MCFUART_BASE2 + MCFUART_UIVR);
mcf_setimr(mcf_getimr() & ~MCFSIM_IMR_UART2);
}
}
......
......@@ -14,7 +14,7 @@
asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
obj-$(CONFIG_COLDFIRE) += dma.o entry.o vectors.o
obj-$(CONFIG_COLDFIRE) += clk.o dma.o entry.o vectors.o
obj-$(CONFIG_M5206) += timers.o
obj-$(CONFIG_M5206e) += timers.o
obj-$(CONFIG_M520x) += pit.o
......
/***************************************************************************/
/*
* clk.c -- general ColdFire CPU kernel clk handling
*
* Copyright (C) 2009, Greg Ungerer (gerg@snapgear.com)
*/
/***************************************************************************/
#include <linux/kernel.h>
#include <linux/clk.h>
#include <asm/coldfire.h>
/***************************************************************************/
struct clk *clk_get(struct device *dev, const char *id)
{
return NULL;
}
int clk_enable(struct clk *clk)
{
return 0;
}
void clk_disable(struct clk *clk)
{
}
void clk_put(struct clk *clk)
{
}
unsigned long clk_get_rate(struct clk *clk)
{
return MCF_CLK;
}
/***************************************************************************/
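clk.c only stubs the API for now, but it already lets ColdFire drivers use the generic clk calls unchanged; clk_get_rate() simply reports the fixed MCF_CLK. A hedged consumer sketch (the clock id string and function name are made up):
/*
 * Hypothetical illustration: query the bus clock the way a portable
 * driver would. With the stub above every clock reports MCF_CLK; a real
 * driver would also check the cookie from clk_get() with IS_ERR().
 */
#include <linux/clk.h>
#include <linux/device.h>

static unsigned long example_bus_rate(struct device *dev)
{
	struct clk *clk = clk_get(dev, "sys");	/* id is ignored by the stub */
	unsigned long rate;

	clk_enable(clk);
	rate = clk_get_rate(clk);
	clk_disable(clk);
	clk_put(clk);
	return rate;
}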
......@@ -513,7 +513,7 @@ static int __init mcf_console_setup(struct console *co, char *options)
int parity = 'n';
int flow = 'n';
if ((co->index >= 0) && (co->index <= MCF_MAXPORTS))
if ((co->index < 0) || (co->index >= MCF_MAXPORTS))
co->index = 0;
port = &mcf_ports[co->index].port;
if (port->membase == 0)
......