Commit 58cf3522 authored by Ingo Molnar


Merge branches 'x86/mmio', 'x86/delay', 'x86/idle', 'x86/oprofile', 'x86/debug', 'x86/ptrace' and 'x86/amd-iommu' into x86/devel
@@ -271,6 +271,17 @@ and is between 256 and 4096 characters. It is defined in the file
aic79xx= [HW,SCSI]
See Documentation/scsi/aic79xx.txt.
amd_iommu= [HW,X86-64]
Pass parameters to the AMD IOMMU driver in the system.
Possible values are:
isolate - enable device isolation (each device, as far
as possible, will get its own protection
domain)
amd_iommu_size= [HW,X86-64]
Define the size of the aperture for the AMD IOMMU
driver. Possible values are:
'32M', '64M' (default), '128M', '256M', '512M', '1G'
amijoy.map= [HW,JOY] Amiga joystick support
Map of devices attached to JOY0DAT and JOY1DAT
Format: <a>,<b>
......
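As a usage sketch for the amd_iommu= and amd_iommu_size= options documented above, a boot line combining both (illustrative values drawn from the lists in the entries) could read:

	amd_iommu=isolate amd_iommu_size=128M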
@@ -376,6 +376,12 @@ L: linux-geode@lists.infradead.org (moderated for non-subscribers)
W: http://www.amd.com/us-en/ConnectivitySolutions/TechnicalResources/0,,50_2334_2452_11363,00.html
S: Supported
AMD IOMMU (AMD-VI)
P: Joerg Roedel
M: joerg.roedel@amd.com
L: iommu@lists.linux-foundation.org
S: Supported
AMS (Apple Motion Sensor) DRIVER
P: Stelian Pop
M: stelian@popies.net
......
@@ -533,6 +533,21 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT
Calgary anyway, pass 'iommu=calgary' on the kernel command line.
If unsure, say Y.
config AMD_IOMMU
bool "AMD IOMMU support"
select SWIOTLB
depends on X86_64 && PCI && ACPI
help
With this option you can enable support for AMD IOMMU hardware in
your system. An IOMMU is a hardware component which provides
remapping of DMA memory accesses from devices. With an AMD IOMMU you
can isolate the DMA memory of different devices and protect the
system from misbehaving device drivers or hardware.
You can find out if your system has an AMD IOMMU by looking in your
BIOS for an option to enable it, or by checking whether your system
provides an IVRS ACPI table.
# need this always selected by IOMMU for the VIA workaround
config SWIOTLB
bool
......
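As a sketch of the resulting configuration fragment, assuming a 64-bit build that satisfies the dependencies above (SWIOTLB is pulled in automatically by the select):

	CONFIG_X86_64=y
	CONFIG_PCI=y
	CONFIG_ACPI=y
	CONFIG_AMD_IOMMU=y
	CONFIG_SWIOTLB=y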
@@ -99,6 +99,7 @@ ifeq ($(CONFIG_X86_64),y)
obj-$(CONFIG_GART_IOMMU) += pci-gart_64.o aperture_64.o
obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o
obj-$(CONFIG_AMD_IOMMU) += amd_iommu_init.o amd_iommu.o
obj-$(CONFIG_SWIOTLB) += pci-swiotlb_64.o
obj-$(CONFIG_PCI_MMCONFIG) += mmconf-fam10h_64.o
......
This diff is collapsed.
This diff is collapsed.
@@ -925,11 +925,11 @@ error_kernelspace:
iret run with kernel gs again, so don't set the user space flag.
B stepping K8s sometimes report a truncated RIP for IRET
exceptions returning to compat mode. Check for these here too. */
leaq irq_return(%rip),%rbp
cmpq %rbp,RIP(%rsp)
leaq irq_return(%rip),%rcx
cmpq %rcx,RIP(%rsp)
je error_swapgs
movl %ebp,%ebp /* zero extend */
cmpq %rbp,RIP(%rsp)
movl %ecx,%ecx /* zero extend */
cmpq %rcx,RIP(%rsp)
je error_swapgs
cmpq $gs_change,RIP(%rsp)
je error_swapgs
......
@@ -162,7 +162,7 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
int ret;
if (!cpu_has_fxsr)
return -EIO;
return -ENODEV;
ret = init_fpu(target);
if (ret)
@@ -179,7 +179,7 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
int ret;
if (!cpu_has_fxsr)
return -EIO;
return -ENODEV;
ret = init_fpu(target);
if (ret)
......
@@ -7,6 +7,7 @@
#include <asm/dma.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/amd_iommu.h>
int forbid_dac __read_mostly;
EXPORT_SYMBOL(forbid_dac);
@@ -123,6 +124,8 @@ void __init pci_iommu_alloc(void)
detect_intel_iommu();
amd_iommu_detect();
#ifdef CONFIG_SWIOTLB
pci_swiotlb_init();
#endif
@@ -503,6 +506,8 @@ static int __init pci_iommu_init(void)
intel_iommu_init();
amd_iommu_init();
#ifdef CONFIG_GART_IOMMU
gart_iommu_init();
#endif
......
@@ -943,13 +943,13 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
return copy_regset_to_user(child, &user_x86_32_view,
REGSET_XFP,
0, sizeof(struct user_fxsr_struct),
datap);
datap) ? -EIO : 0;
case PTRACE_SETFPXREGS: /* Set the child extended FPU state. */
return copy_regset_from_user(child, &user_x86_32_view,
REGSET_XFP,
0, sizeof(struct user_fxsr_struct),
datap);
datap) ? -EIO : 0;
#endif
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
......
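The ? -EIO : 0 conversion pairs with the i387.c hunk earlier: the regset callbacks now report -ENODEV when FXSR is absent, while ptrace callers historically receive -EIO, so arch_ptrace() maps any regset failure back to the old error code. A minimal sketch of the pattern (err is a hypothetical local):

	int err = copy_regset_to_user(child, &user_x86_32_view, REGSET_XFP,
				      0, sizeof(struct user_fxsr_struct), datap);
	return err ? -EIO : 0;	/* preserve the historical ptrace error code */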
@@ -123,6 +123,8 @@ void __init time_init(void)
(boot_cpu_data.x86_vendor == X86_VENDOR_AMD))
cpu_khz = calculate_cpu_khz();
lpj_fine = ((unsigned long)tsc_khz * 1000)/HZ;
if (unsynchronized_tsc())
mark_tsc_unstable("TSCs unsynchronized");
......
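The lpj_fine assignment is plain arithmetic: loops per jiffy = TSC ticks per second / HZ. A worked example with assumed values, tsc_khz = 2000000 (a 2 GHz TSC) and HZ = 250:

	lpj_fine = (2000000UL * 1000) / 250;	/* = 8000000 loops per jiffy */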
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/cpufreq.h>
#include <linux/jiffies.h>
#include <linux/init.h>
@@ -403,6 +404,7 @@ static inline void check_geode_tsc_reliable(void) { }
void __init tsc_init(void)
{
int cpu;
u64 lpj;
if (!cpu_has_tsc || tsc_disabled > 0)
return;
@@ -415,6 +417,10 @@ void __init tsc_init(void)
return;
}
lpj = ((u64)tsc_khz * 1000);
do_div(lpj, HZ);
lpj_fine = lpj;
/* now allow native_sched_clock() to use rdtsc */
tsc_disabled = 0;
......
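The same quotient is computed here on a u64 via do_div(), because a plain 64-bit division on 32-bit x86 would pull in libgcc helpers; do_div(n, base) divides n in place and returns the remainder. A minimal sketch:

	#include <asm/div64.h>

	u64 lpj = (u64)tsc_khz * 1000;	/* 64-bit intermediate product */
	do_div(lpj, HZ);		/* lpj now holds the quotient lpj / HZ */
	lpj_fine = lpj;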
@@ -3,6 +3,7 @@
*
* Copyright (C) 1993 Linus Torvalds
* Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
* Copyright (C) 2008 Jiri Hladky <hladky _dot_ jiri _at_ gmail _dot_ com>
*
* The __delay function must _NOT_ be inlined as its execution time
* depends wildly on alignment on many x86 processors. The additional
@@ -28,16 +29,22 @@
/* simple loop based delay: */
static void delay_loop(unsigned long loops)
{
int d0;
__asm__ __volatile__(
"\tjmp 1f\n"
".align 16\n"
"1:\tjmp 2f\n"
".align 16\n"
"2:\tdecl %0\n\tjns 2b"
:"=&a" (d0)
:"0" (loops));
" test %0,%0 \n"
" jz 3f \n"
" jmp 1f \n"
".align 16 \n"
"1: jmp 2f \n"
".align 16 \n"
"2: decl %0 \n"
" jnz 2b \n"
"3: decl %0 \n"
: /* we don't need output */
:"a" (loops)
);
}
/* TSC based delay: */
......
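Two behavioral changes are visible in the rewrite: delay_loop(0) now returns almost immediately (the leading test/jz), and the loop exits on zero (jnz) instead of running until the counter goes negative (jns), so the body executes exactly loops times. A rough C equivalent of the new version, ignoring the alignment padding and the balancing decl at label 3:

	static void delay_loop_c(unsigned long loops)
	{
		while (loops)		/* the "test/jz 3f" guard */
			--loops;	/* the "decl/jnz 2b" loop */
	}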
@@ -392,11 +392,7 @@ static void show_fault_oops(struct pt_regs *regs, unsigned long error_code,
printk(KERN_CONT "NULL pointer dereference");
else
printk(KERN_CONT "paging request");
#ifdef CONFIG_X86_32
printk(KERN_CONT " at %08lx\n", address);
#else
printk(KERN_CONT " at %016lx\n", address);
#endif
printk(KERN_CONT " at %p\n", (void *) address);
printk(KERN_ALERT "IP:");
printk_address(regs->ip, 1);
dump_pagetable(address);
@@ -796,14 +792,10 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
printk_ratelimit()) {
printk(
#ifdef CONFIG_X86_32
"%s%s[%d]: segfault at %lx ip %08lx sp %08lx error %lx",
#else
"%s%s[%d]: segfault at %lx ip %lx sp %lx error %lx",
#endif
"%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
tsk->comm, task_pid_nr(tsk), address, regs->ip,
regs->sp, error_code);
tsk->comm, task_pid_nr(tsk), address,
(void *) regs->ip, (void *) regs->sp, error_code);
print_vma_addr(" in ", regs->ip);
printk("\n");
}
......
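Both hunks rely on printk's %p specifier, which prints a pointer zero-padded to the native width (8 hex digits on 32-bit, 16 on 64-bit), so one format string replaces each #ifdef CONFIG_X86_32 pair. A minimal sketch with an illustrative value:

	unsigned long address = 0xdeadbeef;
	printk(KERN_CONT " at %p\n", (void *)address);	/* " at deadbeef" on a 32-bit build */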
@@ -269,12 +269,13 @@ static void nmi_cpu_shutdown(void *dummy)
static void nmi_shutdown(void)
{
struct op_msrs *msrs = &__get_cpu_var(cpu_msrs);
struct op_msrs *msrs = &get_cpu_var(cpu_msrs);
nmi_enabled = 0;
on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1);
unregister_die_notifier(&profile_exceptions_nb);
model->shutdown(msrs);
free_msrs();
put_cpu_var(cpu_msrs);
}
static void nmi_cpu_start(void *dummy)
......
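The switch from __get_cpu_var() to the get_cpu_var()/put_cpu_var() pair is a preemption fix: __get_cpu_var() assumes preemption is already disabled, whereas get_cpu_var() disables it and put_cpu_var() re-enables it, so cpu_msrs cannot be sampled on one CPU and then used after migrating to another. The resulting pattern:

	struct op_msrs *msrs = &get_cpu_var(cpu_msrs);	/* disables preemption */
	/* ... shut down profiling using this CPU's MSR image ... */
	put_cpu_var(cpu_msrs);				/* re-enables preemption */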
/*
* Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
* Author: Joerg Roedel <joerg.roedel@amd.com>
* Leo Duran <leo.duran@amd.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _ASM_X86_AMD_IOMMU_H
#define _ASM_X86_AMD_IOMMU_H
#ifdef CONFIG_AMD_IOMMU
extern int amd_iommu_init(void);
extern int amd_iommu_init_dma_ops(void);
extern void amd_iommu_detect(void);
#else
static inline int amd_iommu_init(void) { return -ENODEV; }
static inline void amd_iommu_detect(void) { }
#endif
#endif
/*
* Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
* Author: Joerg Roedel <joerg.roedel@amd.com>
* Leo Duran <leo.duran@amd.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __AMD_IOMMU_TYPES_H__
#define __AMD_IOMMU_TYPES_H__
#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
/*
* some size calculation constants
*/
#define DEV_TABLE_ENTRY_SIZE 256
#define ALIAS_TABLE_ENTRY_SIZE 2
#define RLOOKUP_TABLE_ENTRY_SIZE (sizeof(void *))
/* helper macros */
#define LOW_U32(x) ((x) & ((1ULL << 32)-1))
#define HIGH_U32(x) (LOW_U32((x) >> 32))
/* Length of the MMIO region for the AMD IOMMU */
#define MMIO_REGION_LENGTH 0x4000
/* Capability offsets used by the driver */
#define MMIO_CAP_HDR_OFFSET 0x00
#define MMIO_RANGE_OFFSET 0x0c
/* Masks, shifts and macros to parse the device range capability */
#define MMIO_RANGE_LD_MASK 0xff000000
#define MMIO_RANGE_FD_MASK 0x00ff0000
#define MMIO_RANGE_BUS_MASK 0x0000ff00
#define MMIO_RANGE_LD_SHIFT 24
#define MMIO_RANGE_FD_SHIFT 16
#define MMIO_RANGE_BUS_SHIFT 8
#define MMIO_GET_LD(x) (((x) & MMIO_RANGE_LD_MASK) >> MMIO_RANGE_LD_SHIFT)
#define MMIO_GET_FD(x) (((x) & MMIO_RANGE_FD_MASK) >> MMIO_RANGE_FD_SHIFT)
#define MMIO_GET_BUS(x) (((x) & MMIO_RANGE_BUS_MASK) >> MMIO_RANGE_BUS_SHIFT)
/* Flag masks for the AMD IOMMU exclusion range */
#define MMIO_EXCL_ENABLE_MASK 0x01ULL
#define MMIO_EXCL_ALLOW_MASK 0x02ULL
/* Used offsets into the MMIO space */
#define MMIO_DEV_TABLE_OFFSET 0x0000
#define MMIO_CMD_BUF_OFFSET 0x0008
#define MMIO_EVT_BUF_OFFSET 0x0010
#define MMIO_CONTROL_OFFSET 0x0018
#define MMIO_EXCL_BASE_OFFSET 0x0020
#define MMIO_EXCL_LIMIT_OFFSET 0x0028
#define MMIO_CMD_HEAD_OFFSET 0x2000
#define MMIO_CMD_TAIL_OFFSET 0x2008
#define MMIO_EVT_HEAD_OFFSET 0x2010
#define MMIO_EVT_TAIL_OFFSET 0x2018
#define MMIO_STATUS_OFFSET 0x2020
/* feature control bits */
#define CONTROL_IOMMU_EN 0x00ULL
#define CONTROL_HT_TUN_EN 0x01ULL
#define CONTROL_EVT_LOG_EN 0x02ULL
#define CONTROL_EVT_INT_EN 0x03ULL
#define CONTROL_COMWAIT_EN 0x04ULL
#define CONTROL_PASSPW_EN 0x08ULL
#define CONTROL_RESPASSPW_EN 0x09ULL
#define CONTROL_COHERENT_EN 0x0aULL
#define CONTROL_ISOC_EN 0x0bULL
#define CONTROL_CMDBUF_EN 0x0cULL
#define CONTROL_PPFLOG_EN 0x0dULL
#define CONTROL_PPFINT_EN 0x0eULL
/* command specific defines */
#define CMD_COMPL_WAIT 0x01
#define CMD_INV_DEV_ENTRY 0x02
#define CMD_INV_IOMMU_PAGES 0x03
#define CMD_COMPL_WAIT_STORE_MASK 0x01
#define CMD_INV_IOMMU_PAGES_SIZE_MASK 0x01
#define CMD_INV_IOMMU_PAGES_PDE_MASK 0x02
#define CMD_INV_IOMMU_ALL_PAGES_ADDRESS 0x7fffffffffffffffULL
/* macros and definitions for device table entries */
#define DEV_ENTRY_VALID 0x00
#define DEV_ENTRY_TRANSLATION 0x01
#define DEV_ENTRY_IR 0x3d
#define DEV_ENTRY_IW 0x3e
#define DEV_ENTRY_EX 0x67
#define DEV_ENTRY_SYSMGT1 0x68
#define DEV_ENTRY_SYSMGT2 0x69
#define DEV_ENTRY_INIT_PASS 0xb8
#define DEV_ENTRY_EINT_PASS 0xb9
#define DEV_ENTRY_NMI_PASS 0xba
#define DEV_ENTRY_LINT0_PASS 0xbe
#define DEV_ENTRY_LINT1_PASS 0xbf
/* constants to configure the command buffer */
#define CMD_BUFFER_SIZE 8192
#define CMD_BUFFER_ENTRIES 512
#define MMIO_CMD_SIZE_SHIFT 56
#define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT)
#define PAGE_MODE_1_LEVEL 0x01
#define PAGE_MODE_2_LEVEL 0x02
#define PAGE_MODE_3_LEVEL 0x03
#define IOMMU_PDE_NL_0 0x000ULL
#define IOMMU_PDE_NL_1 0x200ULL
#define IOMMU_PDE_NL_2 0x400ULL
#define IOMMU_PDE_NL_3 0x600ULL
#define IOMMU_PTE_L2_INDEX(address) (((address) >> 30) & 0x1ffULL)
#define IOMMU_PTE_L1_INDEX(address) (((address) >> 21) & 0x1ffULL)
#define IOMMU_PTE_L0_INDEX(address) (((address) >> 12) & 0x1ffULL)
#define IOMMU_MAP_SIZE_L1 (1ULL << 21)
#define IOMMU_MAP_SIZE_L2 (1ULL << 30)
#define IOMMU_MAP_SIZE_L3 (1ULL << 39)
#define IOMMU_PTE_P (1ULL << 0)
#define IOMMU_PTE_U (1ULL << 59)
#define IOMMU_PTE_FC (1ULL << 60)
#define IOMMU_PTE_IR (1ULL << 61)
#define IOMMU_PTE_IW (1ULL << 62)
#define IOMMU_L1_PDE(address) \
((address) | IOMMU_PDE_NL_1 | IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW)
#define IOMMU_L2_PDE(address) \
((address) | IOMMU_PDE_NL_2 | IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW)
#define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL)
#define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P)
#define IOMMU_PTE_PAGE(pte) (phys_to_virt((pte) & IOMMU_PAGE_MASK))
#define IOMMU_PTE_MODE(pte) (((pte) >> 9) & 0x07)
#define IOMMU_PROT_MASK 0x03
#define IOMMU_PROT_IR 0x01
#define IOMMU_PROT_IW 0x02
/* IOMMU capabilities */
#define IOMMU_CAP_IOTLB 24
#define IOMMU_CAP_NPCACHE 26
#define MAX_DOMAIN_ID 65536
struct protection_domain {
spinlock_t lock;
u16 id;
int mode;
u64 *pt_root;
void *priv;
};
struct dma_ops_domain {
struct list_head list;
struct protection_domain domain;
unsigned long aperture_size;
unsigned long next_bit;
unsigned long *bitmap;
u64 **pte_pages;
};
struct amd_iommu {
struct list_head list;
spinlock_t lock;
u16 devid;
u16 cap_ptr;
u64 mmio_phys;
u8 *mmio_base;
u32 cap;
u16 first_device;
u16 last_device;
u64 exclusion_start;
u64 exclusion_length;
u8 *cmd_buf;
u32 cmd_buf_size;
int need_sync;
struct dma_ops_domain *default_dom;
};
extern struct list_head amd_iommu_list;
struct dev_table_entry {
u32 data[8];
};
struct unity_map_entry {
struct list_head list;
u16 devid_start;
u16 devid_end;
u64 address_start;
u64 address_end;
int prot;
};
extern struct list_head amd_iommu_unity_map;
/* data structures for device handling */
extern struct dev_table_entry *amd_iommu_dev_table;
extern u16 *amd_iommu_alias_table;
extern struct amd_iommu **amd_iommu_rlookup_table;
extern unsigned amd_iommu_aperture_order;
extern u16 amd_iommu_last_bdf;
/* data structures for protection domain handling */
extern struct protection_domain **amd_iommu_pd_table;
extern unsigned long *amd_iommu_pd_alloc_bitmap;
extern int amd_iommu_isolate;
static inline void print_devid(u16 devid, int nl)
{
int bus = devid >> 8;
int dev = devid >> 3 & 0x1f;
int fn = devid & 0x07;
printk("%02x:%02x.%x", bus, dev, fn);
if (nl)
printk("\n");
}
#endif
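Some of the helpers above are easiest to read with concrete numbers; a worked example with illustrative values (the values are assumptions, not from the source):

	u64 addr = 0x40201000ULL;
	/* each level selects a 9-bit page-table index from the DMA address */
	IOMMU_PTE_L2_INDEX(addr);	/* (addr >> 30) & 0x1ff == 1 */
	IOMMU_PTE_L1_INDEX(addr);	/* (addr >> 21) & 0x1ff == 1 */
	IOMMU_PTE_L0_INDEX(addr);	/* (addr >> 12) & 0x1ff == 1 */

	u64 val = 0x123456789ULL;
	LOW_U32(val);			/* == 0x23456789 */
	HIGH_U32(val);			/* == 0x1 */

	print_devid(0x0a08, 1);		/* prints "0a:01.0" plus a newline */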
@@ -106,6 +106,7 @@
/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
#define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */
#define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */
#define X86_FEATURE_IBS (6*32+ 10) /* Instruction Based Sampling */
/*
* Auxiliary flags: Linux defined - For features scattered in various
......
@@ -22,8 +22,9 @@ extern int gart_iommu_aperture_allowed;
extern int gart_iommu_aperture_disabled;
extern int fix_aperture;
#else
#define gart_iommu_aperture 0
#define gart_iommu_aperture_allowed 0
#define gart_iommu_aperture 0
#define gart_iommu_aperture_allowed 0
#define gart_iommu_aperture_disabled 1
static inline void early_gart_iommu_check(void)
{
......
@@ -3,6 +3,62 @@
#define ARCH_HAS_IOREMAP_WC
#include <linux/compiler.h>
#define build_mmio_read(name, size, type, reg, barrier) \
static inline type name(const volatile void __iomem *addr) \
{ type ret; asm volatile("mov" size " %1,%0":"=" reg (ret) \
:"m" (*(volatile type __force *)addr) barrier); return ret; }
#define build_mmio_write(name, size, type, reg, barrier) \
static inline void name(type val, volatile void __iomem *addr) \
{ asm volatile("mov" size " %0,%1": :reg (val), \
"m" (*(volatile type __force *)addr) barrier); }
build_mmio_read(readb, "b", unsigned char, "q", :"memory")
build_mmio_read(readw, "w", unsigned short, "r", :"memory")
build_mmio_read(readl, "l", unsigned int, "r", :"memory")
build_mmio_read(__readb, "b", unsigned char, "q", )
build_mmio_read(__readw, "w", unsigned short, "r", )
build_mmio_read(__readl, "l", unsigned int, "r", )
build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
build_mmio_write(writew, "w", unsigned short, "r", :"memory")
build_mmio_write(writel, "l", unsigned int, "r", :"memory")
build_mmio_write(__writeb, "b", unsigned char, "q", )
build_mmio_write(__writew, "w", unsigned short, "r", )
build_mmio_write(__writel, "l", unsigned int, "r", )
#define readb_relaxed(a) __readb(a)
#define readw_relaxed(a) __readw(a)
#define readl_relaxed(a) __readl(a)
#define __raw_readb __readb
#define __raw_readw __readw
#define __raw_readl __readl
#define __raw_writeb __writeb
#define __raw_writew __writew
#define __raw_writel __writel
#define mmiowb() barrier()
#ifdef CONFIG_X86_64
build_mmio_read(readq, "q", unsigned long, "r", :"memory")
build_mmio_read(__readq, "q", unsigned long, "r", )
build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
build_mmio_write(__writeq, "q", unsigned long, "r", )
#define readq_relaxed(a) __readq(a)
#define __raw_readq __readq
#define __raw_writeq writeq
/* Let people know we have them */
#define readq readq
#define writeq writeq
#endif
#ifdef CONFIG_X86_32
# include "io_32.h"
#else
......
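Expanding one instance by hand shows what the generator emits; build_mmio_read(readl, "l", unsigned int, "r", :"memory") becomes, modulo whitespace:

	static inline unsigned int readl(const volatile void __iomem *addr)
	{
		unsigned int ret;
		asm volatile("movl %1,%0"
			     : "=r" (ret)
			     : "m" (*(volatile unsigned int __force *)addr)
			     : "memory");
		return ret;
	}

The __-prefixed variants pass an empty barrier argument, which is why they can double as the _relaxed and __raw accessors defined just below.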
@@ -149,55 +149,6 @@ extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
/*
* readX/writeX() are used to access memory mapped devices. On some
* architectures the memory mapped IO stuff needs to be accessed
* differently. On the x86 architecture, we just read/write the
* memory location directly.
*/
static inline unsigned char readb(const volatile void __iomem *addr)
{
return *(volatile unsigned char __force *)addr;
}
static inline unsigned short readw(const volatile void __iomem *addr)
{
return *(volatile unsigned short __force *)addr;
}
static inline unsigned int readl(const volatile void __iomem *addr)
{
return *(volatile unsigned int __force *) addr;
}
#define readb_relaxed(addr) readb(addr)
#define readw_relaxed(addr) readw(addr)
#define readl_relaxed(addr) readl(addr)
#define __raw_readb readb
#define __raw_readw readw
#define __raw_readl readl
static inline void writeb(unsigned char b, volatile void __iomem *addr)
{
*(volatile unsigned char __force *)addr = b;
}
static inline void writew(unsigned short b, volatile void __iomem *addr)
{
*(volatile unsigned short __force *)addr = b;
}
static inline void writel(unsigned int b, volatile void __iomem *addr)
{
*(volatile unsigned int __force *)addr = b;
}
#define __raw_writeb writeb
#define __raw_writew writew
#define __raw_writel writel
#define mmiowb()
static inline void
memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
......
@@ -204,77 +204,6 @@ extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
/*
* readX/writeX() are used to access memory mapped devices. On some
* architectures the memory mapped IO stuff needs to be accessed
* differently. On the x86 architecture, we just read/write the
* memory location directly.
*/
static inline __u8 __readb(const volatile void __iomem *addr)
{
return *(__force volatile __u8 *)addr;
}
static inline __u16 __readw(const volatile void __iomem *addr)
{
return *(__force volatile __u16 *)addr;
}
static __always_inline __u32 __readl(const volatile void __iomem *addr)
{
return *(__force volatile __u32 *)addr;
}
static inline __u64 __readq(const volatile void __iomem *addr)
{
return *(__force volatile __u64 *)addr;
}
#define readb(x) __readb(x)
#define readw(x) __readw(x)
#define readl(x) __readl(x)
#define readq(x) __readq(x)
#define readb_relaxed(a) readb(a)
#define readw_relaxed(a) readw(a)
#define readl_relaxed(a) readl(a)
#define readq_relaxed(a) readq(a)
#define __raw_readb readb
#define __raw_readw readw
#define __raw_readl readl
#define __raw_readq readq
#define mmiowb()
static inline void __writel(__u32 b, volatile void __iomem *addr)
{
*(__force volatile __u32 *)addr = b;
}
static inline void __writeq(__u64 b, volatile void __iomem *addr)
{
*(__force volatile __u64 *)addr = b;
}
static inline void __writeb(__u8 b, volatile void __iomem *addr)
{
*(__force volatile __u8 *)addr = b;
}
static inline void __writew(__u16 b, volatile void __iomem *addr)
{
*(__force volatile __u16 *)addr = b;
}
#define writeq(val, addr) __writeq((val), (addr))
#define writel(val, addr) __writel((val), (addr))
#define writew(val, addr) __writew((val), (addr))
#define writeb(val, addr) __writeb((val), (addr))
#define __raw_writeb writeb
#define __raw_writew writew
#define __raw_writel writel
#define __raw_writeq writeq
void __memcpy_fromio(void *, unsigned long, unsigned);
void __memcpy_toio(unsigned long, const void *, unsigned);
......
@@ -41,6 +41,7 @@ static inline void ndelay(unsigned long x)
#define ndelay(x) ndelay(x)
#endif
extern unsigned long lpj_fine;
void calibrate_delay(void);
void msleep(unsigned int msecs);
unsigned long msleep_interruptible(unsigned int msecs);
......
@@ -8,7 +8,9 @@
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/timex.h>
#include <linux/smp.h>
unsigned long lpj_fine;
unsigned long preset_lpj;
static int __init lpj_setup(char *str)
{
@@ -33,9 +35,9 @@ static unsigned long __cpuinit calibrate_delay_direct(void)
unsigned long pre_start, start, post_start;
unsigned long pre_end, end, post_end;
unsigned long start_jiffies;
unsigned long tsc_rate_min, tsc_rate_max;
unsigned long good_tsc_sum = 0;
unsigned long good_tsc_count = 0;
unsigned long timer_rate_min, timer_rate_max;
unsigned long good_timer_sum = 0;
unsigned long good_timer_count = 0;
int i;
if (read_current_timer(&pre_start) < 0 )
@@ -79,22 +81,24 @@
}
read_current_timer(&post_end);
tsc_rate_max = (post_end - pre_start) / DELAY_CALIBRATION_TICKS;
tsc_rate_min = (pre_end - post_start) / DELAY_CALIBRATION_TICKS;
timer_rate_max = (post_end - pre_start) /
DELAY_CALIBRATION_TICKS;
timer_rate_min = (pre_end - post_start) /
DELAY_CALIBRATION_TICKS;
/*
* If the upper limit and lower limit of the tsc_rate is
* If the upper limit and lower limit of the timer_rate is
* >= 12.5% apart, redo calibration.
*/
if (pre_start != 0 && pre_end != 0 &&
(tsc_rate_max - tsc_rate_min) < (tsc_rate_max >> 3)) {
good_tsc_count++;
good_tsc_sum += tsc_rate_max;
(timer_rate_max - timer_rate_min) < (timer_rate_max >> 3)) {
good_timer_count++;
good_timer_sum += timer_rate_max;
}
}
if (good_tsc_count)
return (good_tsc_sum/good_tsc_count);
if (good_timer_count)
return (good_timer_sum/good_timer_count);
printk(KERN_WARNING "calibrate_delay_direct() failed to get a good "
"estimate for loops_per_jiffy.\nProbably due to long platform interrupts. Consider using \"lpj=\" boot option.\n");
@@ -108,6 +112,10 @@ static unsigned long __cpuinit calibrate_delay_direct(void) {return 0;}
* This is the number of bits of precision for the loops_per_jiffy. Each
* bit takes on average 1.5/HZ seconds. This (like the original) is a little
* better than 1%
* For the boot cpu we can skip the delay calibration and assign it a value
* calculated based on the timer frequency.
For the rest of the CPUs we cannot assume that the timer frequency is the same
as the cpu frequency, hence we do the calibration for those.
*/
#define LPS_PREC 8
@@ -118,20 +126,20 @@ void __cpuinit calibrate_delay(void)
if (preset_lpj) {
loops_per_jiffy = preset_lpj;
printk("Calibrating delay loop (skipped)... "
"%lu.%02lu BogoMIPS preset\n",
loops_per_jiffy/(500000/HZ),
(loops_per_jiffy/(5000/HZ)) % 100);
printk(KERN_INFO
"Calibrating delay loop (skipped) preset value.. ");
} else if ((smp_processor_id() == 0) && lpj_fine) {
loops_per_jiffy = lpj_fine;
printk(KERN_INFO
"Calibrating delay loop (skipped), "
"value calculated using timer frequency.. ");
} else if ((loops_per_jiffy = calibrate_delay_direct()) != 0) {
printk("Calibrating delay using timer specific routine.. ");
printk("%lu.%02lu BogoMIPS (lpj=%lu)\n",
loops_per_jiffy/(500000/HZ),
(loops_per_jiffy/(5000/HZ)) % 100,
loops_per_jiffy);
printk(KERN_INFO
"Calibrating delay using timer specific routine.. ");
} else {
loops_per_jiffy = (1<<12);
printk(KERN_DEBUG "Calibrating delay loop... ");
printk(KERN_INFO "Calibrating delay loop... ");
while ((loops_per_jiffy <<= 1) != 0) {
/* wait for "start of" clock tick */
ticks = jiffies;
@@ -161,12 +169,8 @@ void __cpuinit calibrate_delay(void)
if (jiffies != ticks) /* longer than 1 tick */
loops_per_jiffy &= ~loopbit;
}
/* Round the value and print it */
printk("%lu.%02lu BogoMIPS (lpj=%lu)\n",
loops_per_jiffy/(500000/HZ),
(loops_per_jiffy/(5000/HZ)) % 100,
loops_per_jiffy);
}
printk(KERN_INFO "%lu.%02lu BogoMIPS (lpj=%lu)\n",
loops_per_jiffy/(500000/HZ),
(loops_per_jiffy/(5000/HZ)) % 100, loops_per_jiffy);
}
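The BogoMIPS figures printed above derive directly from loops_per_jiffy: BogoMIPS = lpj * HZ / 500000. A worked example with assumed values, lpj = 8000000 and HZ = 250:

	8000000 / (500000 / 250) == 4000;	/* integer part */
	(8000000 / (5000 / 250)) % 100 == 0;	/* fractional part: "4000.00 BogoMIPS" */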