Commit 3ef076bb authored by Andi Kleen, committed by Linus Torvalds

[PATCH] x86-64 merge

This brings the x86-64 port up to date.  Only architecture-specific
changes.

The biggest change is the forward port of the 2.4 timing code with full
HPET support.  This should improve timing stability on some Opteron
boxes considerably.
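
For reference, the new gettimeofday fast path interpolates between timer
ticks using a 32.32 fixed-point TSC quotient (see the do_vgettimeofday()
hunk below).  A minimal C sketch of that arithmetic, using hypothetical
names in place of the kernel's struct vxtime_data:

    #include <stdint.h>

    /* Hypothetical names; the kernel keeps this state in struct vxtime_data. */
    struct vxtime_sketch {
        uint64_t last_tsc;  /* TSC value latched at the last timer tick */
        uint64_t tsc_quot;  /* microseconds per TSC tick, as 32.32 fixed point */
    };

    /* base_usec is the wall-clock microsecond value that was valid at
       last_tsc; now_tsc is a fresh TSC read. */
    static uint64_t interpolate_usec(const struct vxtime_sketch *v,
                                     uint64_t now_tsc, uint64_t base_usec)
    {
        /* (elapsed ticks * quot) >> 32 drops the fractional bits,
           mirroring the ">> 32" in the do_vgettimeofday() hunk below. */
        return base_usec + (((now_tsc - v->last_tsc) * v->tsc_quot) >> 32);
    }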

Also add the optimized low-level functions from 2.4 (clear_page,
copy_page, memcpy, csum_copy etc.).  They were supposed to be merged
earlier, but got dropped due to some SNAFU.  The clear_page changes in
particular should improve performance considerably: the old version
used write-combining stores, which pushed all of the new process's data
out of cache.  The new version leaves the page cache-hot.
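
To make the cache effect concrete, here is an illustrative user-space
sketch (not the kernel code; the function names are hypothetical, and it
assumes x86-64 with SSE2): non-temporal stores bypass the cache, so a
page cleared that way is cold when the new process first touches it,
while plain stores leave it cache-hot.

    #include <emmintrin.h>  /* _mm_stream_si64 (movnti), _mm_sfence */
    #include <stddef.h>

    #define PAGE_BYTES 4096

    /* Old-style clear: non-temporal stores bypass the cache. */
    static void clear_page_nt(long long *page)
    {
        for (size_t i = 0; i < PAGE_BYTES / sizeof(long long); i++)
            _mm_stream_si64(&page[i], 0);
        _mm_sfence();  /* order the weakly-ordered NT stores */
    }

    /* New-style clear: plain stores allocate the lines in cache. */
    static void clear_page_cached(long long *page)
    {
        for (size_t i = 0; i < PAGE_BYTES / sizeof(long long); i++)
            page[i] = 0;
    }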

Also some other bugfixes.

Full changelog:
- Re-add some lost patches: improved copy_page, clear_page, memset, memcpy,
  csum_copy from 2.4.
- New timing code from 2.4 (Bryan O'Sullivan, John Stultz, Vojtech Pavlik)
- Use correct MSR to write northbridge MCE configuration
- Fix and re-enable the Simics check in APIC timer calibration
- Check whether the BIOS enabled the APIC, and don't use APIC mode if not.
- Remove some obsolete code in APIC handling.
- Fix potential races in the IOMMU code.
- Don't print backtrace twice on oops.
- Fix compilation of swsuspend (Pavel Machek)
- Add oops locking to kernel page faults.
- Use prefetcht0 for C level kernel prefetches.
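
Several of the checksum changes below hinge on add32_with_carry(), a
32-bit add that folds the carry flag back into the sum, added to the
checksum header.  A portable C sketch of the same end-around-carry
folding (names here are illustrative):

    #include <stdint.h>

    /* Portable sketch of the asm helper ("addl; adcl $0"): add two
       32-bit partial sums and fold the carry back in, as the ones'-
       complement Internet checksum requires. */
    static inline uint32_t add32_with_carry_c(uint32_t a, uint32_t b)
    {
        uint64_t sum = (uint64_t)a + b;
        return (uint32_t)(sum + (sum >> 32));  /* end-around carry */
    }

    /* Folding a 64-bit running sum to 32 bits, as the new csum_partial()
       does with add32_with_carry(result >> 32, result & 0xffffffff). */
    static inline uint32_t fold64(uint64_t sum)
    {
        return add32_with_carry_c((uint32_t)(sum >> 32), (uint32_t)sum);
    }
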
parent ccfd4cc5
@@ -52,6 +52,18 @@ config EARLY_PRINTK
 	  klogd/syslogd or the X server. You should normally N here, unless
 	  you want to debug such a crash.
 
+config HPET_TIMER
+	bool
+	default y
+	help
+	  Use the IA-PC HPET (High Precision Event Timer) to manage
+	  time in preference to the PIT and RTC, if a HPET is
+	  present.  The HPET provides a stable time base on SMP
+	  systems, unlike the RTC, but it is more expensive to access,
+	  as it is off-chip.  You can find the HPET spec at
+	  <http://www.intel.com/labs/platcomp/hpet/hpetspec.htm>.
+
+	  If unsure, say Y.
+
 config GENERIC_ISA_DMA
 	bool
......
@@ -47,6 +47,7 @@
 ENTRY(ia32_cstar_target)
 	swapgs
 	movl	%esp,%r8d
+	movq	%r8,%gs:pda_oldrsp
 	movq	%gs:pda_kernelstack,%rsp
 	sti
 	SAVE_ARGS 8,1
......
@@ -244,9 +244,11 @@ acpi_parse_hpet (
 		return -1;
 	}
 
-	hpet.address = hpet_tbl->addr.addrl | ((long) hpet_tbl->addr.addrh << 32);
-	printk(KERN_INFO "acpi: HPET id: %#x base: %#lx\n", hpet_tbl->id, hpet.address);
+	vxtime.hpet_address = hpet_tbl->addr.addrl |
+		((long) hpet_tbl->addr.addrh << 32);
+	printk(KERN_INFO "acpi: HPET id: %#x base: %#lx\n",
+		hpet_tbl->id, vxtime.hpet_address);
 
 	return 0;
 }
......
@@ -690,7 +690,13 @@ static void setup_APIC_timer(unsigned int clocks)
 	}
 
 	/* wait for irq slice */
-	{
+	if (vxtime.hpet_address) {
+		int trigger = hpet_readl(HPET_T0_CMP);
+		while (hpet_readl(HPET_COUNTER) >= trigger)
+			/* do nothing */ ;
+		while (hpet_readl(HPET_COUNTER) < trigger)
+			/* do nothing */ ;
+	} else {
 		int c1, c2;
 		outb_p(0x00, 0x43);
 		c2 = inb_p(0x40);
......
@@ -363,22 +363,12 @@ static void __init k8_mcheck_init(struct cpuinfo_x86 *c)
 	machine_check_vector = k8_machine_check;
 	for (i = 0; i < banks; i++) {
 		u64 val = ((1UL<<i) & disabled_banks) ? 0 : ~0UL;
+		if (val && i == 4)
+			val = k8_nb_flags;
 		wrmsrl(MSR_IA32_MC0_CTL+4*i, val);
 		wrmsrl(MSR_IA32_MC0_STATUS+4*i,0);
 	}
-	nb = find_k8_nb();
-	if (nb != NULL) {
-		u32 reg, reg2;
-		pci_read_config_dword(nb, 0x40, &reg);
-		pci_write_config_dword(nb, 0x40, k8_nb_flags);
-		pci_read_config_dword(nb, 0x44, &reg2);
-		pci_write_config_dword(nb, 0x44, reg2);
-		printk(KERN_INFO "Machine Check for K8 Northbridge %d enabled (%x,%x)\n",
-		       nb->devfn, reg, reg2);
-		ignored_banks |= (1UL<<4);
-	}
 	set_in_cr4(X86_CR4_MCE);
 
 	if (mcheck_interval && (smp_processor_id() == 0)) {
......
@@ -173,12 +173,10 @@ void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
 	if (iommu_page == -1)
 		goto error;
 
-	/* Fill in the GATT, allocating pages as needed. */
+	/* Fill in the GATT */
 	for (i = 0; i < size; i++) {
 		unsigned long phys_mem;
 		void *mem = memory + i*PAGE_SIZE;
-		if (i > 0)
-			atomic_inc(&virt_to_page(mem)->count);
 		phys_mem = virt_to_phys(mem);
 		BUG_ON(phys_mem & ~PHYSICAL_PAGE_MASK);
 		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
@@ -206,16 +204,14 @@ void pci_free_consistent(struct pci_dev *hwdev, size_t size,
 	size = round_up(size, PAGE_SIZE);
 	if (bus >= iommu_bus_base && bus <= iommu_bus_base + iommu_size) {
 		unsigned pages = size >> PAGE_SHIFT;
+		int i;
 		iommu_page = (bus - iommu_bus_base) >> PAGE_SHIFT;
 		vaddr = __va(GPTE_DECODE(iommu_gatt_base[iommu_page]));
-#ifdef CONFIG_IOMMU_DEBUG
-		int i;
 		for (i = 0; i < pages; i++) {
 			u64 pte = iommu_gatt_base[iommu_page + i];
 			BUG_ON((pte & GPTE_VALID) == 0);
 			iommu_gatt_base[iommu_page + i] = 0;
 		}
-#endif
 		free_iommu(iommu_page, pages);
 	}
 	free_pages((unsigned long)vaddr, get_order(size));
@@ -319,11 +315,6 @@ dma_addr_t pci_map_single(struct pci_dev *dev, void *addr, size_t size, int dir)
 	 */
 	iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
-#ifdef CONFIG_IOMMU_DEBUG
-	/* paranoia check */
-	BUG_ON(GPTE_DECODE(iommu_gatt_base[iommu_page+i]) != phys_mem);
-#endif
 #ifdef CONFIG_IOMMU_LEAK
 	/* XXX need eventually caller of pci_map_sg */
 	if (iommu_leak_tab)
@@ -350,7 +341,6 @@ void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
 		return;
 	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
 	npages = round_up(size + (dma_addr & ~PAGE_MASK), PAGE_SIZE) >> PAGE_SHIFT;
-#ifdef CONFIG_IOMMU_DEBUG
 	int i;
 	for (i = 0; i < npages; i++) {
 		iommu_gatt_base[iommu_page + i] = 0;
@@ -359,7 +349,6 @@ void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
 		iommu_leak_tab[iommu_page + i] = 0;
 #endif
 	}
-#endif
 	free_iommu(iommu_page, npages);
 }
......
@@ -150,7 +150,7 @@ __setup("idle=", idle_setup);
 
 /* Prints also some state that isn't saved in the pt_regs */
-void show_regs(struct pt_regs * regs)
+void __show_regs(struct pt_regs * regs)
 {
 	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
 	unsigned int fsindex,gsindex;
@@ -192,7 +192,11 @@ void show_regs(struct pt_regs * regs)
 	       fs,fsindex,gs,gsindex,shadowgs);
 	printk("CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0);
 	printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4);
+}
+
+void show_regs(struct pt_regs *regs)
+{
+	__show_regs(regs);
 	show_trace(&regs->rsp);
 }
......
@@ -67,6 +67,8 @@ struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
 /* Set when the idlers are all forked */
 int smp_threads_ready;
 
+extern void time_init_smp(void);
+
 /*
  * Trampoline 80x86 program as an array.
  */
@@ -760,7 +762,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
 		if (APIC_init_uniprocessor())
 			printk(KERN_NOTICE "Local APIC not detected."
 			       " Using dummy APIC emulation.\n");
-		return;
+		goto smp_done;
 	}
 
 	/*
@@ -784,7 +786,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
 		cpu_online_map = phys_cpu_present_map = 1;
 		phys_cpu_present_map = 1;
 		disable_apic = 1;
-		return;
+		goto smp_done;
 	}
 
 	verify_local_APIC();
@@ -799,7 +801,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
 		cpu_online_map = phys_cpu_present_map = 1;
 		phys_cpu_present_map = 1;
 		disable_apic = 1;
-		return;
+		goto smp_done;
 	}
 
 	connect_bsp_APIC();
@@ -883,6 +885,9 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
 	 */
 	if (cpu_has_tsc && cpucount)
 		synchronize_tsc_bp();
+
+ smp_done:
+	time_init_smp();
 }
 
 /* These are wrappers to interface to the new boot process. Someone
......
This diff is collapsed.
@@ -263,7 +263,7 @@ void show_registers(struct pt_regs *regs)
 	rsp = regs->rsp;
 	printk("CPU %d ", cpu);
-	show_regs(regs);
+	__show_regs(regs);
 	printk("Process %s (pid: %d, stackpage=%08lx)\n",
 		cur->comm, cur->pid, 4096+(unsigned long)cur);
......
@@ -78,13 +78,21 @@ static force_inline void do_vgettimeofday(struct timeval * tv)
 	do {
 		sequence = read_seqbegin(&__xtime_lock);
 
-		sync_core();
-		rdtscll(t);
 		sec = __xtime.tv_sec;
 		usec = (__xtime.tv_nsec / 1000) +
-			(__jiffies - __wall_jiffies) * (1000000 / HZ) +
-			(t - __hpet.last_tsc) * (1000000 / HZ) / __hpet.ticks + __hpet.offset;
+			(__jiffies - __wall_jiffies) * (1000000 / HZ);
+
+		if (__vxtime.mode == VXTIME_TSC) {
+			sync_core();
+			rdtscll(t);
+			usec += ((t - __vxtime.last_tsc) *
+				 __vxtime.tsc_quot) >> 32;
+		} else {
+#if 0
+			usec += ((readl(fix_to_virt(VSYSCALL_HPET) + 0xf0) -
+				  __vxtime.last) * __vxtime.quot) >> 32;
+#endif
+		}
 	} while (read_seqretry(&__xtime_lock, sequence));
 
 	tv->tv_sec = sec + usec / 1000000;
......
-/*
- * Copyright 2002 Andi Kleen, SuSE Labs.
- */
-#include <linux/linkage.h>
 /*
  * Zero a page.
  * rdi	page
  */
-ENTRY(clear_page)
+	.globl clear_page
+	.p2align 4
+clear_page:
 	xorl	%eax,%eax
-	movl	$4096/128,%ecx
-	movl	$128,%edx
-loop:
+	movl	$4096/64,%ecx
+	.p2align 4
+.Lloop:
+	decl	%ecx
 #define PUT(x) movq %rax,x*8(%rdi)
-	PUT(0)
+	movq	%rax,(%rdi)
 	PUT(1)
 	PUT(2)
 	PUT(3)
@@ -21,17 +20,8 @@ loop:
 	PUT(5)
 	PUT(6)
 	PUT(7)
-	PUT(8)
-	PUT(9)
-	PUT(10)
-	PUT(11)
-	PUT(12)
-	PUT(13)
-	PUT(14)
-	PUT(15)
-	addq	%rdx,%rdi
-	decl	%ecx
-	jnz	loop
-	sfence
+	leaq	64(%rdi),%rdi
+	jnz	.Lloop
+	nop
 	ret
-/*
- * Copyright 2002 Andi Kleen, SuSE Labs.
- */
-#include <linux/linkage.h>
-#include <linux/config.h>
-#ifdef CONFIG_PREEMPT
-#warning "check your fpu context saving!"
-#endif
-
-/*
- * Copy a page.
- *
- * rdi	destination page
- * rsi	source page
- *
- * src/dst must be aligned to 16 bytes.
- *
- * Warning: in case of super lazy FP save this needs to be preempt_stop
- */
+/* Written 2003 by Andi Kleen, based on a kernel by Evandro Menezes */
+
+/* Don't use streaming store because it's better when the target
+   ends up in cache. */
+
+/* Could vary the prefetch distance based on SMP/UP */
+
 	.globl copy_page
-	.p2align
+	.p2align 4
 copy_page:
-	prefetchnta (%rsi)
-	prefetchnta 64(%rsi)
-
-	movq	%rsp,%rax
-	subq	$16*4,%rsp
-	andq	$~15,%rsp
-	movdqa	%xmm0,(%rsp)
-	movdqa	%xmm1,16(%rsp)
-	movdqa	%xmm2,32(%rsp)
-	movdqa	%xmm3,48(%rsp)
-
-	movl	$(4096/128)-2,%ecx
-	movl	$128,%edx
-loop:
-	prefetchnta (%rsi)
-	prefetchnta 64(%rsi)
-loop_no_prefetch:
-	movdqa	(%rsi),%xmm0
-	movdqa	16(%rsi),%xmm1
-	movdqa	32(%rsi),%xmm2
-	movdqa	48(%rsi),%xmm3
-	movntdq	%xmm0,(%rdi)
-	movntdq	%xmm1,16(%rdi)
-	movntdq	%xmm2,32(%rdi)
-	movntdq	%xmm3,48(%rdi)
-
-	movdqa	64(%rsi),%xmm0
-	movdqa	80(%rsi),%xmm1
-	movdqa	96(%rsi),%xmm2
-	movdqa	112(%rsi),%xmm3
-	movntdq	%xmm0,64(%rdi)
-	movntdq	%xmm1,80(%rdi)
-	movntdq	%xmm2,96(%rdi)
-	movntdq	%xmm3,112(%rdi)
-
-	addq	%rdx,%rdi
-	addq	%rdx,%rsi
+	prefetch (%rsi)
+	prefetch 1*64(%rsi)
+	prefetch 2*64(%rsi)
+	prefetch 3*64(%rsi)
+	prefetch 4*64(%rsi)
+	prefetchw (%rdi)
+	prefetchw 1*64(%rdi)
+	prefetchw 2*64(%rdi)
+	prefetchw 3*64(%rdi)
+	prefetchw 4*64(%rdi)
+
+	subq	$3*8,%rsp
+	movq	%rbx,(%rsp)
+	movq	%r12,1*8(%rsp)
+	movq	%r13,2*8(%rsp)
+
+	movl	$(4096/64)-5,%ecx
+	.p2align 4
+.Loop64:
+	dec	%rcx
+
+	movq	(%rsi), %rax
+	movq	8 (%rsi), %rbx
+	movq	16 (%rsi), %rdx
+	movq	24 (%rsi), %r8
+	movq	32 (%rsi), %r9
+	movq	40 (%rsi), %r10
+	movq	48 (%rsi), %r11
+	movq	56 (%rsi), %r12
+
+	prefetch 5*64(%rsi)
+
+	movq	%rax, (%rdi)
+	movq	%rbx, 8 (%rdi)
+	movq	%rdx, 16 (%rdi)
+	movq	%r8, 24 (%rdi)
+	movq	%r9, 32 (%rdi)
+	movq	%r10, 40 (%rdi)
+	movq	%r11, 48 (%rdi)
+	movq	%r12, 56 (%rdi)
+
+	prefetchw 5*64(%rdi)
+
+	leaq	64 (%rsi), %rsi
+	leaq	64 (%rdi), %rdi
+
+	jnz	.Loop64
+
+	movl	$5,%ecx
+	.p2align 4
+.Loop2:
 	decl	%ecx
-	jns	loop
-
-	cmpl	$-1,%ecx
-	je	loop_no_prefetch
-
-	sfence
-
-	movdqa	(%rsp),%xmm0
-	movdqa	16(%rsp),%xmm1
-	movdqa	32(%rsp),%xmm2
-	movdqa	48(%rsp),%xmm3
-	movq	%rax,%rsp
+
+	movq	(%rsi), %rax
+	movq	8 (%rsi), %rbx
+	movq	16 (%rsi), %rdx
+	movq	24 (%rsi), %r8
+	movq	32 (%rsi), %r9
+	movq	40 (%rsi), %r10
+	movq	48 (%rsi), %r11
+	movq	56 (%rsi), %r12
+
+	movq	%rax, (%rdi)
+	movq	%rbx, 8 (%rdi)
+	movq	%rdx, 16 (%rdi)
+	movq	%r8, 24 (%rdi)
+	movq	%r9, 32 (%rdi)
+	movq	%r10, 40 (%rdi)
+	movq	%r11, 48 (%rdi)
+	movq	%r12, 56 (%rdi)
+
+	leaq	64(%rdi),%rdi
+	leaq	64(%rsi),%rsi
+
+	jnz	.Loop2
+
+	movq	(%rsp),%rbx
+	movq	1*8(%rsp),%r12
+	movq	2*8(%rsp),%r13
+	addq	$3*8,%rsp
 	ret
 /*
- * Copyright 2002 Andi Kleen
+ * Copyright 2002,2003 Andi Kleen, SuSE Labs.
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file COPYING in the main directory of this archive
@@ -8,7 +8,6 @@
 #include <linux/linkage.h>
 #include <asm/errno.h>
 
-// #define FIX_ALIGNMENT 1
 /*
  * Checksum copy with exception handling.
  * On exceptions src_err_ptr or dst_err_ptr is set to -EFAULT and the
@@ -26,17 +25,14 @@
  * eax  64bit sum. undefined in case of exception.
  *
  * Wrappers need to take care of valid exception sum and zeroing.
+ * They also should align source or destination to 8 bytes.
  */
 
-/* for now - should vary this based on direction */
-#define prefetch prefetcht2
-#define movnti movq
-
 	.macro source
 10:
 	.section __ex_table,"a"
 	.align 8
-	.quad 10b,bad_source
+	.quad 10b,.Lbad_source
 	.previous
 	.endm
@@ -44,57 +40,74 @@
 20:
 	.section __ex_table,"a"
 	.align 8
-	.quad 20b,bad_dest
+	.quad 20b,.Lbad_dest
 	.previous
 	.endm
 
+	.macro ignore L=.Lignore
+30:
+	.section __ex_table,"a"
+	.align 8
+	.quad 30b,\L
+	.previous
+	.endm
+
 	.globl csum_partial_copy_generic
-	.p2align
+	.p2align 4
 csum_partial_copy_generic:
-	prefetchnta (%rdi)
-
-	pushq	%rbx
-	pushq	%r12
-	pushq	%r14
-	pushq	%r15
-	movq	%r8,%r14
-	movq	%r9,%r15
+	cmpl	$3*64,%edx
+	jle	.Lignore
+
+	ignore
+	prefetch (%rdi)
+	ignore
+	prefetch 1*64(%rdi)
+	ignore
+	prefetch 2*64(%rdi)
+	ignore
+	prefetch 3*64(%rdi)
+	ignore
+	prefetch 4*64(%rdi)
+	ignore
+	prefetchw (%rsi)
+	ignore
+	prefetchw 1*64(%rsi)
+	ignore
+	prefetchw 2*64(%rsi)
+	ignore
+	prefetchw 3*64(%rsi)
+	ignore
+	prefetchw 4*64(%rsi)
+
+.Lignore:
+	subq	$7*8,%rsp
+	movq	%rbx,2*8(%rsp)
+	movq	%r12,3*8(%rsp)
+	movq	%r14,4*8(%rsp)
+	movq	%r13,5*8(%rsp)
+	movq	%rbp,6*8(%rsp)
+
+	movq	%r8,(%rsp)
+	movq	%r9,1*8(%rsp)
+
 	movl	%ecx,%eax
 	movl	%edx,%ecx
 
-#ifdef FIX_ALIGNMENT
-	/* align source to 8 bytes */
-	movl	%edi,%r8d
-	andl	$7,%r8d
-	jnz	bad_alignment
-after_bad_alignment:
-#endif
-
-	movl	$64,%r10d
 	xorl	%r9d,%r9d
 	movq	%rcx,%r12
+
 	shrq	$6,%r12
-
-	/* loopcounter is maintained as one less to test efficiently for the
-	   previous to last iteration. This is needed to stop the prefetching. */
-	decq	%r12
-	js	handle_tail		/* < 64 */
-	jz	loop_no_prefetch	/* = 64 + X */
+	jz	.Lhandle_tail	/* < 64 */
+
+	clc
 
 	/* main loop. clear in 64 byte blocks */
-	/* tries hard not to prefetch over the boundary */
-	/* r10: 64, r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
+	/* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
 	/* r11: temp3, rdx: temp4, r12 loopcnt */
-	.p2align
-loop:
-	/* Could prefetch more than one loop, but then it would be even
-	   trickier to avoid prefetching over the boundary. The hardware prefetch
-	   should take care of this anyways. The reason for this prefetch is
-	   just the non temporal hint to avoid cache pollution. Hopefully this
-	   will be handled properly by the hardware. */
-	prefetchnta 64(%rdi)
-loop_no_prefetch:
+	/* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
+	.p2align 4
+.Lloop:
 	source
 	movq	(%rdi),%rbx
 	source
@@ -104,175 +117,136 @@ loop_no_prefetch:
 	source
 	movq	24(%rdi),%rdx
 
-	dest
-	movnti	%rbx,(%rsi)
-	dest
-	movnti	%r8,8(%rsi)
-	dest
-	movnti	%r11,16(%rsi)
-	dest
-	movnti	%rdx,24(%rsi)
-
-	addq	%rbx,%rax
+	source
+	movq	32(%rdi),%r10
+	source
+	movq	40(%rdi),%rbp
+	source
+	movq	48(%rdi),%r14
+	source
+	movq	56(%rdi),%r13
+
+	ignore 2f
+	prefetch 5*64(%rdi)
+2:
+	adcq	%rbx,%rax
 	adcq	%r8,%rax
 	adcq	%r11,%rax
 	adcq	%rdx,%rax
+	adcq	%r10,%rax
+	adcq	%rbp,%rax
+	adcq	%r14,%rax
+	adcq	%r13,%rax
 
-	source
-	movq	32(%rdi),%rbx
-	source
-	movq	40(%rdi),%r8
-	source
-	movq	48(%rdi),%r11
-	source
-	movq	56(%rdi),%rdx
+	decl	%r12d
 
 	dest
-	movnti	%rbx,32(%rsi)
+	movq	%rbx,(%rsi)
 	dest
-	movnti	%r8,40(%rsi)
+	movq	%r8,8(%rsi)
 	dest
-	movnti	%r11,48(%rsi)
+	movq	%r11,16(%rsi)
 	dest
-	movnti	%rdx,56(%rsi)
-
-	adcq	%rbx,%rax
-	adcq	%r8,%rax
-	adcq	%r11,%rax
-	adcq	%rdx,%rax
-
-	adcq	%r9,%rax	/* add in carry */
-
-	addq	%r10,%rdi
-	addq	%r10,%rsi
-
-	decq	%r12
-	jz	loop_no_prefetch	/* previous to last iteration? */
-	jns	loop
+	movq	%rdx,24(%rsi)
+	dest
+	movq	%r10,32(%rsi)
+	dest
+	movq	%rbp,40(%rsi)
+	dest
+	movq	%r14,48(%rsi)
+	dest
+	movq	%r13,56(%rsi)
+
+	ignore 3f
+	prefetchw 5*64(%rsi)
+3:
+
+	leaq	64(%rdi),%rdi
+	leaq	64(%rsi),%rsi
+
+	jnz	.Lloop
+
+	adcq	%r9,%rax
 
 	/* do last upto 56 bytes */
-handle_tail:
+.Lhandle_tail:
 	/* ecx: count */
 	movl	%ecx,%r10d
 	andl	$63,%ecx
 	shrl	$3,%ecx
-	jz	fold
+	jz	.Lfold
 	clc
-	movl	$8,%edx
-loop_8:
+	.p2align 4
+.Lloop_8:
 	source
 	movq	(%rdi),%rbx
 	adcq	%rbx,%rax
-	dest
-	movnti	%rbx,(%rsi)
-	leaq	(%rsi,%rdx),%rsi /* preserve carry */
-	leaq	(%rdi,%rdx),%rdi
 	decl	%ecx
-	jnz	loop_8
+	dest
+	movq	%rbx,(%rsi)
+	leaq	8(%rsi),%rsi /* preserve carry */
+	leaq	8(%rdi),%rdi
+	jnz	.Lloop_8
 	adcq	%r9,%rax	/* add in carry */
 
-fold:
+.Lfold:
+	/* reduce checksum to 32bits */
 	movl	%eax,%ebx
 	shrq	$32,%rax
-	addq	%rbx,%rax
+	addl	%ebx,%eax
+	adcl	%r9d,%eax
 
 	/* do last upto 6 bytes */
-handle_7:
+.Lhandle_7:
 	movl	%r10d,%ecx
 	andl	$7,%ecx
 	shrl	$1,%ecx
-	jz	handle_1
+	jz	.Lhandle_1
 	movl	$2,%edx
 	xorl	%ebx,%ebx
 	clc
-loop_1:
+	.p2align 4
+.Lloop_1:
 	source
 	movw	(%rdi),%bx
-	adcq	%rbx,%rax
+	adcl	%ebx,%eax
 	dest
-	movw	%bx,(%rsi)
-	addq	%rdx,%rdi
-	addq	%rdx,%rsi
 	decl	%ecx
-	jnz	loop_1
-	adcw	%r9w,%ax	/* add in carry */
+	movw	%bx,(%rsi)
+	leaq	2(%rdi),%rdi
+	leaq	2(%rsi),%rsi
+	jnz	.Lloop_1
+	adcl	%r9d,%eax	/* add in carry */
 
 	/* handle last odd byte */
-handle_1:
+.Lhandle_1:
 	testl	$1,%r10d
-	jz	ende
+	jz	.Lende
 	xorl	%ebx,%ebx
 	source
 	movb	(%rdi),%bl
 	dest
 	movb	%bl,(%rsi)
-	addw	%bx,%ax
-	adcw	%r9w,%ax	/* carry */
+	addl	%ebx,%eax
+	adcl	%r9d,%eax	/* carry */
 
-ende:
-	sfence
-	popq	%r15
-	popq	%r14
-	popq	%r12
-	popq	%rbx
+.Lende:
+	movq	2*8(%rsp),%rbx
+	movq	3*8(%rsp),%r12
+	movq	4*8(%rsp),%r14
+	movq	5*8(%rsp),%r13
+	movq	6*8(%rsp),%rbp
+	addq	$7*8,%rsp
 	ret
 
-#ifdef FIX_ALIGNMENT
-	/* align source to 8 bytes. */
-	/* r8d: unalignedness, ecx len */
-bad_alignment:
-	testl	$1,%edi
-	jnz	odd_source
-
-	/* compute distance to next aligned position */
-	movl	$8,%r8d
-	xchgl	%r8d,%ecx
-	subl	%r8d,%ecx
-
-	/* handle unaligned part */
-	shrl	$1,%ecx
-	xorl	%ebx,%ebx
-	movl	$2,%r10d
-align_loop:
-	source
-	movw	(%rdi),%bx
-	addq	%rbx,%rax	/* carry cannot happen */
-	dest
-	movw	%bx,(%rsi)
-	addq	%r10,%rdi
-	addq	%r10,%rsi
-	decl	%ecx
-	jnz	align_loop
-	jmp	after_bad_alignment
-
-	/* weird case. need to swap the sum at the end because the spec requires
-	   16 bit words of the sum to be always paired.
-	   handle it recursively because it should be rather rare. */
-odd_source:
-	/* copy odd byte */
-	xorl	%ebx,%ebx
-	source
-	movb	(%rdi),%bl
-	addl	%ebx,%eax	/* add to old checksum */
-	adcl	$0,%ecx
-	dest
-	movb	%al,(%rsi)
-
-	/* fix arguments */
-	movl	%eax,%ecx
-	incq	%rsi
-	incq	%rdi
-	decq	%rdx
-	call	csum_partial_copy_generic
-	bswap	%eax		/* this should work, but check */
-	jmp	ende
-#endif
-
 	/* Exception handlers. Very simple, zeroing is done in the wrappers */
-bad_source:
-	movl	$-EFAULT,(%r14)
-	jmp	ende
-
-bad_dest:
-	movl	$-EFAULT,(%r15)
-	jmp	ende
+.Lbad_source:
+	movq	(%rsp),%rax
+	movl	$-EFAULT,(%rax)
+	jmp	.Lende
+
+.Lbad_dest:
+	movq	8(%rsp),%rax
+	movl	$-EFAULT,(%rax)
+	jmp	.Lende
@@ -7,35 +7,39 @@
 #include <linux/compiler.h>
 #include <linux/module.h>
+#include <asm/checksum.h>
 
-/* Better way for this sought */
-static inline unsigned short from64to16(unsigned long x)
+#define __force_inline inline __attribute__((always_inline))
+
+static inline unsigned short from32to16(unsigned a)
 {
-	/* add up 32-bit words for 33 bits */
-	x = (x & 0xffffffff) + (x >> 32);
-	/* add up 16-bit and 17-bit words for 17+c bits */
-	x = (x & 0xffff) + (x >> 16);
-	/* add up 16-bit and 2-bit for 16+c bit */
-	x = (x & 0xffff) + (x >> 16);
-	/* add up carry.. */
-	x = (x & 0xffff) + (x >> 16);
-	return x;
+	unsigned short b = a >> 16;
+	asm("addw %w2,%w0\n\t"
+	    "adcw $0,%w0\n"
+	    : "=r" (b)
+	    : "0" (b), "r" (a));
+	return b;
 }
 
 /*
  * Do a 64-bit checksum on an arbitrary memory area.
  * Returns a 32bit checksum.
  *
- * This isn't a great routine, but it's not _horrible_ either.
- * We rely on the compiler to unroll.
+ * This isn't as time critical as it used to be because many NICs
+ * do hardware checksumming these days.
+ *
+ * Things tried and found to not make it faster:
+ *  Manual Prefetching
+ *  Unrolling to an 128 bytes inner loop.
+ *  Using interleaving with more registers to break the carry chains.
  */
-static inline unsigned do_csum(const unsigned char * buff, int len)
+static __force_inline unsigned do_csum(const unsigned char *buff, unsigned len)
 {
-	int odd, count;
+	unsigned odd, count;
 	unsigned long result = 0;
 
-	if (len <= 0)
-		goto out;
+	if (unlikely(len == 0))
+		return result;
 	odd = 1 & (unsigned long) buff;
 	if (unlikely(odd)) {
 		result = *buff << 8;
@@ -45,7 +49,7 @@ static inline unsigned do_csum(const unsigned char * buff, int len)
 		count = len >> 1;	/* nr of 16-bit words.. */
 		if (count) {
 			if (2 & (unsigned long) buff) {
-				result += *(unsigned short *) buff;
+				result += *(unsigned short *)buff;
 				count--;
 				len -= 2;
 				buff += 2;
@@ -59,18 +63,41 @@ static inline unsigned do_csum(const unsigned char * buff, int len)
 				buff += 4;
 			}
 			count >>= 1;	/* nr of 64-bit words.. */
-			if (count) {
+
+			/* main loop using 64byte blocks */
 			unsigned long zero = 0;
-				do {
-					asm("	addq %1,%0\n"
-					    "	adcq %2,%0\n"
-					    : "=r" (result)
-					    : "m" (*buff), "r" (zero), "0" (result));
-					count--;
-					buff += 8;
-				} while (count);
-				result = (result & 0xffffffff) + (result >> 32);
-			}
+			unsigned count64 = count >> 3;
+			while (count64) {
+				asm("addq 0*8(%[src]),%[res]\n\t"
+				    "adcq 1*8(%[src]),%[res]\n\t"
+				    "adcq 2*8(%[src]),%[res]\n\t"
+				    "adcq 3*8(%[src]),%[res]\n\t"
+				    "adcq 4*8(%[src]),%[res]\n\t"
+				    "adcq 5*8(%[src]),%[res]\n\t"
+				    "adcq 6*8(%[src]),%[res]\n\t"
+				    "adcq 7*8(%[src]),%[res]\n\t"
+				    "adcq %[zero],%[res]"
+				    : [res] "=r" (result)
+				    : [src] "r" (buff), [zero] "r" (zero),
+				    "[res]" (result));
+				buff += 64;
+				count64--;
+			}
+
+			/* last upto 7 8byte blocks */
+			count %= 8;
+			while (count) {
+				asm("addq %1,%0\n\t"
+				    "adcq %2,%0\n"
+				    : "=r" (result)
+				    : "m" (*(unsigned long *)buff),
+				    "r" (zero), "0" (result));
+				--count;
+				buff += 8;
+			}
+			result = add32_with_carry(result>>32,
+						  result&0xffffffff);
 			if (len & 4) {
 				result += *(unsigned int *) buff;
 				buff += 4;
@@ -83,10 +110,11 @@ static inline unsigned do_csum(const unsigned char * buff, int len)
 	}
 	if (len & 1)
 		result += *buff;
-	result = from64to16(result);
-	if (unlikely(odd))
-		return ((result >> 8) & 0xff) | ((result & 0xff) << 8);
-out:
+	result = add32_with_carry(result>>32, result & 0xffffffff);
+	if (unlikely(odd)) {
+		result = from32to16(result);
+		result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
+	}
 	return result;
 }
@@ -102,18 +130,11 @@ static inline unsigned do_csum(const unsigned char * buff, int len)
  *
  * it's best to have buff aligned on a 64-bit boundary
  */
-unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
+unsigned csum_partial(const unsigned char *buff, unsigned len, unsigned sum)
 {
-	unsigned result = do_csum(buff, len);
-
-	/* add in old sum, and carry.. */
-	asm("addl %1,%0\n\t"
-	    "adcl $0,%0" : "=r" (result) : "r" (sum), "0" (result));
-	return result;
+	return add32_with_carry(do_csum(buff, len), sum);
 }
 
-//EXPORT_SYMBOL(csum_partial);
-
 /*
  * this routine is used for miscellaneous IP-like checksums, mainly
  * in icmp.c
@@ -123,4 +144,3 @@ unsigned short ip_compute_csum(unsigned char * buff, int len)
 	return ~csum_partial(buff,len,0);
 }
-EXPORT_SYMBOL(ip_compute_csum);
-/* Copyright 2002 Andi Kleen, SuSE Labs.
+/* Copyright 2002,2003 Andi Kleen, SuSE Labs.
  * Subject to the GNU Public License v.2
  *
  * Wrappers of assembly checksum functions for x86-64.
@@ -7,18 +7,6 @@
 #include <asm/checksum.h>
 #include <linux/module.h>
 
-/* Better way for this sought */
-static inline unsigned from64to32(unsigned long x)
-{
-	/* add up 32-bit words for 33 bits */
-	x = (x & 0xffffffff) + (x >> 32);
-	/* add up 16-bit and 17-bit words for 17+c bits */
-	x = (x & 0xffff) + (x >> 16);
-	/* add up 16-bit and 2-bit for 16+c bit */
-	x = (x & 0xffff) + (x >> 16);
-	return x;
-}
-
 /**
  * csum_partial_copy_from_user - Copy and checksum from user space.
  * @src: source address (user space)
@@ -36,14 +24,32 @@ csum_partial_copy_from_user(const char *src, char *dst,
 {
 	*errp = 0;
 	if (likely(access_ok(VERIFY_READ,src, len))) {
-		unsigned long sum;
-		sum = csum_partial_copy_generic(src,dst,len,isum,errp,NULL);
+		/* Why 6, not 7? To handle odd addresses aligned we
+		   would need to do considerable complications to fix the
+		   checksum which is defined as an 16bit accumulator. The
+		   fix alignment code is primarily for performance
+		   compatibility with 32bit and that will handle odd
+		   addresses slowly too. */
+		if (unlikely((unsigned long)src & 6)) {
+			while (((unsigned long)src & 6) && len >= 2) {
+				__u16 val16;
+				*errp = __get_user(val16, (__u16 *)src);
+				if (*errp)
+					return isum;
+				*(__u16 *)dst = val16;
+				isum = add32_with_carry(isum, val16);
+				src += 2;
+				dst += 2;
+				len -= 2;
+			}
+		}
+		isum = csum_partial_copy_generic(src,dst,len,isum,errp,NULL);
 		if (likely(*errp == 0))
-			return from64to32(sum);
+			return isum;
 	}
 	*errp = -EFAULT;
 	memset(dst,0,len);
-	return 0;
+	return isum;
 }
 
 EXPORT_SYMBOL(csum_partial_copy_from_user);
@@ -67,8 +73,22 @@ csum_partial_copy_to_user(const char *src, char *dst,
 		*errp = -EFAULT;
 		return 0;
 	}
+
+	if (unlikely((unsigned long)dst & 6)) {
+		while (((unsigned long)dst & 6) && len >= 2) {
+			__u16 val16 = *(__u16 *)src;
+			isum = add32_with_carry(isum, val16);
+			*errp = __put_user(val16, (__u16 *)dst);
+			if (*errp)
+				return isum;
+			src += 2;
+			dst += 2;
+			len -= 2;
+		}
+	}
+
 	*errp = 0;
-	return from64to32(csum_partial_copy_generic(src,dst,len,isum,NULL,errp));
+	return csum_partial_copy_generic(src,dst,len,isum,NULL,errp);
 }
 
 EXPORT_SYMBOL(csum_partial_copy_to_user);
@@ -85,11 +105,9 @@ EXPORT_SYMBOL(csum_partial_copy_to_user);
 unsigned int
 csum_partial_copy_nocheck(const char *src, char *dst, int len, unsigned int sum)
 {
-	return from64to32(csum_partial_copy_generic(src,dst,len,sum,NULL,NULL));
+	return csum_partial_copy_generic(src,dst,len,sum,NULL,NULL);
 }
 
-//EXPORT_SYMBOL(csum_partial_copy_nocheck);
-
 unsigned short csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr,
 			       __u32 len, unsigned short proto, unsigned int sum)
 {
@@ -103,7 +121,7 @@ unsigned short csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr,
 	    "	adcq $0,%[sum]\n"
 	    : [sum] "=r" (sum64)
 	    : "[sum]" (rest),[saddr] "r" (saddr), [daddr] "r" (daddr));
-	return csum_fold(from64to32(sum64));
+	return csum_fold(add32_with_carry(sum64 & 0xffffffff, sum64>>32));
 }
 
 EXPORT_SYMBOL(csum_ipv6_magic);
@@ -12,103 +12,78 @@
  * rax original destination
  */
 
-// #define FIX_ALIGNMENT
 	.globl __memcpy
 	.globl memcpy
-	.p2align
+	.p2align 4
 __memcpy:
 memcpy:
 	pushq	%rbx
 	movq	%rdi,%rax
 
-#ifdef FIX_ALIGNMENT
-	movl	%edi,%ecx
-	andl	$7,%ecx
-	jnz	bad_alignment
-after_bad_alignment:
-#endif
-
-	movq	%rdx,%rcx
-	movl	$64,%ebx
-	shrq	$6,%rcx
-	jz	handle_tail
-
-loop_64:
+	movl	%edx,%ecx
+	shrl	$6,%ecx
+	jz	.Lhandle_tail
+
+	.p2align 4
+.Lloop_64:
+	decl	%ecx
+
 	movq	(%rsi),%r11
 	movq	8(%rsi),%r8
-	movq	2*8(%rsi),%r9
-	movq	3*8(%rsi),%r10
 	movq	%r11,(%rdi)
 	movq	%r8,1*8(%rdi)
+
+	movq	2*8(%rsi),%r9
+	movq	3*8(%rsi),%r10
 	movq	%r9,2*8(%rdi)
 	movq	%r10,3*8(%rdi)
 
 	movq	4*8(%rsi),%r11
 	movq	5*8(%rsi),%r8
-	movq	6*8(%rsi),%r9
-	movq	7*8(%rsi),%r10
 	movq	%r11,4*8(%rdi)
 	movq	%r8,5*8(%rdi)
+
+	movq	6*8(%rsi),%r9
+	movq	7*8(%rsi),%r10
 	movq	%r9,6*8(%rdi)
 	movq	%r10,7*8(%rdi)
 
-	addq	%rbx,%rsi
-	addq	%rbx,%rdi
-	decl	%ecx
-	jnz	loop_64
+	leaq	64(%rsi),%rsi
+	leaq	64(%rdi),%rdi
+	jnz	.Lloop_64
 
-handle_tail:
+.Lhandle_tail:
 	movl	%edx,%ecx
 	andl	$63,%ecx
 	shrl	$3,%ecx
-	jz	handle_7
-	movl	$8,%ebx
-loop_8:
+	jz	.Lhandle_7
+	.p2align 4
+.Lloop_8:
+	decl	%ecx
 	movq	(%rsi),%r8
 	movq	%r8,(%rdi)
-	addq	%rbx,%rdi
-	addq	%rbx,%rsi
-	decl	%ecx
-	jnz	loop_8
+	leaq	8(%rdi),%rdi
+	leaq	8(%rsi),%rsi
+	jnz	.Lloop_8
 
-handle_7:
+.Lhandle_7:
 	movl	%edx,%ecx
 	andl	$7,%ecx
-	jz	ende
-loop_1:
+	jz	.Lende
+	.p2align 4
+.Lloop_1:
 	movb	(%rsi),%r8b
 	movb	%r8b,(%rdi)
 	incq	%rdi
 	incq	%rsi
 	decl	%ecx
-	jnz	loop_1
+	jnz	.Lloop_1
 
-ende:
-	sfence
+.Lende:
 	popq	%rbx
 	ret
-
-#ifdef FIX_ALIGNMENT
-	/* align destination */
-	/* This is simpleminded. For bigger blocks it may make sense to align
-	   src and dst to their aligned subset and handle the rest separately */
-bad_alignment:
-	movl	$8,%r9d
-	subl	%ecx,%r9d
-	movl	%r9d,%ecx
-	subq	%r9,%rdx
-	js	small_alignment
-	jz	small_alignment
-align_1:
-	movb	(%rsi),%r8b
-	movb	%r8b,(%rdi)
-	incq	%rdi
-	incq	%rsi
-	decl	%ecx
-	jnz	align_1
-	jmp	after_bad_alignment
-small_alignment:
-	addq	%r9,%rdx
-	jmp	handle_7
-#endif
@@ -11,7 +11,7 @@
  */
 	.globl __memset
 	.globl memset
-	.p2align
+	.p2align 4
 memset:
 __memset:
 	movq	%rdi,%r10
@@ -25,15 +25,16 @@ __memset:
 	/* align dst */
 	movl	%edi,%r9d
 	andl	$7,%r9d
-	jnz	bad_alignment
-after_bad_alignment:
+	jnz	.Lbad_alignment
+.Lafter_bad_alignment:
 
-	movq	%r11,%rcx
-	movl	$64,%r8d
-	shrq	$6,%rcx
-	jz	handle_tail
+	movl	%r11d,%ecx
+	shrl	$6,%ecx
+	jz	.Lhandle_tail
 
-loop_64:
+	.p2align 4
+.Lloop_64:
+	decl	%ecx
 	movq	%rax,(%rdi)
 	movq	%rax,8(%rdi)
 	movq	%rax,16(%rdi)
@@ -42,43 +43,45 @@ loop_64:
 	movq	%rax,40(%rdi)
 	movq	%rax,48(%rdi)
 	movq	%rax,56(%rdi)
-	addq	%r8,%rdi
-	decl	%ecx
-	jnz	loop_64
+	leaq	64(%rdi),%rdi
+	jnz	.Lloop_64
 
 	/* Handle tail in loops. The loops should be faster than hard
 	   to predict jump tables. */
-handle_tail:
+	.p2align 4
+.Lhandle_tail:
 	movl	%r11d,%ecx
 	andl	$63&(~7),%ecx
-	jz	handle_7
+	jz	.Lhandle_7
 	shrl	$3,%ecx
-loop_8:
-	movq	%rax,(%rdi)
-	addq	$8,%rdi
+	.p2align 4
+.Lloop_8:
 	decl	%ecx
-	jnz	loop_8
+	movq	%rax,(%rdi)
+	leaq	8(%rdi),%rdi
+	jnz	.Lloop_8
 
-handle_7:
+.Lhandle_7:
 	movl	%r11d,%ecx
 	andl	$7,%ecx
-	jz	ende
-loop_1:
-	movb	%al,(%rdi)
-	addq	$1,%rdi
+	jz	.Lende
+	.p2align 4
+.Lloop_1:
 	decl	%ecx
-	jnz	loop_1
+	movb	%al,(%rdi)
+	leaq	1(%rdi),%rdi
+	jnz	.Lloop_1
 
-ende:
+.Lende:
 	movq	%r10,%rax
 	ret
 
-bad_alignment:
+.Lbad_alignment:
 	cmpq	$7,%r11
-	jbe	handle_7
+	jbe	.Lhandle_7
 	movq	%rax,(%rdi)	/* unaligned store */
 	movq	$8,%r8
 	subq	%r9,%r8
 	addq	%r8,%rdi
 	subq	%r8,%r11
-	jmp	after_bad_alignment
+	jmp	.Lafter_bad_alignment
@@ -50,10 +50,10 @@ SECTIONS
   .xtime_lock : AT ((LOADADDR(.vsyscall_0) + SIZEOF(.vsyscall_0) + 63) & ~(63)) { *(.xtime_lock) }
   xtime_lock = LOADADDR(.xtime_lock);
   . = ALIGN(16);
-  .hpet : AT ((LOADADDR(.xtime_lock) + SIZEOF(.xtime_lock) + 15) & ~(15)) { *(.hpet) }
-  hpet = LOADADDR(.hpet);
+  .vxtime : AT ((LOADADDR(.xtime_lock) + SIZEOF(.xtime_lock) + 15) & ~(15)) { *(.vxtime) }
+  vxtime = LOADADDR(.vxtime);
   . = ALIGN(16);
-  .wall_jiffies : AT ((LOADADDR(.hpet) + SIZEOF(.hpet) + 15) & ~(15)) { *(.wall_jiffies) }
+  .wall_jiffies : AT ((LOADADDR(.vxtime) + SIZEOF(.vxtime) + 15) & ~(15)) { *(.wall_jiffies) }
   wall_jiffies = LOADADDR(.wall_jiffies);
   . = ALIGN(16);
   .sys_tz : AT ((LOADADDR(.wall_jiffies) + SIZEOF(.wall_jiffies) + 15) & ~(15)) { *(.sys_tz) }
......
@@ -125,7 +125,7 @@ csum_tcpudp_magic(unsigned long saddr, unsigned long daddr,
  * Before filling it in it needs to be csum_fold()'ed.
  * buff should be aligned to a 64bit boundary if possible.
  */
-extern unsigned int csum_partial(const unsigned char *buff, int len, unsigned int sum);
+extern unsigned int csum_partial(const unsigned char *buff, unsigned len, unsigned int sum);
 
 #define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER 1
 #define HAVE_CSUM_COPY_USER 1
@@ -179,4 +179,14 @@ extern unsigned short
 csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr,
 		__u32 len, unsigned short proto, unsigned int sum);
 
+static inline unsigned add32_with_carry(unsigned a, unsigned b)
+{
+	asm("addl %2,%0\n\t"
+	    "adcl $0,%0"
+	    : "=r" (a)
+	    : "0" (a), "r" (b));
+	return a;
+}
+
 #endif
@@ -35,6 +35,8 @@
 enum fixed_addresses {
 	VSYSCALL_LAST_PAGE,
 	VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
+	VSYSCALL_HPET,
+	FIX_HPET_BASE,
 #ifdef CONFIG_X86_LOCAL_APIC
 	FIX_APIC_BASE,	/* local (CPU) APIC) -- required for SMP or not */
 #endif
......
@@ -24,6 +24,11 @@ outb_p((addr),RTC_PORT(0)); \
 outb_p((val),RTC_PORT(1)); \
 })
 
+#ifndef CONFIG_HPET_TIMER
 #define RTC_IRQ 8
+#else
+/* Temporary workaround due to IRQ routing problem. */
+#define RTC_IRQ 0
+#endif
 
 #endif /* _ASM_MC146818RTC_H */
@@ -313,7 +313,7 @@ extern inline void sync_core(void)
 #define ARCH_HAS_PREFETCH
 static inline void prefetch(void *x)
 {
-	asm volatile("2: prefetchnta %0\n1:\t"
+	asm volatile("2: prefetcht0 %0\n1:\t"
		".section __ex_table,\"a\"\n\t"
		" .align 8\n\t"
		" .quad 2b,1b\n\t"
......
@@ -54,6 +54,8 @@ extern void swap_low_mappings(void);
 extern void oops_begin(void);
 extern void die(const char *,struct pt_regs *,long);
 extern void __die(const char * str, struct pt_regs * regs, long err);
+extern void __show_regs(struct pt_regs * regs);
+extern void show_regs(struct pt_regs * regs);
 
 extern int map_syscall32(struct mm_struct *mm, unsigned long address);
 extern char *syscall32_page;
......
@@ -30,6 +30,34 @@ static inline cycles_t get_cycles (void)
 
 extern unsigned int cpu_khz;
 
-extern struct hpet_data hpet;
+/*
+ * Documentation on HPET can be found at:
+ *      http://www.intel.com/ial/home/sp/pcmmspec.htm
+ *      ftp://download.intel.com/ial/home/sp/mmts098.pdf
+ */
+
+#define HPET_ID		0x000
+#define HPET_PERIOD	0x004
+#define HPET_CFG	0x010
+#define HPET_STATUS	0x020
+#define HPET_COUNTER	0x0f0
+#define HPET_T0_CFG	0x100
+#define HPET_T0_CMP	0x108
+#define HPET_T0_ROUTE	0x110
+
+#define HPET_ID_VENDOR	0xffff0000
+#define HPET_ID_LEGSUP	0x00008000
+#define HPET_ID_NUMBER	0x00000f00
+#define HPET_ID_REV	0x000000ff
+
+#define HPET_CFG_ENABLE	0x001
+#define HPET_CFG_LEGACY	0x002
+
+#define HPET_T0_ENABLE		0x004
+#define HPET_T0_PERIODIC	0x008
+#define HPET_T0_SETVAL		0x040
+#define HPET_T0_32BIT		0x100
+
+extern struct vxtime_data vxtime;
 
 #endif
@@ -15,7 +15,7 @@ enum vsyscall_num {
 
 #ifdef __KERNEL__
 
-#define __section_hpet __attribute__ ((unused, __section__ (".hpet"), aligned(16)))
+#define __section_vxtime __attribute__ ((unused, __section__ (".vxtime"), aligned(16)))
 #define __section_wall_jiffies __attribute__ ((unused, __section__ (".wall_jiffies"), aligned(16)))
 #define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
 #define __section_sys_tz __attribute__ ((unused, __section__ (".sys_tz"), aligned(16)))
@@ -23,22 +23,24 @@ enum vsyscall_num {
 #define __section_xtime __attribute__ ((unused, __section__ (".xtime"), aligned(16)))
 #define __section_xtime_lock __attribute__ ((unused, __section__ (".xtime_lock"), aligned(L1_CACHE_BYTES)))
 
+#define VXTIME_TSC	1
+#define VXTIME_HPET	2
+
-struct hpet_data {
-	long address;		/* base address */
+struct vxtime_data {
+	long hpet_address;	/* HPET base address */
 	unsigned long hz;	/* HPET clocks / sec */
-	int trigger;		/* value at last interrupt */
 	int last;
-	int offset;
 	unsigned long last_tsc;
-	long ticks;
+	long quot;
+	long tsc_quot;
+	int mode;
 };
 
 #define hpet_readl(a)           readl(fix_to_virt(FIX_HPET_BASE) + a)
 #define hpet_writel(d,a)        writel(d, fix_to_virt(FIX_HPET_BASE) + a)
 
 /* vsyscall space (readonly) */
-extern struct hpet_data __hpet;
+extern struct vxtime_data __vxtime;
 extern struct timespec __xtime;
 extern volatile unsigned long __jiffies;
 extern unsigned long __wall_jiffies;
@@ -46,7 +48,7 @@ extern struct timezone __sys_tz;
 extern seqlock_t __xtime_lock;
 
 /* kernel space (writeable) */
-extern struct hpet_data hpet;
+extern struct vxtime_data vxtime;
 extern unsigned long wall_jiffies;
 extern struct timezone sys_tz;
 extern int sysctl_vsyscall;
......