Commit 5d6a2114, authored Jul 25, 2003 by Linus Torvalds
Merge http://lia64.bkbits.net/to-linus-2.5
into home.osdl.org:/home/torvalds/v2.5/linux

Parents: e0bd73b1 6f0a19c5
Showing 13 changed files with 45 additions and 90 deletions
arch/ia64/hp/common/sba_iommu.c      +4   -2
arch/ia64/hp/sim/simeth.c            +1   -1
arch/ia64/kernel/head.S              +2   -2
arch/ia64/kernel/patch.c             +3   -10
arch/ia64/kernel/setup.c             +3   -24
arch/ia64/kernel/time.c              +2   -2
arch/ia64/kernel/unwind.c            +4   -3
arch/ia64/mm/init.c                  +8   -11
arch/ia64/scripts/toolchain-flags    +1   -1
arch/ia64/vmlinux.lds.S              +3   -3
include/asm-ia64/atomic.h            +3   -3
include/asm-ia64/io.h                +11  -0
include/asm-ia64/spinlock.h          +0   -28
arch/ia64/hp/common/sba_iommu.c
@@ -48,9 +48,11 @@
 ** This option allows cards capable of 64bit DMA to bypass the IOMMU.  If
 ** not defined, all DMA will be 32bit and go through the TLB.
 ** There's potentially a conflict in the bio merge code with us
-** advertising an iommu, but then bypassing it.  Disabled for now.
+** advertising an iommu, but then bypassing it.  Since I/O MMU bypassing
+** appears to give more performance than bio-level virtual merging, we'll
+** do the former for now.
 */
-#undef ALLOW_IOV_BYPASS
+#define ALLOW_IOV_BYPASS

 /*
 ** If a device prefetches beyond the end of a valid pdir entry, it will cause
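For context, a minimal sketch of what ALLOW_IOV_BYPASS enables. All demo_* names are hypothetical, not the driver's actual code: if the device's DMA mask covers the buffer's physical address, hand the device the physical address directly and skip IOMMU page-table setup.

        #include <stddef.h>
        #include <stdint.h>

        /* Illustrative sketch only; hypothetical names, not kernel code. */
        struct demo_dev { uint64_t dma_mask; };

        #define ALLOW_IOV_BYPASS        /* the switch this commit turns on */

        static uint64_t demo_iommu_insert(struct demo_dev *dev, uint64_t phys, size_t size)
        {
                (void)dev; (void)size;
                return 0x1000;  /* stand-in for a real pdir mapping (I/O virtual address) */
        }

        /* With bypass enabled, a 64-bit-capable device is given the physical
         * address directly and never touches the IOMMU TLB. */
        static uint64_t demo_map_single(struct demo_dev *dev, uint64_t phys, size_t size)
        {
        #ifdef ALLOW_IOV_BYPASS
                if (phys + size - 1 <= dev->dma_mask)
                        return phys;            /* device can address it: bypass */
        #endif
                return demo_iommu_insert(dev, phys, size);  /* otherwise map through the TLB */
        }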
arch/ia64/hp/sim/simeth.c
@@ -223,7 +223,7 @@ simeth_probe1(void)
 	dev->set_multicast_list = set_multicast_list; /* no yet used */

 	err = register_netdev(dev);
-	if (dev) {
+	if (err) {
 		kfree(dev);
 		return err;
 	}
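The old test `if (dev)` was always true at this point (dev had just been allocated), so the just-registered device was freed unconditionally. register_netdev() returns 0 on success and a negative errno on failure, so the status code is what must be tested. A self-contained mock of the bug, with hypothetical demo_* names:

        #include <stdio.h>
        #include <stdlib.h>

        /* Hypothetical stand-in for register_netdev(): 0 on success, -errno on failure. */
        static int demo_register(void *dev) { (void)dev; return 0; }

        int main(void)
        {
                void *dev = malloc(64);
                if (!dev)
                        return 1;

                int err = demo_register(dev);
                if (err) {      /* test the status code; `if (dev)' is always true
                                 * here and would free the device even on success */
                        free(dev);
                        return 1;
                }
                printf("registered\n");
                free(dev);      /* cleanup for the demo; a real driver unregisters first */
                return 0;
        }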
arch/ia64/kernel/head.S
@@ -863,7 +863,7 @@ GLOBAL_ENTRY(ia64_spinlock_contention_pre3_4)
 .wait:
 	// exponential backoff, kdb, lockmeter etc. go in here
 	hint @pause
-	ld4.bias r30=[r31]
+	ld4 r30=[r31]		// don't use ld4.bias; if it's contended, we won't write the word
 	nop 0
 	;;
 	cmp4.eq p14,p0=r30,r0
@@ -880,7 +880,7 @@ GLOBAL_ENTRY(ia64_spinlock_contention)
 .wait:
 	// exponential backoff, kdb, lockmeter etc. go in here
 	hint @pause
-	ld4.bias r30=[r31]
+	ld4 r30=[r31]		// don't use ld4.bias; if it's contended, we won't write the word
 	;;
 	cmp4.ne p14,p0=r30,r0
 	mov r30=1
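The reasoning in the new comment, poll with a plain load so a contended lock's cache line is never requested for exclusive ownership, is the classic test-and-test-and-set idiom. A portable sketch using GCC's __atomic builtins (illustrative only; the kernel path above is hand-written ia64 assembly):

        /* Test-and-test-and-set: spin on a read-only load (the cache line stays
         * shared, like the plain ld4 above), and only attempt the writing atomic
         * operation once the lock reads as free. */
        typedef struct { volatile unsigned int lock; } demo_spinlock_t;

        static void demo_spin_lock(demo_spinlock_t *l)
        {
                for (;;) {
                        /* read-only poll; a pause/backoff hint would go in this loop */
                        while (__atomic_load_n(&l->lock, __ATOMIC_RELAXED) != 0)
                                ;
                        /* looked free: now try to take it with a single atomic write */
                        if (__atomic_exchange_n(&l->lock, 1, __ATOMIC_ACQUIRE) == 0)
                                return;
                }
        }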
arch/ia64/kernel/patch.c
@@ -9,6 +9,7 @@
 #include <asm/patch.h>
 #include <asm/processor.h>
+#include <asm/sections.h>
 #include <asm/system.h>
 #include <asm/unistd.h>
@@ -176,16 +177,8 @@ patch_brl_fsys_bubble_down (unsigned long start, unsigned long end)
 void
 ia64_patch_gate (void)
 {
-	extern char __start_gate_mckinley_e9_patchlist;
-	extern char __end_gate_mckinley_e9_patchlist;
-	extern char __start_gate_vtop_patchlist;
-	extern char __end_gate_vtop_patchlist;
-	extern char __start_gate_fsyscall_patchlist;
-	extern char __end_gate_fsyscall_patchlist;
-	extern char __start_gate_brl_fsys_bubble_down_patchlist;
-	extern char __end_gate_brl_fsys_bubble_down_patchlist;
-#	define START(name)	((unsigned long) &__start_gate_##name##_patchlist)
-#	define END(name)	((unsigned long)&__end_gate_##name##_patchlist)
+#	define START(name)	((unsigned long) __start_gate_##name##_patchlist)
+#	define END(name)	((unsigned long)__end_gate_##name##_patchlist)

 	patch_fsyscall_table(START(fsyscall), END(fsyscall));
 	patch_brl_fsys_bubble_down(START(brl_fsys_bubble_down), END(brl_fsys_bubble_down));
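The surviving START()/END() macros build the boundary-symbol names with `##` token pasting; since the symbols are now declared as arrays in <asm/sections.h>, the explicit `&` goes away as well. A compile-only sketch with a hypothetical `demo` patchlist:

        /* Hypothetical symbols; in the kernel these are defined by the linker
         * script and declared in <asm/sections.h>. */
        extern char __start_gate_demo_patchlist[], __end_gate_demo_patchlist[];

        #define START(name)     ((unsigned long) __start_gate_##name##_patchlist)
        #define END(name)       ((unsigned long) __end_gate_##name##_patchlist)

        /* START(demo) pastes to __start_gate_demo_patchlist; being an array,
         * the name decays to the section's start address with no `&'. */
        unsigned long demo_patchlist_bytes(void)
        {
                return END(demo) - START(demo);
        }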
arch/ia64/kernel/setup.c
@@ -41,6 +41,7 @@
 #include <asm/pgtable.h>
 #include <asm/processor.h>
 #include <asm/sal.h>
+#include <asm/sections.h>
 #include <asm/smp.h>
 #include <asm/system.h>
 #include <asm/unistd.h>
@@ -49,8 +50,6 @@
 # error "struct cpuinfo_ia64 too big!"
 #endif

-extern char _end;
-
 #ifdef CONFIG_SMP
 unsigned long __per_cpu_offset[NR_CPUS];
 #endif
@@ -279,7 +278,6 @@ sort_regions (struct rsvd_region *rsvd_region, int max)
 static void
 find_memory (void)
 {
-#	define KERNEL_END	(&_end)
 	unsigned long bootmap_size;
 	int n = 0;
@@ -300,7 +298,7 @@ find_memory (void)
 	n++;

 	rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
-	rsvd_region[n].end   = (unsigned long) ia64_imva(KERNEL_END);
+	rsvd_region[n].end   = (unsigned long) ia64_imva(_end);
 	n++;

 #ifdef CONFIG_BLK_DEV_INITRD
@@ -363,13 +361,12 @@ find_memory (void)
 void __init
 setup_arch (char **cmdline_p)
 {
-	extern unsigned long *__start___vtop_patchlist[], *__end____vtop_patchlist[];
 	extern unsigned long ia64_iobase;
 	unsigned long phys_iobase;

 	unw_init();

-	ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end____vtop_patchlist);
+	ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);

 	*cmdline_p = __va(ia64_boot_param->command_line);
 	strlcpy(saved_command_line, *cmdline_p, sizeof(saved_command_line));
@@ -390,19 +387,6 @@ setup_arch (char **cmdline_p)
 	find_memory();

-#if 0
-	/* XXX fix me */
-	init_mm.start_code = (unsigned long) &_stext;
-	init_mm.end_code = (unsigned long) &_etext;
-	init_mm.end_data = (unsigned long) &_edata;
-	init_mm.brk = (unsigned long) &_end;
-
-	code_resource.start = virt_to_bus(&_text);
-	code_resource.end = virt_to_bus(&_etext) - 1;
-	data_resource.start = virt_to_bus(&_etext);
-	data_resource.end = virt_to_bus(&_edata) - 1;
-#endif
-
 	/* process SAL system table: */
 	ia64_sal_init(efi.sal_systab);
@@ -687,7 +671,6 @@ get_max_cacheline_size (void)
 void
 cpu_init (void)
 {
-	extern char __per_cpu_start[], __phys_per_cpu_start[];
 	extern void __init ia64_mmu_init (void *);
 	unsigned long num_phys_stacked;
 	pal_vm_info_2_u_t vmi;
@@ -696,7 +679,6 @@ cpu_init (void)
 	void *cpu_data;

 #ifdef CONFIG_SMP
-	extern char __per_cpu_end[];
 	int cpu;

 	/*
@@ -812,9 +794,6 @@ cpu_init (void)
 void
 check_bugs (void)
 {
-	extern char __start___mckinley_e9_bundles[];
-	extern char __end___mckinley_e9_bundles[];
-
 	ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
 			       (unsigned long) __end___mckinley_e9_bundles);
 }
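Most of the churn in this file (and in time.c, unwind.c, and mm/init.c below) is one cleanup: section-boundary symbols move from ad-hoc `extern char foo;` declarations to <asm/sections.h>, which declares them as incomplete arrays. A sketch of why that eliminates the `&`, using hypothetical symbol names:

        /* A linker-script symbol has an address but no C object behind it.
         * Hypothetical names for illustration: */
        extern char demo_end_old;       /* old style: must take the address as &demo_end_old */
        extern char demo_end_new[];     /* new style: the array name itself decays to the address */

        unsigned long old_way(void) { return (unsigned long) &demo_end_old; }
        unsigned long new_way(void) { return (unsigned long) demo_end_new; }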
arch/ia64/kernel/time.c
@@ -23,6 +23,7 @@
 #include <asm/hw_irq.h>
 #include <asm/ptrace.h>
 #include <asm/sal.h>
+#include <asm/sections.h>
 #include <asm/system.h>

 extern unsigned long wall_jiffies;
@@ -41,7 +42,6 @@ static void
 do_profile (unsigned long ip)
 {
 	extern unsigned long prof_cpu_mask;
-	extern char _stext;

 	if (!prof_buffer)
 		return;
@@ -49,7 +49,7 @@ do_profile (unsigned long ip)
 	if (!((1UL << smp_processor_id()) & prof_cpu_mask))
 		return;

-	ip -= (unsigned long) &_stext;
+	ip -= (unsigned long) _stext;
 	ip >>= prof_shift;
 	/*
 	 * Don't ignore out-of-bounds IP values silently, put them into the last
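The arithmetic in do_profile() maps an instruction pointer to a histogram slot: subtract the start of kernel text, then shift by the bucket granularity. A minimal user-space sketch (names and sizes are illustrative, not the kernel's):

        #define DEMO_PROF_SHIFT 4       /* each bucket covers 16 bytes of text */
        #define DEMO_PROF_LEN   1024

        static unsigned int demo_prof_buffer[DEMO_PROF_LEN];

        static void demo_profile_hit(unsigned long ip, unsigned long text_start)
        {
                ip -= text_start;               /* offset into the text section */
                ip >>= DEMO_PROF_SHIFT;         /* bucket index */
                if (ip >= DEMO_PROF_LEN)        /* as the comment above says: don't drop */
                        ip = DEMO_PROF_LEN - 1; /* out-of-bounds IPs, fold into the last slot */
                demo_prof_buffer[ip]++;
        }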
arch/ia64/kernel/unwind.c
@@ -39,6 +39,7 @@
 #include <asm/ptrace.h>
 #include <asm/ptrace_offsets.h>
 #include <asm/rse.h>
+#include <asm/sections.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
@@ -2178,7 +2179,7 @@ __initcall(create_gate_table);
 void __init
 unw_init (void)
 {
-	extern int ia64_unw_start, ia64_unw_end, __gp;
+	extern char __gp[];
 	extern void unw_hash_index_t_is_too_narrow (void);
 	long i, off;
@@ -2211,8 +2212,8 @@ unw_init (void)
 	unw.lru_head = UNW_CACHE_SIZE - 1;
 	unw.lru_tail = 0;

-	init_unwind_table(&unw.kernel_table, "kernel", KERNEL_START, (unsigned long) &__gp,
-			  &ia64_unw_start, &ia64_unw_end);
+	init_unwind_table(&unw.kernel_table, "kernel", KERNEL_START, (unsigned long) __gp,
+			  __start_unwind, __end_unwind);
 }

 /*
arch/ia64/mm/init.c
@@ -27,6 +27,7 @@
 #include <asm/patch.h>
 #include <asm/pgalloc.h>
 #include <asm/sal.h>
+#include <asm/sections.h>
 #include <asm/system.h>
 #include <asm/tlb.h>
 #include <asm/uaccess.h>
@@ -34,9 +35,6 @@
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

-/* References to section boundaries: */
-extern char _stext, _etext, _edata, __init_begin, __init_end, _end;
-
 extern void ia64_tlb_init (void);

 unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
@@ -151,8 +149,8 @@ free_initmem (void)
 {
 	unsigned long addr, eaddr;

-	addr = (unsigned long) ia64_imva(&__init_begin);
-	eaddr = (unsigned long) ia64_imva(&__init_end);
+	addr = (unsigned long) ia64_imva(__init_begin);
+	eaddr = (unsigned long) ia64_imva(__init_end);
 	while (addr < eaddr) {
 		ClearPageReserved(virt_to_page(addr));
 		set_page_count(virt_to_page(addr), 1);
@@ -161,7 +159,7 @@ free_initmem (void)
 		addr += PAGE_SIZE;
 	}
 	printk(KERN_INFO "Freeing unused kernel memory: %ldkB freed\n",
-	       (&__init_end - &__init_begin) >> 10);
+	       (__init_end - __init_begin) >> 10);
 }

 void
@@ -308,7 +306,6 @@ static void
 setup_gate (void)
 {
 	struct page *page;
-	extern char __start_gate_section[];

 	/*
 	 * Map the gate page twice: once read-only to export the ELF headers etc. and once
@@ -671,7 +668,7 @@ mem_init (void)
 	kclist_add(&kcore_mem, __va(0), max_low_pfn * PAGE_SIZE);
 	kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
-	kclist_add(&kcore_kernel, &_stext, &_end - &_stext);
+	kclist_add(&kcore_kernel, _stext, _end - _stext);

 	for_each_pgdat(pgdat)
 		totalram_pages += free_all_bootmem_node(pgdat);
@@ -679,9 +676,9 @@ mem_init (void)
 	reserved_pages = 0;
 	efi_memmap_walk(count_reserved_pages, &reserved_pages);

-	codesize =  (unsigned long) &_etext - (unsigned long) &_stext;
-	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
-	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
+	codesize =  (unsigned long) _etext - (unsigned long) _stext;
+	datasize =  (unsigned long) _edata - (unsigned long) _etext;
+	initsize =  (unsigned long) __init_end - (unsigned long) __init_begin;

 	printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, "
 	       "%luk data, %luk init)\n", (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10),
arch/ia64/scripts/toolchain-flags
@@ -20,7 +20,7 @@ warning: your linker cannot handle cross-segment segment-relative relocations.
 EOF
 fi

-if ! $CC -c $dir/check-model.c -o $out | grep -q 'attribute directive ignored'
+if ! $CC -c $dir/check-model.c -o $out 2>&1 | grep -q 'attribute directive ignored'
 then
 	CPPFLAGS="$CPPFLAGS -DHAVE_MODEL_SMALL_ATTRIBUTE"
 fi
arch/ia64/vmlinux.lds.S
@@ -59,7 +59,7 @@ SECTIONS
   {
 	__start___vtop_patchlist = .;
 	*(.data.patch.vtop)
-	__end____vtop_patchlist = .;
+	__end___vtop_patchlist = .;
   }

   .data.patch.mckinley_e9 : AT(ADDR(.data.patch.mckinley_e9) - LOAD_OFFSET)
@@ -89,9 +89,9 @@ SECTIONS
 	{ *(.IA_64.unwind_info*) }
   .IA_64.unwind : AT(ADDR(.IA_64.unwind) - LOAD_OFFSET)
 	{
-	  ia64_unw_start = .;
+	  __start_unwind = .;
 	  *(.IA_64.unwind*)
-	  ia64_unw_end = .;
+	  __end_unwind = .;
 	}

   RODATA
include/asm-ia64/atomic.h
@@ -47,7 +47,7 @@ ia64_atomic_add (int i, atomic_t *v)
 }

 static __inline__ int
-ia64_atomic64_add (int i, atomic64_t *v)
+ia64_atomic64_add (__s64 i, atomic64_t *v)
 {
 	__s64 old, new;
 	CMPXCHG_BUGCHECK_DECL
@@ -75,7 +75,7 @@ ia64_atomic_sub (int i, atomic_t *v)
 }

 static __inline__ int
-ia64_atomic64_sub (int i, atomic64_t *v)
+ia64_atomic64_sub (__s64 i, atomic64_t *v)
 {
 	__s64 old, new;
 	CMPXCHG_BUGCHECK_DECL
@@ -123,7 +123,7 @@ atomic_add_negative (int i, atomic_t *v)
 }

 static __inline__ int
-atomic64_add_negative (int i, atomic64_t *v)
+atomic64_add_negative (__s64 i, atomic64_t *v)
 {
 	return atomic64_add_return(i, v) < 0;
 }
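The int to __s64 change matters because a 64-bit increment passed through an `int` parameter is silently narrowed before the atomic ever sees it. A user-space demonstration (illustrative only; converting an out-of-range value to int is implementation-defined, typically modulo 2^32):

        #include <stdio.h>

        static long long add64_old(int i, long long *v)       { return *v += i; } /* old prototype */
        static long long add64_new(long long i, long long *v) { return *v += i; } /* fixed */

        int main(void)
        {
                long long a = 0, b = 0;
                /* 2^32 does not fit in int: the old signature typically adds 0 */
                printf("old: %lld\n", add64_old(0x100000000LL, &a));
                printf("new: %lld\n", add64_new(0x100000000LL, &b));
                return 0;
        }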
include/asm-ia64/io.h
@@ -413,6 +413,16 @@ extern void __ia64_memset_c_io (unsigned long, unsigned long, long);
 # endif /* __KERNEL__ */

+/*
+ * Enabling BIO_VMERGE_BOUNDARY forces us to turn off I/O MMU bypassing.  It is said that
+ * BIO-level virtual merging can give up to 4% performance boost (not verified for ia64).
+ * On the other hand, we know that I/O MMU bypassing gives ~8% performance improvement on
+ * SPECweb-like workloads on zx1-based machines.  Thus, for now we favor I/O MMU bypassing
+ * over BIO-level virtual merging.
+ */
+#if 1
+#define BIO_VMERGE_BOUNDARY	0
+#else
 /*
  * It makes no sense at all to have this BIO_VMERGE_BOUNDARY macro here.  Should be
  * replaced by dma_merge_mask() or something of that sort.  Note: the only way
@@ -425,5 +435,6 @@ extern void __ia64_memset_c_io (unsigned long, unsigned long, long);
  */
 extern unsigned long ia64_max_iommu_merge_mask;
 #define BIO_VMERGE_BOUNDARY	(ia64_max_iommu_merge_mask + 1)
+#endif

 #endif /* _ASM_IA64_IO_H */
include/asm-ia64/spinlock.h
@@ -22,9 +22,6 @@ typedef struct {
 #define SPIN_LOCK_UNLOCKED			(spinlock_t) { 0 }
 #define spin_lock_init(x)			((x)->lock = 0)

-#define NEW_LOCK
-#ifdef NEW_LOCK
-
 /*
  * Try to get the lock.  If we fail to get the lock, make a non-standard call to
  * ia64_spinlock_contention().  We do not use a normal call because that would force all
@@ -87,31 +84,6 @@ _raw_spin_lock (spinlock_t *lock)
 #endif
 }

-#else /* !NEW_LOCK */
-
-/*
- * Streamlined test_and_set_bit(0, (x)).  We use test-and-test-and-set
- * rather than a simple xchg to avoid writing the cache-line when
- * there is contention.
- */
-#define _raw_spin_lock(x) __asm__ __volatile__ (	\
-	"mov ar.ccv = r0\n"				\
-	"mov r29 = 1\n"					\
-	";;\n"						\
-	"1:\n"						\
-	"ld4.bias r2 = [%0]\n"				\
-	";;\n"						\
-	"cmp4.eq p0,p7 = r0,r2\n"			\
-	"(p7) br.cond.spnt.few 1b \n"			\
-	"cmpxchg4.acq r2 = [%0], r29, ar.ccv\n"		\
-	";;\n"						\
-	"cmp4.eq p0,p7 = r0, r2\n"			\
-	"(p7) br.cond.spnt.few 1b\n"			\
-	";;\n"						\
-	:: "r"(&(x)->lock) : "ar.ccv", "p7", "r2", "r29", "memory")
-
-#endif /* !NEW_LOCK */
-
 #define spin_is_locked(x)	((x)->lock != 0)
 #define _raw_spin_unlock(x)	do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
 #define _raw_spin_trylock(x)	(cmpxchg_acq(&(x)->lock, 0, 1) == 0)
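The surviving _raw_spin_trylock is a single compare-and-swap: it succeeds iff the lock word goes 0 to 1. The same shape in portable GCC builtins (a sketch, not the kernel's cmpxchg_acq):

        typedef struct { volatile unsigned int lock; } demo_lock_t;

        /* Returns nonzero iff we took the lock: CAS 0 -> 1 with acquire ordering,
         * mirroring cmpxchg_acq(&(x)->lock, 0, 1) == 0 above. */
        static int demo_spin_trylock(demo_lock_t *l)
        {
                unsigned int expected = 0;
                return __atomic_compare_exchange_n(&l->lock, &expected, 1,
                                                   0 /* strong CAS */,
                                                   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
        }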