Kirill Smelkov / linux · Commits · 22277879

Commit 22277879 authored Oct 24, 2003 by Linus Torvalds

    Merge http://lia64.bkbits.net/to-linus-2.5
    into home.osdl.org:/home/torvalds/v2.5/linux

Parents: 80d63a94 60b75a3c

Showing 11 changed files with 240 additions and 102 deletions (+240 -102)
arch/ia64/kernel/efi.c        +26    -0
arch/ia64/kernel/gate-data.S   +1    -1
arch/ia64/kernel/irq.c         +1    -1
arch/ia64/kernel/module.c     +75    -7
arch/ia64/kernel/perfmon.c   +118   -82
arch/ia64/kernel/process.c     +6    -2
arch/ia64/kernel/unwind_i.h    +1    -7
include/asm-ia64/io.h          +3    -0
include/asm-ia64/module.h      +2    -1
include/asm-ia64/namei.h       +1    -1
include/asm-ia64/unwind.h      +6    -0
arch/ia64/kernel/efi.c

@@ -711,6 +711,32 @@ efi_mem_attributes (unsigned long phys_addr)
 	return 0;
 }
 
+int
+valid_phys_addr_range (unsigned long phys_addr, unsigned long *size)
+{
+	void *efi_map_start, *efi_map_end, *p;
+	efi_memory_desc_t *md;
+	u64 efi_desc_size;
+
+	efi_map_start = __va(ia64_boot_param->efi_memmap);
+	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
+	efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
+		md = p;
+
+		if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT)) {
+			if (!(md->attribute & EFI_MEMORY_WB))
+				return 0;
+
+			if (*size > md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - phys_addr)
+				*size = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - phys_addr;
+			return 1;
+		}
+	}
+	return 0;
+}
+
 static void __exit
 efivars_exit (void)
 {
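The new helper both validates and clamps: it finds the EFI memory descriptor containing phys_addr, rejects ranges that are not WB-cacheable, and shrinks *size so the access cannot run past the descriptor. A hypothetical caller sketch (not part of this commit; read_mem_sketch and its signature are illustrative only):

	/* Hypothetical /dev/mem-style read path (illustrative, not from
	 * this commit): validate and clamp the request before copying. */
	static ssize_t
	read_mem_sketch (unsigned long phys_addr, char *buf, unsigned long count)
	{
		if (!valid_phys_addr_range(phys_addr, &count))
			return -EFAULT;		/* not WB-cacheable memory */

		/* count may have been shrunk to the end of the EFI
		 * descriptor; copy that many bytes from __va(phys_addr). */
		return count;
	}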
arch/ia64/kernel/gate-data.S

-	.section .data.gate, "ax"
+	.section .data.gate, "aw"
 
 	.incbin "arch/ia64/kernel/gate.so"
arch/ia64/kernel/irq.c

@@ -405,7 +405,7 @@ void enable_irq(unsigned int irq)
 	spin_lock_irqsave(&desc->lock, flags);
 	switch (desc->depth) {
 	case 1: {
-		unsigned int status = desc->status & ~(IRQ_DISABLED | IRQ_INPROGRESS);
+		unsigned int status = desc->status & ~IRQ_DISABLED;
 		desc->status = status;
 		if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
 			desc->status = status | IRQ_REPLAY;
arch/ia64/kernel/module.c

@@ -322,6 +322,10 @@ module_alloc (unsigned long size)
 void
 module_free (struct module *mod, void *module_region)
 {
+	if (mod->arch.init_unw_table && module_region == mod->module_init) {
+		unw_remove_unwind_table(mod->arch.init_unw_table);
+		mod->arch.init_unw_table = NULL;
+	}
 	vfree(module_region);
 }
@@ -843,23 +847,87 @@ apply_relocate (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 	return -ENOEXEC;
 }
 
+/*
+ * Modules contain a single unwind table which covers both the core and the init text
+ * sections but since the two are not contiguous, we need to split this table up such that
+ * we can register (and unregister) each "segment" seperately.  Fortunately, this sounds
+ * more complicated than it really is.
+ */
+static void
+register_unwind_table (struct module *mod)
+{
+	struct unw_table_entry *start = (void *) mod->arch.unwind->sh_addr;
+	struct unw_table_entry *end = start + mod->arch.unwind->sh_size / sizeof (*start);
+	struct unw_table_entry tmp, *e1, *e2, *core, *init;
+	unsigned long num_init = 0, num_core = 0;
+
+	/* First, count how many init and core unwind-table entries there are.  */
+	for (e1 = start; e1 < end; ++e1)
+		if (in_init(mod, e1->start_offset))
+			++num_init;
+		else
+			++num_core;
+
+	/*
+	 * Second, sort the table such that all unwind-table entries for the init and core
+	 * text sections are nicely separated.  We do this with a stupid bubble sort
+	 * (unwind tables don't get ridiculously huge).
+	 */
+	for (e1 = start; e1 < end; ++e1) {
+		for (e2 = e1 + 1; e2 < end; ++e2) {
+			if (e2->start_offset < e1->start_offset) {
+				tmp = *e1;
+				*e1 = *e2;
+				*e2 = tmp;
+			}
+		}
+	}
+
+	/*
+	 * Third, locate the init and core segments in the unwind table:
+	 */
+	if (in_init(mod, start->start_offset)) {
+		init = start;
+		core = start + num_init;
+	} else {
+		core = start;
+		init = start + num_core;
+	}
+
+	DEBUGP("%s: name=%s, gp=%lx, num_init=%lu, num_core=%lu\n", __FUNCTION__,
+	       mod->name, mod->arch.gp, num_init, num_core);
+
+	/*
+	 * Fourth, register both tables (if not empty).
+	 */
+	if (num_core > 0) {
+		mod->arch.core_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
+								core, core + num_core);
+		DEBUGP("%s:  core: handle=%p [%p-%p)\n", __FUNCTION__,
+		       mod->arch.core_unw_table, core, core + num_core);
+	}
+	if (num_init > 0) {
+		mod->arch.init_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
+								init, init + num_init);
+		DEBUGP("%s:  init: handle=%p [%p-%p)\n", __FUNCTION__,
+		       mod->arch.init_unw_table, init, init + num_init);
+	}
+}
+
 int
 module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod)
 {
+	DEBUGP("%s: init: entry=%p\n", __FUNCTION__, mod->init);
 	if (mod->arch.unwind)
-		mod->arch.unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
-							   (void *) mod->arch.unwind->sh_addr,
-							   ((void *) mod->arch.unwind->sh_addr
-							    + mod->arch.unwind->sh_size));
+		register_unwind_table(mod);
 	return 0;
 }
 
 void
 module_arch_cleanup (struct module *mod)
 {
-	if (mod->arch.unwind)
-		unw_remove_unwind_table(mod->arch.unw_table);
+	if (mod->arch.init_unw_table)
+		unw_remove_unwind_table(mod->arch.init_unw_table);
+	if (mod->arch.core_unw_table)
+		unw_remove_unwind_table(mod->arch.core_unw_table);
 }
 
 #ifdef CONFIG_SMP
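The count/sort/split sequence above can be seen on a toy table (a standalone sketch, not kernel code; is_init() stands in for the kernel's in_init() test and the addresses are made up):

	#include <stdio.h>

	struct entry { unsigned long start; int init; };

	static int is_init (const struct entry *e) { return e->init; }

	int main (void)
	{
		/* init text lives above core text here, as in a real module */
		struct entry tab[] = { {0x30,1}, {0x10,0}, {0x40,1}, {0x20,0} };
		int n = sizeof(tab)/sizeof(tab[0]), num_init = 0, i, j;
		struct entry tmp, *core, *init;

		for (i = 0; i < n; i++)
			if (is_init(&tab[i])) num_init++;

		/* bubble sort by start address; since each text section is
		 * contiguous, its entries end up adjacent after the sort */
		for (i = 0; i < n; i++)
			for (j = i + 1; j < n; j++)
				if (tab[j].start < tab[i].start) {
					tmp = tab[i]; tab[i] = tab[j]; tab[j] = tmp;
				}

		if (is_init(&tab[0])) { init = tab; core = tab + num_init; }
		else                  { core = tab; init = tab + (n - num_init); }

		printf("core at [0x%lx..], init at [0x%lx..]\n", core->start, init->start);
		return 0;
	}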
arch/ia64/kernel/perfmon.c

@@ -202,8 +202,8 @@
 #define GET_PMU_OWNER()	pfm_get_cpu_var(pmu_owner)
 #define GET_PMU_CTX()	pfm_get_cpu_var(pmu_ctx)
 
-#define LOCK_PFS()	spin_lock(&pfm_sessions.pfs_lock)
-#define UNLOCK_PFS()	spin_unlock(&pfm_sessions.pfs_lock)
+#define LOCK_PFS(g)	spin_lock_irqsave(&pfm_sessions.pfs_lock, g)
+#define UNLOCK_PFS(g)	spin_unlock_irqrestore(&pfm_sessions.pfs_lock, g)
 
 #define PFM_REG_RETFLAG_SET(flags, val)	do { flags &= ~PFM_REG_RETFL_MASK; flags |= (val); } while(0)
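The session lock now disables interrupts while held. The irqsave variant requires every call site to carry a local flags word, which is why the hunks below add "unsigned long flags;" to each function that takes the lock. The general pattern looks like this (a generic sketch with a hypothetical lock, not perfmon code):

	static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;	/* hypothetical */

	static void
	example_critical_section (void)
	{
		unsigned long flags;

		/* disable local interrupts, remembering their prior state */
		spin_lock_irqsave(&example_lock, flags);
		/* ... touch data shared with interrupt context ... */
		spin_unlock_irqrestore(&example_lock, flags);
	}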
@@ -618,6 +618,7 @@ static struct file_system_type pfm_fs_type = {
 	.get_sb   = pfmfs_get_sb,
 	.kill_sb  = kill_anon_super,
 };
 
+DEFINE_PER_CPU(unsigned long, pfm_syst_info);
 DEFINE_PER_CPU(struct task_struct *, pmu_owner);
 DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);

@@ -634,6 +635,8 @@ static struct file_operations pfm_file_ops;
 static void pfm_lazy_save_regs (struct task_struct *ta);
 #endif
 
+void dump_pmu_state(const char *);
+
 /*
  * the HP simulator must be first because
  * CONFIG_IA64_HP_SIM is independent of CONFIG_MCKINLEY or CONFIG_ITANIUM

@@ -1283,10 +1286,11 @@ pfm_unregister_buffer_fmt(pfm_uuid_t uuid)
 static int
 pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
 {
+	unsigned long flags;
 	/*
 	 * validy checks on cpu_mask have been done upstream
 	 */
-	LOCK_PFS();
+	LOCK_PFS(flags);
 
 	DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
 		pfm_sessions.pfs_sys_sessions,
@@ -1325,7 +1329,7 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
 		is_syswide,
 		cpu));
 
-	UNLOCK_PFS();
+	UNLOCK_PFS(flags);
 
 	return 0;

@@ -1334,7 +1338,7 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
 		pfm_sessions.pfs_sys_session[cpu]->pid,
 		smp_processor_id()));
 abort:
-	UNLOCK_PFS();
+	UNLOCK_PFS(flags);
 
 	return -EBUSY;

@@ -1343,11 +1347,11 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
 static int
 pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
 {
+	unsigned long flags;
 	/*
 	 * validy checks on cpu_mask have been done upstream
 	 */
-	LOCK_PFS();
+	LOCK_PFS(flags);
 
 	DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
 		pfm_sessions.pfs_sys_sessions,

@@ -1380,7 +1384,7 @@ pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
 		is_syswide,
 		cpu));
 
-	UNLOCK_PFS();
+	UNLOCK_PFS(flags);
 
 	return 0;
 }
@@ -1655,7 +1659,7 @@ pfm_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned lon
 }
 
 /*
- * context is locked when coming here
+ * context is locked when coming here and interrupts are disabled
  */
 static inline int
 pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on)

@@ -1789,6 +1793,7 @@ pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
  * even if the task itself is in the middle of being ctxsw out.
  */
+static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
 
 static int
 pfm_close(struct inode *inode, struct file *filp)
 {

@@ -1803,10 +1808,6 @@ pfm_close(struct inode *inode, struct file *filp)
 	int free_possible = 1;
 	int state, is_system;
 
-	{ u64 psr = pfm_get_psr();
-	  BUG_ON((psr & IA64_PSR_I) == 0UL);
-	}
-
 	DPRINT(("pfm_close called private=%p\n", filp->private_data));
 
 	if (!inode) {
@@ -1815,7 +1816,7 @@ pfm_close(struct inode *inode, struct file *filp)
 	}
 
 	if (PFM_IS_FILE(filp) == 0) {
-		printk(KERN_ERR "perfmon: pfm_close: bad magic [%d]\n", current->pid);
+		DPRINT(("bad magic for [%d]\n", current->pid));
 		return -EBADF;
 	}

@@ -1824,6 +1825,23 @@ pfm_close(struct inode *inode, struct file *filp)
 		printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", current->pid);
 		return -EBADF;
 	}
 
+	/*
+	 * remove our file from the async queue, if we use this mode.
+	 * This can be done without the context being protected. We come
+	 * here when the context has become unreacheable by other tasks.
+	 *
+	 * We may still have active monitoring at this point and we may
+	 * end up in pfm_overflow_handler(). However, fasync_helper()
+	 * operates with interrupts disabled and it cleans up the
+	 * queue. If the PMU handler is called prior to entering
+	 * fasync_helper() then it will send a signal. If it is
+	 * invoked after, it will find an empty queue and no
+	 * signal will be sent. In both case, we are safe
+	 */
+	if (filp->f_flags & FASYNC) {
+		DPRINT(("[%d] cleaning up async_queue=%p\n", current->pid, ctx->ctx_async_queue));
+		pfm_do_fasync(-1, filp, ctx, 0);
+	}
+
 	PROTECT_CTX(ctx, flags);
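pfm_do_fasync(-1, filp, ctx, 0) detaches the file from the context's notification queue; presumably it is a thin wrapper over the standard fasync helper, roughly like this (a guess at its shape, not shown in this diff):

	static inline int
	pfm_do_fasync (int fd, struct file *filp, pfm_context_t *ctx, int on)
	{
		/* on == 0 makes fasync_helper() drop filp from the queue */
		return fasync_helper(fd, filp, on, &ctx->ctx_async_queue);
	}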
@@ -1832,24 +1850,17 @@ pfm_close(struct inode *inode, struct file *filp)
 	task = PFM_CTX_TASK(ctx);
 
-	/*
-	 * remove our file from the async queue, if we use it
-	 */
-	if (filp->f_flags & FASYNC) {
-		DPRINT(("[%d] before async_queue=%p\n", current->pid, ctx->ctx_async_queue));
-		pfm_do_fasync(-1, filp, ctx, 0);
-		DPRINT(("[%d] after async_queue=%p\n", current->pid, ctx->ctx_async_queue));
-	}
+	regs = ia64_task_regs(task);
 
-	DPRINT(("[%d] ctx_state=%d\n", current->pid, state));
+	DPRINT(("[%d] ctx_state=%d is_current=%d\n",
+		current->pid, state,
+		task == current ? 1 : 0));
 
 	if (state == PFM_CTX_UNLOADED || state == PFM_CTX_TERMINATED) {
 		goto doit;
 	}
 
-	regs = ia64_task_regs(task);
-
 	/*
 	 * context still loaded/masked and self monitoring,
 	 * we stop/unload and we destroy right here

@@ -1898,12 +1909,11 @@ pfm_close(struct inode *inode, struct file *filp)
 			ctx->ctx_state = PFM_CTX_TERMINATED;
 
-			DPRINT(("[%d] ctx_state=%d\n", current->pid, state));
+			DPRINT(("[%d] ctx_state=%d\n", current->pid, ctx->ctx_state));
 		}
 		goto doit;
 	}
 
 	/*
 	 * The task is currently blocked or will block after an overflow.
 	 * we must force it to wakeup to get out of the
@@ -3482,6 +3492,7 @@ int
 pfm_use_debug_registers(struct task_struct *task)
 {
 	pfm_context_t *ctx = task->thread.pfm_context;
+	unsigned long flags;
 	int ret = 0;
 
 	if (pmu_conf.use_rr_dbregs == 0) return 0;

@@ -3503,7 +3514,7 @@ pfm_use_debug_registers(struct task_struct *task)
 	 */
 	if (ctx && ctx->ctx_fl_using_dbreg == 1) return -1;
 
-	LOCK_PFS();
+	LOCK_PFS(flags);
 
 	/*
 	 * We cannot allow setting breakpoints when system wide monitoring

@@ -3519,7 +3530,7 @@ pfm_use_debug_registers(struct task_struct *task)
 		pfm_sessions.pfs_sys_use_dbregs, task->pid, ret));
 
-	UNLOCK_PFS();
+	UNLOCK_PFS(flags);
 
 	return ret;
 }

@@ -3535,11 +3546,12 @@ pfm_use_debug_registers(struct task_struct *task)
 int
 pfm_release_debug_registers(struct task_struct *task)
 {
+	unsigned long flags;
 	int ret;
 
 	if (pmu_conf.use_rr_dbregs == 0) return 0;
 
-	LOCK_PFS();
+	LOCK_PFS(flags);
 	if (pfm_sessions.pfs_ptrace_use_dbregs == 0) {
 		printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task->pid);
 		ret = -1;
@@ -3547,7 +3559,7 @@ pfm_release_debug_registers(struct task_struct *task)
 		pfm_sessions.pfs_ptrace_use_dbregs--;
 		ret = 0;
 	}
-	UNLOCK_PFS();
+	UNLOCK_PFS(flags);
 
 	return ret;
 }

@@ -3723,7 +3735,6 @@ pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		memset(pfm_stats, 0, sizeof(pfm_stats));
 		for(m=0; m < NR_CPUS; m++) pfm_stats[m].pfm_ovfl_intr_cycles_min = ~0UL;
 	}
-
 	return 0;
 }

@@ -3735,6 +3746,7 @@ pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_
 {
 	struct thread_struct *thread = NULL;
 	pfarg_dbreg_t *req = (pfarg_dbreg_t *)arg;
+	unsigned long flags;
 	dbreg_t dbreg;
 	unsigned int rnum;
 	int first_time;
@@ -3793,7 +3805,7 @@ pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_
 	 * written after the context is loaded
 	 */
 	if (is_loaded) {
-		LOCK_PFS();
+		LOCK_PFS(flags);
 		if (first_time && is_system) {
 			if (pfm_sessions.pfs_ptrace_use_dbregs)

@@ -3801,7 +3813,7 @@ pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_
 			else
 				pfm_sessions.pfs_sys_use_dbregs++;
 		}
-		UNLOCK_PFS();
+		UNLOCK_PFS(flags);
 	}
 
 	if (ret != 0) return ret;

@@ -3902,11 +3914,11 @@ pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_
 	 * in case it was our first attempt, we undo the global modifications
 	 */
 	if (first_time) {
-		LOCK_PFS();
+		LOCK_PFS(flags);
 		if (ctx->ctx_fl_system) {
 			pfm_sessions.pfs_sys_use_dbregs--;
 		}
-		UNLOCK_PFS();
+		UNLOCK_PFS(flags);
 		ctx->ctx_fl_using_dbreg = 0;
 	}
 	/*
@@ -3959,7 +3971,11 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		DPRINT(("[%d] should be running on CPU%d\n", current->pid, ctx->ctx_cpu));
 		return -EBUSY;
 	}
+	DPRINT(("current [%d] task [%d] ctx_state=%d is_system=%d\n",
+		current->pid,
+		PFM_CTX_TASK(ctx)->pid,
+		state, is_system));
 	/*
 	 * in system mode, we need to update the PMU directly
 	 * and the user level state of the caller, which may not

@@ -4157,6 +4173,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 	struct task_struct *task;
 	struct thread_struct *thread;
 	struct pfm_context_t *old;
+	unsigned long flags;
 #ifndef CONFIG_SMP
 	struct task_struct *owner_task = NULL;
 #endif
@@ -4217,7 +4234,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 			DPRINT(("load_pid [%d] task is debugged, cannot load range restrictions\n",
 				req->load_pid));
 			goto error;
 		}
 
-		LOCK_PFS();
+		LOCK_PFS(flags);
 
 		if (is_system) {
 			if (pfm_sessions.pfs_ptrace_use_dbregs) {

@@ -4230,7 +4247,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 			}
 		}
 
-		UNLOCK_PFS();
+		UNLOCK_PFS(flags);
 
 		if (ret) goto error;
 	}

@@ -4377,9 +4394,9 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 	 * we must undo the dbregs setting (for system-wide)
 	 */
 	if (ret && set_dbregs) {
-		LOCK_PFS();
+		LOCK_PFS(flags);
 		pfm_sessions.pfs_sys_use_dbregs--;
-		UNLOCK_PFS();
+		UNLOCK_PFS(flags);
 	}
 	/*
 	 * release task, there is now a link with the context
@@ -4605,11 +4622,14 @@ pfm_exit_thread(struct task_struct *task)
 			printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n",
 			       task->pid, state);
 			break;
 	}
-	UNPROTECT_CTX(ctx, flags);
 
+	{ u64 psr = pfm_get_psr();
+	  BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
+	  BUG_ON(GET_PMU_OWNER());
+	  BUG_ON(ia64_psr(regs)->up);
+	  BUG_ON(ia64_psr(regs)->pp);
+	}
+
+	UNPROTECT_CTX(ctx, flags);
 
 	/*
 	 * All memory free operations (especially for vmalloc'ed memory)
@@ -5488,7 +5508,7 @@ pfm_proc_info(char *page)
 	char *p = page;
 	struct list_head *pos;
 	pfm_buffer_fmt_t *entry;
-	unsigned long psr;
+	unsigned long psr, flags;
 	int online_cpus = 0;
 	int i;

@@ -5528,7 +5548,7 @@ pfm_proc_info(char *page)
 		}
 	}
 
-	LOCK_PFS();
+	LOCK_PFS(flags);
 	p += sprintf(p, "proc_sessions             : %u\n"
 		     "sys_sessions              : %u\n"
 		     "sys_use_dbregs            : %u\n"

@@ -5537,7 +5557,7 @@ pfm_proc_info(char *page)
 		pfm_sessions.pfs_sys_sessions,
 		pfm_sessions.pfs_sys_use_dbregs,
 		pfm_sessions.pfs_ptrace_use_dbregs);
-	UNLOCK_PFS();
+	UNLOCK_PFS(flags);
 
 	spin_lock(&pfm_buffer_fmt_lock);
@@ -5712,10 +5732,6 @@ pfm_save_regs(struct task_struct *task)
 	 */
 	ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
 
-	{ u64 foo = pfm_get_psr();
-	  BUG_ON(foo & ((IA64_PSR_UP|IA64_PSR_PP)));
-	}
-
 	/*
 	 * release ownership of this PMU.
 	 * PM interrupts are masked, so nothing

@@ -5771,6 +5787,8 @@ pfm_save_regs(struct task_struct *task)
 	 */
 	psr = pfm_get_psr();
 
+	BUG_ON(psr & (IA64_PSR_I));
+
 	/*
 	 * stop monitoring:
 	 * This is the last instruction which may generate an overflow

@@ -5785,12 +5803,6 @@ pfm_save_regs(struct task_struct *task)
 	 */
 	ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
 
-#if 1
-	{ u64 foo = pfm_get_psr();
-	  BUG_ON(foo & (IA64_PSR_I));
-	  BUG_ON(foo & ((IA64_PSR_UP|IA64_PSR_PP)));
-	}
-#endif
 	return;
 
 save_error:
 	printk(KERN_ERR "perfmon: pfm_save_regs CPU%d [%d] NULL context PM_VALID=%ld\n",
@@ -5805,11 +5817,9 @@ pfm_lazy_save_regs (struct task_struct *task)
 	struct thread_struct *t;
 	unsigned long flags;
 
-#if 1
-	{ u64 foo = pfm_get_psr();
-	  BUG_ON(foo & IA64_PSR_UP);
-	}
-#endif
+	{ u64 psr = pfm_get_psr();
+	  BUG_ON(psr & IA64_PSR_UP);
+	}
 
 	ctx = PFM_GET_CTX(task);
 	t   = &task->thread;

@@ -5851,7 +5861,7 @@ pfm_lazy_save_regs (struct task_struct *task)
 	/*
 	 * unfreeze PMU if had pending overflows
 	 */
-	if (t->pmcs[0] & ~1UL) pfm_unfreeze_pmu();
+	if (t->pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
 
 	/*
 	 * now get can unmask PMU interrupts, they will
@@ -5900,10 +5910,8 @@ pfm_load_regs (struct task_struct *task)
 	flags = pfm_protect_ctx_ctxsw(ctx);
 	psr   = pfm_get_psr();
 
-#if 1
 	BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
 	BUG_ON(psr & IA64_PSR_I);
-#endif
 
 	if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
 		struct pt_regs *regs = ia64_task_regs(task);

@@ -6060,10 +6068,8 @@ pfm_load_regs (struct task_struct *task)
 	t = &task->thread;
 	psr = pfm_get_psr();
 
-#if 1
 	BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
 	BUG_ON(psr & IA64_PSR_I);
-#endif
 
 	/*
 	 * we restore ALL the debug registers to avoid picking up

@@ -6218,7 +6224,7 @@ pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
 		/*
 		 * clear whatever overflow status bits there were
 		 */
-		task->thread.pmcs[0] &= ~0x1;
+		task->thread.pmcs[0] = 0;
 	}
 	ovfl_val = pmu_conf.ovfl_val;
 	/*
@@ -6400,6 +6406,11 @@ pfm_init_percpu (void)
 	pfm_clear_psr_pp();
 	pfm_clear_psr_up();
 
+	/*
+	 * we run with the PMU not frozen at all times
+	 */
+	pfm_unfreeze_pmu();
+
 	if (smp_processor_id() == 0)
 		register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);
@@ -6427,49 +6438,75 @@ pfm_init_percpu (void)
 		if (PMD_IS_IMPL(i) == 0) continue;
 		ia64_set_pmd(i, 0UL);
 	}
-
-	/*
-	 * we run with the PMU not frozen at all times
-	 */
-	pfm_unfreeze_pmu();
 }
 
 /*
  * used for debug purposes only
  */
 void
-dump_pmu_state(void)
+dump_pmu_state(const char *from)
 {
 	struct task_struct *task;
 	struct thread_struct *t;
 	struct pt_regs *regs;
 	pfm_context_t *ctx;
-	unsigned long psr;
-	int i;
+	unsigned long psr, dcr, info, flags;
+	int i, this_cpu;
+
+	local_irq_save(flags);
+
+	this_cpu = smp_processor_id();
+	regs     = ia64_task_regs(current);
+	info     = PFM_CPUINFO_GET();
+	dcr      = ia64_getreg(_IA64_REG_CR_DCR);
 
-	printk("current [%d] %s\n", current->pid, current->comm);
+	if (info == 0 && ia64_psr(regs)->pp == 0 && (dcr & IA64_DCR_PP) == 0) {
+		local_irq_restore(flags);
+		return;
+	}
+
+	printk("CPU%d from %s() current [%d] iip=0x%lx %s\n",
+		this_cpu, from, current->pid, regs->cr_iip, current->comm);
 
 	task = GET_PMU_OWNER();
 	ctx  = GET_PMU_CTX();
 
-	printk("owner [%d] ctx=%p\n", task ? task->pid : -1, ctx);
+	printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task->pid : -1, ctx);
 
 	psr = pfm_get_psr();
 
-	printk("psr.pp=%ld psr.up=%ld\n", (psr >> IA64_PSR_PP_BIT) & 0x1UL, (psr >> IA64_PSR_PP_BIT) & 0x1UL);
+	printk("->CPU%d pmc0=0x%lx psr.pp=%d psr.up=%d dcr.pp=%d syst_info=0x%lx user_psr.up=%d user_psr.pp=%d\n",
+		this_cpu,
+		ia64_get_pmc(0),
+		psr & IA64_PSR_PP ? 1 : 0,
+		psr & IA64_PSR_UP ? 1 : 0,
+		dcr & IA64_DCR_PP ? 1 : 0,
+		info,
+		ia64_psr(regs)->up,
+		ia64_psr(regs)->pp);
+
+	ia64_psr(regs)->up = 0;
+	ia64_psr(regs)->pp = 0;
 
 	t = &current->thread;
 
 	for (i=1; PMC_IS_LAST(i) == 0; i++) {
 		if (PMC_IS_IMPL(i) == 0) continue;
-		printk("pmc[%d]=0x%lx tpmc=0x%lx\n", i, ia64_get_pmc(i), t->pmcs[i]);
+		printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n", this_cpu, i, ia64_get_pmc(i), i, t->pmcs[i]);
 	}
 
 	for (i=1; PMD_IS_LAST(i) == 0; i++) {
 		if (PMD_IS_IMPL(i) == 0) continue;
-		printk("pmd[%d]=0x%lx tpmd=0x%lx\n", i, ia64_get_pmd(i), t->pmds[i]);
+		printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n", this_cpu, i, ia64_get_pmd(i), i, t->pmds[i]);
 	}
 
 	if (ctx) {
-		printk("ctx_state=%d vaddr=%p addr=%p fd=%d ctx_task=[%d] saved_psr_up=0x%lx\n",
+		printk("->CPU%d ctx_state=%d vaddr=%p addr=%p fd=%d ctx_task=[%d] saved_psr_up=0x%lx\n",
+			this_cpu,
 			ctx->ctx_state,
 			ctx->ctx_smpl_vaddr,
 			ctx->ctx_smpl_hdr,

@@ -6477,6 +6514,7 @@ dump_pmu_state(void)
 			ctx->ctx_msgq_tail,
 			ctx->ctx_saved_psr_up);
 	}
+	local_irq_restore(flags);
 }

@@ -6499,10 +6537,8 @@ pfm_inherit(struct task_struct *task, struct pt_regs *regs)
 	PFM_SET_WORK_PENDING(task, 0);
 	/*
-	 * restore default psr settings
+	 * the psr bits are already set properly in copy_threads()
 	 */
-	ia64_psr(regs)->pp = ia64_psr(regs)->up = 0;
-	ia64_psr(regs)->sp = 1;
 }
 
 #else /* !CONFIG_PERFMON */
 
 asmlinkage long
arch/ia64/kernel/process.c

@@ -353,9 +353,13 @@ copy_thread (int nr, unsigned long clone_flags,
 	/* copy parts of thread_struct: */
 	p->thread.ksp = (unsigned long) child_stack - 16;
 
-	/* stop some PSR bits from being inherited: */
+	/* stop some PSR bits from being inherited.
+	 * the psr.up/psr.pp bits must be cleared on fork but inherited on execve()
+	 * therefore we must specify them explicitly here and not include them in
+	 * IA64_PSR_BITS_TO_CLEAR.
+	 */
 	child_ptregs->cr_ipsr = ((child_ptregs->cr_ipsr | IA64_PSR_BITS_TO_SET)
-				 & ~IA64_PSR_BITS_TO_CLEAR);
+				 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_PP | IA64_PSR_UP));
 
 	/*
 	 * NOTE: The calling convention considers all floating point
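The ipsr composition can be read as a two-step mask (a worked illustration; the bit names are the real macros, the shape of the arithmetic is schematic):

	/* Schematic only: how the new expression composes.
	 *
	 *   ipsr' = (ipsr | IA64_PSR_BITS_TO_SET)
	 *           & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_PP | IA64_PSR_UP)
	 *
	 * PP/UP are OR-ed into the clear mask here, at fork time, instead
	 * of being folded into IA64_PSR_BITS_TO_CLEAR itself, because the
	 * execve() path also applies IA64_PSR_BITS_TO_CLEAR and there the
	 * two bits must survive.
	 */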
arch/ia64/kernel/unwind_i.h

 /*
- * Copyright (C) 2000, 2002 Hewlett-Packard Co
+ * Copyright (C) 2000, 2002-2003 Hewlett-Packard Co
  *	David Mosberger-Tang <davidm@hpl.hp.com>
  *
  * Kernel unwind support.

@@ -45,12 +45,6 @@ struct unw_info_block {
 	/* personality routine and language-specific data follow behind descriptors */
 };
 
-struct unw_table_entry {
-	u64 start_offset;
-	u64 end_offset;
-	u64 info_offset;
-};
-
 struct unw_table {
 	struct unw_table *next;	/* must be first member! */
 	const char *name;
include/asm-ia64/io.h

@@ -72,6 +72,9 @@ phys_to_virt (unsigned long address)
 	return (void *) (address + PAGE_OFFSET);
 }
 
+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
+extern int valid_phys_addr_range (unsigned long addr, size_t *count); /* efi.c */
+
 /*
  * The following two macros are deprecated and scheduled for removal.
  * Please use the PCI-DMA interface defined in <asm/pci.h> instead.
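ARCH_HAS_VALID_PHYS_ADDR_RANGE is the usual opt-out switch: generic code is expected to supply a permissive default only when the architecture has not claimed the hook. A sketch of that presumed pattern (the default body is illustrative, not taken from this commit):

	#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
	static inline int
	valid_phys_addr_range (unsigned long addr, size_t *count)
	{
		return 1;	/* default: accept any physical range unchanged */
	}
	#endif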
include/asm-ia64/module.h

@@ -18,7 +18,8 @@ struct mod_arch_specific {
 	struct elf64_shdr *unwind;	/* unwind-table section */
 	unsigned long gp;		/* global-pointer for module */
 
-	void *unw_table;		/* unwind-table cookie returned by unwinder */
+	void *core_unw_table;		/* core unwind-table cookie returned by unwinder */
+	void *init_unw_table;		/* init unwind-table cookie returned by unwinder */
 	unsigned int next_got_entry;	/* index of next available got entry */
 };
include/asm-ia64/namei.h

@@ -9,7 +9,7 @@
 #include <asm/ptrace.h>
 #include <asm/system.h>
 
-#define EMUL_PREFIX_LINUX_IA32 "emul/ia32-linux/"
+#define EMUL_PREFIX_LINUX_IA32 "/emul/ia32-linux/"
 
 static inline char *
 __emul_prefix (void)
include/asm-ia64/unwind.h

@@ -93,6 +93,12 @@ struct unw_frame_info {
 /*
  * The official API follows below:
  */
+
+struct unw_table_entry {
+	u64 start_offset;
+	u64 end_offset;
+	u64 info_offset;
+};
 
 /*
  * Initialize unwind support.
  */