Commit 1740c41c authored Apr 12, 2004 by Linus Torvalds

Merge http://lia64.bkbits.net/to-linus-2.5
into ppc970.osdl.org:/home/torvalds/v2.6/linux

parents 4c268c6b 4006f663

Showing 19 changed files with 407 additions and 348 deletions
arch/ia64/Kconfig                        +2   -2
arch/ia64/hp/common/sba_iommu.c          +12  -1
arch/ia64/hp/sim/Kconfig                 +1   -1
arch/ia64/kernel/acpi.c                  +9   -1
arch/ia64/kernel/efi.c                   +1   -2
arch/ia64/kernel/iosapic.c               +10  -6
arch/ia64/kernel/perfmon.c               +323 -292
arch/ia64/kernel/perfmon_mckinley.h      +9   -4
arch/ia64/kernel/sys_ia64.c              +7   -1
arch/ia64/mm/hugetlbpage.c               +1   -0
arch/ia64/pci/pci.c                      +0   -1
arch/ia64/sn/io/hwgfs/interface.c        +3   -3
arch/ia64/sn/io/machvec/pci_bus_cvlink.c +10  -16
arch/ia64/sn/io/machvec/pci_dma.c        +8   -4
arch/ia64/sn/io/sn2/pcibr/pcibr_slot.c   +2   -1
drivers/char/sn_serial.c                 +3   -1
include/asm-ia64/machvec_hpzx1.h         +2   -1
include/asm-ia64/perfmon.h               +4   -3
include/asm-ia64/processor.h             +0   -8
arch/ia64/Kconfig
@@ -16,7 +16,7 @@ config IA64
 	  The Itanium Processor Family is Intel's 64-bit successor to
 	  the 32-bit X86 line.  The IA-64 Linux project has a home
 	  page at <http://www.linuxia64.org/> and a mailing list at
-	  linux-ia64@vger.kernel.org.
+	  <linux-ia64@vger.kernel.org>.

 config 64BIT
 	bool
@@ -57,7 +57,7 @@ config IA64_GENERIC
 	  DIG-compliant		For DIG ("Developer's Interface Guide") compliant systems
 	  HP-zx1/sx1000		For HP systems
 	  SGI-SN2		For SGI Altix systems
-	  Ski-simulator		For the HP simulator (<http://www.hpl.hp.com/research/linux/ski/>)
+	  Ski-simulator		For the HP simulator <http://www.hpl.hp.com/research/linux/ski/>
 	  If you don't know what to do, choose "generic".
arch/ia64/hp/common/sba_iommu.c
@@ -1732,7 +1732,6 @@ ioc_init(u64 hpa, void *handle)
 	if ((long) ~iovp_mask > (long) ia64_max_iommu_merge_mask)
 		ia64_max_iommu_merge_mask = ~iovp_mask;
-	MAX_DMA_ADDRESS = ~0UL;

 	printk(KERN_INFO PFX
 		"%s %d.%d HPA 0x%lx IOVA space %dMb at 0x%lx\n",
@@ -1966,6 +1965,18 @@ sba_init(void)
 subsys_initcall(sba_init); /* must be initialized after ACPI etc., but before any drivers... */

+extern void dig_setup (char **);
+/*
+ * MAX_DMA_ADDRESS needs to be setup prior to paging_init to do any good,
+ * so we use the platform_setup hook to fix it up.
+ */
+void __init
+sba_setup(char **cmdline_p)
+{
+	MAX_DMA_ADDRESS = ~0UL;
+	dig_setup(cmdline_p);
+}
+
 static int __init
 nosbagart(char *str)
 {
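The new sba_setup() exists because an initcall runs too late to influence paging_init(); the fix-up has to ride the early platform setup hook instead. A minimal, self-contained sketch of that idiom follows; max_dma_address, dig_setup_stub, and sba_setup_like are illustrative stand-ins, not the kernel's symbols.

/* Sketch only: the value must be patched before its consumer runs,
 * so the patch lives in the early platform hook, not in an initcall. */
unsigned long max_dma_address = 0x100000UL;	/* stand-in for MAX_DMA_ADDRESS */

void dig_setup_stub(char **cmdline_p)		/* stand-in for dig_setup() */
{
	(void)cmdline_p;			/* platform defaults would go here */
}

void sba_setup_like(char **cmdline_p)
{
	max_dma_address = ~0UL;			/* fix up first... */
	dig_setup_stub(cmdline_p);		/* ...then chain to the generic hook */
}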
arch/ia64/hp/sim/Kconfig
@@ -13,7 +13,7 @@ config HP_SIMSERIAL_CONSOLE
 	depends on HP_SIMSERIAL

 config HP_SIMSCSI
-	bool "Simulated SCSI disk"
+	tristate "Simulated SCSI disk"
 	depends on SCSI

 endmenu
arch/ia64/kernel/acpi.c
@@ -455,6 +455,7 @@ acpi_numa_arch_fixup (void)
 	for (i = 0; i < MAX_PXM_DOMAINS; i++) {
 		if (pxm_bit_test(i)) {
 			pxm_to_nid_map[i] = numnodes;
+			node_set_online(numnodes);
 			nid_to_pxm_map[numnodes++] = i;
 		}
 	}
@@ -506,6 +507,13 @@ acpi_numa_arch_fixup (void)
 }
 #endif /* CONFIG_ACPI_NUMA */

+unsigned int
+acpi_register_gsi (u32 gsi, int polarity, int trigger)
+{
+	return acpi_register_irq(gsi, polarity, trigger);
+}
+EXPORT_SYMBOL(acpi_register_gsi);
+
 static int __init
 acpi_parse_fadt (unsigned long phys_addr, unsigned long size)
 {
@@ -527,7 +535,7 @@ acpi_parse_fadt (unsigned long phys_addr, unsigned long size)
 	if (fadt->iapc_boot_arch & BAF_LEGACY_DEVICES)
 		acpi_legacy_devices = 1;

-	acpi_register_irq(fadt->sci_int, ACPI_ACTIVE_LOW, ACPI_LEVEL_SENSITIVE);
+	acpi_register_gsi(fadt->sci_int, ACPI_ACTIVE_LOW, ACPI_LEVEL_SENSITIVE);
 	return 0;
 }
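A hedged usage sketch of the wrapper introduced above: callers ask for an interrupt by GSI through the newly exported acpi_register_gsi() instead of the ia64-private acpi_register_irq(); the FADT SCI hookup in the last hunk is the first converted caller. The wrapper function name below is hypothetical; acpi_register_gsi() and the ACPI_* constants come from the hunk itself.

/* hypothetical caller, sketch only */
static unsigned int hook_up_sci(u32 sci_gsi)
{
	return acpi_register_gsi(sci_gsi, ACPI_ACTIVE_LOW, ACPI_LEVEL_SENSITIVE);
}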
arch/ia64/kernel/efi.c
@@ -674,8 +674,7 @@ efi_get_iobase (void)
 	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
 		md = p;
 		if (md->type == EFI_MEMORY_MAPPED_IO_PORT_SPACE) {
-			/* paranoia attribute checking */
-			if (md->attribute & EFI_MEMORY_UC)
+			if (md->attribute == (EFI_MEMORY_UC | EFI_MEMORY_RUNTIME))
 				return md->phys_addr;
 		}
 	}
arch/ia64/kernel/iosapic.c
@@ -172,7 +172,7 @@ gsi_to_irq (unsigned int gsi)
 static void
 set_rte (unsigned int vector, unsigned int dest, int mask)
 {
-	unsigned long pol, trigger, dmode;
+	unsigned long pol, trigger, dmode, flags;
 	u32 low32, high32;
 	char *addr;
 	int rte_index;
@@ -211,11 +211,15 @@ set_rte (unsigned int vector, unsigned int dest, int mask)
 	/* dest contains both id and eid */
 	high32 = (dest << IOSAPIC_DEST_SHIFT);

+	spin_lock_irqsave(&iosapic_lock, flags);
+	{
 		writel(IOSAPIC_RTE_HIGH(rte_index), addr + IOSAPIC_REG_SELECT);
 		writel(high32, addr + IOSAPIC_WINDOW);
 		writel(IOSAPIC_RTE_LOW(rte_index), addr + IOSAPIC_REG_SELECT);
 		writel(low32, addr + IOSAPIC_WINDOW);
 		iosapic_intr_info[vector].low32 = low32;
+	}
+	spin_unlock_irqrestore(&iosapic_lock, flags);
 }

 static void
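The hunk above closes a select/window race: two writel() calls share one selector register, so a concurrent set_rte() on another CPU could redirect the second write between them. A self-contained sketch of the idiom, under assumed names (chip_lock, REG_SELECT, WINDOW, and chip_write_indexed are illustrative, not IOSAPIC's):

#include <linux/spinlock.h>
#include <asm/io.h>

#define REG_SELECT	0x00	/* illustrative offsets */
#define WINDOW		0x10

static spinlock_t chip_lock = SPIN_LOCK_UNLOCKED;	/* 2.6.5-era initializer */

static void chip_write_indexed(char *base, u32 index, u32 value)
{
	unsigned long flags;

	/* irqsave: the same registers may also be touched from interrupt context */
	spin_lock_irqsave(&chip_lock, flags);
	writel(index, base + REG_SELECT);	/* select... */
	writel(value, base + WINDOW);		/* ...then write, atomically */
	spin_unlock_irqrestore(&chip_lock, flags);
}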
arch/ia64/kernel/perfmon.c
@@ -57,7 +57,6 @@
 #define PFM_CTX_LOADED		2	/* context is loaded onto a task */
 #define PFM_CTX_MASKED		3	/* context is loaded but monitoring is masked due to overflow */
 #define PFM_CTX_ZOMBIE		4	/* owner of the context is closing it */
-#define PFM_CTX_TERMINATED	5	/* the task the context was loaded onto is gone */

 #define PFM_INVALID_ACTIVATION	(~0UL)
@@ -473,6 +472,7 @@ typedef struct {
 	int	debug;		/* turn on/off debugging via syslog */
 	int	debug_ovfl;	/* turn on/off debug printk in overflow handler */
 	int	fastctxsw;	/* turn on/off fast (unsecure) ctxsw */
+	int	expert_mode;	/* turn on/off value checking */
 	int	debug_pfm_read;
 } pfm_sysctl_t;
@@ -508,6 +508,7 @@ static ctl_table pfm_ctl_table[]={
 	{1, "debug", &pfm_sysctl.debug, sizeof(int), 0666, NULL, &proc_dointvec, NULL,},
 	{2, "debug_ovfl", &pfm_sysctl.debug_ovfl, sizeof(int), 0666, NULL, &proc_dointvec, NULL,},
 	{3, "fastctxsw", &pfm_sysctl.fastctxsw, sizeof(int), 0600, NULL, &proc_dointvec, NULL,},
+	{4, "expert_mode", &pfm_sysctl.expert_mode, sizeof(int), 0600, NULL, &proc_dointvec, NULL,},
 	{0,},
 };
 static ctl_table pfm_sysctl_dir[] = {
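What the new knob buys is visible in the later pfm_write_pmcs/pfm_write_pmds/pfm_read_pmds hunks: the per-register checker only runs while expert_mode is 0. A self-contained, user-space illustration of that gating pattern (every name below is made up for the demo, none of it is kernel code):

#include <stdio.h>

static int expert_mode;			/* 0 = validate, 1 = trust caller */

static int checker(unsigned long v)	/* stand-in for a PMC/PMD write checker */
{
	return v > 0xff ? -1 : 0;
}

static int write_reg(unsigned long v)
{
	if (expert_mode == 0 && checker(v))	/* checker runs only in normal mode */
		return -1;
	printf("programmed 0x%lx\n", v);
	return 0;
}

int main(void)
{
	write_reg(0x1234);	/* rejected: validation on */
	expert_mode = 1;
	write_reg(0x1234);	/* accepted: validation bypassed */
	return 0;
}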
@@ -520,11 +521,8 @@ static ctl_table pfm_sysctl_root[] = {
 };
 static struct ctl_table_header *pfm_sysctl_header;

-static void pfm_vm_close(struct vm_area_struct *area);
-
-static struct vm_operations_struct pfm_vm_ops = {
-	close: pfm_vm_close
-};
+static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
+static int pfm_flush(struct file *filp);

 #define pfm_get_cpu_var(v)	__ia64_per_cpu_var(v)
 #define pfm_get_cpu_data(a,b)	per_cpu(a, b)
@@ -697,6 +695,28 @@ pfm_unfreeze_pmu(void)
 	ia64_srlz_d();
 }

+static inline void
+pfm_restore_ibrs(unsigned long *ibrs, unsigned int nibrs)
+{
+	int i;
+
+	for (i=0; i < nibrs; i++) {
+		ia64_set_ibr(i, ibrs[i]);
+	}
+	ia64_srlz_i();
+}
+
+static inline void
+pfm_restore_dbrs(unsigned long *dbrs, unsigned int ndbrs)
+{
+	int i;
+
+	for (i=0; i < ndbrs; i++) {
+		ia64_set_dbr(i, dbrs[i]);
+	}
+	ia64_srlz_d();
+}
+
 /*
  * PMD[i] must be a counter. no check is made
  */
@@ -827,7 +847,10 @@ pfm_context_alloc(void)
 {
 	pfm_context_t *ctx;

-	/* allocate context descriptor */
+	/*
+	 * allocate context descriptor
+	 * must be able to free with interrupts disabled
+	 */
 	ctx = kmalloc(sizeof(pfm_context_t), GFP_KERNEL);
 	if (ctx) {
 		memset(ctx, 0, sizeof(pfm_context_t));
@@ -853,7 +876,7 @@ pfm_mask_monitoring(struct task_struct *task)
 	unsigned long mask, val, ovfl_mask;
 	int i;

-	DPRINT_ovfl(("[%d] masking monitoring for [%d]\n", current->pid, task->pid));
+	DPRINT_ovfl(("masking monitoring for [%d]\n", task->pid));

 	ovfl_mask = pmu_conf.ovfl_val;
 	/*
@@ -996,6 +1019,15 @@ pfm_restore_monitoring(struct task_struct *task)
 	}
 	ia64_srlz_d();

+	/*
+	 * must restore DBR/IBR because could be modified while masked
+	 * XXX: need to optimize
+	 */
+	if (ctx->ctx_fl_using_dbreg) {
+		pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf.num_ibrs);
+		pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf.num_dbrs);
+	}
+
 	/*
 	 * now restore PSR
 	 */
@@ -1106,28 +1138,6 @@ pfm_restore_pmcs(unsigned long *pmcs, unsigned long mask)
 	ia64_srlz_d();
 }

-static inline void
-pfm_restore_ibrs(unsigned long *ibrs, unsigned int nibrs)
-{
-	int i;
-
-	for (i=0; i < nibrs; i++) {
-		ia64_set_ibr(i, ibrs[i]);
-	}
-	ia64_srlz_i();
-}
-
-static inline void
-pfm_restore_dbrs(unsigned long *dbrs, unsigned int ndbrs)
-{
-	int i;
-
-	for (i=0; i < ndbrs; i++) {
-		ia64_set_dbr(i, dbrs[i]);
-	}
-	ia64_srlz_d();
-}
-
 static inline int
 pfm_uuid_cmp(pfm_uuid_t a, pfm_uuid_t b)
 {
@@ -1684,8 +1694,7 @@ pfm_fasync(int fd, struct file *filp, int on)
 	ret = pfm_do_fasync(fd, filp, ctx, on);

-	DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
-		current->pid,
+	DPRINT(("pfm_fasync called on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
 		fd,
 		on,
 		ctx->ctx_async_queue, ret));
@@ -1707,6 +1716,8 @@ pfm_syswide_force_stop(void *info)
 	pfm_context_t *ctx = (pfm_context_t *)info;
 	struct pt_regs *regs = ia64_task_regs(current);
 	struct task_struct *owner;
+	unsigned long flags;
+	int ret;

 	if (ctx->ctx_cpu != smp_processor_id()) {
 		printk(KERN_ERR "perfmon: pfm_syswide_force_stop for CPU%d but on CPU%d\n",
@@ -1728,27 +1739,23 @@ pfm_syswide_force_stop(void *info)
 		return;
 	}

-	DPRINT(("[%d] on CPU%d forcing system wide stop for [%d]\n", current->pid, smp_processor_id(), ctx->ctx_task->pid));
+	DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), ctx->ctx_task->pid));

 	/*
-	 * Update local PMU
+	 * the context is already protected in pfm_close(), we simply
+	 * need to mask interrupts to avoid a PMU interrupt race on
+	 * this CPU
 	 */
-	ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
-	ia64_srlz_i();
-	/*
-	 * update local cpuinfo
-	 */
-	PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
-	PFM_CPUINFO_CLEAR(PFM_CPUINFO_SYST_WIDE);
-	PFM_CPUINFO_CLEAR(PFM_CPUINFO_EXCL_IDLE);
-	pfm_clear_psr_pp();
+	local_irq_save(flags);
+
+	ret = pfm_context_unload(ctx, NULL, 0, regs);
+	if (ret) {
+		DPRINT(("context_unload returned %d\n", ret));
+	}

 	/*
-	 * also stop monitoring in the local interrupted task
+	 * unmask interrupts, PMU interrupts are now spurious here
 	 */
-	ia64_psr(regs)->pp = 0;
-
-	SET_PMU_OWNER(NULL, NULL);
+	local_irq_restore(flags);
 }

 static void
@@ -1756,59 +1763,38 @@ pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
 {
 	int ret;

-	DPRINT(("[%d] calling CPU%d for cleanup\n", current->pid, ctx->ctx_cpu));
+	DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
 	ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 0, 1);
-	DPRINT(("[%d] called CPU%d for cleanup ret=%d\n", current->pid, ctx->ctx_cpu, ret));
+	DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
 }
 #endif /* CONFIG_SMP */

 /*
- * called either on explicit close() or from exit_files().
- *
- * IMPORTANT: we get called ONLY when the refcnt on the file gets to zero (fput()),i.e,
- * last task to access the file. Nobody else can access the file at this point.
- *
- * When called from exit_files(), the VMA has been freed because exit_mm()
- * is executed before exit_files().
- *
- * When called from exit_files(), the current task is not yet ZOMBIE but we will
- * flush the PMU state to the context. This means * that when we see the context
- * state as TERMINATED we are guranteed to have the latest PMU state available,
- * even if the task itself is in the middle of being ctxsw out.
+ * called for each close(). Partially free resources.
+ * When caller is self-monitoring, the context is unloaded.
  */
-static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
-
 static int
-pfm_close(struct inode *inode, struct file *filp)
+pfm_flush(struct file *filp)
 {
 	pfm_context_t *ctx;
 	struct task_struct *task;
 	struct pt_regs *regs;
-	DECLARE_WAITQUEUE(wait, current);
 	unsigned long flags;
 	unsigned long smpl_buf_size = 0UL;
 	void *smpl_buf_vaddr = NULL;
-	void *smpl_buf_addr = NULL;
-	int free_possible = 1;
 	int state, is_system;

-	DPRINT(("pfm_close called private=%p\n", filp->private_data));
-
-	if (!inode) {
-		printk(KERN_ERR "pfm_close: NULL inode\n");
-		return 0;
-	}
-
 	if (PFM_IS_FILE(filp) == 0) {
-		DPRINT(("bad magic for [%d]\n", current->pid));
+		DPRINT(("bad magic for\n"));
 		return -EBADF;
 	}

 	ctx = (pfm_context_t *)filp->private_data;
 	if (ctx == NULL) {
-		printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", current->pid);
+		printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", current->pid);
 		return -EBADF;
 	}

 	/*
 	 * remove our file from the async queue, if we use this mode.
 	 * This can be done without the context being protected. We come
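pfm_syswide_cleanup_other_cpu() shows the standard remedy when per-CPU state must be torn down from the wrong CPU: ship a handler to the owning CPU and wait for it. A short sketch against the 2.6-era five-argument smp_call_function_single() (the handler and wrapper names here are mine, not perfmon's):

#include <linux/smp.h>

static void cleanup_on_owner(void *info)
{
	/* runs on the target CPU; safe to touch its PMU/per-CPU state here */
}

static void cleanup_session(int owner_cpu, void *ctx)
{
	/* nonatomic=0, wait=1: block until the remote handler has finished,
	 * exactly as the DPRINT-bracketed call in the hunk above does */
	smp_call_function_single(owner_cpu, cleanup_on_owner, ctx, 0, 1);
}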
@@ -1823,7 +1809,7 @@ pfm_close(struct inode *inode, struct file *filp)
 	 * signal will be sent. In both case, we are safe
 	 */
 	if (filp->f_flags & FASYNC) {
-		DPRINT(("[%d] cleaning up async_queue=%p\n", current->pid, ctx->ctx_async_queue));
+		DPRINT(("cleaning up async_queue=%p\n", ctx->ctx_async_queue));
 		pfm_do_fasync(-1, filp, ctx, 0);
 	}
@@ -1833,23 +1819,18 @@ pfm_close(struct inode *inode, struct file *filp)
 	is_system = ctx->ctx_fl_system;

 	task = PFM_CTX_TASK(ctx);
 	regs = ia64_task_regs(task);

-	DPRINT(("[%d] ctx_state=%d is_current=%d\n",
-		current->pid, state,
+	DPRINT(("ctx_state=%d is_current=%d\n",
+		state,
 		task == current ? 1 : 0));

-	if (state == PFM_CTX_UNLOADED || state == PFM_CTX_TERMINATED) {
-		goto doit;
-	}
+	/*
+	 * if state == UNLOADED, then task is NULL
+	 */

 	/*
-	 * context still loaded/masked and self monitoring,
-	 * we stop/unload and we destroy right here
+	 * we must stop and unload because we are losing access to the context.
+	 *
+	 * We always go here for system-wide sessions
 	 */
 	if (task == current) {
 #ifdef CONFIG_SMP
@@ -1862,46 +1843,134 @@ pfm_close(struct inode *inode, struct file *filp)
 		 */
 		if (is_system && ctx->ctx_cpu != smp_processor_id()) {

-			DPRINT(("[%d] should be running on CPU%d\n", current->pid, ctx->ctx_cpu));
-
-			UNPROTECT_CTX(ctx, flags);
+			DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
+			/*
+			 * keep context protected but unmask interrupt for IPI
+			 */
+			local_irq_restore(flags);

 			pfm_syswide_cleanup_other_cpu(ctx);

-			PROTECT_CTX(ctx, flags);
-
 			/*
-			 * short circuit pfm_context_unload();
+			 * restore interrupt masking
 			 */
-			task->thread.pfm_context = NULL;
-			ctx->ctx_task            = NULL;
-
-			ctx->ctx_state = state = PFM_CTX_UNLOADED;
-
-			pfm_unreserve_session(ctx, 1, ctx->ctx_cpu);
+			local_irq_save(flags);

+			/*
+			 * context is unloaded at this point
+			 */
 		} else
 #endif /* CONFIG_SMP */
 		{
-			DPRINT(("forcing unload on [%d]\n", current->pid));
+			DPRINT(("forcing unload\n"));
 			/*
 			 * stop and unload, returning with state UNLOADED
 			 * and session unreserved.
 			 */
 			pfm_context_unload(ctx, NULL, 0, regs);

-			ctx->ctx_state = PFM_CTX_TERMINATED;
-
-			DPRINT(("[%d] ctx_state=%d\n", current->pid, ctx->ctx_state));
+			DPRINT(("ctx_state=%d\n", ctx->ctx_state));
 		}
-		goto doit;
 	}

+	/*
+	 * remove virtual mapping, if any, for the calling task.
+	 * cannot reset ctx field until last user is calling close().
+	 *
+	 * ctx_smpl_vaddr must never be cleared because it is needed
+	 * by every task with access to the context
+	 *
+	 * When called from do_exit(), the mm context is gone already, therefore
+	 * mm is NULL, i.e., the VMA is already gone and we do not have to
+	 * do anything here
+	 */
+	if (ctx->ctx_smpl_vaddr && current->mm) {
+		smpl_buf_vaddr = ctx->ctx_smpl_vaddr;
+		smpl_buf_size  = ctx->ctx_smpl_size;
+	}
+
+	UNPROTECT_CTX(ctx, flags);
+
+	/*
+	 * if there was a mapping, then we systematically remove it
+	 * at this point. Cannot be done inside critical section
+	 * because some VM function reenables interrupts.
+	 *
+	 */
+	if (smpl_buf_vaddr) pfm_remove_smpl_mapping(current, smpl_buf_vaddr, smpl_buf_size);
+
+	return 0;
+}
+
+/*
+ * called either on explicit close() or from exit_files().
+ * Only the LAST user of the file gets to this point, i.e., it is
+ * called only ONCE.
+ *
+ * IMPORTANT: we get called ONLY when the refcnt on the file gets to zero
+ * (fput()),i.e, last task to access the file. Nobody else can access the
+ * file at this point.
+ *
+ * When called from exit_files(), the VMA has been freed because exit_mm()
+ * is executed before exit_files().
+ *
+ * When called from exit_files(), the current task is not yet ZOMBIE but we
+ * flush the PMU state to the context.
+ */
+static int
+pfm_close(struct inode *inode, struct file *filp)
+{
+	pfm_context_t *ctx;
+	struct task_struct *task;
+	struct pt_regs *regs;
+	DECLARE_WAITQUEUE(wait, current);
+	unsigned long flags;
+	unsigned long smpl_buf_size = 0UL;
+	void *smpl_buf_addr = NULL;
+	int free_possible = 1;
+	int state, is_system;
+
+	DPRINT(("pfm_close called private=%p\n", filp->private_data));
+
+	if (PFM_IS_FILE(filp) == 0) {
+		DPRINT(("bad magic\n"));
+		return -EBADF;
+	}
+
+	ctx = (pfm_context_t *)filp->private_data;
+	if (ctx == NULL) {
+		printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", current->pid);
+		return -EBADF;
+	}
+
+	PROTECT_CTX(ctx, flags);
+
+	state     = ctx->ctx_state;
+	is_system = ctx->ctx_fl_system;
+
+	task = PFM_CTX_TASK(ctx);
+	regs = ia64_task_regs(task);
+
+	DPRINT(("ctx_state=%d is_current=%d\n",
+		state,
+		task == current ? 1 : 0));
+
+	/*
+	 * if task == current, then pfm_flush() unloaded the context
+	 */
+	if (state == PFM_CTX_UNLOADED) goto doit;
+
+	/*
+	 * context is loaded/masked and task != current, we need to
+	 * either force an unload or go zombie
+	 */
+
 	/*
 	 * The task is currently blocked or will block after an overflow.
 	 * we must force it to wakeup to get out of the
-	 * MASKED state and transition to the unloaded state by itself
+	 * MASKED state and transition to the unloaded state by itself.
+	 *
+	 * This situation is only possible for per-task mode
 	 */
 	if (state == PFM_CTX_MASKED && CTX_OVFL_NOBLOCK(ctx) == 0) {
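The split above leans on a file_operations distinction worth spelling out: ->flush fires on every close() and in exit_files(), in the closing task's own context (so it can unload a self-monitored PMU and tear down that task's VMA), while ->release fires exactly once, when the last reference is dropped, and frees what remains. A minimal sketch with illustrative names; the signatures match the 2.6.5-era struct file_operations, but the bodies are placeholders, not perfmon's logic.

#include <linux/fs.h>

static int demo_flush(struct file *filp)
{
	/* per-closer teardown: runs for every close(), current->mm is usable */
	return 0;
}

static int demo_release(struct inode *inode, struct file *filp)
{
	/* last-reference teardown: free the shared state exactly once */
	return 0;
}

static struct file_operations demo_fops = {
	.flush   = demo_flush,
	.release = demo_release,
};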
@@ -1911,7 +1980,7 @@ pfm_close(struct inode *inode, struct file *filp)
 	 *
 	 * We cannot use the ZOMBIE state, because it is checked
 	 * by pfm_load_regs() which is called upon wakeup from down().
-	 * In such cas, it would free the context and then we would
+	 * In such case, it would free the context and then we would
 	 * return to pfm_handle_work() which would access the
 	 * stale context. Instead, we set a flag invisible to pfm_load_regs()
 	 * but visible to pfm_handle_work().
@@ -1926,7 +1995,7 @@ pfm_close(struct inode *inode, struct file *filp)
 	 */
 	up(&ctx->ctx_restart_sem);

-	DPRINT(("waking up ctx_state=%d for [%d]\n", state, current->pid));
+	DPRINT(("waking up ctx_state=%d\n", state));

 	/*
 	 * put ourself to sleep waiting for the other
@@ -1956,11 +2025,11 @@ pfm_close(struct inode *inode, struct file *filp)
 		set_current_state(TASK_RUNNING);

 		/*
-		 * context is terminated at this point
+		 * context is unloaded at this point
 		 */
-		DPRINT(("after zombie wakeup ctx_state=%d for [%d]\n", state, current->pid));
+		DPRINT(("after zombie wakeup ctx_state=%d for\n", state));
 	}
-	else {
+	else if (task != current) {
 #ifdef CONFIG_SMP
 		/*
 		 * switch context to zombie state
@@ -1978,8 +2047,7 @@ pfm_close(struct inode *inode, struct file *filp)
 #endif
 	}

-doit:	/* cannot assume task is defined from now on */
+doit:
 	/* reload state, may have changed during opening of critical section */
 	state = ctx->ctx_state;
@@ -1987,18 +2055,9 @@ pfm_close(struct inode *inode, struct file *filp)
 	 * the context is still attached to a task (possibly current)
 	 * we cannot destroy it right now
 	 */
-	/*
-	 * remove virtual mapping, if any. will be NULL when
-	 * called from exit_files().
-	 */
-	if (ctx->ctx_smpl_vaddr) {
-		smpl_buf_vaddr = ctx->ctx_smpl_vaddr;
-		smpl_buf_size  = ctx->ctx_smpl_size;
-		ctx->ctx_smpl_vaddr = NULL;
-	}

 	/*
-	 * we must fre the sampling buffer right here because
+	 * we must free the sampling buffer right here because
 	 * we cannot rely on it being cleaned up later by the
 	 * monitored task. It is not possible to free vmalloc'ed
 	 * memory in pfm_load_regs(). Instead, we remove the buffer
@@ -2011,21 +2070,19 @@ pfm_close(struct inode *inode, struct file *filp)
 		smpl_buf_size = ctx->ctx_smpl_size;
 		/* no more sampling */
 		ctx->ctx_smpl_hdr = NULL;
+		ctx->ctx_fl_is_sampling = 0;
 	}

-	DPRINT(("[%d] ctx_state=%d free_possible=%d vaddr=%p addr=%p size=%lu\n",
-		current->pid,
+	DPRINT(("ctx_state=%d free_possible=%d addr=%p size=%lu\n",
 		state,
 		free_possible,
-		smpl_buf_vaddr,
 		smpl_buf_addr,
 		smpl_buf_size));

 	if (smpl_buf_addr) pfm_exit_smpl_buffer(ctx->ctx_buf_fmt);

 	/*
-	 * UNLOADED and TERMINATED mean that the session has already been
-	 * unreserved.
+	 * UNLOADED that the session has already been unreserved.
 	 */
 	if (state == PFM_CTX_ZOMBIE) {
 		pfm_unreserve_session(ctx, ctx->ctx_fl_system, ctx->ctx_cpu);
@@ -2047,14 +2104,9 @@ pfm_close(struct inode *inode, struct file *filp)
 	UNPROTECT_CTX(ctx, flags);

 	/*
-	 * if there was a mapping, then we systematically remove it
-	 * at this point. Cannot be done inside critical section
-	 * because some VM function reenables interrupts.
-	 *
 	 * All memory free operations (especially for vmalloc'ed memory)
 	 * MUST be done with interrupts ENABLED.
 	 */
-	if (smpl_buf_vaddr) pfm_remove_smpl_mapping(current, smpl_buf_vaddr, smpl_buf_size);
 	if (smpl_buf_addr) pfm_rvfree(smpl_buf_addr, smpl_buf_size);

 	/*
@@ -2072,6 +2124,8 @@ pfm_no_open(struct inode *irrelevant, struct file *dontcare)
 	return -ENXIO;
 }

 static struct file_operations pfm_file_ops = {
 	.llseek   = pfm_lseek,
 	.read     = pfm_read,
@@ -2080,7 +2134,8 @@ static struct file_operations pfm_file_ops = {
 	.ioctl    = pfm_ioctl,
 	.open     = pfm_no_open,	/* special open code to disallow open via /proc */
 	.fasync   = pfm_fasync,
-	.release  = pfm_close
+	.release  = pfm_close,
+	.flush    = pfm_flush
 };

 static int
@@ -2088,6 +2143,7 @@ pfmfs_delete_dentry(struct dentry *dentry)
 {
 	return 1;
 }

 static struct dentry_operations pfmfs_dentry_operations = {
 	.d_delete = pfmfs_delete_dentry,
 };
@@ -2172,27 +2228,6 @@ pfm_free_fd(int fd, struct file *file)
 	put_unused_fd(fd);
 }

-/*
- * This function gets called from mm/mmap.c:exit_mmap() only when there is a sampling buffer
- * attached to the context AND the current task has a mapping for it, i.e., it is the original
- * creator of the context.
- *
- * This function is used to remember the fact that the vma describing the sampling buffer
- * has now been removed. It can only be called when no other tasks share the same mm context.
- *
- */
-static void
-pfm_vm_close(struct vm_area_struct *vma)
-{
-	pfm_context_t *ctx = (pfm_context_t *)vma->vm_private_data;
-	unsigned long flags;
-
-	PROTECT_CTX(ctx, flags);
-	ctx->ctx_smpl_vaddr = NULL;
-	UNPROTECT_CTX(ctx, flags);
-	DPRINT(("[%d] clearing vaddr for ctx %p\n", current->pid, ctx));
-}
-
 static int
 pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long addr, unsigned long size)
 {
@@ -2252,7 +2287,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned lon
 		return -ENOMEM;
 	}

-	DPRINT(("[%d] smpl_buf @%p\n", current->pid, smpl_buf));
+	DPRINT(("smpl_buf @%p\n", smpl_buf));

 	/* allocate vma */
 	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
@@ -2268,12 +2303,12 @@ pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned lon
 	 * what we want.
 	 */
 	vma->vm_mm	     = mm;
-	vma->vm_flags	     = VM_READ | VM_MAYREAD | VM_RESERVED | VM_DONTCOPY;
+	vma->vm_flags	     = VM_READ | VM_MAYREAD | VM_RESERVED;
 	vma->vm_page_prot    = PAGE_READONLY; /* XXX may need to change */
-	vma->vm_ops	     = &pfm_vm_ops;
+	vma->vm_ops	     = NULL;
 	vma->vm_pgoff	     = 0;
 	vma->vm_file	     = NULL;
-	vma->vm_private_data = ctx;	/* information needed by the pfm_vm_close() function */
+	vma->vm_private_data = NULL;

 	/*
 	 * Now we have everything we need and we can initialize
@@ -2342,8 +2377,7 @@ static int
 pfm_bad_permissions(struct task_struct *task)
 {
 	/* inspired by ptrace_attach() */
-	DPRINT(("[%d] cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n",
-		current->pid,
+	DPRINT(("cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n",
 		current->uid,
 		current->gid,
 		task->euid,
@@ -2532,11 +2566,11 @@ pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
 	 * no kernel task or task not owner by caller
 	 */
 	if (task->mm == NULL) {
-		DPRINT(("[%d] task [%d] has not memory context (kernel thread)\n", current->pid, task->pid));
+		DPRINT(("task [%d] has not memory context (kernel thread)\n", task->pid));
 		return -EPERM;
 	}

 	if (pfm_bad_permissions(task)) {
-		DPRINT(("[%d] no permission to attach to [%d]\n", current->pid, task->pid));
+		DPRINT(("no permission to attach to [%d]\n", task->pid));
 		return -EPERM;
 	}
 	/*
@@ -2548,7 +2582,7 @@ pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
 	}

 	if (task->state == TASK_ZOMBIE) {
-		DPRINT(("[%d] cannot attach to zombie task [%d]\n", current->pid, task->pid));
+		DPRINT(("cannot attach to zombie task [%d]\n", task->pid));
 		return -EBUSY;
 	}
@@ -2558,7 +2592,7 @@ pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
 	if (task == current) return 0;

 	if (task->state != TASK_STOPPED) {
-		DPRINT(("[%d] cannot attach to non-stopped task [%d] state=%ld\n", current->pid, task->pid, task->state));
+		DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task->pid, task->state));
 		return -EBUSY;
 	}
 	/*
@@ -2835,7 +2869,7 @@ pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 	task = ctx->ctx_task;
 	impl_pmds = pmu_conf.impl_pmds[0];

-	if (state == PFM_CTX_TERMINATED || state == PFM_CTX_ZOMBIE) return -EINVAL;
+	if (state == PFM_CTX_ZOMBIE) return -EINVAL;

 	if (is_loaded) {
 		thread = &task->thread;
@@ -2845,7 +2879,7 @@ pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 	 * It does not have to be the owner (ctx_task) of the context per se.
 	 */
 	if (is_system && ctx->ctx_cpu != smp_processor_id()) {
-		DPRINT(("[%d] should be running on CPU%d\n", current->pid, ctx->ctx_cpu));
+		DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
 		return -EBUSY;
 	}
 	can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
@@ -2928,7 +2962,7 @@ pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
...
@@ -2928,7 +2962,7 @@ pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
/*
/*
* execute write checker, if any
* execute write checker, if any
*/
*/
if
(
PMC_WR_FUNC
(
cnum
))
{
if
(
pfm_sysctl
.
expert_mode
==
0
&&
PMC_WR_FUNC
(
cnum
))
{
ret
=
PMC_WR_FUNC
(
cnum
)(
task
,
ctx
,
cnum
,
&
value
,
regs
);
ret
=
PMC_WR_FUNC
(
cnum
)(
task
,
ctx
,
cnum
,
&
value
,
regs
);
if
(
ret
)
goto
error
;
if
(
ret
)
goto
error
;
ret
=
-
EINVAL
;
ret
=
-
EINVAL
;
...
@@ -3072,7 +3106,7 @@ pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
...
@@ -3072,7 +3106,7 @@ pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
ovfl_mask
=
pmu_conf
.
ovfl_val
;
ovfl_mask
=
pmu_conf
.
ovfl_val
;
task
=
ctx
->
ctx_task
;
task
=
ctx
->
ctx_task
;
if
(
unlikely
(
state
==
PFM_CTX_
TERMINATED
||
state
==
PFM_CTX_
ZOMBIE
))
return
-
EINVAL
;
if
(
unlikely
(
state
==
PFM_CTX_ZOMBIE
))
return
-
EINVAL
;
/*
/*
* on both UP and SMP, we can only write to the PMC when the task is
* on both UP and SMP, we can only write to the PMC when the task is
...
@@ -3086,7 +3120,7 @@ pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
...
@@ -3086,7 +3120,7 @@ pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
* It does not have to be the owner (ctx_task) of the context per se.
* It does not have to be the owner (ctx_task) of the context per se.
*/
*/
if
(
unlikely
(
is_system
&&
ctx
->
ctx_cpu
!=
smp_processor_id
()))
{
if
(
unlikely
(
is_system
&&
ctx
->
ctx_cpu
!=
smp_processor_id
()))
{
DPRINT
((
"
[%d] should be running on CPU%d
\n
"
,
current
->
pid
,
ctx
->
ctx_cpu
));
DPRINT
((
"
should be running on CPU%d
\n
"
,
ctx
->
ctx_cpu
));
return
-
EBUSY
;
return
-
EBUSY
;
}
}
can_access_pmu
=
GET_PMU_OWNER
()
==
task
||
is_system
?
1
:
0
;
can_access_pmu
=
GET_PMU_OWNER
()
==
task
||
is_system
?
1
:
0
;
...
@@ -3106,7 +3140,7 @@ pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
...
@@ -3106,7 +3140,7 @@ pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
/*
/*
* execute write checker, if any
* execute write checker, if any
*/
*/
if
(
PMD_WR_FUNC
(
cnum
))
{
if
(
pfm_sysctl
.
expert_mode
==
0
&&
PMD_WR_FUNC
(
cnum
))
{
unsigned
long
v
=
value
;
unsigned
long
v
=
value
;
ret
=
PMD_WR_FUNC
(
cnum
)(
task
,
ctx
,
cnum
,
&
v
,
regs
);
ret
=
PMD_WR_FUNC
(
cnum
)(
task
,
ctx
,
cnum
,
&
v
,
regs
);
...
@@ -3279,7 +3313,7 @@ pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
...
@@ -3279,7 +3313,7 @@ pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
* It does not have to be the owner (ctx_task) of the context per se.
* It does not have to be the owner (ctx_task) of the context per se.
*/
*/
if
(
unlikely
(
is_system
&&
ctx
->
ctx_cpu
!=
smp_processor_id
()))
{
if
(
unlikely
(
is_system
&&
ctx
->
ctx_cpu
!=
smp_processor_id
()))
{
DPRINT
((
"
[%d] should be running on CPU%d
\n
"
,
current
->
pid
,
ctx
->
ctx_cpu
));
DPRINT
((
"
should be running on CPU%d
\n
"
,
ctx
->
ctx_cpu
));
return
-
EBUSY
;
return
-
EBUSY
;
}
}
/*
/*
...
@@ -3347,7 +3381,7 @@ pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
...
@@ -3347,7 +3381,7 @@ pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
/*
/*
* execute read checker, if any
* execute read checker, if any
*/
*/
if
(
unlikely
(
PMD_RD_FUNC
(
cnum
)))
{
if
(
unlikely
(
pfm_sysctl
.
expert_mode
==
0
&&
PMD_RD_FUNC
(
cnum
)))
{
unsigned
long
v
=
val
;
unsigned
long
v
=
val
;
ret
=
PMD_RD_FUNC
(
cnum
)(
ctx
->
ctx_task
,
ctx
,
cnum
,
&
v
,
regs
);
ret
=
PMD_RD_FUNC
(
cnum
)(
ctx
->
ctx_task
,
ctx
,
cnum
,
&
v
,
regs
);
if
(
ret
)
goto
error
;
if
(
ret
)
goto
error
;
...
@@ -3376,14 +3410,14 @@ pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
...
@@ -3376,14 +3410,14 @@ pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
return
ret
;
return
ret
;
}
}
long
int
pfm_mod_write_pmcs
(
struct
task_struct
*
task
,
pfarg_reg_t
*
req
,
unsigned
int
nreq
,
struct
pt_regs
*
regs
)
pfm_mod_write_pmcs
(
struct
task_struct
*
task
,
void
*
req
,
unsigned
int
nreq
,
struct
pt_regs
*
regs
)
{
{
pfm_context_t
*
ctx
;
pfm_context_t
*
ctx
;
if
(
task
==
NULL
||
req
==
NULL
)
return
-
EINVAL
;
if
(
req
==
NULL
)
return
-
EINVAL
;
ctx
=
task
->
thread
.
pfm_context
;
ctx
=
GET_PMU_CTX
()
;
if
(
ctx
==
NULL
)
return
-
EINVAL
;
if
(
ctx
==
NULL
)
return
-
EINVAL
;
...
@@ -3391,20 +3425,19 @@ pfm_mod_write_pmcs(struct task_struct *task, pfarg_reg_t *req, unsigned int nreq
...
@@ -3391,20 +3425,19 @@ pfm_mod_write_pmcs(struct task_struct *task, pfarg_reg_t *req, unsigned int nreq
* for now limit to current task, which is enough when calling
* for now limit to current task, which is enough when calling
* from overflow handler
* from overflow handler
*/
*/
if
(
task
!=
current
)
return
-
EBUSY
;
if
(
task
!=
current
&&
ctx
->
ctx_fl_system
==
0
)
return
-
EBUSY
;
return
pfm_write_pmcs
(
ctx
,
req
,
nreq
,
regs
);
return
pfm_write_pmcs
(
ctx
,
req
,
nreq
,
regs
);
}
}
EXPORT_SYMBOL
(
pfm_mod_write_pmcs
);
EXPORT_SYMBOL
(
pfm_mod_write_pmcs
);
long
int
pfm_mod_read_pmds
(
struct
task_struct
*
task
,
pfarg_reg_t
*
req
,
unsigned
int
nreq
,
struct
pt_regs
*
regs
)
pfm_mod_read_pmds
(
struct
task_struct
*
task
,
void
*
req
,
unsigned
int
nreq
,
struct
pt_regs
*
regs
)
{
{
pfm_context_t
*
ctx
;
pfm_context_t
*
ctx
;
if
(
task
==
NULL
||
req
==
NULL
)
return
-
EINVAL
;
if
(
req
==
NULL
)
return
-
EINVAL
;
//ctx = task->thread.pfm_context;
ctx
=
GET_PMU_CTX
();
ctx
=
GET_PMU_CTX
();
if
(
ctx
==
NULL
)
return
-
EINVAL
;
if
(
ctx
==
NULL
)
return
-
EINVAL
;
...
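A hedged sketch of the interface change above: the exported helpers now take void * instead of pfarg_reg_t *, so out-of-tree callers need no knowledge of the request layout, and the context comes from the PMU owner (GET_PMU_CTX()) rather than task->thread; system-wide contexts may also be driven by a task other than the monitored one. The caller name below is mine, and the header path is assumed from this commit's touched files.

#include <asm/perfmon.h>	/* assumed to declare the pfm_mod_* helpers */

/* hypothetical module-side caller, sketch only */
int my_hook(struct task_struct *task, void *req, unsigned int nreq,
	    struct pt_regs *regs)
{
	/* works for task == current, or any task in system-wide mode */
	return pfm_mod_write_pmcs(task, req, nreq, regs);
}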
@@ -3419,48 +3452,6 @@ pfm_mod_read_pmds(struct task_struct *task, pfarg_reg_t *req, unsigned int nreq,
...
@@ -3419,48 +3452,6 @@ pfm_mod_read_pmds(struct task_struct *task, pfarg_reg_t *req, unsigned int nreq,
}
}
EXPORT_SYMBOL
(
pfm_mod_read_pmds
);
EXPORT_SYMBOL
(
pfm_mod_read_pmds
);
long
pfm_mod_fast_read_pmds
(
struct
task_struct
*
task
,
unsigned
long
mask
[
4
],
unsigned
long
*
addr
,
struct
pt_regs
*
regs
)
{
pfm_context_t
*
ctx
;
unsigned
long
m
,
val
;
unsigned
int
j
;
if
(
task
==
NULL
||
addr
==
NULL
)
return
-
EINVAL
;
//ctx = task->thread.pfm_context;
ctx
=
GET_PMU_CTX
();
if
(
ctx
==
NULL
)
return
-
EINVAL
;
/*
* for now limit to current task, which is enough when calling
* from overflow handler
*/
if
(
task
!=
current
&&
ctx
->
ctx_fl_system
==
0
)
return
-
EBUSY
;
m
=
mask
[
0
];
for
(
j
=
0
;
m
;
m
>>=
1
,
j
++
)
{
if
((
m
&
0x1
)
==
0
)
continue
;
if
(
!
(
PMD_IS_IMPL
(
j
)
&&
CTX_IS_USED_PMD
(
ctx
,
j
))
)
return
-
EINVAL
;
if
(
PMD_IS_COUNTING
(
j
))
{
val
=
pfm_read_soft_counter
(
ctx
,
j
);
}
else
{
val
=
ia64_get_pmd
(
j
);
}
*
addr
++
=
val
;
/* XXX: should call read checker routine? */
DPRINT
((
"single_read_pmd[%u]=0x%lx
\n
"
,
j
,
val
));
}
return
0
;
}
EXPORT_SYMBOL
(
pfm_mod_fast_read_pmds
);
/*
/*
* Only call this function when a process it trying to
* Only call this function when a process it trying to
* write the debug registers (reading is always allowed)
* write the debug registers (reading is always allowed)
...
@@ -3565,9 +3556,6 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 	case PFM_CTX_ZOMBIE:
 		DPRINT(("invalid state=%d\n", state));
 		return -EBUSY;
-	case PFM_CTX_TERMINATED:
-		DPRINT(("context is terminated, nothing to do\n"));
-		return 0;
 	default:
 		DPRINT(("state=%d, cannot operate (no active_restart handler)\n", state));
 		return -EINVAL;
@@ -3579,7 +3567,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
...
@@ -3579,7 +3567,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
* It does not have to be the owner (ctx_task) of the context per se.
* It does not have to be the owner (ctx_task) of the context per se.
*/
*/
if
(
is_system
&&
ctx
->
ctx_cpu
!=
smp_processor_id
())
{
if
(
is_system
&&
ctx
->
ctx_cpu
!=
smp_processor_id
())
{
DPRINT
((
"
[%d] should be running on CPU%d
\n
"
,
current
->
pid
,
ctx
->
ctx_cpu
));
DPRINT
((
"
should be running on CPU%d
\n
"
,
ctx
->
ctx_cpu
));
return
-
EBUSY
;
return
-
EBUSY
;
}
}
...
@@ -3739,7 +3727,7 @@ pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_
...
@@ -3739,7 +3727,7 @@ pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_
is_system
=
ctx
->
ctx_fl_system
;
is_system
=
ctx
->
ctx_fl_system
;
task
=
ctx
->
ctx_task
;
task
=
ctx
->
ctx_task
;
if
(
state
==
PFM_CTX_
TERMINATED
||
state
==
PFM_CTX_
ZOMBIE
)
return
-
EINVAL
;
if
(
state
==
PFM_CTX_ZOMBIE
)
return
-
EINVAL
;
/*
/*
* on both UP and SMP, we can only write to the PMC when the task is
* on both UP and SMP, we can only write to the PMC when the task is
...
@@ -3753,7 +3741,7 @@ pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_
...
@@ -3753,7 +3741,7 @@ pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_
* It does not have to be the owner (ctx_task) of the context per se.
* It does not have to be the owner (ctx_task) of the context per se.
*/
*/
if
(
unlikely
(
is_system
&&
ctx
->
ctx_cpu
!=
smp_processor_id
()))
{
if
(
unlikely
(
is_system
&&
ctx
->
ctx_cpu
!=
smp_processor_id
()))
{
DPRINT
((
"
[%d] should be running on CPU%d
\n
"
,
current
->
pid
,
ctx
->
ctx_cpu
));
DPRINT
((
"
should be running on CPU%d
\n
"
,
ctx
->
ctx_cpu
));
return
-
EBUSY
;
return
-
EBUSY
;
}
}
can_access_pmu
=
GET_PMU_OWNER
()
==
task
||
is_system
?
1
:
0
;
can_access_pmu
=
GET_PMU_OWNER
()
==
task
||
is_system
?
1
:
0
;
...
@@ -3920,6 +3908,49 @@ pfm_write_dbrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
...
@@ -3920,6 +3908,49 @@ pfm_write_dbrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
return
pfm_write_ibr_dbr
(
PFM_DATA_RR
,
ctx
,
arg
,
count
,
regs
);
return
pfm_write_ibr_dbr
(
PFM_DATA_RR
,
ctx
,
arg
,
count
,
regs
);
}
}
int
pfm_mod_write_ibrs
(
struct
task_struct
*
task
,
void
*
req
,
unsigned
int
nreq
,
struct
pt_regs
*
regs
)
{
pfm_context_t
*
ctx
;
if
(
req
==
NULL
)
return
-
EINVAL
;
ctx
=
GET_PMU_CTX
();
if
(
ctx
==
NULL
)
return
-
EINVAL
;
/*
* for now limit to current task, which is enough when calling
* from overflow handler
*/
if
(
task
!=
current
&&
ctx
->
ctx_fl_system
==
0
)
return
-
EBUSY
;
return
pfm_write_ibrs
(
ctx
,
req
,
nreq
,
regs
);
}
EXPORT_SYMBOL
(
pfm_mod_write_ibrs
);
int
pfm_mod_write_dbrs
(
struct
task_struct
*
task
,
void
*
req
,
unsigned
int
nreq
,
struct
pt_regs
*
regs
)
{
pfm_context_t
*
ctx
;
if
(
req
==
NULL
)
return
-
EINVAL
;
ctx
=
GET_PMU_CTX
();
if
(
ctx
==
NULL
)
return
-
EINVAL
;
/*
* for now limit to current task, which is enough when calling
* from overflow handler
*/
if
(
task
!=
current
&&
ctx
->
ctx_fl_system
==
0
)
return
-
EBUSY
;
return
pfm_write_dbrs
(
ctx
,
req
,
nreq
,
regs
);
}
EXPORT_SYMBOL
(
pfm_mod_write_dbrs
);
static
int
static
int
pfm_get_features
(
pfm_context_t
*
ctx
,
void
*
arg
,
int
count
,
struct
pt_regs
*
regs
)
pfm_get_features
(
pfm_context_t
*
ctx
,
void
*
arg
,
int
count
,
struct
pt_regs
*
regs
)
{
{
...
@@ -3947,11 +3978,10 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
...
@@ -3947,11 +3978,10 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
* It does not have to be the owner (ctx_task) of the context per se.
* It does not have to be the owner (ctx_task) of the context per se.
*/
*/
if
(
is_system
&&
ctx
->
ctx_cpu
!=
smp_processor_id
())
{
if
(
is_system
&&
ctx
->
ctx_cpu
!=
smp_processor_id
())
{
DPRINT
((
"
[%d] should be running on CPU%d
\n
"
,
current
->
pid
,
ctx
->
ctx_cpu
));
DPRINT
((
"
should be running on CPU%d
\n
"
,
ctx
->
ctx_cpu
));
return
-
EBUSY
;
return
-
EBUSY
;
}
}
DPRINT
((
"current [%d] task [%d] ctx_state=%d is_system=%d
\n
"
,
DPRINT
((
"task [%d] ctx_state=%d is_system=%d
\n
"
,
current
->
pid
,
PFM_CTX_TASK
(
ctx
)
->
pid
,
PFM_CTX_TASK
(
ctx
)
->
pid
,
state
,
state
,
is_system
));
is_system
));
...
@@ -4010,7 +4040,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
...
@@ -4010,7 +4040,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
* monitoring disabled in kernel at next reschedule
* monitoring disabled in kernel at next reschedule
*/
*/
ctx
->
ctx_saved_psr_up
=
0
;
ctx
->
ctx_saved_psr_up
=
0
;
DPRINT
((
"
pfm_stop: current [%d] task=[%d]
\n
"
,
current
->
pid
,
task
->
pid
));
DPRINT
((
"
task=[%d]
\n
"
,
task
->
pid
));
}
}
return
0
;
return
0
;
}
}
...
@@ -4033,7 +4063,7 @@ pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
...
@@ -4033,7 +4063,7 @@ pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
* It does not have to be the owner (ctx_task) of the context per se.
* It does not have to be the owner (ctx_task) of the context per se.
*/
*/
if
(
is_system
&&
ctx
->
ctx_cpu
!=
smp_processor_id
())
{
if
(
is_system
&&
ctx
->
ctx_cpu
!=
smp_processor_id
())
{
DPRINT
((
"
[%d] should be running on CPU%d
\n
"
,
current
->
pid
,
ctx
->
ctx_cpu
));
DPRINT
((
"
should be running on CPU%d
\n
"
,
ctx
->
ctx_cpu
));
return
-
EBUSY
;
return
-
EBUSY
;
}
}
...
@@ -4167,9 +4197,8 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 	/*
 	 * can only load from unloaded or terminated state
 	 */
-	if (state != PFM_CTX_UNLOADED && state != PFM_CTX_TERMINATED) {
-		DPRINT(("[%d] cannot load to [%d], invalid ctx_state=%d\n",
-			current->pid,
+	if (state != PFM_CTX_UNLOADED) {
+		DPRINT(("cannot load to [%d], invalid ctx_state=%d\n",
 			req->load_pid,
 			ctx->ctx_state));
 		return -EINVAL;
@@ -4178,7 +4207,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 	DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg));

 	if (CTX_OVFL_NOBLOCK(ctx) == 0 && req->load_pid == current->pid) {
-		DPRINT(("cannot use blocking mode on self for [%d]\n", current->pid));
+		DPRINT(("cannot use blocking mode on self\n"));
 		return -EINVAL;
 	}
@@ -4194,8 +4223,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 	 * system wide is self monitoring only
 	 */
 	if (is_system && task != current) {
-		DPRINT(("system wide is self monitoring only current=%d load_pid=%d\n",
-			current->pid,
+		DPRINT(("system wide is self monitoring only load_pid=%d\n",
 			req->load_pid));
 		goto error;
 	}
@@ -4264,8 +4292,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 	 *
 	 * XXX: needs to be atomic
 	 */
-	DPRINT(("[%d] before cmpxchg() old_ctx=%p new_ctx=%p\n",
-		current->pid,
+	DPRINT(("before cmpxchg() old_ctx=%p new_ctx=%p\n",
 		thread->pfm_context, ctx));

 	old = ia64_cmpxchg(acq, &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *));
@@ -4409,19 +4436,19 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
...
@@ -4409,19 +4436,19 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
{
{
struct
task_struct
*
task
=
PFM_CTX_TASK
(
ctx
);
struct
task_struct
*
task
=
PFM_CTX_TASK
(
ctx
);
struct
pt_regs
*
tregs
;
struct
pt_regs
*
tregs
;
int
state
,
is_system
;
int
prev_
state
,
is_system
;
int
ret
;
int
ret
;
DPRINT
((
"ctx_state=%d task [%d]
\n
"
,
ctx
->
ctx_state
,
task
?
task
->
pid
:
-
1
));
DPRINT
((
"ctx_state=%d task [%d]
\n
"
,
ctx
->
ctx_state
,
task
?
task
->
pid
:
-
1
));
state
=
ctx
->
ctx_state
;
prev_state
=
ctx
->
ctx_state
;
is_system
=
ctx
->
ctx_fl_system
;
is_system
=
ctx
->
ctx_fl_system
;
/*
/*
* unload only when necessary
* unload only when necessary
*/
*/
if
(
state
==
PFM_CTX_TERMINATED
||
state
==
PFM_CTX_UNLOADED
)
{
if
(
prev_
state
==
PFM_CTX_UNLOADED
)
{
DPRINT
((
"
[%d] ctx_state=%d, nothing to do
\n
"
,
current
->
pid
,
ctx
->
ctx
_state
));
DPRINT
((
"
ctx_state=%d, nothing to do
\n
"
,
prev
_state
));
return
0
;
return
0
;
}
}
...
@@ -4431,7 +4458,7 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
...
@@ -4431,7 +4458,7 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
ret
=
pfm_stop
(
ctx
,
NULL
,
0
,
regs
);
ret
=
pfm_stop
(
ctx
,
NULL
,
0
,
regs
);
if
(
ret
)
return
ret
;
if
(
ret
)
return
ret
;
ctx
->
ctx_state
=
state
=
PFM_CTX_UNLOADED
;
ctx
->
ctx_state
=
PFM_CTX_UNLOADED
;
/*
/*
* in system mode, we need to update the PMU directly
* in system mode, we need to update the PMU directly
...
@@ -4458,6 +4485,7 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
 	 * at this point we are done with the PMU
 	 * so we can unreserve the resource.
 	 */
-	pfm_unreserve_session(ctx, 1, ctx->ctx_cpu);
+	if (prev_state != PFM_CTX_ZOMBIE)
+		pfm_unreserve_session(ctx, 1, ctx->ctx_cpu);

 	/*
...
@@ -4497,7 +4525,10 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
 	/*
 	 * at this point we are done with the PMU
 	 * so we can unreserve the resource.
+	 *
+	 * when state was ZOMBIE, we have already unreserved.
 	 */
-	pfm_unreserve_session(ctx, 0, ctx->ctx_cpu);
+	if (prev_state != PFM_CTX_ZOMBIE)
+		pfm_unreserve_session(ctx, 0, ctx->ctx_cpu);

 	/*
...
@@ -4549,12 +4580,14 @@ pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs)
 	task->thread.pfm_context  = NULL;
 	task->thread.flags       &= ~IA64_THREAD_PM_VALID;

-	DPRINT(("context <%d> force cleanup for [%d] by [%d]\n", ctx->ctx_fd, task->pid, current->pid));
+	DPRINT(("force cleanupf for [%d]\n", task->pid));
 }

 /*
  * called only from exit_thread(): task == current
+ * we come here only if current has a context attached (loaded or masked)
  */
 void
 pfm_exit_thread(struct task_struct *task)
...
@@ -4575,7 +4608,8 @@ pfm_exit_thread(struct task_struct *task)
 	switch(state) {
 		case PFM_CTX_UNLOADED:
 			/*
-			 * come here only if attached
+			 * only comes to thios function if pfm_context is not NULL, i.e., cannot
+			 * be in unloaded state
 			 */
 			printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task->pid);
 			break;
...
@@ -4583,20 +4617,17 @@ pfm_exit_thread(struct task_struct *task)
 		case PFM_CTX_MASKED:
 			ret = pfm_context_unload(ctx, NULL, 0, regs);
 			if (ret) {
-				printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, ctx->ctx_state, ret);
+				printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, state, ret);
 			}
-			ctx->ctx_state = PFM_CTX_TERMINATED;
-			DPRINT(("ctx terminated by [%d]\n", task->pid));
+			DPRINT(("ctx unloaded for current state was %d\n", state));

 			pfm_end_notify_user(ctx);
 			break;
 		case PFM_CTX_ZOMBIE:
-			pfm_clear_psr_up();
-
-			BUG_ON(ctx->ctx_smpl_hdr);
-
-			pfm_force_cleanup(ctx, regs);
+			ret = pfm_context_unload(ctx, NULL, 0, regs);
+			if (ret) {
+				printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, state, ret);
+			}
 			free_ok = 1;
 			break;
 		default:
...
@@ -4696,7 +4727,7 @@ pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags)
 	if (task == current || ctx->ctx_fl_system) return 0;

 	/*
-	 * context is UNLOADED, MASKED, TERMINATED we are safe to go
+	 * context is UNLOADED, MASKED we are safe to go
 	 */
 	if (state != PFM_CTX_LOADED) return 0;
...
@@ -4749,7 +4780,7 @@ sys_perfmonctl (int fd, int cmd, void *arg, int count, long arg5, long arg6, lon
 	if (unlikely(PFM_IS_DISABLED())) return -ENOSYS;

 	if (unlikely(cmd < 0 || cmd >= PFM_CMD_COUNT)) {
-		DPRINT(("[%d] invalid cmd=%d\n", current->pid, cmd));
+		DPRINT(("invalid cmd=%d\n", cmd));
 		return -EINVAL;
 	}
...
@@ -4760,7 +4791,7 @@ sys_perfmonctl (int fd, int cmd, void *arg, int count, long arg5, long arg6, lon
 	cmd_flags = pfm_cmd_tab[cmd].cmd_flags;

 	if (unlikely(func == NULL)) {
-		DPRINT(("[%d] invalid cmd=%d\n", current->pid, cmd));
+		DPRINT(("invalid cmd=%d\n", cmd));
 		return -EINVAL;
 	}
...
@@ -4803,7 +4834,7 @@ sys_perfmonctl (int fd, int cmd, void *arg, int count, long arg5, long arg6, lon
 	 * assume sz = 0 for command without parameters
 	 */
 	if (sz && copy_from_user(args_k, arg, sz)) {
-		DPRINT(("[%d] cannot copy_from_user %lu bytes @%p\n", current->pid, sz, arg));
+		DPRINT(("cannot copy_from_user %lu bytes @%p\n", sz, arg));
 		goto error_args;
 	}
...
@@ -4819,7 +4850,7 @@ sys_perfmonctl (int fd, int cmd, void *arg, int count, long arg5, long arg6, lon
 	completed_args = 1;

-	DPRINT(("[%d] restart_args sz=%lu xtra_sz=%lu\n", current->pid, sz, xtra_sz));
+	DPRINT(("restart_args sz=%lu xtra_sz=%lu\n", sz, xtra_sz));

 	/* retry if necessary */
 	if (likely(xtra_sz)) goto restart_args;
...
@@ -4831,17 +4862,17 @@ sys_perfmonctl (int fd, int cmd, void *arg, int count, long arg5, long arg6, lon
 	file = fget(fd);
 	if (unlikely(file == NULL)) {
-		DPRINT(("[%d] invalid fd %d\n", current->pid, fd));
+		DPRINT(("invalid fd %d\n", fd));
 		goto error_args;
 	}

 	if (unlikely(PFM_IS_FILE(file) == 0)) {
-		DPRINT(("[%d] fd %d not related to perfmon\n", current->pid, fd));
+		DPRINT(("fd %d not related to perfmon\n", fd));
 		goto error_args;
 	}

 	ctx = (pfm_context_t *)file->private_data;
 	if (unlikely(ctx == NULL)) {
-		DPRINT(("[%d] no context for fd %d\n", current->pid, fd));
+		DPRINT(("no context for fd %d\n", fd));
 		goto error_args;
 	}
 	prefetch(&ctx->ctx_state);
...
@@ -4861,7 +4892,7 @@ sys_perfmonctl (int fd, int cmd, void *arg, int count, long arg5, long arg6, lon
 abort_locked:
 	if (likely(ctx)) {
-		DPRINT(("[%d] context unlocked\n", current->pid));
+		DPRINT(("context unlocked\n"));
 		UNPROTECT_CTX(ctx, flags);
 		fput(file);
 	}
...
@@ -4945,12 +4976,7 @@ pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
 	current->thread.flags &= ~IA64_THREAD_PM_VALID;
 	ctx->ctx_task = NULL;

-	/*
-	 * switch to terminated state
-	 */
-	ctx->ctx_state = PFM_CTX_TERMINATED;
-
-	DPRINT(("context <%d> terminated for [%d]\n", ctx->ctx_fd, current->pid));
+	DPRINT(("context terminated\n"));

 	/*
 	 * and wakeup controlling task, indicating we are now disconnected
@@ -4995,15 +5021,15 @@ pfm_handle_work(void)
...
@@ -4995,15 +5021,15 @@ pfm_handle_work(void)
*/
*/
reason
=
ctx
->
ctx_fl_trap_reason
;
reason
=
ctx
->
ctx_fl_trap_reason
;
ctx
->
ctx_fl_trap_reason
=
PFM_TRAP_REASON_NONE
;
ctx
->
ctx_fl_trap_reason
=
PFM_TRAP_REASON_NONE
;
ovfl_regs
=
ctx
->
ctx_ovfl_regs
[
0
];
DPRINT
((
"
[%d] reason=%d state=%d
\n
"
,
current
->
pid
,
reason
,
ctx
->
ctx_state
));
DPRINT
((
"
reason=%d state=%d
\n
"
,
reason
,
ctx
->
ctx_state
));
/*
/*
* must be done before we check
non-blocking
mode
* must be done before we check
for simple-reset
mode
*/
*/
if
(
ctx
->
ctx_fl_going_zombie
||
ctx
->
ctx_state
==
PFM_CTX_ZOMBIE
)
goto
do_zombie
;
if
(
ctx
->
ctx_fl_going_zombie
||
ctx
->
ctx_state
==
PFM_CTX_ZOMBIE
)
goto
do_zombie
;
ovfl_regs
=
ctx
->
ctx_ovfl_regs
[
0
];
//if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking;
//if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking;
if
(
reason
==
PFM_TRAP_REASON_RESET
)
goto
skip_blocking
;
if
(
reason
==
PFM_TRAP_REASON_RESET
)
goto
skip_blocking
;
...
@@ -5022,6 +5048,14 @@ pfm_handle_work(void)
...
@@ -5022,6 +5048,14 @@ pfm_handle_work(void)
PROTECT_CTX
(
ctx
,
flags
);
PROTECT_CTX
(
ctx
,
flags
);
/*
* we need to read the ovfl_regs only after wake-up
* because we may have had pfm_write_pmds() in between
* and that can changed PMD values and therefore
* ovfl_regs is reset for these new PMD values.
*/
ovfl_regs
=
ctx
->
ctx_ovfl_regs
[
0
];
if
(
ctx
->
ctx_fl_going_zombie
)
{
if
(
ctx
->
ctx_fl_going_zombie
)
{
do_zombie:
do_zombie:
DPRINT
((
"context is zombie, bailing out
\n
"
));
DPRINT
((
"context is zombie, bailing out
\n
"
));
...
@@ -5050,7 +5084,7 @@ pfm_notify_user(pfm_context_t *ctx, pfm_msg_t *msg)
...
@@ -5050,7 +5084,7 @@ pfm_notify_user(pfm_context_t *ctx, pfm_msg_t *msg)
return
0
;
return
0
;
}
}
DPRINT
((
"
[%d] waking up somebody
\n
"
,
current
->
pid
));
DPRINT
((
"
waking up somebody
\n
"
));
if
(
msg
)
wake_up_interruptible
(
&
ctx
->
ctx_msgq_wait
);
if
(
msg
)
wake_up_interruptible
(
&
ctx
->
ctx_msgq_wait
);
...
@@ -5085,11 +5119,10 @@ pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds)
...
@@ -5085,11 +5119,10 @@ pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds)
msg
->
pfm_ovfl_msg
.
msg_tstamp
=
0UL
;
msg
->
pfm_ovfl_msg
.
msg_tstamp
=
0UL
;
}
}
DPRINT
((
"ovfl msg: msg=%p no_msg=%d fd=%d
pid=%d
ovfl_pmds=0x%lx
\n
"
,
DPRINT
((
"ovfl msg: msg=%p no_msg=%d fd=%d ovfl_pmds=0x%lx
\n
"
,
msg
,
msg
,
ctx
->
ctx_fl_no_msg
,
ctx
->
ctx_fl_no_msg
,
ctx
->
ctx_fd
,
ctx
->
ctx_fd
,
current
->
pid
,
ovfl_pmds
));
ovfl_pmds
));
return
pfm_notify_user
(
ctx
,
msg
);
return
pfm_notify_user
(
ctx
,
msg
);
...
@@ -5112,10 +5145,10 @@ pfm_end_notify_user(pfm_context_t *ctx)
...
@@ -5112,10 +5145,10 @@ pfm_end_notify_user(pfm_context_t *ctx)
msg
->
pfm_end_msg
.
msg_ctx_fd
=
ctx
->
ctx_fd
;
msg
->
pfm_end_msg
.
msg_ctx_fd
=
ctx
->
ctx_fd
;
msg
->
pfm_ovfl_msg
.
msg_tstamp
=
0UL
;
msg
->
pfm_ovfl_msg
.
msg_tstamp
=
0UL
;
DPRINT
((
"end msg: msg=%p no_msg=%d ctx_fd=%d
pid=%d
\n
"
,
DPRINT
((
"end msg: msg=%p no_msg=%d ctx_fd=%d
\n
"
,
msg
,
msg
,
ctx
->
ctx_fl_no_msg
,
ctx
->
ctx_fl_no_msg
,
ctx
->
ctx_fd
,
current
->
pid
));
ctx
->
ctx_fd
));
return
pfm_notify_user
(
ctx
,
msg
);
return
pfm_notify_user
(
ctx
,
msg
);
}
}
...
@@ -5275,8 +5308,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
 	 * when the module cannot handle the rest of the overflows, we abort right here
 	 */
 	if (ret && pmd_mask) {
-		DPRINT(("current [%d] handler aborts leftover ovfl_pmds=0x%lx\n",
-			current->pid,
+		DPRINT(("handler aborts leftover ovfl_pmds=0x%lx\n",
 			pmd_mask<<PMU_FIRST_COUNTER));
 	}
...
@@ -5298,8 +5330,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
 		if (ovfl_notify == 0) reset_pmds = ovfl_pmds;
 	}

-	DPRINT(("current [%d] ovfl_pmds=0x%lx reset_pmds=0x%lx\n",
-		current->pid,
+	DPRINT(("ovfl_pmds=0x%lx reset_pmds=0x%lx\n",
 		ovfl_pmds,
 		reset_pmds));
...
@@ -5341,8 +5372,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
 		must_notify = 1;
 	}

-	DPRINT_ovfl(("current [%d] owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n",
-		current->pid,
+	DPRINT_ovfl(("owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n",
 		GET_PMU_OWNER() ? GET_PMU_OWNER()->pid : -1,
 		PFM_GET_WORK_PENDING(task),
 		ctx->ctx_fl_trap_reason,
@@ -5521,6 +5551,7 @@ pfm_proc_info(char *page)
...
@@ -5521,6 +5551,7 @@ pfm_proc_info(char *page)
p
+=
sprintf
(
p
,
"perfmon version : %u.%u
\n
"
,
PFM_VERSION_MAJ
,
PFM_VERSION_MIN
);
p
+=
sprintf
(
p
,
"perfmon version : %u.%u
\n
"
,
PFM_VERSION_MAJ
,
PFM_VERSION_MIN
);
p
+=
sprintf
(
p
,
"model : %s
\n
"
,
pmu_conf
.
pmu_name
);
p
+=
sprintf
(
p
,
"model : %s
\n
"
,
pmu_conf
.
pmu_name
);
p
+=
sprintf
(
p
,
"fastctxsw : %s
\n
"
,
pfm_sysctl
.
fastctxsw
>
0
?
"Yes"
:
"No"
);
p
+=
sprintf
(
p
,
"fastctxsw : %s
\n
"
,
pfm_sysctl
.
fastctxsw
>
0
?
"Yes"
:
"No"
);
p
+=
sprintf
(
p
,
"expert mode : %s
\n
"
,
pfm_sysctl
.
expert_mode
>
0
?
"Yes"
:
"No"
);
p
+=
sprintf
(
p
,
"ovfl_mask : 0x%lx
\n
"
,
pmu_conf
.
ovfl_val
);
p
+=
sprintf
(
p
,
"ovfl_mask : 0x%lx
\n
"
,
pmu_conf
.
ovfl_val
);
for
(
i
=
0
;
i
<
NR_CPUS
;
i
++
)
{
for
(
i
=
0
;
i
<
NR_CPUS
;
i
++
)
{
...
@@ -6490,7 +6521,7 @@ pfm_inherit(struct task_struct *task, struct pt_regs *regs)
...
@@ -6490,7 +6521,7 @@ pfm_inherit(struct task_struct *task, struct pt_regs *regs)
{
{
struct
thread_struct
*
thread
;
struct
thread_struct
*
thread
;
DPRINT
((
"perfmon: pfm_inherit clearing state for [%d]
current [%d]
\n
"
,
task
->
pid
,
current
->
pid
));
DPRINT
((
"perfmon: pfm_inherit clearing state for [%d]
\n
"
,
task
->
pid
));
thread
=
&
task
->
thread
;
thread
=
&
task
->
thread
;
...
...
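
The common thread in the perfmon.c hunks above is that DPRINT call sites stop passing current->pid explicitly; that only works if the debug macro itself stamps the calling task's pid. A minimal sketch of such a macro follows (illustrative only; DPRINT_SKETCH is a hypothetical name and the actual DPRINT definition is not part of this diff):

/* sketch: a debug macro that prefixes the calling task's pid itself,
 * so call sites no longer need to pass current->pid as an argument */
#define DPRINT_SKETCH(a) \
	do { \
		printk("%s.%d: CPU%d [%d] ", \
		       __FUNCTION__, __LINE__, smp_processor_id(), current->pid); \
		printk a; \
	} while (0)

/* usage mirrors the new call sites above:
 *	DPRINT_SKETCH(("invalid cmd=%d\n", cmd));
 */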
arch/ia64/kernel/perfmon_mckinley.h
...
@@ -101,6 +101,7 @@ pfm_mck_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnu
 {
 	int ret = 0, check_case1 = 0;
 	unsigned long val8 = 0, val14 = 0, val13 = 0;
+	int is_loaded;

 	/* first preserve the reserved fields */
 	pfm_mck_reserved(cnum, val, regs);
...
@@ -108,6 +109,8 @@ pfm_mck_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnu
 	/* sanitfy check */
 	if (ctx == NULL) return -EINVAL;

+	is_loaded = ctx->ctx_state == PFM_CTX_LOADED || ctx->ctx_state == PFM_CTX_MASKED;
+
 	/*
 	 * we must clear the debug registers if pmc13 has a value which enable
 	 * memory pipeline event constraints. In this case we need to clear the
...
@@ -120,7 +123,9 @@ pfm_mck_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnu
 	 *
 	 * For now, we just check on cfg_dbrXX != 0x3.
 	 */
-	if (cnum == 13 && ((*val & 0x18181818UL) != 0x18181818UL) && ctx->ctx_fl_using_dbreg == 0) {
+	DPRINT(("cnum=%u val=0x%lx, using_dbreg=%d loaded=%d\n", cnum, *val, ctx->ctx_fl_using_dbreg, is_loaded));
+
+	if (cnum == 13 && is_loaded && ((*val & 0x18181818UL) != 0x18181818UL) && ctx->ctx_fl_using_dbreg == 0) {

 		DPRINT(("pmc[%d]=0x%lx has active pmc13 settings, clearing dbr\n", cnum, *val));
...
@@ -131,14 +136,14 @@ pfm_mck_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnu
 		 * a count of 0 will mark the debug registers as in use and also
 		 * ensure that they are properly cleared.
 		 */
-		ret = pfm_write_ibr_dbr(1, ctx, NULL, 0, regs);
+		ret = pfm_write_ibr_dbr(PFM_DATA_RR, ctx, NULL, 0, regs);
 		if (ret) return ret;
 	}

 	/*
 	 * we must clear the (instruction) debug registers if any pmc14.ibrpX bit is enabled
 	 * before they are (fl_using_dbreg==0) to avoid picking up stale information.
 	 */
-	if (cnum == 14 && ((*val & 0x2222UL) != 0x2222UL) && ctx->ctx_fl_using_dbreg == 0) {
+	if (cnum == 14 && is_loaded && ((*val & 0x2222UL) != 0x2222UL) && ctx->ctx_fl_using_dbreg == 0) {

 		DPRINT(("pmc[%d]=0x%lx has active pmc14 settings, clearing ibr\n", cnum, *val));
...
@@ -149,7 +154,7 @@ pfm_mck_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnu
 		 * a count of 0 will mark the debug registers as in use and also
 		 * ensure that they are properly cleared.
 		 */
-		ret = pfm_write_ibr_dbr(0, ctx, NULL, 0, regs);
+		ret = pfm_write_ibr_dbr(PFM_CODE_RR, ctx, NULL, 0, regs);
 		if (ret) return ret;
 	}
...
arch/ia64/kernel/sys_ia64.c
...
@@ -201,10 +201,16 @@ do_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, un
 	 * A zero mmap always succeeds in Linux, independent of whether or not the
 	 * remaining arguments are valid.
 	 */
-	len = PAGE_ALIGN(len);
 	if (len == 0)
 		goto out;

+	/* Careful about overflows.. */
+	len = PAGE_ALIGN(len);
+	if (!len || len > TASK_SIZE) {
+		addr = -EINVAL;
+		goto out;
+	}
+
 	/*
 	 * Don't permit mappings into unmapped space, the virtual page table of a region,
 	 * or across a region boundary. Note: RGN_MAP_LIMIT is equal to 2^n-PAGE_SIZE
...
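
The do_mmap2() reordering above matters because PAGE_ALIGN() rounds up and can wrap: a len close to ULONG_MAX aligns to 0, which the old order (aligning before the len == 0 test) would have treated as the always-successful zero-length mmap. A small userspace sketch of the wrap, assuming 16KB pages for illustration:

#include <stdio.h>

int main(void)
{
	unsigned long page = 16384;		/* assumed ia64 page size */
	unsigned long len = ~0UL - 5;		/* nearly ULONG_MAX */
	unsigned long aligned = (len + page - 1) & ~(page - 1);

	printf("aligned = %lu\n", aligned);	/* prints 0: the sum wrapped */
	return 0;
}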
arch/ia64/mm/hugetlbpage.c
...
@@ -9,6 +9,7 @@
  */
 #include <linux/config.h>
+#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/fs.h>
 #include <linux/mm.h>
...
arch/ia64/pci/pci.c
...
@@ -442,7 +442,6 @@ pcibios_enable_device (struct pci_dev *dev, int mask)
 	if (ret < 0)
 		return ret;

-	printk(KERN_INFO "PCI: Found IRQ %d for device %s\n", dev->irq, pci_name(dev));
 	return acpi_pci_irq_enable(dev);
 }
...
arch/ia64/sn/io/hwgfs/interface.c
...
@@ -36,7 +36,7 @@ walk_parents_mkdir(
 		memcpy(buf, *path, len);
 		buf[len] = '\0';

-		error = link_path_walk(buf, nd);
+		error = path_walk(buf, nd);
 		if (unlikely(error))
 			return error;
...
@@ -83,7 +83,7 @@ hwgfs_decode(
 	if (unlikely(error))
 		return error;

-	error = link_path_walk(name, &nd);
+	error = path_walk(name, &nd);
 	if (unlikely(error))
 		return error;
...
@@ -274,7 +274,7 @@ hwgfs_find_handle(
 	nd.dentry = dget(base ? base : hwgfs_vfsmount->mnt_sb->s_root);
 	nd.flags = (traverse_symlinks ? LOOKUP_FOLLOW : 0);

-	error = link_path_walk(name, &nd);
+	error = path_walk(name, &nd);
 	if (likely(!error)) {
 		dentry = nd.dentry;
 		path_release(&nd);	/* stale data from here! */
...
arch/ia64/sn/io/machvec/pci_bus_cvlink.c
...
@@ -811,7 +811,6 @@ sn_pci_init (void)
 	/*
 	 * set pci_raw_ops, etc.
 	 */
-
 	sgi_master_io_infr_init();

 	for (cnode = 0; cnode < numnodes; cnode++) {
...
@@ -826,16 +825,16 @@ sn_pci_init (void)
 #endif

 	controller = kmalloc(sizeof(struct pci_controller), GFP_KERNEL);
-	if (controller) {
-		memset(controller, 0, sizeof(struct pci_controller));
-		/* just allocate some devices and fill in the pci_dev structs */
-		for (i = 0; i < PCI_BUSES_TO_SCAN; i++)
-			if (pci_bus_to_vertex(i))
-				pci_scan_bus(i, &sn_pci_ops, controller);
-	}
+	if (!controller) {
+		printk(KERN_WARNING "cannot allocate PCI controller\n");
+		return 0;
+	}
+
+	memset(controller, 0, sizeof(struct pci_controller));
+
+	for (i = 0; i < PCI_BUSES_TO_SCAN; i++)
+		pci_scan_bus(i, &sn_pci_ops, controller);

+	/*
+	 * actually find devices and fill in hwgraph structs
+	 */
 	done_probing = 1;
...
@@ -857,13 +856,8 @@ sn_pci_init (void)
 	 * set the root start and end so that drivers calling check_region()
 	 * won't see a conflict
 	 */
-#ifdef CONFIG_IA64_SGI_SN_SIM
-	if (!IS_RUNNING_ON_SIMULATOR()) {
-		ioport_resource.start = 0xc000000000000000;
-		ioport_resource.end = 0xcfffffffffffffff;
-	}
-#endif
+	ioport_resource.start = 0xc000000000000000;
+	ioport_resource.end = 0xcfffffffffffffff;

 	/*
 	 * Set the root start and end for Mem Resource.
...
arch/ia64/sn/io/machvec/pci_dma.c
...
@@ -391,11 +391,9 @@ sn_pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
 	dma_map = pcibr_dmamap_alloc(vhdl, NULL, size, PCIIO_DMA_DATA |
 				     MINIMAL_ATE_FLAG(phys_addr, size));

-	if (!dma_map) {
-		/* PMU out of entries */
-		printk(KERN_ERR "pci_map_single: Unable to allocate anymore "
-		       "32 bit page map entries.\n");
+	if (!dma_map)
 		return 0;
-	}

 	dma_addr = (dma_addr_t) pcibr_dmamap_addr(dma_map, phys_addr, size);
 	dma_map->bd_dma_addr = dma_addr;
...
@@ -655,6 +653,12 @@ EXPORT_SYMBOL(sn_dma_sync_sg_for_device);
 int
 sn_dma_mapping_error(dma_addr_t dma_addr)
 {
+	/*
+	 * We can only run out of page mapping entries, so if there's
+	 * an error, tell the caller to try again later.
+	 */
+	if (!dma_addr)
+		return -EAGAIN;
+
 	return 0;
 }
...
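
With the change above, a zero handle from sn_pci_map_single() is now reported through sn_dma_mapping_error() as -EAGAIN rather than announced by the removed printk. A sketch of the caller-side convention this implies (example_map is hypothetical, not a function in this tree):

/* hypothetical caller: a zero handle means the PMU ran out of 32-bit
 * page map entries, so propagate -EAGAIN and let the caller retry
 * later instead of failing hard */
static int example_map(struct pci_dev *hwdev, void *buf, size_t size,
		       dma_addr_t *out)
{
	dma_addr_t handle = sn_pci_map_single(hwdev, buf, size,
					      PCI_DMA_TODEVICE);
	int err = sn_dma_mapping_error(handle);

	if (err)
		return err;	/* -EAGAIN: try again later */
	*out = handle;
	return 0;
}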
arch/ia64/sn/io/sn2/pcibr/pcibr_slot.c
...
@@ -47,6 +47,7 @@ void pcibr_bus_addr_free(pciio_win_info_t);
 cfg_p pcibr_find_capability(cfg_p, unsigned);
 extern uint64_t do_pcibr_config_get(cfg_p, unsigned, unsigned);
 void do_pcibr_config_set(cfg_p, unsigned, unsigned, uint64_t);
+int pcibr_slot_pwr(vertex_hdl_t pcibr_vhdl, pciio_slot_t slot, int up, char *err_msg);

 /*
...
@@ -351,7 +352,7 @@ pcibr_slot_enable(vertex_hdl_t pcibr_vhdl, struct pcibr_slot_enable_req_s *req_p
 		goto enable_unlock;
 	}

-	error = pcibr_slot_attach(pcibr_vhdl, slot, NULL,
+	error = pcibr_slot_attach(pcibr_vhdl, slot, 0,
 				  req_p->req_resp.resp_l1_msg,
 				  &req_p->req_resp.resp_sub_errno);
...
drivers/char/sn_serial.c
...
@@ -82,10 +82,10 @@ static DECLARE_TASKLET(sn_sal_tasklet, sn_sal_tasklet_action, 0);
 static unsigned long sn_interrupt_timeout;

 extern u64 master_node_bedrock_address;

-static int sn_debug_printf(const char *fmt, ...);
-
 #undef DEBUG
 #ifdef DEBUG
+static int sn_debug_printf(const char *fmt, ...);
 #define DPRINTF(x...) sn_debug_printf(x)
 #else
 #define DPRINTF(x...) do { } while (0)
...
@@ -247,6 +247,7 @@ early_printk_sn_sal(const char *s, unsigned count)
 		sn_func->sal_puts(s, count);
 }

+#ifdef DEBUG
 /* this is as "close to the metal" as we can get, used when the driver
  * itself may be broken */
 static int
...
@@ -262,6 +263,7 @@ sn_debug_printf(const char *fmt, ...)
 	va_end(args);
 	return printed_len;
 }
+#endif				/* DEBUG */

 /*
  * Interrupt handling routines.
...
include/asm-ia64/machvec_hpzx1.h
...
@@ -2,6 +2,7 @@
 #define _ASM_IA64_MACHVEC_HPZX1_h

 extern ia64_mv_setup_t dig_setup;
+extern ia64_mv_setup_t sba_setup;
 extern ia64_mv_dma_alloc_coherent sba_alloc_coherent;
 extern ia64_mv_dma_free_coherent sba_free_coherent;
 extern ia64_mv_dma_map_single sba_map_single;
...
@@ -19,7 +20,7 @@ extern ia64_mv_dma_mapping_error sba_dma_mapping_error;
  * the macros are used directly.
  */
 #define platform_name "hpzx1"
-#define platform_setup dig_setup
+#define platform_setup sba_setup
 #define platform_dma_init machvec_noop
 #define platform_dma_alloc_coherent sba_alloc_coherent
 #define platform_dma_free_coherent sba_free_coherent
...
include/asm-ia64/perfmon.h
...
@@ -242,9 +242,10 @@ extern int pfm_unregister_buffer_fmt(pfm_uuid_t uuid);
 /*
  * perfmon interface exported to modules
  */
-extern long pfm_mod_fast_read_pmds(struct task_struct *, unsigned long mask[4], unsigned long *addr, struct pt_regs *regs);
-extern long pfm_mod_read_pmds(struct task_struct *, pfarg_reg_t *req, unsigned int nreq, struct pt_regs *regs);
-extern long pfm_mod_write_pmcs(struct task_struct *, pfarg_reg_t *req, unsigned int nreq, struct pt_regs *regs);
+extern int pfm_mod_read_pmds(struct task_struct *, void *req, unsigned int nreq, struct pt_regs *regs);
+extern int pfm_mod_write_pmcs(struct task_struct *, void *req, unsigned int nreq, struct pt_regs *regs);
+extern int pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs);
+extern int pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs);

 /*
  * describe the content of the local_cpu_date->pfm_syst_info field
...
include/asm-ia64/processor.h
...
@@ -137,14 +137,6 @@ struct ia64_psr {
  * state comes earlier:
  */
 struct cpuinfo_ia64 {
-	/* irq_stat must be 64-bit aligned */
-	union {
-		struct {
-			__u32 irq_count;
-			__u32 bh_count;
-		} f;
-		__u64 irq_and_bh_counts;
-	} irq_stat;
 	__u32 softirq_pending;
 	__u64 itm_delta;	/* # of clock cycles between clock ticks */
 	__u64 itm_next;		/* interval timer mask value to use for next clock tick */
...