Kirill Smelkov / linux

Commit 788ec66a, authored Jun 15, 2003 by Anton Blanchard

    Merge samba.org:/scratch/anton/linux-2.5
    into samba.org:/scratch/anton/tmp3

Parents: e9213c78, 4eb7de4f

Showing 64 changed files with 877 additions and 890 deletions (+877 -890).
arch/alpha/vmlinux.lds.S                    +3 -0
arch/arm/vmlinux-armo.lds.in                +1 -0
arch/arm/vmlinux-armv.lds.in                +1 -0
arch/cris/vmlinux.lds.S                     +6 -1
arch/h8300/platform/h8300h/generic/rom.ld   +1 -0
arch/i386/vmlinux.lds.S                     +1 -0
arch/ia64/vmlinux.lds.S                     +4 -0
arch/m68k/vmlinux-std.lds                   +1 -0
arch/m68k/vmlinux-sun3.lds                  +1 -0
arch/m68knommu/vmlinux.lds.S                +1 -3
arch/mips/vmlinux.lds.S                     +1 -0
arch/mips64/vmlinux.lds.S                   +1 -0
arch/parisc/vmlinux.lds.S                   +1 -0
arch/ppc/vmlinux.lds.S                      +2 -0
arch/ppc64/vmlinux.lds.S                    +1 -0
arch/s390/vmlinux.lds.S                     +1 -0
arch/sh/vmlinux.lds.S                       +1 -0
arch/sparc/vmlinux.lds.S                    +1 -0
arch/sparc64/vmlinux.lds.S                  +1 -0
arch/x86_64/Kconfig                         +12 -0
arch/x86_64/ia32/ia32entry.S                +1 -0
arch/x86_64/kernel/acpi/boot.c              +4 -2
arch/x86_64/kernel/apic.c                   +7 -1
arch/x86_64/kernel/bluesmoke.c              +2 -12
arch/x86_64/kernel/pci-gart.c               +2 -13
arch/x86_64/kernel/process.c                +5 -1
arch/x86_64/kernel/smpboot.c                +8 -3
arch/x86_64/kernel/time.c                   +224 -34
arch/x86_64/kernel/traps.c                  +1 -1
arch/x86_64/kernel/vsyscall.c               +13 -5
arch/x86_64/lib/clear_page.S                +11 -21
arch/x86_64/lib/copy_page.S                 +83 -66
arch/x86_64/lib/csum-copy.S                 +134 -160
arch/x86_64/lib/csum-partial.c              +60 -40
arch/x86_64/lib/csum-wrappers.c             +40 -22
arch/x86_64/lib/memcpy.S                    +35 -60
arch/x86_64/lib/memset.S                    +30 -27
arch/x86_64/vmlinux.lds.S                   +4 -3
drivers/char/rocket.c                       +36 -251
drivers/char/rocket.h                       +2 -22
drivers/char/rocket_int.h                   +2 -44
drivers/pci/pci.ids                         +2 -0
drivers/scsi/scsi_scan.c                    +2 -2
fs/fs-writeback.c                           +14 -1
fs/namei.c                                  +1 -1
include/asm-generic/vmlinux.lds.h           +6 -0
include/asm-i386/fixmap.h                   +8 -0
include/asm-x86_64/checksum.h               +11 -1
include/asm-x86_64/fixmap.h                 +2 -0
include/asm-x86_64/mc146818rtc.h            +5 -0
include/asm-x86_64/processor.h              +1 -1
include/asm-x86_64/proto.h                  +2 -0
include/asm-x86_64/timex.h                  +29 -1
include/asm-x86_64/vsyscall.h               +10 -8
include/linux/init.h                        +6 -0
include/linux/pci_ids.h                     +2 -0
include/linux/security.h                    +0 -39
init/main.c                                 +1 -1
kernel/sys.c                                +7 -13
mm/memory.c                                 +8 -6
security/capability.c                       +1 -9
security/dummy.c                            +0 -12
security/root_plug.c                        +1 -2
security/security.c                         +12 -1
arch/alpha/vmlinux.lds.S (+3 -0)

@@ -74,6 +74,9 @@ SECTIONS
 	__con_initcall_end = .;
   }
+  . = ALIGN(8);
+  SECURITY_INIT
+
   . = ALIGN(64);
   __per_cpu_start = .;
   .data.percpu : { *(.data.percpu) }
arch/arm/vmlinux-armo.lds.in (+1 -0)

@@ -43,6 +43,7 @@ SECTIONS
 		__con_initcall_start = .;
 		*(.con_initcall.init)
 		__con_initcall_end = .;
+		SECURITY_INIT
 		. = ALIGN(32768);
 		__init_end = .;
 	}
arch/arm/vmlinux-armv.lds.in (+1 -0)

@@ -53,6 +53,7 @@ SECTIONS
 		__con_initcall_start = .;
 		*(.con_initcall.init)
 		__con_initcall_end = .;
+		SECURITY_INIT
 		. = ALIGN(32);
 		__initramfs_start = .;
 			usr/built-in.o(.init.ramfs)
arch/cris/vmlinux.lds.S (+6 -1)

@@ -74,7 +74,12 @@ SECTIONS
 		__con_initcall_start = .;
 		*(.con_initcall.init)
 		__con_initcall_end = .;
 	}
+	.security_initcall.init : {
+		__security_initcall_start = .;
+		*(.security_initcall.init)
+		__security_initcall_end = .;
+	}
 	/* We fill to the next page, so we can discard all init
 	   pages without needing to consider what payload might be
 	   appended to the kernel image. */
arch/h8300/platform/h8300h/generic/rom.ld (+1 -0)

@@ -83,6 +83,7 @@ SECTIONS
 	___con_initcall_start = .;
 	*(.con_initcall.init)
 	___con_initcall_end = .;
+	SECURITY_INIT
 	. = ALIGN(4);
 	___initramfs_start = .;
 	*(.init.ramfs)
arch/i386/vmlinux.lds.S (+1 -0)

@@ -81,6 +81,7 @@ SECTIONS
   __con_initcall_start = .;
   .con_initcall.init : { *(.con_initcall.init) }
   __con_initcall_end = .;
+  SECURITY_INIT
   . = ALIGN(4);
   __alt_instructions = .;
   .altinstructions : { *(.altinstructions) }
arch/ia64/vmlinux.lds.S (+4 -0)

@@ -141,6 +141,10 @@ SECTIONS
   .con_initcall.init : AT(ADDR(.con_initcall.init) - PAGE_OFFSET)
 	{ *(.con_initcall.init) }
   __con_initcall_end = .;
+  __security_initcall_start = .;
+  .security_initcall.init : AT(ADDR(.security_initcall.init) - PAGE_OFFSET)
+	{ *(.security_initcall.init) }
+  __security_initcall_end = .;
   . = ALIGN(PAGE_SIZE);
   __init_end = .;
arch/m68k/vmlinux-std.lds (+1 -0)

@@ -67,6 +67,7 @@ SECTIONS
   __con_initcall_start = .;
   .con_initcall.init : { *(.con_initcall.init) }
   __con_initcall_end = .;
+  SECURITY_INIT
   . = ALIGN(8192);
   __initramfs_start = .;
   .init.ramfs : { *(.init.ramfs) }
arch/m68k/vmlinux-sun3.lds (+1 -0)

@@ -61,6 +61,7 @@ __init_begin = .;
   __con_initcall_start = .;
   .con_initcall.init : { *(.con_initcall.init) }
   __con_initcall_end = .;
+  SECURITY_INIT
   . = ALIGN(8192);
   __initramfs_start = .;
   .init.ramfs : { *(.init.ramfs) }
arch/m68knommu/vmlinux.lds.S (+1 -3)

@@ -277,9 +277,7 @@ SECTIONS {
 		__con_initcall_start = .;
 		*(.con_initcall.init)
 		__con_initcall_end = .;
-		__security_initcall_start = .;
-		*(.security_initcall.init)
-		__security_initcall_end = .;
+		SECURITY_INIT
 		. = ALIGN(4);
 		__initramfs_start = .;
 		*(.init.ramfs)
arch/mips/vmlinux.lds.S (+1 -0)

@@ -54,6 +54,7 @@ SECTIONS
   __con_initcall_start = .;
   .con_initcall.init : { *(.con_initcall.init) }
   __con_initcall_end = .;
+  SECURITY_INIT
   . = ALIGN(4096);	/* Align double page for init_task_union */
   __init_end = .;
arch/mips64/vmlinux.lds.S (+1 -0)

@@ -53,6 +53,7 @@ SECTIONS
   __con_initcall_start = .;
   .con_initcall.init : { *(.con_initcall.init) }
   __con_initcall_end = .;
+  SECURITY_INIT
   . = ALIGN(4096);	/* Align double page for init_task_union */
   __init_end = .;
arch/parisc/vmlinux.lds.S (+1 -0)

@@ -80,6 +80,7 @@ SECTIONS
   __con_initcall_start = .;
   .con_initcall.init : { *(.con_initcall.init) }
   __con_initcall_end = .;
+  SECURITY_INIT
   . = ALIGN(4096);
   __initramfs_start = .;
   .init.ramfs : { *(.init.ramfs) }
arch/ppc/vmlinux.lds.S (+2 -0)

@@ -119,6 +119,8 @@ SECTIONS
   .con_initcall.init : { *(.con_initcall.init) }
   __con_initcall_end = .;
+  SECURITY_INIT
+
   __start___ftr_fixup = .;
   __ftr_fixup : { *(__ftr_fixup) }
   __stop___ftr_fixup = .;
arch/ppc64/vmlinux.lds.S (+1 -0)

@@ -112,6 +112,7 @@ SECTIONS
   __con_initcall_start = .;
   .con_initcall.init : { *(.con_initcall.init) }
   __con_initcall_end = .;
+  SECURITY_INIT
   . = ALIGN(4096);
   __initramfs_start = .;
   .init.ramfs : { *(.init.ramfs) }
arch/s390/vmlinux.lds.S (+1 -0)

@@ -94,6 +94,7 @@ SECTIONS
   __con_initcall_start = .;
   .con_initcall.init : { *(.con_initcall.init) }
   __con_initcall_end = .;
+  SECURITY_INIT
   . = ALIGN(256);
   __initramfs_start = .;
   .init.ramfs : { *(.init.ramfs) }
arch/sh/vmlinux.lds.S (+1 -0)

@@ -71,6 +71,7 @@ SECTIONS
   __con_initcall_start = .;
   .con_initcall.init : { *(.con_initcall.init) }
   __con_initcall_end = .;
+  SECURITY_INIT
   __machvec_start = .;
   .machvec.init : { *(.machvec.init) }
   __machvec_end = .;
arch/sparc/vmlinux.lds.S (+1 -0)

@@ -62,6 +62,7 @@ SECTIONS
   __con_initcall_start = .;
   .con_initcall.init : { *(.con_initcall.init) }
   __con_initcall_end = .;
+  SECURITY_INIT
   . = ALIGN(4096);
   __initramfs_start = .;
   .init.ramfs : { *(.init.ramfs) }
arch/sparc64/vmlinux.lds.S (+1 -0)

@@ -68,6 +68,7 @@ SECTIONS
   __con_initcall_start = .;
   .con_initcall.init : { *(.con_initcall.init) }
   __con_initcall_end = .;
+  SECURITY_INIT
   . = ALIGN(8192);
   __initramfs_start = .;
   .init.ramfs : { *(.init.ramfs) }
arch/x86_64/Kconfig (+12 -0)

@@ -52,6 +52,18 @@ config EARLY_PRINTK
 	  klogd/syslogd or the X server. You should normally N here, unless
 	  you want to debug such a crash.

+config HPET_TIMER
+	bool
+	default y
+	help
+	  Use the IA-PC HPET (High Precision Event Timer) to manage
+	  time in preference to the PIT and RTC, if a HPET is
+	  present.  The HPET provides a stable time base on SMP
+	  systems, unlike the RTC, but it is more expensive to access,
+	  as it is off-chip.  You can find the HPET spec at
+	  <http://www.intel.com/labs/platcomp/hpet/hpetspec.htm>.
+
+	  If unsure, say Y.
+
 config GENERIC_ISA_DMA
 	bool
arch/x86_64/ia32/ia32entry.S (+1 -0)

@@ -47,6 +47,7 @@
 ENTRY(ia32_cstar_target)
 	swapgs
 	movl	%esp,%r8d
+	movq	%r8,%gs:pda_oldrsp
 	movq	%gs:pda_kernelstack,%rsp
 	sti
 	SAVE_ARGS 8,1
arch/x86_64/kernel/acpi/boot.c (+4 -2)

@@ -244,9 +244,11 @@ acpi_parse_hpet (
 		return -1;
 	}

-	hpet.address = hpet_tbl->addr.addrl | ((long) hpet_tbl->addr.addrh << 32);
+	vxtime.hpet_address = hpet_tbl->addr.addrl |
+		((long) hpet_tbl->addr.addrh << 32);

-	printk(KERN_INFO "acpi: HPET id: %#x base: %#lx\n", hpet_tbl->id, hpet.address);
+	printk(KERN_INFO "acpi: HPET id: %#x base: %#lx\n",
+		hpet_tbl->id, vxtime.hpet_address);

 	return 0;
 }
arch/x86_64/kernel/apic.c (+7 -1)

@@ -690,7 +690,13 @@ static void setup_APIC_timer(unsigned int clocks)
 	}

 	/* wait for irq slice */
-	{
+	if (vxtime.hpet_address) {
+		int trigger = hpet_readl(HPET_T0_CMP);
+		while (hpet_readl(HPET_COUNTER) >= trigger)
+			/* do nothing */ ;
+		while (hpet_readl(HPET_COUNTER) < trigger)
+			/* do nothing */ ;
+	} else {
 		int c1, c2;
 		outb_p(0x00, 0x43);
 		c2 = inb_p(0x40);
arch/x86_64/kernel/bluesmoke.c (+2 -12)

@@ -363,22 +363,12 @@ static void __init k8_mcheck_init(struct cpuinfo_x86 *c)
 	machine_check_vector = k8_machine_check;
 	for (i = 0; i < banks; i++) {
 		u64 val = ((1UL << i) & disabled_banks) ? 0 : ~0UL;
+		if (val && i == 4)
+			val = k8_nb_flags;
 		wrmsrl(MSR_IA32_MC0_CTL + 4 * i, val);
 		wrmsrl(MSR_IA32_MC0_STATUS + 4 * i, 0);
 	}
-	nb = find_k8_nb();
-	if (nb != NULL) {
-		u32 reg, reg2;
-		pci_read_config_dword(nb, 0x40, &reg);
-		pci_write_config_dword(nb, 0x40, k8_nb_flags);
-		pci_read_config_dword(nb, 0x44, &reg2);
-		pci_write_config_dword(nb, 0x44, reg2);
-		printk(KERN_INFO "Machine Check for K8 Northbridge %d enabled (%x,%x)\n",
-		       nb->devfn, reg, reg2);
-		ignored_banks |= (1UL << 4);
-	}
 	set_in_cr4(X86_CR4_MCE);

 	if (mcheck_interval && (smp_processor_id() == 0)) {
arch/x86_64/kernel/pci-gart.c (+2 -13)

@@ -173,12 +173,10 @@ void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
 	if (iommu_page == -1)
 		goto error;

-	/* Fill in the GATT, allocating pages as needed. */
+	/* Fill in the GATT */
 	for (i = 0; i < size; i++) {
 		unsigned long phys_mem;
 		void *mem = memory + i*PAGE_SIZE;
 		if (i > 0)
 			atomic_inc(&virt_to_page(mem)->count);
 		phys_mem = virt_to_phys(mem);
 		BUG_ON(phys_mem & ~PHYSICAL_PAGE_MASK);
 		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);

@@ -206,16 +204,14 @@ void pci_free_consistent(struct pci_dev *hwdev, size_t size,
 	size = round_up(size, PAGE_SIZE);
 	if (bus >= iommu_bus_base && bus <= iommu_bus_base + iommu_size) {
 		unsigned pages = size >> PAGE_SHIFT;
-		int i;
 		iommu_page = (bus - iommu_bus_base) >> PAGE_SHIFT;
 		vaddr = __va(GPTE_DECODE(iommu_gatt_base[iommu_page]));
+#ifdef CONFIG_IOMMU_DEBUG
+		int i;
 		for (i = 0; i < pages; i++) {
 			u64 pte = iommu_gatt_base[iommu_page + i];
 			BUG_ON((pte & GPTE_VALID) == 0);
 			iommu_gatt_base[iommu_page + i] = 0;
 		}
+#endif
 		free_iommu(iommu_page, pages);
 	}
 	free_pages((unsigned long)vaddr, get_order(size));

@@ -319,11 +315,6 @@ dma_addr_t pci_map_single(struct pci_dev *dev, void *addr, size_t size, int dir)
 	 */
 	iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
-#ifdef CONFIG_IOMMU_DEBUG
-	/* paranoia check */
-	BUG_ON(GPTE_DECODE(iommu_gatt_base[iommu_page + i]) != phys_mem);
-#endif

 #ifdef CONFIG_IOMMU_LEAK
 	/* XXX need eventually caller of pci_map_sg */
 	if (iommu_leak_tab)

@@ -350,7 +341,6 @@ void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
 		return;
 	iommu_page = (dma_addr - iommu_bus_base) >> PAGE_SHIFT;
 	npages = round_up(size + (dma_addr & ~PAGE_MASK), PAGE_SIZE) >> PAGE_SHIFT;
+#ifdef CONFIG_IOMMU_DEBUG
 	int i;
 	for (i = 0; i < npages; i++) {
 		iommu_gatt_base[iommu_page + i] = 0;

@@ -359,7 +349,6 @@ void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
 		iommu_leak_tab[iommu_page + i] = 0;
 #endif
 	}
+#endif
 	free_iommu(iommu_page, npages);
 }
arch/x86_64/kernel/process.c (+5 -1)

@@ -150,7 +150,7 @@ __setup("idle=", idle_setup);

 /* Prints also some state that isn't saved in the pt_regs */
-void show_regs(struct pt_regs * regs)
+void __show_regs(struct pt_regs * regs)
 {
 	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
 	unsigned int fsindex, gsindex;

@@ -192,7 +192,11 @@ void show_regs(struct pt_regs * regs)
 	       fs, fsindex, gs, gsindex, shadowgs);
 	printk("CS:  %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0);
 	printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4);
 }

+void show_regs(struct pt_regs *regs)
+{
+	__show_regs(regs);
+	show_trace(&regs->rsp);
+}
arch/x86_64/kernel/smpboot.c (+8 -3)

@@ -67,6 +67,8 @@ struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
 /* Set when the idlers are all forked */
 int smp_threads_ready;

+extern void time_init_smp(void);
+
 /*
  * Trampoline 80x86 program as an array.
  */

@@ -760,7 +762,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
 		if (APIC_init_uniprocessor())
 			printk(KERN_NOTICE "Local APIC not detected."
 					   " Using dummy APIC emulation.\n");
-		return;
+		goto smp_done;
 	}

 	/*

@@ -784,7 +786,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
-		cpu_online_map = phys_cpu_present_map = 1;
+		phys_cpu_present_map = 1;
 		disable_apic = 1;
-		return;
+		goto smp_done;
 	}

 	verify_local_APIC();

@@ -799,7 +801,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
-		cpu_online_map = phys_cpu_present_map = 1;
+		phys_cpu_present_map = 1;
 		disable_apic = 1;
-		return;
+		goto smp_done;
 	}

 	connect_bsp_APIC();

@@ -883,6 +885,9 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
 	 */
 	if (cpu_has_tsc && cpucount)
 		synchronize_tsc_bp();
+
+ smp_done:
+	time_init_smp();
 }

 /* These are wrappers to interface to the new boot process.  Someone
arch/x86_64/kernel/time.c (+224 -34)

[diff collapsed in the original page and not expanded; contents not captured]
arch/x86_64/kernel/traps.c (+1 -1)

@@ -263,7 +263,7 @@ void show_registers(struct pt_regs *regs)
 	rsp = regs->rsp;

 	printk("CPU %d ", cpu);
-	show_regs(regs);
+	__show_regs(regs);
 	printk("Process %s (pid: %d, stackpage=%08lx)\n",
 		cur->comm, cur->pid, 4096 + (unsigned long) cur);
arch/x86_64/kernel/vsyscall.c (+13 -5)

@@ -78,13 +78,21 @@ static force_inline void do_vgettimeofday(struct timeval * tv)
 	do {
 		sequence = read_seqbegin(&__xtime_lock);
-		sync_core();
-		rdtscll(t);
 		sec = __xtime.tv_sec;
 		usec = (__xtime.tv_nsec / 1000) +
-			(__jiffies - __wall_jiffies) * (1000000 / HZ) +
-			(t - __hpet.last_tsc) * (1000000 / HZ) / __hpet.ticks +
-			__hpet.offset;
+			(__jiffies - __wall_jiffies) * (1000000 / HZ);
+
+		if (__vxtime.mode == VXTIME_TSC) {
+			sync_core();
+			rdtscll(t);
+			usec += ((t - __vxtime.last_tsc) *
+				 __vxtime.tsc_quot) >> 32;
+		} else {
+#if 0
+			usec += ((readl(fix_to_virt(VSYSCALL_HPET) + 0xf0) -
+				  __vxtime.last) * __vxtime.quot) >> 32;
+#endif
+		}
 	} while (read_seqretry(&__xtime_lock, sequence));

 	tv->tv_sec = sec + usec / 1000000;
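Note on the TSC branch above: tsc_quot is a 32.32 fixed-point factor (microseconds per TSC tick scaled by 2^32), so the multiply-and-shift converts elapsed cycles to microseconds without a divide in the vsyscall fast path. A minimal standalone sketch of the same arithmetic (variable names and calibration are illustrative, not the kernel's exact code):

	#include <stdio.h>

	int main(void)
	{
		/* assumes 64-bit unsigned long, as on x86-64 */
		unsigned long cpu_khz = 2000000;	/* assumed 2 GHz clock */
		/* us per cycle in 32.32 fixed point: (1000 / khz) << 32, rounded */
		unsigned long tsc_quot = ((1000UL << 32) + cpu_khz / 2) / cpu_khz;
		unsigned long cycles = 4000;		/* elapsed TSC ticks */
		unsigned long usec = (cycles * tsc_quot) >> 32;
		printf("%lu us\n", usec);		/* prints 2 us */
		return 0;
	}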
arch/x86_64/lib/clear_page.S (+11 -21)

 /*
  * Copyright 2002 Andi Kleen, SuSE Labs.
  */

 #include <linux/linkage.h>

 /*
  * Zero a page.
  * rdi	page
  */
-ENTRY(clear_page)
+	.globl clear_page
+	.p2align 4
+clear_page:
 	xorl	%eax,%eax
-	movl	$4096/128,%ecx
-	movl	$128,%edx
-loop:
+	movl	$4096/64,%ecx
+	.p2align 4
+.Lloop:
+	decl	%ecx
 #define PUT(x) movq %rax,x*8(%rdi)
-	PUT(0)
+	movq %rax,(%rdi)
 	PUT(1)
 	PUT(2)
 	PUT(3)

@@ -21,17 +20,8 @@ loop:
 	PUT(5)
 	PUT(6)
 	PUT(7)
-	PUT(8)
-	PUT(9)
-	PUT(10)
-	PUT(11)
-	PUT(12)
-	PUT(13)
-	PUT(14)
-	PUT(15)
-	addq	%rdx,%rdi
-	decl	%ecx
-	jnz	loop
-	sfence
+	leaq	64(%rdi),%rdi
+	jnz	.Lloop
+	nop
 	ret
arch/x86_64/lib/copy_page.S (+83 -66)

-/*
- * Copyright 2002 Andi Kleen, SuSE Labs.
- */
-
-#include <linux/linkage.h>
-#include <linux/config.h>
-
-#ifdef CONFIG_PREEMPT
-#warning "check your fpu context saving!"
-#endif
+/* Written 2003 by Andi Kleen, based on a kernel by Evandro Menezes */
+
+/* Don't use streaming store because it's better when the target
+   ends up in cache. */
+
+/* Could vary the prefetch distance based on SMP/UP */

 /*
  * Copy a page.
  *
  * rdi	destination page
  * rsi	source page
- *
- * src/dst must be aligned to 16 bytes.
- *
- * Warning: in case of super lazy FP save this needs to be preempt_stop
  */

 	.globl copy_page
-	.p2align
+	.p2align 4
 copy_page:
-	prefetchnta (%rsi)
-	prefetchnta 64(%rsi)
-
-	movq %rsp,%rax
-	subq $16*4,%rsp
-	andq $~15,%rsp
-	movdqa %xmm0,(%rsp)
-	movdqa %xmm1,16(%rsp)
-	movdqa %xmm2,32(%rsp)
-	movdqa %xmm3,48(%rsp)
-
-	movl   $(4096/128)-2,%ecx
-	movl   $128,%edx
-loop:
-	prefetchnta (%rsi)
-	prefetchnta 64(%rsi)
-loop_no_prefetch:
-	movdqa (%rsi),%xmm0
-	movdqa 16(%rsi),%xmm1
-	movdqa 32(%rsi),%xmm2
-	movdqa 48(%rsi),%xmm3
-	movntdq %xmm0,(%rdi)
-	movntdq %xmm1,16(%rdi)
-	movntdq %xmm2,32(%rdi)
-	movntdq %xmm3,48(%rdi)
-
-	movdqa 64(%rsi),%xmm0
-	movdqa 80(%rsi),%xmm1
-	movdqa 96(%rsi),%xmm2
-	movdqa 112(%rsi),%xmm3
-	movntdq %xmm0,64(%rdi)
-	movntdq %xmm1,80(%rdi)
-	movntdq %xmm2,96(%rdi)
-	movntdq %xmm3,112(%rdi)
-
-	addq   %rdx,%rdi
-	addq   %rdx,%rsi
-	decl   %ecx
-	jns    loop
-	cmpl   $-1,%ecx
-	je     loop_no_prefetch
-
-	sfence
-
-	movdqa (%rsp),%xmm0
-	movdqa 16(%rsp),%xmm1
-	movdqa 32(%rsp),%xmm2
-	movdqa 48(%rsp),%xmm3
-	movq   %rax,%rsp
+	prefetch (%rsi)
+	prefetch 1*64(%rsi)
+	prefetch 2*64(%rsi)
+	prefetch 3*64(%rsi)
+	prefetch 4*64(%rsi)
+	prefetchw (%rdi)
+	prefetchw 1*64(%rdi)
+	prefetchw 2*64(%rdi)
+	prefetchw 3*64(%rdi)
+	prefetchw 4*64(%rdi)
+
+	subq	$3*8,%rsp
+	movq	%rbx,(%rsp)
+	movq	%r12,1*8(%rsp)
+	movq	%r13,2*8(%rsp)
+
+	movl	$(4096/64)-5,%ecx
+	.p2align 4
+.Loop64:
+	dec	%rcx
+
+	movq	(%rsi), %rax
+	movq	8(%rsi), %rbx
+	movq	16(%rsi), %rdx
+	movq	24(%rsi), %r8
+	movq	32(%rsi), %r9
+	movq	40(%rsi), %r10
+	movq	48(%rsi), %r11
+	movq	56(%rsi), %r12
+
+	prefetch 5*64(%rsi)
+
+	movq	%rax, (%rdi)
+	movq	%rbx, 8(%rdi)
+	movq	%rdx, 16(%rdi)
+	movq	%r8, 24(%rdi)
+	movq	%r9, 32(%rdi)
+	movq	%r10, 40(%rdi)
+	movq	%r11, 48(%rdi)
+	movq	%r12, 56(%rdi)
+
+	prefetchw 5*64(%rdi)
+
+	leaq	64(%rsi),%rsi
+	leaq	64(%rdi),%rdi
+
+	jnz	.Loop64
+
+	movl	$5,%ecx
+	.p2align 4
+.Loop2:
+	decl	%ecx
+
+	movq	(%rsi), %rax
+	movq	8(%rsi), %rbx
+	movq	16(%rsi), %rdx
+	movq	24(%rsi), %r8
+	movq	32(%rsi), %r9
+	movq	40(%rsi), %r10
+	movq	48(%rsi), %r11
+	movq	56(%rsi), %r12
+
+	movq	%rax, (%rdi)
+	movq	%rbx, 8(%rdi)
+	movq	%rdx, 16(%rdi)
+	movq	%r8, 24(%rdi)
+	movq	%r9, 32(%rdi)
+	movq	%r10, 40(%rdi)
+	movq	%r11, 48(%rdi)
+	movq	%r12, 56(%rdi)
+
+	leaq	64(%rdi),%rdi
+	leaq	64(%rsi),%rsi
+
+	jnz	.Loop2
+
+	movq	(%rsp),%rbx
+	movq	1*8(%rsp),%r12
+	movq	2*8(%rsp),%r13
+	addq	$3*8,%rsp
 	ret
arch/x86_64/lib/csum-copy.S (+134 -160)

 /*
- * Copyright 2002 Andi Kleen
+ * Copyright 2002, 2003 Andi Kleen, SuSE Labs.
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file COPYING in the main directory of this archive

@@ -8,7 +8,6 @@
 #include <linux/linkage.h>
 #include <asm/errno.h>

-// #define FIX_ALIGNMENT 1
-
 /*
  * Checksum copy with exception handling.
  * On exceptions src_err_ptr or dst_err_ptr is set to -EFAULT and the

@@ -26,17 +25,14 @@
  * eax  64bit sum. undefined in case of exception.
  *
  * Wrappers need to take care of valid exception sum and zeroing.
+ * They also should align source or destination to 8 bytes.
  */

-/* for now - should vary this based on direction */
-#define prefetch prefetcht2
-#define movnti	movq
-
	.macro source
 10:
	.section __ex_table,"a"
	.align 8
-	.quad 10b,bad_source
+	.quad 10b,.Lbad_source
	.previous
	.endm

@@ -44,57 +40,74 @@
	.macro dest
 20:
	.section __ex_table,"a"
	.align 8
-	.quad 20b,bad_dest
+	.quad 20b,.Lbad_dest
	.previous
	.endm

-	.macro ignore
+	.macro ignore L=.Lignore
 30:
	.section __ex_table,"a"
	.align 8
-	.quad 30b,ignore
+	.quad 30b,\L
	.previous
	.endm

	.globl csum_partial_copy_generic
-	.p2align
+	.p2align 4
 csum_partial_copy_generic:
-	prefetchnta (%rdi)
+	cmpl	$3*64,%edx
+	jle	.Lignore
+
+	ignore
+	prefetch (%rdi)
+	ignore
+	prefetch 1*64(%rdi)
+	ignore
+	prefetch 2*64(%rdi)
+	ignore
+	prefetch 3*64(%rdi)
+	ignore
+	prefetch 4*64(%rdi)
+	ignore
+	prefetchw (%rsi)
+	ignore
+	prefetchw 1*64(%rsi)
+	ignore
+	prefetchw 2*64(%rsi)
+	ignore
+	prefetchw 3*64(%rsi)
+	ignore
+	prefetchw 4*64(%rsi)
+
+.Lignore:
+	subq	$7*8,%rsp
+	movq	%rbx,2*8(%rsp)
+	movq	%r12,3*8(%rsp)
+	movq	%r14,4*8(%rsp)
+	movq	%r13,5*8(%rsp)
+	movq	%rbp,6*8(%rsp)
+
+	movq	%r8,(%rsp)
+	movq	%r9,1*8(%rsp)
-	pushq	%rbx
-	pushq	%r12
-	pushq	%r14
-	pushq	%r15
-	movq	%r8,%r14
-	movq	%r9,%r15

	movl	%ecx,%eax
	movl	%edx,%ecx

-#ifdef FIX_ALIGNMENT
-	/* align source to 8 bytes */
-	movl	%edi,%r8d
-	andl	$7,%r8d
-	jnz	bad_alignment
-after_bad_alignment:
-#endif
-
-	movl	$64,%r10d
	xorl	%r9d,%r9d
	movq	%rcx,%r12

	shrq	$6,%r12
-	/* loopcounter is maintained as one less to test efficiently for
-	   the previous to last iteration. This is needed to stop the
-	   prefetching. */
-	decq	%r12
-	js	handle_tail		/* < 64 */
-	jz	loop_no_prefetch	/* = 64 + X */
+	jz	.Lhandle_tail		/* < 64 */
	clc

	/* main loop. clear in 64 byte blocks */
-	/* tries hard not to prefetch over the boundary */
-	/* r10:	64, r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
+	/* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
	/* r11:	temp3, rdx: temp4, r12 loopcnt */
-	.p2align
-loop:
-	/* Could prefetch more than one loop, but then it would be even
-	   trickier to avoid prefetching over the boundary. The hardware
-	   prefetch should take care of this anyways. The reason for this
-	   prefetch is just the non temporal hint to avoid cache pollution.
-	   Hopefully this will be handled properly by the hardware. */
-	prefetchnta 64(%rdi)
-loop_no_prefetch:
+	/* r10:	temp5, rbp: temp6, r14 temp7, r13 temp8 */
+	.p2align 4
+.Lloop:
	source
	movq	(%rdi),%rbx
	source
	movq	8(%rdi),%r8

@@ -104,175 +117,136 @@
	source
	movq	24(%rdi),%rdx

-	dest
-	movnti	%rbx,(%rsi)
-	dest
-	movnti	%r8,8(%rsi)
-	dest
-	movnti	%r11,16(%rsi)
-	dest
-	movnti	%rdx,24(%rsi)
-
-	addq	%rbx,%rax
+	source
+	movq	32(%rdi),%r10
+	source
+	movq	40(%rdi),%rbp
+	source
+	movq	48(%rdi),%r14
+	source
+	movq	56(%rdi),%r13
+
+	ignore 2f
+	prefetch 5*64(%rdi)
+2:
+	adcq	%rbx,%rax
	adcq	%r8,%rax
	adcq	%r11,%rax
	adcq	%rdx,%rax
+	adcq	%r10,%rax
+	adcq	%rbp,%rax
+	adcq	%r14,%rax
+	adcq	%r13,%rax

-	source
-	movq	32(%rdi),%rbx
-	source
-	movq	40(%rdi),%r8
-	source
-	movq	48(%rdi),%r11
-	source
-	movq	56(%rdi),%rdx
+	decl	%r12d

	dest
-	movnti	%rbx,32(%rsi)
+	movq	%rbx,(%rsi)
	dest
-	movnti	%r8,40(%rsi)
+	movq	%r8,8(%rsi)
	dest
-	movnti	%r11,48(%rsi)
+	movq	%r11,16(%rsi)
	dest
-	movnti	%rdx,56(%rsi)
+	movq	%rdx,24(%rsi)

-	adcq	%rbx,%rax
-	adcq	%r8,%rax
-	adcq	%r11,%rax
-	adcq	%rdx,%rax
-
-	adcq	%r9,%rax	/* add in carry */
+	dest
+	movq	%r10,32(%rsi)
+	dest
+	movq	%rbp,40(%rsi)
+	dest
+	movq	%r14,48(%rsi)
+	dest
+	movq	%r13,56(%rsi)

-	ignore 2f
-	prefetch 5*64(%rdi)
-2:
	ignore 3f
	prefetchw 5*64(%rsi)
 3:

-	addq	%r10,%rdi
-	addq	%r10,%rsi
+	leaq	64(%rdi),%rdi
+	leaq	64(%rsi),%rsi

-	decq	%r12
-	jz	loop_no_prefetch	/* previous to last iteration? */
-	jns	loop
+	jnz	.Lloop
+
+	adcq	%r9,%rax

	/* do last upto 56 bytes */
-handle_tail:
+.Lhandle_tail:
	/* ecx:	count */
	movl	%ecx,%r10d
	andl	$63,%ecx
	shrl	$3,%ecx
-	jz	fold
+	jz	.Lfold
	clc
-	movl	$8,%edx
-loop_8:
+	.p2align 4
+.Lloop_8:
	source
	movq	(%rdi),%rbx
	adcq	%rbx,%rax
	dest
-	movnti	%rbx,(%rsi)
-	leaq	(%rsi,%rdx),%rsi	/* preserve carry */
-	leaq	(%rdi,%rdx),%rdi
-	decl	%ecx
-	jnz	loop_8
+	movq	%rbx,(%rsi)
+	decl	%ecx
+	leaq	8(%rsi),%rsi		/* preserve carry */
+	leaq	8(%rdi),%rdi
+	jnz	.Lloop_8
	adcq	%r9,%rax	/* add in carry */

-fold:
+.Lfold:
	/* reduce checksum to 32bits */
	movl	%eax,%ebx
	shrq	$32,%rax
-	addq	%rbx,%rax
+	addl	%ebx,%eax
	adcl	%r9d,%eax

	/* do last upto 6 bytes */
-handle_7:
+.Lhandle_7:
	movl	%r10d,%ecx
	andl	$7,%ecx
	shrl	$1,%ecx
-	jz	handle_1
+	jz	.Lhandle_1
	movl	$2,%edx
	xorl	%ebx,%ebx
	clc
-loop_1:
+	.p2align 4
+.Lloop_1:
	source
	movw	(%rdi),%bx
-	adcq	%rbx,%rax
+	adcl	%ebx,%eax
	dest
	movw	%bx,(%rsi)
-	addq	%rdx,%rdi
-	addq	%rdx,%rsi
-	decl	%ecx
-	jnz	loop_1
-	adcw	%r9w,%ax	/* add in carry */
+	decl	%ecx
+	leaq	2(%rdi),%rdi
+	leaq	2(%rsi),%rsi
+	jnz	.Lloop_1
+	adcl	%r9d,%eax	/* add in carry */

	/* handle last odd byte */
-handle_1:
+.Lhandle_1:
	testl	$1,%r10d
-	jz	ende
+	jz	.Lende
	xorl	%ebx,%ebx
	source
	movb	(%rdi),%bl
	dest
	movb	%bl,(%rsi)
-	addw	%bx,%ax
-	adcw	%r9w,%ax	/* carry */
+	addl	%ebx,%eax
+	adcl	%r9d,%eax	/* carry */

-ende:
-	sfence
-	popq	%r15
-	popq	%r14
-	popq	%r12
-	popq	%rbx
+.Lende:
+	movq	2*8(%rsp),%rbx
+	movq	3*8(%rsp),%r12
+	movq	4*8(%rsp),%r14
+	movq	5*8(%rsp),%r13
+	movq	6*8(%rsp),%rbp
+	addq	$7*8,%rsp
	ret

-#ifdef FIX_ALIGNMENT
-	/* align source to 8 bytes. */
-	/* r8d: unalignedness, ecx len */
-bad_alignment:
-	testl	$1,%edi
-	jnz	odd_source
-
-	/* compute distance to next aligned position */
-	movl	$8,%r8d
-	xchgl	%r8d,%ecx
-	subl	%r8d,%ecx
-
-	/* handle unaligned part */
-	shrl	$1,%ecx
-	xorl	%ebx,%ebx
-	movl	$2,%r10d
-align_loop:
-	source
-	movw	(%rdi),%bx
-	addq	%rbx,%rax	/* carry cannot happen */
-	dest
-	movw	%bx,(%rsi)
-	addq	%r10,%rdi
-	addq	%r10,%rsi
-	decl	%ecx
-	jnz	align_loop
-	jmp	after_bad_alignment
-
-	/* weird case. need to swap the sum at the end because the spec
-	   requires 16 bit words of the sum to be always paired.
-	   handle it recursively because it should be rather rare. */
-odd_source:
-	/* copy odd byte */
-	xorl	%ebx,%ebx
-	source
-	movb	(%rdi),%bl
-	addl	%ebx,%eax	/* add to old checksum */
-	adcl	$0,%ecx
-	dest
-	movb	%al,(%rsi)
-
-	/* fix arguments */
-	movl	%eax,%ecx
-	incq	%rsi
-	incq	%rdi
-	decq	%rdx
-	call	csum_partial_copy_generic
-	bswap	%eax		/* this should work, but check */
-	jmp	ende
-#endif
-
	/* Exception handlers. Very simple, zeroing is done in the wrappers */
-bad_source:
-	movl	$-EFAULT,(%r14)
-	jmp	ende
+.Lbad_source:
+	movq	(%rsp),%rax
+	movl	$-EFAULT,(%rax)
+	jmp	.Lende

-bad_dest:
-	movl	$-EFAULT,(%r15)
-	jmp	ende
+.Lbad_dest:
+	movq	8(%rsp),%rax
+	movl	$-EFAULT,(%rax)
+	jmp	.Lende
arch/x86_64/lib/csum-partial.c (+60 -40)

@@ -7,35 +7,39 @@
 #include <linux/compiler.h>
 #include <linux/module.h>
 #include <asm/checksum.h>

-/* Better way for this sought */
-static inline unsigned short from64to16(unsigned long x)
+#define __force_inline inline __attribute__((always_inline))
+
+static inline unsigned short from32to16(unsigned a)
 {
-	/* add up 32-bit words for 33 bits */
-	x = (x & 0xffffffff) + (x >> 32);
-	/* add up 16-bit and 17-bit words for 17+c bits */
-	x = (x & 0xffff) + (x >> 16);
-	/* add up 16-bit and 2-bit for 16+c bit */
-	x = (x & 0xffff) + (x >> 16);
-	/* add up carry.. */
-	x = (x & 0xffff) + (x >> 16);
-	return x;
+	unsigned short b = a >> 16;
+	asm("addw %w2,%w0\n\t"
+	    "adcw $0,%w0\n"
+	    : "=r" (b)
+	    : "0" (b), "r" (a));
+	return b;
 }

 /*
  * Do a 64-bit checksum on an arbitrary memory area.
  * Returns a 32bit checksum.
  *
- * This isn't a great routine, but it's not _horrible_ either.
- * We rely on the compiler to unroll.
+ * This isn't as time critical as it used to be because many NICs
+ * do hardware checksumming these days.
+ *
+ * Things tried and found to not make it faster:
+ *    Manual Prefetching
+ *    Unrolling to an 128 bytes inner loop.
+ *    Using interleaving with more registers to break the carry chains.
  */
-static inline unsigned do_csum(const unsigned char * buff, int len)
+static __force_inline unsigned do_csum(const unsigned char *buff, unsigned len)
 {
-	int odd, count;
+	unsigned odd, count;
	unsigned long result = 0;

-	if (len <= 0)
-		goto out;
+	if (unlikely(len == 0))
+		return result;
	odd = 1 & (unsigned long) buff;
	if (unlikely(odd)) {
		result = *buff << 8;

@@ -45,7 +49,7 @@
	count = len >> 1;		/* nr of 16-bit words.. */
	if (count) {
		if (2 & (unsigned long) buff) {
			result += *(unsigned short *)buff;
			count--;
			len -= 2;
			buff += 2;

@@ -59,18 +63,41 @@
			buff += 4;
		}
		count >>= 1;		/* nr of 64-bit words.. */
+
+		/* main loop using 64byte blocks */
		if (count) {
			unsigned long zero = 0;
-			do {
-				asm("	addq %1,%0\n"
-				    "	adcq %2,%0\n"
+			unsigned count64 = count >> 3;
+			while (count64) {
+				asm("addq 0*8(%[src]),%[res]\n\t"
+				    "adcq 1*8(%[src]),%[res]\n\t"
+				    "adcq 2*8(%[src]),%[res]\n\t"
+				    "adcq 3*8(%[src]),%[res]\n\t"
+				    "adcq 4*8(%[src]),%[res]\n\t"
+				    "adcq 5*8(%[src]),%[res]\n\t"
+				    "adcq 6*8(%[src]),%[res]\n\t"
+				    "adcq 7*8(%[src]),%[res]\n\t"
+				    "adcq %[zero],%[res]"
+				    : [res] "=r" (result)
+				    : [src] "r" (buff), [zero] "r" (zero),
+				      "[res]" (result));
+				buff += 64;
+				count64--;
+			}
+
+			/* last upto 7 8byte blocks */
+			count %= 8;
+			while (count) {
+				asm("addq %1,%0\n\t"
+				    "adcq %2,%0\n"
				    : "=r" (result)
-				    : "m" (*buff), "r" (zero), "0" (result));
-				count--;
+				    : "m" (*(unsigned long *)buff),
+				      "r" (zero), "0" (result));
+				--count;
				buff += 8;
-			} while (count);
+			}
			result = (result & 0xffffffff) + (result >> 32);
		}
		if (len & 4) {
			result += *(unsigned int *) buff;
			buff += 4;

@@ -83,10 +110,11 @@
	}
	if (len & 1)
		result += *buff;
-	result = from64to16(result);
-	if (unlikely(odd))
-		return ((result >> 8) & 0xff) | ((result & 0xff) << 8);
-out:
+	result = add32_with_carry(result >> 32, result & 0xffffffff);
+	if (unlikely(odd)) {
+		result = from32to16(result);
+		result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
+	}
	return result;
 }

@@ -102,18 +130,11 @@
  *
  * it's best to have buff aligned on a 64-bit boundary
  */
-unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
+unsigned csum_partial(const unsigned char *buff, unsigned len, unsigned sum)
 {
-	unsigned result = do_csum(buff, len);
-	/* add in old sum, and carry.. */
-	asm("addl %1,%0\n\t"
-	    "adcl $0,%0"
-	    : "=r" (result)
-	    : "r" (sum), "0" (result));
-	return result;
+	return add32_with_carry(do_csum(buff, len), sum);
 }

+//EXPORT_SYMBOL(csum_partial);
+
 /*
  * this routine is used for miscellaneous IP-like checksums, mainly
  * in icmp.c

@@ -123,4 +144,3 @@
	return ~csum_partial(buff,len,0);
 }
-EXPORT_SYMBOL(ip_compute_csum);
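A note on the main loop above: the adcq chain is a 64-bit one's-complement sum, where each add-with-carry feeds the previous carry-out back in and the trailing "adcq %[zero]" folds the final carry around. A plain-C equivalent of that end-around-carry idea, purely illustrative and much slower than the asm:

	#include <stdint.h>
	#include <stddef.h>
	#include <string.h>

	/* Sum len bytes (len a multiple of 8) into a 64-bit ones'-complement
	 * accumulator, folding each carry-out back into the sum. */
	static uint64_t csum64(const uint8_t *buf, size_t len, uint64_t sum)
	{
		for (size_t i = 0; i < len; i += 8) {
			uint64_t w;
			memcpy(&w, buf + i, 8);	/* unaligned-safe load */
			uint64_t t = sum + w;
			sum = t + (t < sum);	/* end-around carry */
		}
		return sum;
	}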
arch/x86_64/lib/csum-wrappers.c (+40 -22)

-/* Copyright 2002 Andi Kleen, SuSE Labs.
+/* Copyright 2002,2003 Andi Kleen, SuSE Labs.
  * Subject to the GNU Public License v.2
  *
  * Wrappers of assembly checksum functions for x86-64.

@@ -7,18 +7,6 @@
 #include <asm/checksum.h>
 #include <linux/module.h>

-/* Better way for this sought */
-static inline unsigned from64to32(unsigned long x)
-{
-	/* add up 32-bit words for 33 bits */
-	x = (x & 0xffffffff) + (x >> 32);
-	/* add up 16-bit and 17-bit words for 17+c bits */
-	x = (x & 0xffff) + (x >> 16);
-	/* add up 16-bit and 2-bit for 16+c bit */
-	x = (x & 0xffff) + (x >> 16);
-	return x;
-}
-
 /**
  * csum_partial_copy_from_user - Copy and checksum from user space.
  * @src: source address (user space)

@@ -36,14 +24,32 @@ csum_partial_copy_from_user(const char *src, char *dst,
 {
	*errp = 0;
	if (likely(access_ok(VERIFY_READ, src, len))) {
-		unsigned long sum;
-		sum = csum_partial_copy_generic(src, dst, len, isum, errp, NULL);
+		/* Why 6, not 7? To handle odd addresses aligned we
+		   would need to do considerable complications to fix the
+		   checksum which is defined as an 16bit accumulator. The
+		   fix alignment code is primarily for performance
+		   compatibility with 32bit and that will handle odd
+		   addresses slowly too. */
+		if (unlikely((unsigned long)src & 6)) {
+			while (((unsigned long)src & 6) && len >= 2) {
+				__u16 val16;
+				*errp = __get_user(val16, (__u16 *)src);
+				if (*errp)
+					return isum;
+				*(__u16 *)dst = val16;
+				isum = add32_with_carry(isum, val16);
+				src += 2;
+				dst += 2;
+				len -= 2;
+			}
+		}
+		isum = csum_partial_copy_generic(src, dst, len, isum, errp, NULL);
		if (likely(*errp == 0))
-			return from64to32(sum);
+			return isum;
	}
	*errp = -EFAULT;
	memset(dst, 0, len);
-	return 0;
+	return isum;
 }

 EXPORT_SYMBOL(csum_partial_copy_from_user);

@@ -67,8 +73,22 @@ csum_partial_copy_to_user(const char *src, char *dst,
		*errp = -EFAULT;
		return 0;
	}
+
+	if (unlikely((unsigned long)dst & 6)) {
+		while (((unsigned long)dst & 6) && len >= 2) {
+			__u16 val16 = *(__u16 *)src;
+			isum = add32_with_carry(isum, val16);
+			*errp = __put_user(val16, (__u16 *)dst);
+			if (*errp)
+				return isum;
+			src += 2;
+			dst += 2;
+			len -= 2;
+		}
+	}
+
	*errp = 0;
-	return from64to32(csum_partial_copy_generic(src, dst, len, isum, NULL, errp));
+	return csum_partial_copy_generic(src, dst, len, isum, NULL, errp);
 }

 EXPORT_SYMBOL(csum_partial_copy_to_user);

@@ -85,11 +105,9 @@ EXPORT_SYMBOL(csum_partial_copy_to_user);
 unsigned int
 csum_partial_copy_nocheck(const char *src, char *dst, int len, unsigned int sum)
 {
-	return from64to32(csum_partial_copy_generic(src, dst, len, sum, NULL, NULL));
+	return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
 }
+//EXPORT_SYMBOL(csum_partial_copy_nocheck);

 unsigned short csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr,
			       __u32 len, unsigned short proto, unsigned int sum)
 {

@@ -103,7 +121,7 @@ unsigned short csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr,
	    "	adcq $0,%[sum]\n"
	    : [sum] "=r" (sum64)
	    : "[sum]" (rest),[saddr] "r" (saddr), [daddr] "r" (daddr));
-	return csum_fold(from64to32(sum64));
+	return csum_fold(add32_with_carry(sum64 & 0xffffffff, sum64 >> 32));
 }

 EXPORT_SYMBOL(csum_ipv6_magic);
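Why the new head loops mask with 6 rather than 7: stepping by 16-bit words can repair 2-, 4-, or 6-byte misalignment, but an odd starting address shifts every byte into the other lane of its 16-bit word. Because the Internet checksum is a 16-bit one's-complement sum, a one-byte shift only byte-swaps the final result, which is fixable but not worth the complication here (the removed odd_source path in csum-copy.S did exactly that swap). A toy check of the byte-swap identity, assuming the standard RFC 1071 checksum:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* RFC 1071 ones'-complement sum over 16-bit big-endian words */
	static uint16_t csum16(const uint8_t *p, size_t len)
	{
		uint32_t sum = 0;
		for (size_t i = 0; i + 1 < len; i += 2)
			sum += (uint32_t)(p[i] << 8 | p[i + 1]);
		while (sum >> 16)
			sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)sum;
	}

	int main(void)
	{
		uint8_t buf[10] = { 1, 2, 3, 4, 5, 6, 7, 8 };	/* 8 data bytes */
		uint8_t shifted[10] = { 0 };
		memcpy(shifted + 1, buf, 8);	/* same data at an odd offset */

		uint16_t a = csum16(buf, 8);
		uint16_t b = csum16(shifted, 10);	/* zero-padded both ends */
		/* prints the same value twice (0x1014) */
		printf("%04x %04x\n", a, (uint16_t)(b << 8 | b >> 8));
		return 0;
	}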
arch/x86_64/lib/memcpy.S (+35 -60)

@@ -12,103 +12,78 @@
  * rax original destination
  */

-// #define FIX_ALIGNMENT
	.globl __memcpy
	.globl memcpy
-	.p2align
+	.p2align 4
 __memcpy:
 memcpy:
	pushq %rbx
	movq %rdi,%rax

-#ifdef FIX_ALIGNMENT
-	movl %edi,%ecx
-	andl $7,%ecx
-	jnz  bad_alignment
-after_bad_alignment:
-#endif
-
-	movq %rdx,%rcx
-	movl $64,%ebx
-	shrq $6,%rcx
-	jz   handle_tail
+	movl %edx,%ecx
+	shrl $6,%ecx
+	jz .Lhandle_tail

-loop_64:
+	.p2align 4
+.Lloop_64:
+	decl %ecx
+
	movq (%rsi),%r11
	movq 8(%rsi),%r8
-	movq 2*8(%rsi),%r9
-	movq 3*8(%rsi),%r10
+
	movq %r11,(%rdi)
	movq %r8,1*8(%rdi)
+
+	movq 2*8(%rsi),%r9
+	movq 3*8(%rsi),%r10
+
	movq %r9,2*8(%rdi)
	movq %r10,3*8(%rdi)

	movq 4*8(%rsi),%r11
	movq 5*8(%rsi),%r8
-	movq 6*8(%rsi),%r9
-	movq 7*8(%rsi),%r10
+
	movq %r11,4*8(%rdi)
	movq %r8,5*8(%rdi)
+
+	movq 6*8(%rsi),%r9
+	movq 7*8(%rsi),%r10
+
	movq %r9,6*8(%rdi)
	movq %r10,7*8(%rdi)

-	addq %rbx,%rsi
-	addq %rbx,%rdi
-	decl %ecx
-	jnz  loop_64
+	leaq 64(%rsi),%rsi
+	leaq 64(%rdi),%rdi
+	jnz  .Lloop_64

-handle_tail:
+.Lhandle_tail:
	movl %edx,%ecx
	andl $63,%ecx
	shrl $3,%ecx
-	jz   handle_7
-	movl $8,%ebx
-loop_8:
+	jz   .Lhandle_7
+	.p2align 4
+.Lloop_8:
+	decl %ecx
	movq (%rsi),%r8
	movq %r8,(%rdi)
-	addq %rbx,%rdi
-	addq %rbx,%rsi
-	decl %ecx
-	jnz  loop_8
+	leaq 8(%rdi),%rdi
+	leaq 8(%rsi),%rsi
+	jnz  .Lloop_8

-handle_7:
+.Lhandle_7:
	movl %edx,%ecx
	andl $7,%ecx
-	jz ende
-loop_1:
+	jz .Lende
+	.p2align 4
+.Lloop_1:
	movb (%rsi),%r8b
	movb %r8b,(%rdi)
	incq %rdi
	incq %rsi
	decl %ecx
-	jnz loop_1
+	jnz .Lloop_1

-ende:
-	sfence
+.Lende:
	popq %rbx
	ret

-#ifdef FIX_ALIGNMENT
-	/* align destination */
-	/* This is simpleminded. For bigger blocks it may make sense to align
-	   src and dst to their aligned subset and handle the rest separately */
-bad_alignment:
-	movl $8,%r9d
-	subl %ecx,%r9d
-	movl %r9d,%ecx
-	subq %r9,%rdx
-	js   small_alignment
-	jz   small_alignment
-align_1:
-	movb (%rsi),%r8b
-	movb %r8b,(%rdi)
-	incq %rdi
-	incq %rsi
-	decl %ecx
-	jnz  align_1
-	jmp  after_bad_alignment
-small_alignment:
-	addq %r9,%rdx
-	jmp  handle_7
-#endif
arch/x86_64/lib/memset.S (+30 -27)

@@ -11,7 +11,7 @@
  */
	.globl __memset
	.globl memset
-	.p2align
+	.p2align 4
 memset:
 __memset:
	movq %rdi,%r10

@@ -25,15 +25,16 @@ __memset:
	/* align dst */
	movl %edi,%r9d
	andl $7,%r9d
-	jnz bad_alignment
-after_bad_alignment:
+	jnz .Lbad_alignment
+.Lafter_bad_alignment:

-	movq %r11,%rcx
-	movl $64,%r8d
-	shrq $6,%rcx
-	jz handle_tail
+	movl %r11d,%ecx
+	shrl $6,%ecx
+	jz .Lhandle_tail

-loop_64:
+	.p2align 4
+.Lloop_64:
+	decl %ecx
	movq %rax,(%rdi)
	movq %rax,8(%rdi)
	movq %rax,16(%rdi)

@@ -42,43 +43,45 @@
	movq %rax,40(%rdi)
	movq %rax,48(%rdi)
	movq %rax,56(%rdi)
-	addq %r8,%rdi
-	decl %ecx
-	jnz loop_64
+	leaq 64(%rdi),%rdi
+	jnz .Lloop_64

	/* Handle tail in loops. The loops should be faster than hard
	   to predict jump tables. */
-handle_tail:
+	.p2align 4
+.Lhandle_tail:
	movl %r11d,%ecx
	andl $63&(~7),%ecx
-	jz handle_7
+	jz .Lhandle_7
	shrl $3,%ecx
-loop_8:
-	movq %rax,(%rdi)
-	addq $8,%rdi
+	.p2align 4
+.Lloop_8:
	decl %ecx
-	jnz loop_8
+	movq %rax,(%rdi)
+	leaq 8(%rdi),%rdi
+	jnz .Lloop_8

-handle_7:
+.Lhandle_7:
	movl %r11d,%ecx
	andl $7,%ecx
-	jz ende
-loop_1:
-	movb %al,(%rdi)
-	addq $1,%rdi
+	jz .Lende
+	.p2align 4
+.Lloop_1:
	decl %ecx
-	jnz loop_1
+	movb %al,(%rdi)
+	leaq 1(%rdi),%rdi
+	jnz .Lloop_1

-ende:
+.Lende:
	movq %r10,%rax
	ret

-bad_alignment:
+.Lbad_alignment:
	cmpq $7,%r11
-	jbe handle_7
+	jbe .Lhandle_7
	movq %rax,(%rdi)	/* unaligned store */
	movq $8,%r8
	subq %r9,%r8
	addq %r8,%rdi
	subq %r8,%r11
-	jmp after_bad_alignment
+	jmp .Lafter_bad_alignment
arch/x86_64/vmlinux.lds.S (+4 -3)

@@ -50,10 +50,10 @@ SECTIONS
   .xtime_lock : AT((LOADADDR(.vsyscall_0) + SIZEOF(.vsyscall_0) + 63) & ~(63)) { *(.xtime_lock) }
   xtime_lock = LOADADDR(.xtime_lock);
   . = ALIGN(16);
-  .hpet : AT((LOADADDR(.xtime_lock) + SIZEOF(.xtime_lock) + 15) & ~(15)) { *(.hpet) }
-  hpet = LOADADDR(.hpet);
+  .vxtime : AT((LOADADDR(.xtime_lock) + SIZEOF(.xtime_lock) + 15) & ~(15)) { *(.vxtime) }
+  vxtime = LOADADDR(.vxtime);
   . = ALIGN(16);
-  .wall_jiffies : AT((LOADADDR(.hpet) + SIZEOF(.hpet) + 15) & ~(15)) { *(.wall_jiffies) }
+  .wall_jiffies : AT((LOADADDR(.vxtime) + SIZEOF(.vxtime) + 15) & ~(15)) { *(.wall_jiffies) }
   wall_jiffies = LOADADDR(.wall_jiffies);
   . = ALIGN(16);
   .sys_tz : AT((LOADADDR(.wall_jiffies) + SIZEOF(.wall_jiffies) + 15) & ~(15)) { *(.sys_tz) }

@@ -105,6 +105,7 @@ SECTIONS
   __con_initcall_start = .;
   .con_initcall.init : { *(.con_initcall.init) }
   __con_initcall_end = .;
+  SECURITY_INIT
   . = ALIGN(4096);
   __initramfs_start = .;
   .init.ramfs : { *(.init.ramfs) }
drivers/char/rocket.c (+36 -251)

[diff collapsed in the original page and not expanded; contents not captured]
drivers/char/rocket.h (+2 -22)

 /*
- * rocket.h --- the exported interface of the rocket driver to
- * its configuration program.
+ * rocket.h --- the exported interface of the rocket driver to its configuration program.
  *
  * Written by Theodore Ts'o, Copyright 1997.
+ * Copyright 1997 Comtrol Corporation.
  *
- * Copyright 1994, 1997, 2003 Comtrol Corporation.  All Rights Reserved.
- *
- * The following source code is subject to Comtrol Corporation's
- * Developer's License Agreement.
- *
- * This source code is protected by United States copyright law and
- * international copyright treaties.
- *
- * This source code may only be used to develop software products that
- * will operate with Comtrol brand hardware.
- *
- * You may not reproduce nor distribute this source code in its original
- * form but must produce a derivative work which includes portions of
- * this source code only.
- *
- * The portions of this source code which you use in your derivative
- * work must bear Comtrol's copyright notice:
- *
- *		Copyright 1994 Comtrol Corporation.
- *
  */

 /* Model Information Struct */
drivers/char/rocket_int.h (+2 -44)

@@ -2,26 +2,7 @@
  * rocket_int.h --- internal header file for rocket.c
  *
  * Written by Theodore Ts'o, Copyright 1997.
- *
- * Copyright 1994, 1997, 2003 Comtrol Corporation.  All Rights Reserved.
- *
- * The following source code is subject to Comtrol Corporation's
- * Developer's License Agreement.
- *
- * This source code is protected by United States copyright law and
- * international copyright treaties.
- *
- * This source code may only be used to develop software products that
- * will operate with Comtrol brand hardware.
- *
- * You may not reproduce nor distribute this source code in its original
- * form but must produce a derivative work which includes portions of
- * this source code only.
- *
- * The portions of this source code which you use in your derivative
- * work must bear Comtrol's copyright notice:
- *
- *		Copyright 1994 Comtrol Corporation.
+ * Copyright 1997 Comtrol Corporation.
  *
  */

@@ -98,17 +79,9 @@ static inline unsigned short sInW(unsigned short port)
 #define sInW(a) (inw_p(a))
 #endif /* ROCKET_DEBUG_IO */

-/* This is used to move arrays of bytes so byte swapping isn't
- * appropriate.  On Linux 2.3 and above outsw is the same as
- * outsw_ns, but we use the old form for compatibility with
- * old kernels. */
-#if defined(__BIG_ENDIAN) && (LINUX_VERSION_CODE < VERSION_CODE(2,3,0))
-#define sOutStrW(port, addr, count) if (count) outsw_ns(port, addr, count)
-#define sInStrW(port, addr, count) if (count) insw_ns(port, addr, count)
-#else
+/* This is used to move arrays of bytes so byte swapping isn't appropriate. */
 #define sOutStrW(port, addr, count) if (count) outsw(port, addr, count)
 #define sInStrW(port, addr, count) if (count) insw(port, addr, count)
-#endif

 #define CTL_SIZE 8
 #define AIOP_CTL_SIZE 4

@@ -1318,11 +1291,7 @@ struct r_port {
 /* Compact PCI device */
 #define PCI_DEVICE_ID_CRP16INTF		0x0903	/* Rocketport Compact PCI 16 port w/external I/F */

-/* Taking care of some kernel incompatibilities... */
-#if LINUX_VERSION_CODE > VERSION_CODE(2,5,68)
-
 #define TTY_GET_LINE(t) t->index
 #define TTY_DRIVER_MINOR_START(t) t->driver->minor_start
 #define TTY_DRIVER_SUBTYPE(t) t->driver->subtype
 #define TTY_DRIVER_NAME(t) t->driver->name

@@ -1330,15 +1299,4 @@ struct r_port {
 #define TTY_DRIVER_FLUSH_BUFFER_EXISTS(t) t->driver->flush_buffer
 #define TTY_DRIVER_FLUSH_BUFFER(t) t->driver->flush_buffer(t)

-#else
-
-#define TTY_GET_LINE(t) minor(t->device) - TTY_DRIVER_MINOR_START(t)
-#define TTY_DRIVER_MINOR_START(t) t->driver.minor_start
-#define TTY_DRIVER_SUBTYPE(t) t->driver.subtype
-#define TTY_DRIVER_NAME(t) t->driver.name
-#define TTY_DRIVER_NAME_BASE(t) t->driver.name_base
-#define TTY_DRIVER_FLUSH_BUFFER_EXISTS(t) t->driver.flush_buffer
-#define TTY_DRIVER_FLUSH_BUFFER(t) t->driver.flush_buffer(t)
-
-#endif
drivers/pci/pci.ids (+2 -0)

@@ -3752,6 +3752,8 @@
 	0005  Rocketport 8 port w/octa cable
 	0006  Rocketport 8 port w/RJ11 connectors
 	0007  Rocketport 4 port w/RJ11 connectors
+	0008  Rocketport 8 port w/ DB78 SNI (Siemens) connector
+	0009  Rocketport 16 port w/ DB78 SNI (Siemens) connector
 	000a  Rocketport Plus 4 port
 	000b  Rocketport Plus 8 port
 	000c  RocketModem 6 port
drivers/scsi/scsi_scan.c (+2 -2)

@@ -619,12 +619,12 @@ static int scsi_add_lun(Scsi_Device *sdev, char *inq_result, int *bflags)
 	if (inq_result[7] & 0x10)
 		sdev->sdtr = 1;

-	scsi_device_register(sdev);

 	sprintf(sdev->devfs_name, "scsi/host%d/bus%d/target%d/lun%d",
 		sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);

+	scsi_device_register(sdev);

 	/*
 	 * End driverfs/devfs code.
 	 */
fs/fs-writeback.c (+14 -1)

@@ -260,8 +260,21 @@ sync_sb_inodes(struct super_block *sb, struct writeback_control *wbc)
		struct address_space *mapping = inode->i_mapping;
		struct backing_dev_info *bdi = mapping->backing_dev_info;

-		if (bdi->memory_backed)
+		if (bdi->memory_backed) {
+			if (sb == blockdev_superblock) {
+				/*
+				 * Dirty memory-backed blockdev: the ramdisk
+				 * driver does this.
+				 */
+				list_move(&inode->i_list, &sb->s_dirty);
+				continue;
+			}
+			/*
+			 * Assume that all inodes on this superblock are memory
+			 * backed.  Skip the superblock.
+			 */
+			break;
+		}

		if (wbc->nonblocking && bdi_write_congested(bdi)) {
			wbc->encountered_congestion = 1;
fs/namei.c (+1 -1)

@@ -325,7 +325,7 @@ static inline int exec_permission_lite(struct inode *inode)
	return -EACCES;
 ok:
-	return security_inode_permission_lite(inode, MAY_EXEC);
+	return security_inode_permission(inode, MAY_EXEC);
 }

 /*
include/asm-generic/vmlinux.lds.h (+6 -0)

@@ -45,3 +45,9 @@
 		*(__ksymtab_strings)					\
 	}

+#define SECURITY_INIT							\
+	.security_initcall.init : {					\
+		__security_initcall_start = .;				\
+		*(.security_initcall.init)				\
+		__security_initcall_end = .;				\
+	}
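This macro gives the security framework the same linker-section trick the other initcall levels use: security_initcall() (added to include/linux/init.h later in this commit) drops a function pointer into .security_initcall.init, and SECURITY_INIT brackets that section with start/end symbols so boot code can walk it like an array. A minimal sketch of such a walker; the actual call site lives in security/security.c, whose diff is not shown in this capture, so the exact code may differ:

	extern initcall_t __security_initcall_start[], __security_initcall_end[];

	static void do_security_initcalls(void)
	{
		initcall_t *call;

		/* invoke every function registered with security_initcall() */
		for (call = __security_initcall_start;
		     call < __security_initcall_end; call++)
			(*call)();
	}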
include/asm-i386/fixmap.h (+8 -0)

@@ -107,6 +107,14 @@ extern void __set_fixmap (enum fixed_addresses idx,
 #define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT))
 #define __virt_to_fix(x)	((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)

+/*
+ * This is the range that is readable by user mode, and things
+ * acting like user mode such as get_user_pages.
+ */
+#define FIXADDR_USER_START	(__fix_to_virt(FIX_VSYSCALL))
+#define FIXADDR_USER_END	(FIXADDR_USER_START + PAGE_SIZE)
+
 extern void __this_fixmap_does_not_exist(void);

 /*
include/asm-x86_64/checksum.h (+11 -1)

@@ -125,7 +125,7 @@ csum_tcpudp_magic(unsigned long saddr, unsigned long daddr,
  * Before filling it in it needs to be csum_fold()'ed.
  * buff should be aligned to a 64bit boundary if possible.
  */
-extern unsigned int csum_partial(const unsigned char *buff, int len, unsigned int sum);
+extern unsigned int csum_partial(const unsigned char *buff, unsigned len, unsigned int sum);

 #define  _HAVE_ARCH_COPY_AND_CSUM_FROM_USER 1
 #define HAVE_CSUM_COPY_USER 1

@@ -179,4 +179,14 @@ extern unsigned short
 csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr,
		__u32 len, unsigned short proto, unsigned int sum);

+static inline unsigned add32_with_carry(unsigned a, unsigned b)
+{
+	asm("addl %2,%0\n\t"
+	    "adcl $0,%0"
+	    : "=r" (a)
+	    : "0" (a), "r" (b));
+	return a;
+}
+
 #endif
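add32_with_carry() is the end-around-carry primitive the rest of this commit builds on: addl produces a carry, and "adcl $0" folds that carry back into the low bits, which is exactly the one's-complement fold. Folding a 64-bit running sum down to 32 bits then takes a single call, as the new csum-partial.c code in this commit does:

	/* fold a 64-bit ones'-complement accumulator to 32 bits */
	unsigned fold64(unsigned long sum)
	{
		return add32_with_carry(sum >> 32, sum & 0xffffffff);
	}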
include/asm-x86_64/fixmap.h (+2 -0)

@@ -35,6 +35,8 @@
 enum fixed_addresses {
	VSYSCALL_LAST_PAGE,
	VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
+	VSYSCALL_HPET,
+	FIX_HPET_BASE,
 #ifdef CONFIG_X86_LOCAL_APIC
	FIX_APIC_BASE,	/* local (CPU) APIC) -- required for SMP or not */
 #endif
include/asm-x86_64/mc146818rtc.h (+5 -0)

@@ -24,6 +24,11 @@ outb_p((addr),RTC_PORT(0)); \
 outb_p((val),RTC_PORT(1)); \
 })

+#ifndef CONFIG_HPET_TIMER
 #define RTC_IRQ 8
+#else
+/* Temporary workaround due to IRQ routing problem. */
+#define RTC_IRQ 0
+#endif

 #endif /* _ASM_MC146818RTC_H */
include/asm-x86_64/processor.h (+1 -1)

@@ -313,7 +313,7 @@ extern inline void sync_core(void)
 #define ARCH_HAS_PREFETCH
 static inline void prefetch(void *x)
 {
-	asm volatile("2: prefetchnta %0\n1:\t"
+	asm volatile("2: prefetcht0 %0\n1:\t"
		     ".section __ex_table,\"a\"\n\t"
		     " .align 8\n\t"
		     " .quad 2b,1b\n\t"
include/asm-x86_64/proto.h (+2 -0)

@@ -54,6 +54,8 @@ extern void swap_low_mappings(void);
 extern void oops_begin(void);
 extern void die(const char *, struct pt_regs *, long);
 extern void __die(const char *str, struct pt_regs *regs, long err);
+extern void __show_regs(struct pt_regs *regs);
+extern void show_regs(struct pt_regs *regs);

 extern int map_syscall32(struct mm_struct *mm, unsigned long address);
 extern char *syscall32_page;
include/asm-x86_64/timex.h (+29 -1)

@@ -30,6 +30,34 @@ static inline cycles_t get_cycles (void)
 extern unsigned int cpu_khz;

-extern struct hpet_data hpet;
+/*
+ * Documentation on HPET can be found at:
+ *      http://www.intel.com/ial/home/sp/pcmmspec.htm
+ *      ftp://download.intel.com/ial/home/sp/mmts098.pdf
+ */
+
+#define HPET_ID		0x000
+#define HPET_PERIOD	0x004
+#define HPET_CFG	0x010
+#define HPET_STATUS	0x020
+#define HPET_COUNTER	0x0f0
+#define HPET_T0_CFG	0x100
+#define HPET_T0_CMP	0x108
+#define HPET_T0_ROUTE	0x110
+
+#define HPET_ID_VENDOR	0xffff0000
+#define HPET_ID_LEGSUP	0x00008000
+#define HPET_ID_NUMBER	0x00000f00
+#define HPET_ID_REV	0x000000ff
+
+#define HPET_CFG_ENABLE	0x001
+#define HPET_CFG_LEGACY	0x002
+
+#define HPET_T0_ENABLE		0x004
+#define HPET_T0_PERIODIC	0x008
+#define HPET_T0_SETVAL		0x040
+#define HPET_T0_32BIT		0x100
+
+extern struct vxtime_data vxtime;

 #endif
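These offsets mirror the HPET register layout: HPET_PERIOD reports the main counter's tick period in femtoseconds, and HPET_COUNTER is the free-running count. With the hpet_readl() accessor this commit adds to vsyscall.h, deriving the counter frequency (the "HPET clocks / sec" that vxtime.hz stores) looks roughly like this; a sketch only, since the real setup in arch/x86_64/kernel/time.c is collapsed in this capture:

	unsigned int period = hpet_readl(HPET_PERIOD);	/* femtoseconds per tick */
	unsigned long hz = 1000000000000000UL / period;	/* ticks per second */
	unsigned int now = hpet_readl(HPET_COUNTER);	/* current free-running count */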
include/asm-x86_64/vsyscall.h (+10 -8)

@@ -15,7 +15,7 @@ enum vsyscall_num {
 #ifdef __KERNEL__

-#define __section_hpet __attribute__ ((unused, __section__ (".hpet"), aligned(16)))
+#define __section_vxtime __attribute__ ((unused, __section__ (".vxtime"), aligned(16)))
 #define __section_wall_jiffies __attribute__ ((unused, __section__ (".wall_jiffies"), aligned(16)))
 #define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
 #define __section_sys_tz __attribute__ ((unused, __section__ (".sys_tz"), aligned(16)))

@@ -23,22 +23,24 @@ enum vsyscall_num {
 #define __section_xtime __attribute__ ((unused, __section__ (".xtime"), aligned(16)))
 #define __section_xtime_lock __attribute__ ((unused, __section__ (".xtime_lock"), aligned(L1_CACHE_BYTES)))

+#define VXTIME_TSC	1
+#define VXTIME_HPET	2
+
-struct hpet_data {
-	long address;		/* base address */
+struct vxtime_data {
+	long hpet_address;	/* HPET base address */
	unsigned long hz;	/* HPET clocks / sec */
-	int trigger;		/* value at last interrupt */
	int last;
-	int offset;
	unsigned long last_tsc;
-	long ticks;
+	long quot;
+	long tsc_quot;
+	int mode;
 };

+#define hpet_readl(a)		readl(fix_to_virt(FIX_HPET_BASE) + a)
+#define hpet_writel(d,a)	writel(d, fix_to_virt(FIX_HPET_BASE) + a)
+
 /* vsyscall space (readonly) */
-extern struct hpet_data __hpet;
+extern struct vxtime_data __vxtime;
 extern struct timespec __xtime;
 extern volatile unsigned long __jiffies;
 extern unsigned long __wall_jiffies;

@@ -46,7 +48,7 @@ extern struct timezone __sys_tz;
 extern seqlock_t __xtime_lock;

 /* kernel space (writeable) */
-extern struct hpet_data hpet;
+extern struct vxtime_data vxtime;
 extern unsigned long wall_jiffies;
 extern struct timezone sys_tz;
 extern int sysctl_vsyscall;
include/linux/init.h (+6 -0)

@@ -64,6 +64,7 @@ typedef int (*initcall_t)(void);
 typedef void (*exitcall_t)(void);

 extern initcall_t __con_initcall_start, __con_initcall_end;
+extern initcall_t __security_initcall_start, __security_initcall_end;
 #endif

 #ifndef MODULE

@@ -96,6 +97,9 @@ extern initcall_t __con_initcall_start, __con_initcall_end;
 #define console_initcall(fn) \
	static initcall_t __initcall_##fn __attribute__ ((unused,__section__ (".con_initcall.init")))=fn

+#define security_initcall(fn) \
+	static initcall_t __initcall_##fn __attribute__ ((unused,__section__ (".security_initcall.init"))) = fn
+
 struct obs_kernel_param {
	const char *str;
	int (*setup_func)(char *);

@@ -143,6 +147,8 @@ struct obs_kernel_param {
 #define device_initcall(fn)		module_init(fn)
 #define late_initcall(fn)		module_init(fn)

+#define security_initcall(fn)		module_init(fn)
+
 /* These macros create a dummy inline: gcc 2.9x does not count alias
  as usage, hence the `unused function' warning when __init functions
  are declared static. We use the dummy __*_module_inline functions
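With both definitions in place, a security module registers its entry point the same way console drivers do, getting section placement when built in and plain module_init() when modular. Hypothetical usage, with an illustrative function name:

	static int __init my_lsm_init(void)
	{
		/* register this module's security_operations here */
		return 0;
	}

	security_initcall(my_lsm_init);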
include/linux/pci_ids.h (+2 -0)

@@ -1399,6 +1399,8 @@
 #define PCI_DEVICE_ID_RP8OCTA	0x0005
 #define PCI_DEVICE_ID_RP8J	0x0006
 #define PCI_DEVICE_ID_RP4J	0x0007
+#define PCI_DEVICE_ID_RP8SNI	0x0008
+#define PCI_DEVICE_ID_RP16SNI	0x0009
 #define PCI_DEVICE_ID_RPP4	0x000A
 #define PCI_DEVICE_ID_RPP8	0x000B
 #define PCI_DEVICE_ID_RP8M	0x000C
include/linux/security.h
View file @
788ec66a
...
...
@@ -46,7 +46,6 @@ extern void cap_capset_set (struct task_struct *target, kernel_cap_t *effective,
extern
int
cap_bprm_set_security
(
struct
linux_binprm
*
bprm
);
extern
void
cap_bprm_compute_creds
(
struct
linux_binprm
*
bprm
);
extern
int
cap_task_post_setuid
(
uid_t
old_ruid
,
uid_t
old_euid
,
uid_t
old_suid
,
int
flags
);
extern
void
cap_task_kmod_set_label
(
void
);
extern
void
cap_task_reparent_to_init
(
struct
task_struct
*
p
);
extern
int
cap_syslog
(
int
type
);
...
...
@@ -328,16 +327,6 @@ struct swap_info_struct;
 *	@inode contains the inode structure to check.
 *	@mask contains the permission mask.
 *	Return 0 if permission is granted.
- * @inode_permission_lite:
- *	Check permission before accessing an inode. This hook is
- *	currently only called when checking MAY_EXEC access during
- *	pathname resolution. The dcache lock is held and thus modules
- *	that could sleep or contend the lock should return -EAGAIN to
- *	inform the kernel to drop the lock and try again calling the
- *	full permission hook.
- *	@inode contains the inode structure to check.
- *	@mask contains the permission mask.
- *	Return 0 if permission is granted.
 * @inode_setattr:
 *	Check permission before setting file attributes. Note that the kernel
 *	call to notify_change is performed from several locations, whenever
...
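The deleted paragraph encodes a real constraint of the dropped hook: it ran with the dcache lock held, so an implementation that might sleep had to bail out with -EAGAIN and let the full hook repeat the check. Roughly what a conforming module looked like (the mymod_* names are invented; the hook signature is taken from the diff below):

	/* Hypothetical pre-removal hook: never sleep under
	   dcache_lock; -EAGAIN tells the core to drop the lock and
	   call the full inode_permission hook instead. */
	static int mymod_inode_permission_lite(struct inode *inode, int mask)
	{
		if (mymod_check_would_sleep(inode))
			return -EAGAIN;
		return mymod_fast_check(inode, mask);
	}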
@@ -607,10 +596,6 @@ struct swap_info_struct;
 *	@arg4 contains a argument.
 *	@arg5 contains a argument.
 *	Return 0 if permission is granted.
- * @task_kmod_set_label:
- *	Set the security attributes in current->security for the kernel module
- *	loader thread, so that it has the permissions needed to perform its
- *	function.
 * @task_reparent_to_init:
 *	Set the security attributes in @p->security for a kernel thread that
 *	is being reparented to the init task.
...
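For contrast, the surviving @task_reparent_to_init hook is driven from the kernel-thread path, where a thread shedding its user-space identity gets fresh security attributes. Schematically (a sketch only, not the verbatim kernel/exit.c code; the function name here is invented):

	/* Sketch of the caller side: a kernel thread being handed to
	   init has its security attributes reset through the hook. */
	static void reparent_kthread_to_init(struct task_struct *p)
	{
		p->exit_signal = SIGCHLD;
		security_task_reparent_to_init(p);
	}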
@@ -1057,7 +1042,6 @@ struct security_operations {
	int (*inode_readlink) (struct dentry *dentry);
	int (*inode_follow_link) (struct dentry *dentry, struct nameidata *nd);
	int (*inode_permission) (struct inode *inode, int mask);
-	int (*inode_permission_lite) (struct inode *inode, int mask);
	int (*inode_setattr) (struct dentry *dentry, struct iattr *attr);
	int (*inode_getattr) (struct vfsmount *mnt, struct dentry *dentry);
	void (*inode_delete) (struct inode *inode);
...
@@ -1111,7 +1095,6 @@ struct security_operations {
	int (*task_prctl) (int option, unsigned long arg2, unsigned long arg3,
			   unsigned long arg4, unsigned long arg5);
-	void (*task_kmod_set_label) (void);
	void (*task_reparent_to_init) (struct task_struct *p);
	void (*task_to_inode)(struct task_struct *p, struct inode *inode);
...
@@ -1471,12 +1454,6 @@ static inline int security_inode_permission (struct inode *inode, int mask)
	return security_ops->inode_permission (inode, mask);
}

-static inline int security_inode_permission_lite (struct inode *inode, int mask)
-{
-	return security_ops->inode_permission_lite (inode, mask);
-}
-
static inline int security_inode_setattr (struct dentry *dentry,
					  struct iattr *attr)
{
...
@@ -1692,11 +1669,6 @@ static inline int security_task_prctl (int option, unsigned long arg2,
	return security_ops->task_prctl (option, arg2, arg3, arg4, arg5);
}

-static inline void security_task_kmod_set_label (void)
-{
-	security_ops->task_kmod_set_label ();
-}
-
static inline void security_task_reparent_to_init (struct task_struct *p)
{
	security_ops->task_reparent_to_init (p);
...
@@ -2108,12 +2080,6 @@ static inline int security_inode_permission (struct inode *inode, int mask)
	return 0;
}

-static inline int security_inode_permission_lite (struct inode *inode, int mask)
-{
-	return 0;
-}
-
static inline int security_inode_setattr (struct dentry *dentry,
					  struct iattr *attr)
{
...
@@ -2321,11 +2287,6 @@ static inline int security_task_prctl (int option, unsigned long arg2,
	return 0;
}

-static inline void security_task_kmod_set_label (void)
-{
-	cap_task_kmod_set_label ();
-}
-
static inline void security_task_reparent_to_init (struct task_struct *p)
{
	cap_task_reparent_to_init (p);
...
init/main.c
...
@@ -439,8 +439,8 @@ asmlinkage void __init start_kernel(void)
	pte_chain_init();
	fork_init(num_physpages);
	proc_caches_init();
-	security_scaffolding_startup();
	buffer_init();
+	security_scaffolding_startup();
	vfs_caches_init(num_physpages);
	radix_tree_init();
	signals_init();
...
kernel/sys.c
...
@@ -831,13 +831,11 @@ asmlinkage long sys_getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid)
asmlinkage long sys_setfsuid(uid_t uid)
{
	int old_fsuid;
-	int retval;
-
-	retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS);
-	if (retval)
-		return retval;

	old_fsuid = current->fsuid;
+	if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS))
+		return old_fsuid;
+
	if (uid == current->uid || uid == current->euid ||
	    uid == current->suid || uid == current->fsuid ||
	    capable(CAP_SETUID))
...
@@ -850,9 +848,7 @@ asmlinkage long sys_setfsuid(uid_t uid)
		current->fsuid = uid;
	}

-	retval = security_task_post_setuid(old_fsuid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS);
-	if (retval)
-		return retval;
-
+	security_task_post_setuid(old_fsuid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS);
	return old_fsuid;
}
...
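The simplification leans on setfsuid()'s peculiar ABI: there is no error return, the caller only ever gets the previous fsuid back and must probe to see whether the change took. For an unprivileged process, the user-space view of that contract looks like this (glibc wrapper, standard probe-with-(-1) idiom; with CAP_SETUID the -1 probe is not safe, since -1 would then be accepted as a uid):

	#include <sys/fsuid.h>
	#include <stdio.h>

	int main(void)
	{
		int old = setfsuid(1000);	/* attempt the change */

		/* For an unprivileged caller, setfsuid(-1) changes
		   nothing and returns the current fsuid: a probe. */
		if (setfsuid(-1) != 1000)
			printf("change refused, fsuid still %d\n", old);
		return 0;
	}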
@@ -863,13 +859,11 @@ asmlinkage long sys_setfsuid(uid_t uid)
asmlinkage long sys_setfsgid(gid_t gid)
{
	int old_fsgid;
-	int retval;
-
-	retval = security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS);
-	if (retval)
-		return retval;

	old_fsgid = current->fsgid;
+	if (security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS))
+		return old_fsgid;
+
	if (gid == current->gid || gid == current->egid ||
	    gid == current->sgid || gid == current->fsgid ||
	    capable(CAP_SETGID))
...
mm/memory.c
...
@@ -689,15 +689,16 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		vma = find_extend_vma(mm, start);

-#ifdef FIXADDR_START
-		if (!vma && start >= FIXADDR_START && start < FIXADDR_TOP) {
+#ifdef FIXADDR_USER_START
+		if (!vma && start >= FIXADDR_USER_START &&
+		    start < FIXADDR_USER_END) {
			static struct vm_area_struct fixmap_vma = {
				/* Catch users - if there are any valid
				   ones, we can make this be "&init_mm" or
				   something. */
				.vm_mm = NULL,
-				.vm_start = FIXADDR_START,
-				.vm_end = FIXADDR_TOP,
+				.vm_start = FIXADDR_USER_START,
+				.vm_end = FIXADDR_USER_END,
				.vm_page_prot = PAGE_READONLY,
				.vm_flags = VM_READ | VM_EXEC,
			};
...
@@ -705,6 +706,8 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
			pgd_t *pgd;
			pmd_t *pmd;
			pte_t *pte;
+			if (write) /* user fixmap pages are read-only */
+				return i ? : -EFAULT;
			pgd = pgd_offset_k(pg);
			if (!pgd)
				return i ? : -EFAULT;
...
@@ -712,8 +715,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
			if (!pmd)
				return i ? : -EFAULT;
			pte = pte_offset_kernel(pmd, pg);
-			if (!pte || !pte_present(*pte) || !pte_user(*pte) ||
-			    !(write ? pte_write(*pte) : pte_read(*pte)))
+			if (!pte || !pte_present(*pte))
				return i ? : -EFAULT;
			if (pages) {
				pages[i] = pte_page(*pte);
...
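The repeated `return i ? : -EFAULT;` uses GNU C's elided-middle conditional: if some pages were already pinned, report that partial count, otherwise report the fault. In plain C the idiom reads:

	/* Plain-C equivalent of the GNU `i ? : -EFAULT` shorthand:
	   a partial page count takes precedence over -EFAULT. */
	static inline int pinned_or_efault(int i)
	{
		return i ? i : -EFAULT;
	}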
security/capability.c
...
@@ -248,12 +248,6 @@ int cap_task_post_setuid (uid_t old_ruid, uid_t old_euid, uid_t old_suid,
	return 0;
}

-void cap_task_kmod_set_label (void)
-{
-	cap_set_full (current->cap_effective);
-	return;
-}
-
void cap_task_reparent_to_init (struct task_struct *p)
{
	p->cap_effective = CAP_INIT_EFF_SET;
...
@@ -278,7 +272,6 @@ EXPORT_SYMBOL(cap_capset_set);
EXPORT_SYMBOL(cap_bprm_set_security);
EXPORT_SYMBOL(cap_bprm_compute_creds);
EXPORT_SYMBOL(cap_task_post_setuid);
-EXPORT_SYMBOL(cap_task_kmod_set_label);
EXPORT_SYMBOL(cap_task_reparent_to_init);
EXPORT_SYMBOL(cap_syslog);
...
@@ -298,7 +291,6 @@ static struct security_operations capability_ops = {
	.bprm_set_security =		cap_bprm_set_security,
	.task_post_setuid =		cap_task_post_setuid,
-	.task_kmod_set_label =		cap_task_kmod_set_label,
	.task_reparent_to_init =	cap_task_reparent_to_init,
	.syslog =			cap_syslog,
...
@@ -348,7 +340,7 @@ static void __exit capability_exit (void)
	}
}

-module_init (capability_init);
+security_initcall (capability_init);
module_exit (capability_exit);

MODULE_DESCRIPTION("Standard Linux Capabilities Security Module");
...
security/dummy.c
...
@@ -308,11 +308,6 @@ static int dummy_inode_permission (struct inode *inode, int mask)
	return 0;
}

-static int dummy_inode_permission_lite (struct inode *inode, int mask)
-{
-	return 0;
-}
-
static int dummy_inode_setattr (struct dentry *dentry, struct iattr *iattr)
{
	return 0;
...
@@ -517,11 +512,6 @@ static int dummy_task_prctl (int option, unsigned long arg2, unsigned long arg3,
	return 0;
}

-static void dummy_task_kmod_set_label (void)
-{
-	return;
-}
-
static void dummy_task_reparent_to_init (struct task_struct *p)
{
	p->euid = p->fsuid = 0;
...
@@ -831,7 +821,6 @@ void security_fixup_ops (struct security_operations *ops)
	set_to_dummy_if_null(ops, inode_readlink);
	set_to_dummy_if_null(ops, inode_follow_link);
	set_to_dummy_if_null(ops, inode_permission);
-	set_to_dummy_if_null(ops, inode_permission_lite);
	set_to_dummy_if_null(ops, inode_setattr);
	set_to_dummy_if_null(ops, inode_getattr);
	set_to_dummy_if_null(ops, inode_delete);
...
@@ -871,7 +860,6 @@ void security_fixup_ops (struct security_operations *ops)
	set_to_dummy_if_null(ops, task_wait);
	set_to_dummy_if_null(ops, task_kill);
	set_to_dummy_if_null(ops, task_prctl);
-	set_to_dummy_if_null(ops, task_kmod_set_label);
	set_to_dummy_if_null(ops, task_reparent_to_init);
	set_to_dummy_if_null(ops, task_to_inode);
	set_to_dummy_if_null(ops, ipc_permission);
...
security/root_plug.c
...
@@ -94,7 +94,6 @@ static struct security_operations rootplug_security_ops = {
	.bprm_set_security =		cap_bprm_set_security,
	.task_post_setuid =		cap_task_post_setuid,
-	.task_kmod_set_label =		cap_task_kmod_set_label,
	.task_reparent_to_init =	cap_task_reparent_to_init,

	.bprm_check_security =		rootplug_bprm_check_security,
...
@@ -135,7 +134,7 @@ static void __exit rootplug_exit (void)
	printk(KERN_INFO "Root Plug module removed\n");
}

-module_init (rootplug_init);
+security_initcall (rootplug_init);
module_exit (rootplug_exit);

MODULE_DESCRIPTION("Root Plug sample LSM module, written for Linux Journal article");
...
security/security.c
...
@@ -38,12 +38,22 @@ static inline int verify (struct security_operations *ops)
	return 0;
}

+static void __init do_security_initcalls(void)
+{
+	initcall_t *call;
+	call = &__security_initcall_start;
+	while (call < &__security_initcall_end) {
+		(*call)();
+		call++;
+	}
+}
+
/**
 * security_scaffolding_startup - initialzes the security scaffolding framework
 *
 * This should be called early in the kernel initialization sequence.
 */
-int security_scaffolding_startup (void)
+int __init security_scaffolding_startup (void)
{
	printk(KERN_INFO "Security Scaffold v" SECURITY_SCAFFOLD_VERSION " initialized\n");
...
@@ -55,6 +65,7 @@ int security_scaffolding_startup (void)
	}

	security_ops = &dummy_security_ops;
+	do_security_initcalls();

	return 0;
}
...
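__security_initcall_start and __security_initcall_end are linker symbols, not C objects: the one-line vmlinux.lds.S additions across the architectures in this merge bracket the section that security_initcall() populates, and do_security_initcalls() simply walks the pointers between them. Schematically (not verbatim from any architecture's script; exact placement varies):

	__security_initcall_start = .;
	.security_initcall.init : { *(.security_initcall.init) }
	__security_initcall_end = .;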