nexedi / linux - Commits

Commit 312b2eaf
Authored Feb 12, 2003 by Anton Blanchard

    Merge samba.org:/scratch/anton/linux-2.5
    into samba.org:/scratch/anton/sfr

Parents: 3c87bf0a 2b009dcf

Showing 34 changed files with 228 additions and 281 deletions
MAINTAINERS                              +7   -0
arch/i386/kernel/apic.c                  +8   -6
arch/i386/kernel/io_apic.c               +5   -1
arch/i386/kernel/smpboot.c               +0   -15
arch/i386/mach-voyager/voyager_smp.c     +5   -8
arch/i386/mm/hugetlbpage.c               +12  -0
arch/ia64/mm/hugetlbpage.c               +12  -0
arch/sparc64/mm/hugetlbpage.c            +12  -0
arch/x86_64/mm/hugetlbpage.c             +12  -0
drivers/block/DAC960.c                   +11  -6
drivers/ide/pci/amd74xx.c                +2   -1
drivers/ide/pci/amd74xx.h                +14  -0
drivers/net/3c509.c                      +40  -28
drivers/net/3c59x.c                      +5   -1
drivers/net/Space.c                      +0   -3
fs/buffer.c                              +4   -20
fs/ext3/inode.c                          +1   -4
fs/ext3/super.c                          +0   -2
fs/jbd/journal.c                         +0   -1
fs/jbd/recovery.c                        +6   -10
fs/jbd/transaction.c                     +15  -126
include/linux/ext3_jbd.h                 +0   -8
include/linux/hugetlb.h                  +2   -0
include/linux/jbd.h                      +0   -1
include/linux/jiffies.h                  +4   -14
include/linux/sched.h                    +1   -0
kernel/exit.c                            +1   -1
kernel/kmod.c                            +1   -9
kernel/ksyms.c                           +3   -0
kernel/sys.c                             +2   -11
kernel/time.c                            +14  -1
kernel/user.c                            +17  -0
lib/radix-tree.c                         +1   -2
mm/mmap.c                                +11  -2
MAINTAINERS

@@ -460,6 +460,13 @@ M: henrique@cyclades.com
 W:	http://www.cyclades.com/
 S:	Supported
 
+DAC960 RAID CONTROLLER DRIVER
+P:	Dave Olien
+M:	dmo@osdl.org
+W:	http://www.osdl.org/archive/dmo/DAC960
+L:	linux-kernel@vger.kernel.org
+S:	Maintained
+
 DAMA SLAVE for AX.25
 P:	Joerg Reuter
 M:	jreuter@yaina.de
arch/i386/kernel/apic.c

@@ -52,7 +52,7 @@ int using_apic_timer = 0;
 
 int prof_multiplier[NR_CPUS] = { 1, };
 int prof_old_multiplier[NR_CPUS] = { 1, };
-int prof_counter[NR_CPUS] = { 1, };
+DEFINE_PER_CPU(int, prof_counter) = 1;
 
 int get_maxlvt(void)
 {

@@ -997,7 +997,7 @@ inline void smp_local_timer_interrupt(struct pt_regs * regs)
        x86_do_profile(regs);
 
-       if (--prof_counter[cpu] <= 0) {
+       if (--per_cpu(prof_counter, cpu) <= 0) {
                /*
                 * The multiplier may have changed since the last time we got
                 * to this point as a result of the user writing to

@@ -1006,10 +1006,12 @@ inline void smp_local_timer_interrupt(struct pt_regs * regs)
                 *
                 * Interrupts are already masked off at this point.
                 */
-               prof_counter[cpu] = prof_multiplier[cpu];
-               if (prof_counter[cpu] != prof_old_multiplier[cpu]) {
-                       __setup_APIC_LVTT(calibration_result/prof_counter[cpu]);
-                       prof_old_multiplier[cpu] = prof_counter[cpu];
+               per_cpu(prof_counter, cpu) = prof_multiplier[cpu];
+               if (per_cpu(prof_counter, cpu) != prof_old_multiplier[cpu]) {
+                       __setup_APIC_LVTT(calibration_result/
+                                       per_cpu(prof_counter, cpu));
+                       prof_old_multiplier[cpu] =
+                                       per_cpu(prof_counter, cpu);
                }
 
 #ifdef CONFIG_SMP
arch/i386/kernel/io_apic.c

@@ -1440,7 +1440,8 @@ void disable_IO_APIC(void)
  * by Matt Domsch <Matt_Domsch@dell.com>  Tue Dec 21 12:25:05 CST 1999
  */
 
-static void __init setup_ioapic_ids_from_mpc (void)
+#ifndef CONFIG_X86_NUMAQ
+static void __init setup_ioapic_ids_from_mpc (void)
 {
        struct IO_APIC_reg_00 reg_00;
        unsigned long phys_id_present_map;

@@ -1533,6 +1534,9 @@ static void __init setup_ioapic_ids_from_mpc (void)
                        printk(" ok.\n");
        }
 }
+#else
+static void __init setup_ioapic_ids_from_mpc (void) { }
+#endif
 
 /*
  * There is a nasty bug in some older SMP boards, their mptable lies
arch/i386/kernel/smpboot.c

@@ -935,10 +935,6 @@ static void smp_tune_scheduling (void)
  * Cycle through the processors sending APIC IPIs to boot each.
  */
-extern int prof_multiplier[NR_CPUS];
-extern int prof_old_multiplier[NR_CPUS];
-extern int prof_counter[NR_CPUS];
-
 static int boot_cpu_logical_apicid;
 
 /* Where the IO area was mapped on multiquad, always 0 otherwise */
 void *xquad_portio;

@@ -949,17 +945,6 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
 {
        int apicid, cpu, bit;
 
-       /*
-        * Initialize the logical to physical CPU number mapping
-        * and the per-CPU profiling counter/multiplier
-        */
-
-       for (cpu = 0; cpu < NR_CPUS; cpu++) {
-               prof_counter[cpu] = 1;
-               prof_old_multiplier[cpu] = 1;
-               prof_multiplier[cpu] = 1;
-       }
-
        /*
         * Setup boot CPU information
         */
arch/i386/mach-voyager/voyager_smp.c

@@ -236,7 +236,7 @@ static __u32 trampoline_base;
 /* The per cpu profile stuff - used in smp_local_timer_interrupt */
 static unsigned int prof_multiplier[NR_CPUS] __cacheline_aligned = { 1, };
 static unsigned int prof_old_multiplier[NR_CPUS] __cacheline_aligned = { 1, };
-static unsigned int prof_counter[NR_CPUS] __cacheline_aligned = { 1, };
+static DEFINE_PER_CPU(unsigned int, prof_counter) = 1;
 
 /* the map used to check if a CPU has booted */
 static __u32 cpu_booted_map;

@@ -393,9 +393,6 @@ find_smp_config(void)
        /* initialize the CPU structures (moved from smp_boot_cpus) */
        for (i = 0; i < NR_CPUS; i++) {
-               prof_counter[i] = 1;
-               prof_old_multiplier[i] = 1;
-               prof_multiplier[i] = 1;
                cpu_irq_affinity[i] = ~0;
        }
 
        cpu_online_map = (1 << boot_cpu_id);

@@ -1312,7 +1309,7 @@ smp_local_timer_interrupt(struct pt_regs * regs)
        x86_do_profile(regs);
 
-       if (--prof_counter[cpu] <= 0) {
+       if (--per_cpu(prof_counter, cpu) <= 0) {
                /*
                 * The multiplier may have changed since the last time we got
                 * to this point as a result of the user writing to

@@ -1321,10 +1318,10 @@ smp_local_timer_interrupt(struct pt_regs * regs)
                 *
                 * Interrupts are already masked off at this point.
                 */
-               prof_counter[cpu] = prof_multiplier[cpu];
-               if (prof_counter[cpu] != prof_old_multiplier[cpu]) {
+               per_cpu(prof_counter, cpu) = prof_multiplier[cpu];
+               if (per_cpu(prof_counter, cpu) != prof_old_multiplier[cpu]) {
                        /* FIXME: need to update the vic timer tick here */
-                       prof_old_multiplier[cpu] = prof_counter[cpu];
+                       prof_old_multiplier[cpu] = per_cpu(prof_counter, cpu);
                }
 
                update_process_times(user_mode(regs));
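Note on the three profiling hunks above (arch/i386/kernel/apic.c, arch/i386/kernel/smpboot.c and arch/i386/mach-voyager/voyager_smp.c): they all make the same conversion, replacing the NR_CPUS-indexed prof_counter array with a per-CPU variable, which is also why the per-CPU initialisation loops can go away. A minimal sketch of how the DEFINE_PER_CPU / per_cpu pair is used, for illustration only (the helper name local_timer_tick is made up and not part of this commit):

    /* Sketch only -- not part of this diff. */
    #include <linux/percpu.h>

    /* One private copy of the counter per CPU, statically initialised to 1. */
    static DEFINE_PER_CPU(unsigned int, prof_counter) = 1;

    static void local_timer_tick(int cpu)
    {
            /* per_cpu(var, cpu) names the copy that belongs to 'cpu'. */
            if (--per_cpu(prof_counter, cpu) <= 0)
                    per_cpu(prof_counter, cpu) = 1;   /* reload the counter */
    }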
arch/i386/mm/hugetlbpage.c

@@ -88,6 +88,18 @@ static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma, struc
        set_pte(page_table, entry);
 }
 
+/*
+ * This function checks for proper alignment of input addr and len parameters.
+ */
+int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
+{
+       if (len & ~HPAGE_MASK)
+               return -EINVAL;
+       if (addr & ~HPAGE_MASK)
+               return -EINVAL;
+       return 0;
+}
+
 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                        struct vm_area_struct *vma)
 {
arch/ia64/mm/hugetlbpage.c

@@ -96,6 +96,18 @@ set_huge_pte (struct mm_struct *mm, struct vm_area_struct *vma,
        return;
 }
 
+/*
+ * This function checks for proper alignment of input addr and len parameters.
+ */
+int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
+{
+       if (len & ~HPAGE_MASK)
+               return -EINVAL;
+       if (addr & ~HPAGE_MASK)
+               return -EINVAL;
+       return 0;
+}
+
 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                        struct vm_area_struct *vma)
 {
arch/sparc64/mm/hugetlbpage.c

@@ -232,6 +232,18 @@ make_hugetlb_pages_present(unsigned long addr, unsigned long end, int flags)
        return -1;
 }
 
+/*
+ * This function checks for proper alignment of input addr and len parameters.
+ */
+int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
+{
+       if (len & ~HPAGE_MASK)
+               return -EINVAL;
+       if (addr & ~HPAGE_MASK)
+               return -EINVAL;
+       return 0;
+}
+
 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                        struct vm_area_struct *vma)
 {
arch/x86_64/mm/hugetlbpage.c

@@ -86,6 +86,18 @@ static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma, struc
        set_pte(page_table, entry);
 }
 
+/*
+ * This function checks for proper alignment of input addr and len parameters.
+ */
+int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
+{
+       if (len & ~HPAGE_MASK)
+               return -EINVAL;
+       if (addr & ~HPAGE_MASK)
+               return -EINVAL;
+       return 0;
+}
+
 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                        struct vm_area_struct *vma)
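Note: the same is_aligned_hugepage_range() helper is added, verbatim, to the i386, ia64, sparc64 and x86_64 hugetlbpage.c files above, and its declaration (plus a no-op fallback for builds without hugetlb support) lands in include/linux/hugetlb.h further down in this diff. Callers check a hugetlbfs-backed mapping before using it, exactly as the mm/mmap.c hunks at the end of the commit do. A hedged sketch of that calling pattern (the function name check_hugepage_mapping is invented for illustration):

    /* Sketch only -- mirrors the mm/mmap.c callers further down. */
    static unsigned long check_hugepage_mapping(struct file *file,
                                                unsigned long addr,
                                                unsigned long len)
    {
            if (is_file_hugepages(file)) {
                    /* 0 when addr and len are HPAGE_SIZE aligned, else -EINVAL */
                    unsigned long ret = is_aligned_hugepage_range(addr, len);

                    if (ret)
                            return ret;
            }
            return addr;
    }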
drivers/block/DAC960.c

@@ -1731,12 +1731,17 @@ static boolean DAC960_V2_ReadControllerConfiguration(DAC960_Controller_T
       if (!DAC960_V2_NewLogicalDeviceInfo(Controller, LogicalDeviceNumber))
 	break;
       LogicalDeviceNumber = NewLogicalDeviceInfo->LogicalDeviceNumber;
-      if (LogicalDeviceNumber > DAC960_MaxLogicalDrives)
-	panic("DAC960: Logical Drive Number %d not supported\n",
-	      LogicalDeviceNumber);
-      if (NewLogicalDeviceInfo->DeviceBlockSizeInBytes != DAC960_BlockSize)
-	panic("DAC960: Logical Drive Block Size %d not supported\n",
-	      NewLogicalDeviceInfo->DeviceBlockSizeInBytes);
+      if (LogicalDeviceNumber >= DAC960_MaxLogicalDrives) {
+	DAC960_Error("DAC960: Logical Drive Number %d not supported\n",
+		     Controller, LogicalDeviceNumber);
+	break;
+      }
+      if (NewLogicalDeviceInfo->DeviceBlockSizeInBytes != DAC960_BlockSize) {
+	DAC960_Error("DAC960: Logical Drive Block Size %d not supported\n",
+		     Controller, NewLogicalDeviceInfo->DeviceBlockSizeInBytes);
+	LogicalDeviceNumber++;
+	continue;
+      }
       PhysicalDevice.Controller = 0;
       PhysicalDevice.Channel = NewLogicalDeviceInfo->Channel;
       PhysicalDevice.TargetID = NewLogicalDeviceInfo->TargetID;
drivers/ide/pci/amd74xx.c

@@ -60,7 +60,7 @@ static struct amd_ide_chip {
        { PCI_DEVICE_ID_AMD_OPUS_7441,      0x00, 0x40, AMD_UDMA_100 },  /* AMD-768 Opus */
        { PCI_DEVICE_ID_AMD_8111_IDE,       0x00, 0x40, AMD_UDMA_100 },  /* AMD-8111 */
        { PCI_DEVICE_ID_NVIDIA_NFORCE_IDE,  0x00, 0x50, AMD_UDMA_100 },  /* nVidia nForce */
+       { PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE, 0x00, 0x50, AMD_UDMA_100 },  /* nVidia nForce 2 */
        { 0 }
 };

@@ -446,6 +446,7 @@ static struct pci_device_id amd74xx_pci_tbl[] __devinitdata = {
        { PCI_VENDOR_ID_AMD,    PCI_DEVICE_ID_AMD_OPUS_7441,        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
        { PCI_VENDOR_ID_AMD,    PCI_DEVICE_ID_AMD_8111_IDE,         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
        { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_IDE,    PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
+       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE,   PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
        { 0, },
 };
drivers/ide/pci/amd74xx.h

@@ -110,6 +110,20 @@ static ide_pci_device_t amd74xx_chipsets[] __devinitdata = {
                .bootable       = ON_BOARD,
                .extra          = 0,
        },
+       {       /* 6 */
+               .vendor         = PCI_VENDOR_ID_NVIDIA,
+               .device         = PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE,
+               .name           = "NFORCE2",
+               .init_chipset   = init_chipset_amd74xx,
+               .init_iops      = NULL,
+               .init_hwif      = init_hwif_amd74xx,
+               .init_dma       = init_dma_amd74xx,
+               .channels       = 2,
+               .autodma        = AUTODMA,
+               .enablebits     = {{0x50,0x01,0x01}, {0x50,0x02,0x02}},
+               .bootable       = ON_BOARD,
+               .extra          = 0,
+       },
        {
                .vendor         = 0,
                .device         = 0,
drivers/net/3c509.c

@@ -338,16 +338,6 @@ static int __init el3_common_init (struct net_device *dev)
        dev->watchdog_timeo = TX_TIMEOUT;
        dev->do_ioctl = netdev_ioctl;
 
-#ifdef CONFIG_PM
-       /* register power management */
-       lp->pmdev = pm_register(PM_ISA_DEV, card_idx, el3_pm_callback);
-       if (lp->pmdev) {
-               struct pm_dev *p;
-               p = lp->pmdev;
-               p->data = (struct net_device *)dev;
-       }
-#endif
-
        return 0;
 }

@@ -417,6 +407,13 @@ static int __init el3_probe(int card_idx)
                        phys_addr[j] = htons(read_eeprom(ioaddr, j));
                        if_port = read_eeprom(ioaddr, 8) >> 14;
+                       if (!(dev = init_etherdev(NULL, sizeof(struct el3_private)))) {
+                               release_region(ioaddr, EL3_IO_EXTENT);
+                               pnp_device_detach(idev);
+                               return -ENOMEM;
+                       }
+                       SET_MODULE_OWNER(dev);
+
                        pnp_cards++;
                        goto found;
                }

@@ -497,24 +494,29 @@ static int __init el3_probe(int card_idx)
        }
        irq = id_read_eeprom(9) >> 12;
 
-#if 0 /* Huh ?  Can someone explain what is this for ? */
-       if (dev) {                      /* Set passed-in IRQ or I/O Addr. */
-               if (dev->irq > 1  &&  dev->irq < 16)
-                       irq = dev->irq;
-
-               if (dev->base_addr) {
-                       if (dev->mem_end == 0x3c509     /* Magic key */
-                           && dev->base_addr >= 0x200  &&  dev->base_addr <= 0x3e0)
-                               ioaddr = dev->base_addr & 0x3f0;
-                       else if (dev->base_addr != ioaddr)
-                               return -ENODEV;
-               }
-       }
-#endif
-
-       if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509"))
-               return -EBUSY;
+       if (!(dev = init_etherdev(NULL, sizeof(struct el3_private))))
+               return -ENOMEM;
+       SET_MODULE_OWNER(dev);
+
+       /* Set passed-in IRQ or I/O Addr. */
+       if (dev->irq > 1  &&  dev->irq < 16)
+               irq = dev->irq;
+
+       if (dev->base_addr) {
+               if (dev->mem_end == 0x3c509     /* Magic key */
+                   && dev->base_addr >= 0x200  &&  dev->base_addr <= 0x3e0)
+                       ioaddr = dev->base_addr & 0x3f0;
+               else if (dev->base_addr != ioaddr) {
+                       unregister_netdev(dev);
+                       return -ENODEV;
+               }
+       }
+
+       if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509")) {
+               unregister_netdev(dev);
+               return -EBUSY;
+       }
 
        /* Set the adaptor tag so that the next card can be found. */
        outb(0xd0 + ++current_tag, id_port);

@@ -524,6 +526,7 @@ static int __init el3_probe(int card_idx)
        EL3WINDOW(0);
        if (inw(ioaddr) != 0x6d50) {
+               unregister_netdev(dev);
                release_region(ioaddr, EL3_IO_EXTENT);
                return -ENODEV;
        }

@@ -531,12 +534,9 @@ static int __init el3_probe(int card_idx)
        /* Free the interrupt so that some other card can use it. */
        outw(0x0f00, ioaddr + WN0_IRQ);
 
-       dev = init_etherdev(NULL, sizeof(struct el3_private));
-       if (dev == NULL) {
-               release_region(ioaddr, EL3_IO_EXTENT);
-               return -ENOMEM;
-       }
-       SET_MODULE_OWNER(dev);
 #ifdef __ISAPNP__
 found:                                                 /* PNP jumps here... */
 #endif /* __ISAPNP__ */
        memcpy(dev->dev_addr, phys_addr, sizeof(phys_addr));
        dev->base_addr = ioaddr;

@@ -547,6 +547,16 @@ static int __init el3_probe(int card_idx)
        lp->dev = &idev->dev;
 #endif
 
+#ifdef CONFIG_PM
+       /* register power management */
+       lp->pmdev = pm_register(PM_ISA_DEV, card_idx, el3_pm_callback);
+       if (lp->pmdev) {
+               struct pm_dev *p;
+               p = lp->pmdev;
+               p->data = (struct net_device *)dev;
+       }
+#endif
+
        return el3_common_init(dev);
 }

@@ -667,6 +677,7 @@ static int __init el3_eisa_probe (struct device *device)
 }
 #endif
 
+#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
 /* This remove works for all device types.
  *
  * The net dev must be stored in the driver_data field */

@@ -679,6 +690,7 @@ static int __devexit el3_device_remove (struct device *device)
        el3_common_remove (dev);
        return 0;
 }
+#endif
 
 /* Read a word from the EEPROM using the regular EEPROM access register.
    Assume that we are in register window zero.
drivers/net/3c59x.c

@@ -181,7 +181,7 @@
    - See http://www.zip.com.au/~akpm/linux/#3c59x-2.3 for more details.
    - Also see Documentation/networking/vortex.txt
 
-   LK1.1.19 10Nov09 Marc Zyngier <maz@wild-wind.fr.eu.org>
+   LK1.1.19 10Nov02 Marc Zyngier <maz@wild-wind.fr.eu.org>
    - EISA sysfs integration.
 */

@@ -817,7 +817,11 @@ struct vortex_private {
        u32 power_state[16];
 };
 
+#ifdef CONFIG_PCI
 #define DEVICE_PCI(dev) (((dev)->bus == &pci_bus_type) ? to_pci_dev((dev)) : NULL)
+#else
+#define DEVICE_PCI(dev) NULL
+#endif
 
 #define VORTEX_PCI(vp) (((vp)->gendev) ? DEVICE_PCI((vp)->gendev) : NULL)
drivers/net/Space.c

@@ -224,9 +224,6 @@ static struct devprobe isa_probes[] __initdata = {
 #ifdef CONFIG_EL2      /* 3c503 */
        {el2_probe, 0},
 #endif
-#ifdef CONFIG_EL3
-       {el3_probe, 0},
-#endif
 #ifdef CONFIG_HPLAN
        {hp_probe, 0},
 #endif
fs/buffer.c

@@ -31,7 +31,6 @@
 #include <linux/highmem.h>
 #include <linux/module.h>
 #include <linux/writeback.h>
-#include <linux/mempool.h>
 #include <linux/hash.h>
 #include <linux/suspend.h>
 #include <linux/buffer_head.h>

@@ -2791,7 +2790,6 @@ asmlinkage long sys_bdflush(int func, long data)
  * Buffer-head allocation
  */
 static kmem_cache_t *bh_cachep;
-static mempool_t *bh_mempool;
 
 /*
  * Once the number of bh's in the machine exceeds this level, we start

@@ -2825,7 +2823,7 @@ static void recalc_bh_state(void)
 
 struct buffer_head *alloc_buffer_head(void)
 {
-       struct buffer_head *ret = mempool_alloc(bh_mempool, GFP_NOFS);
+       struct buffer_head *ret = kmem_cache_alloc(bh_cachep, GFP_NOFS);
        if (ret) {
                preempt_disable();
                __get_cpu_var(bh_accounting).nr++;

@@ -2839,7 +2837,7 @@ EXPORT_SYMBOL(alloc_buffer_head);
 void free_buffer_head(struct buffer_head *bh)
 {
        BUG_ON(!list_empty(&bh->b_assoc_buffers));
-       mempool_free(bh, bh_mempool);
+       kmem_cache_free(bh_cachep, bh);
        preempt_disable();
        __get_cpu_var(bh_accounting).nr--;
        recalc_bh_state();

@@ -2847,7 +2845,8 @@ void free_buffer_head(struct buffer_head *bh)
 }
 EXPORT_SYMBOL(free_buffer_head);
 
-static void init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
+static void
+init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
 {
        if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == SLAB_CTOR_CONSTRUCTOR) {

@@ -2858,19 +2857,6 @@ static void init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long fla
        }
 }
 
-static void *bh_mempool_alloc(int gfp_mask, void *pool_data)
-{
-       return kmem_cache_alloc(bh_cachep, gfp_mask);
-}
-
-static void bh_mempool_free(void *element, void *pool_data)
-{
-       return kmem_cache_free(bh_cachep, element);
-}
-
-#define NR_RESERVED (10*MAX_BUF_PER_PAGE)
-#define MAX_UNUSED_BUFFERS NR_RESERVED+20
-
 static void buffer_init_cpu(int cpu)
 {
        struct bh_accounting *bha = &per_cpu(bh_accounting, cpu);

@@ -2907,8 +2893,6 @@ void __init buffer_init(void)
        bh_cachep = kmem_cache_create("buffer_head",
                        sizeof(struct buffer_head), 0, 0, init_buffer_head, NULL);
-       bh_mempool = mempool_create(MAX_UNUSED_BUFFERS, bh_mempool_alloc,
-                               bh_mempool_free, NULL);
        for (i = 0; i < ARRAY_SIZE(bh_wait_queue_heads); i++)
                init_waitqueue_head(&bh_wait_queue_heads[i].wqh);
fs/ext3/inode.c

@@ -1317,10 +1317,7 @@ static int ext3_writepage(struct page *page, struct writeback_control *wbc)
                goto out_fail;
 
        needed = ext3_writepage_trans_blocks(inode);
-       if (wbc->for_reclaim)
-               handle = ext3_journal_try_start(inode, needed);
-       else
-               handle = ext3_journal_start(inode, needed);
+       handle = ext3_journal_start(inode, needed);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
fs/ext3/super.c

@@ -1343,9 +1343,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
                 * superblock lock.
                 */
                EXT3_SB(sb)->s_mount_state |= EXT3_ORPHAN_FS;
-               unlock_super(sb);       /* akpm: sigh */
                ext3_orphan_cleanup(sb, es);
-               lock_super(sb);
                EXT3_SB(sb)->s_mount_state &= ~EXT3_ORPHAN_FS;
                if (needs_recovery)
                        printk(KERN_INFO "EXT3-fs: recovery complete.\n");
fs/jbd/journal.c

@@ -38,7 +38,6 @@
 #include <linux/proc_fs.h>
 
 EXPORT_SYMBOL(journal_start);
-EXPORT_SYMBOL(journal_try_start);
 EXPORT_SYMBOL(journal_restart);
 EXPORT_SYMBOL(journal_extend);
 EXPORT_SYMBOL(journal_stop);
fs/jbd/recovery.c

@@ -212,16 +212,14 @@ do { \
  *
  * The primary function for recovering the log contents when mounting a
  * journaled device.
- */
-int journal_recover(journal_t *journal)
-{
-       /*
-        *
+ *
  * Recovery is done in three passes.  In the first pass, we look for the
  * end of the log.  In the second, we assemble the list of revoke
  * blocks.  In the third and final pass, we replay any un-revoked blocks
  * in the log.
  */
+int journal_recover(journal_t *journal)
+{
        int err;
        journal_superblock_t *sb;

@@ -273,15 +271,13 @@ int journal_recover(journal_t *journal)
  * journal structures in memory to ignore it (presumably because the
  * caller has evidence that it is out of date).
  * This function does'nt appear to be exorted..
- */
-int journal_skip_recovery(journal_t *journal)
-{
-       /*
-        *
+ *
  * We perform one pass over the journal to allow us to tell the user how
  * much recovery information is being erased, and to let us initialise
  * the journal transaction sequence numbers to the next unused ID.
  */
+int journal_skip_recovery(journal_t *journal)
+{
        int err;
        journal_superblock_t *sb;
fs/jbd/transaction.c

@@ -266,113 +266,6 @@ handle_t *journal_start(journal_t *journal, int nblocks)
        return handle;
 }
 
-/*
- * Return zero on success
- */
-static int try_start_this_handle(journal_t *journal, handle_t *handle)
-{
-       transaction_t *transaction;
-       int needed;
-       int nblocks = handle->h_buffer_credits;
-       int ret = 0;
-
-       jbd_debug(3, "New handle %p maybe going live.\n", handle);
-
-       lock_journal(journal);
-
-       if (is_journal_aborted(journal) ||
-           (journal->j_errno != 0 && !(journal->j_flags & JFS_ACK_ERR))) {
-               ret = -EROFS;
-               goto fail_unlock;
-       }
-
-       if (journal->j_barrier_count)
-               goto fail_unlock;
-
-       if (!journal->j_running_transaction && get_transaction(journal, 1) == 0)
-               goto fail_unlock;
-
-       transaction = journal->j_running_transaction;
-       if (transaction->t_state == T_LOCKED)
-               goto fail_unlock;
-
-       needed = transaction->t_outstanding_credits + nblocks;
-       /* We could run log_start_commit here */
-       if (needed > journal->j_max_transaction_buffers)
-               goto fail_unlock;
-
-       needed = journal->j_max_transaction_buffers;
-       if (journal->j_committing_transaction)
-               needed += journal->j_committing_transaction->t_outstanding_credits;
-
-       if (log_space_left(journal) < needed)
-               goto fail_unlock;
-
-       handle->h_transaction = transaction;
-       transaction->t_outstanding_credits += nblocks;
-       transaction->t_updates++;
-       jbd_debug(4, "Handle %p given %d credits (total %d, free %d)\n",
-                 handle, nblocks, transaction->t_outstanding_credits,
-                 log_space_left(journal));
-       unlock_journal(journal);
-       return 0;
-
-fail_unlock:
-       unlock_journal(journal);
-       if (ret >= 0)
-               ret = -1;
-       return ret;
-}
-
-/**
- * handle_t *journal_try_start() - Don't block, but try and get a handle
- * @journal: Journal to start transaction on.
- * @nblocks: number of block buffer we might modify
- *
- * Try to start a handle, but non-blockingly.  If we weren't able
- * to, return an ERR_PTR value.
- */
-handle_t *journal_try_start(journal_t *journal, int nblocks)
-{
-       handle_t *handle = journal_current_handle();
-       int err;
-
-       if (!journal)
-               return ERR_PTR(-EROFS);
-
-       if (handle) {
-               jbd_debug(4, "h_ref %d -> %d\n",
-                               handle->h_ref,
-                               handle->h_ref + 1);
-               J_ASSERT(handle->h_transaction->t_journal == journal);
-               if (is_handle_aborted(handle))
-                       return ERR_PTR(-EIO);
-               handle->h_ref++;
-               return handle;
-       } else {
-               jbd_debug(4, "no current transaction\n");
-       }
-
-       if (is_journal_aborted(journal))
-               return ERR_PTR(-EIO);
-
-       handle = new_handle(nblocks);
-       if (!handle)
-               return ERR_PTR(-ENOMEM);
-
-       current->journal_info = handle;
-       err = try_start_this_handle(journal, handle);
-       if (err < 0) {
-               kfree(handle);
-               current->journal_info = NULL;
-               return ERR_PTR(err);
-       }
-
-       return handle;
-}
-
 /**
  * int journal_extend() - extend buffer credits.
  * @handle:  handle to 'extend'

@@ -969,22 +862,23 @@ int journal_get_undo_access (handle_t *handle, struct buffer_head *bh)
 }
 
 /**
- * int journal_dirty_data() -  mark a buffer as containing dirty data which needs to be flushed before we can commit the current transaction.
+ * int journal_dirty_data() -  mark a buffer as containing dirty data which
+ *                             needs to be flushed before we can commit the
+ *                             current transaction.
  * @handle: transaction
 * @bh: bufferhead to mark
 *
 * The buffer is placed on the transaction's data list and is marked as
 * belonging to the transaction.
-*/
-int journal_dirty_data (handle_t *handle, struct buffer_head *bh)
-{
-       /*
-        * Returns error number or 0 on success.
 *
+* Returns error number or 0 on success.
+*
 * journal_dirty_data() can be called via page_launder->ext3_writepage
 * by kswapd.  So it cannot block.  Happily, there's nothing here
 * which needs lock_journal if `async' is set.
 */
+int journal_dirty_data (handle_t *handle, struct buffer_head *bh)
+{
        journal_t *journal = handle->h_transaction->t_journal;
        int need_brelse = 0;
        struct journal_head *jh;

@@ -1129,23 +1023,22 @@ int journal_dirty_data (handle_t *handle, struct buffer_head *bh)
 * @handle: transaction to add buffer to.
 * @bh: buffer to mark
 *
-* mark dirty metadata which needs to be journaled as part of the current transaction.
+* mark dirty metadata which needs to be journaled as part of the current
+* transaction.
 *
 * The buffer is placed on the transaction's metadata list and is marked
 * as belonging to the transaction.
 *
 * Returns error number or 0 on success.
-*/
-int journal_dirty_metadata (handle_t *handle, struct buffer_head *bh)
-{
-       /*
-        *
+*
 * Special care needs to be taken if the buffer already belongs to the
 * current committing transaction (in which case we should have frozen
 * data present for that commit).  In that case, we don't relink the
 * buffer: that only gets done when the old transaction finally
 * completes its commit.
-*
 */
+int journal_dirty_metadata (handle_t *handle, struct buffer_head *bh)
+{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal = transaction->t_journal;
        struct journal_head *jh = bh2jh(bh);

@@ -1726,13 +1619,6 @@ static inline int __journal_try_to_free_buffer(struct buffer_head *bh)
 * to be called. We do this if the page is releasable by try_to_free_buffers().
 * We also do it if the page has locked or dirty buffers and the caller wants
 * us to perform sync or async writeout.
-*/
-int journal_try_to_free_buffers(journal_t *journal,
-                               struct page *page, int unused_gfp_mask)
-{
-       /*
-        * journal_try_to_free_buffers().  Try to remove all this page's buffers
-        * from the journal.
         *
         * This complicates JBD locking somewhat.  We aren't protected by the
         * BKL here.  We wish to remove the buffer from its committing or

@@ -1752,6 +1638,9 @@ int journal_try_to_free_buffers(journal_t *journal,
         * cannot happen because we never reallocate freed data as metadata
         * while the data is part of a transaction.  Yes?
         */
+int journal_try_to_free_buffers(journal_t *journal,
+                               struct page *page, int unused_gfp_mask)
+{
        struct buffer_head *head;
        struct buffer_head *bh;
        int ret = 0;
include/linux/ext3_jbd.h

@@ -210,14 +210,6 @@ static inline handle_t *ext3_journal_start(struct inode *inode, int nblocks)
        return journal_start(journal, nblocks);
 }
 
-static inline handle_t *ext3_journal_try_start(struct inode *inode, int nblocks)
-{
-       if (inode->i_sb->s_flags & MS_RDONLY)
-               return ERR_PTR(-EROFS);
-       return journal_try_start(EXT3_JOURNAL(inode), nblocks);
-}
-
 /*
  * The only special thing we need to do here is to make sure that all
  * journal_stop calls result in the superblock being marked dirty, so
include/linux/hugetlb.h

@@ -26,6 +26,7 @@ struct vm_area_struct *hugepage_vma(struct mm_struct *mm,
                                                unsigned long address);
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                                pmd_t *pmd, int write);
+int is_aligned_hugepage_range(unsigned long addr, unsigned long len);
 int pmd_huge(pmd_t pmd);
 
 extern int htlbpage_max;

@@ -56,6 +57,7 @@ static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
 #define hugepage_vma(mm, addr)                  0
 #define mark_mm_hugetlb(mm, vma)                do { } while (0)
 #define follow_huge_pmd(mm, addr, pmd, write)   0
+#define is_aligned_hugepage_range(addr, len)    0
 #define pmd_huge(x)                             0
 
 #ifndef HPAGE_MASK
include/linux/jbd.h

@@ -726,7 +726,6 @@ static inline handle_t *journal_current_handle(void)
  */
 
 extern handle_t *journal_start(journal_t *, int nblocks);
-extern handle_t *journal_try_start(journal_t *, int nblocks);
 extern int       journal_restart(handle_t *, int nblocks);
 extern int       journal_extend(handle_t *, int nblocks);
 extern int       journal_get_write_access(handle_t *, struct buffer_head *);
include/linux/jiffies.h

@@ -15,24 +15,14 @@
 extern u64 jiffies_64;
 extern unsigned long volatile jiffies;
 
+#if (BITS_PER_LONG < 64)
+u64 get_jiffies_64(void);
+#else
 static inline u64 get_jiffies_64(void)
 {
-#if BITS_PER_LONG < 64
-       extern seqlock_t xtime_lock;
-       unsigned long seq;
-       u64 tmp;
-
-       do {
-               seq = read_seqbegin(&xtime_lock);
-               tmp = jiffies_64;
-       } while (read_seqretry(&xtime_lock, seq));
-       return tmp;
-#else
        return (u64)jiffies;
-#endif
 }
+#endif
 
 /*
  *     These inlines deal with timer wrapping correctly. You are
include/linux/sched.h

@@ -509,6 +509,7 @@ extern void __set_special_pids(pid_t session, pid_t pgrp);
 /* per-UID process charging. */
 extern struct user_struct * alloc_uid(uid_t);
 extern void free_uid(struct user_struct *);
+extern void switch_uid(struct user_struct *);
 
 #include <asm/current.h>
kernel/exit.c

@@ -249,7 +249,7 @@ void reparent_to_init(void)
        /* signals? */
        security_task_reparent_to_init(current);
        memcpy(current->rlim, init_task.rlim, sizeof(*(current->rlim)));
-       current->user = INIT_USER;
+       switch_uid(INIT_USER);
 
        write_unlock_irq(&tasklist_lock);
 }
kernel/kmod.c

@@ -121,15 +121,7 @@ int exec_usermodehelper(char *program_path, char *argv[], char *envp[])
                if (curtask->files->fd[i]) close(i);
        }
 
-       /* Drop the "current user" thing */
-       {
-               struct user_struct *user = curtask->user;
-               curtask->user = INIT_USER;
-               atomic_inc(&INIT_USER->__count);
-               atomic_inc(&INIT_USER->processes);
-               atomic_dec(&user->processes);
-               free_uid(user);
-       }
+       switch_uid(INIT_USER);
 
        /* Give kmod all effective privileges.. */
        curtask->euid = curtask->fsuid = 0;
kernel/ksyms.c

@@ -490,6 +490,9 @@ EXPORT_SYMBOL(xtime);
 EXPORT_SYMBOL(xtime_lock);
 EXPORT_SYMBOL(do_gettimeofday);
 EXPORT_SYMBOL(do_settimeofday);
+#if (BITS_PER_LONG < 64)
+EXPORT_SYMBOL(get_jiffies_64);
+#endif
 #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
 EXPORT_SYMBOL(__might_sleep);
 #endif
kernel/sys.c

@@ -561,19 +561,12 @@ asmlinkage long sys_setgid(gid_t gid)
 
 static int set_user(uid_t new_ruid, int dumpclear)
 {
-       struct user_struct *new_user, *old_user;
+       struct user_struct *new_user;
 
-       /* What if a process setreuid()'s and this brings the
-        * new uid over his NPROC rlimit?  We can check this now
-        * cheaply with the new uid cache, so if it matters
-        * we should be checking for it.  -DaveM
-        */
        new_user = alloc_uid(new_ruid);
        if (!new_user)
                return -EAGAIN;
 
-       old_user = current->user;
-       atomic_dec(&old_user->processes);
-       atomic_inc(&new_user->processes);
+       switch_uid(new_user);
 
        if (dumpclear) {

@@ -581,8 +574,6 @@ static int set_user(uid_t new_ruid, int dumpclear)
                wmb();
        }
        current->uid = new_ruid;
-       current->user = new_user;
-       free_uid(old_user);
        return 0;
 }
kernel/time.c

@@ -27,7 +27,6 @@
 #include <linux/timex.h>
 #include <linux/errno.h>
 #include <linux/smp_lock.h>
 #include <asm/uaccess.h>
 
 /*

@@ -416,3 +415,17 @@ struct timespec current_kernel_time(void)
 
        return now;
 }
+
+#if (BITS_PER_LONG < 64)
+u64 get_jiffies_64(void)
+{
+       unsigned long seq;
+       u64 ret;
+
+       do {
+               seq = read_seqbegin(&xtime_lock);
+               ret = jiffies_64;
+       } while (read_seqretry(&xtime_lock, seq));
+       return ret;
+}
+#endif
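Note: on 32-bit configurations (BITS_PER_LONG < 64) a plain 64-bit read of jiffies_64 can tear, so the out-of-line get_jiffies_64() added above samples it inside an xtime_lock seqlock read section and retries if a writer got in between; include/linux/jiffies.h earlier in the diff now declares this function instead of inlining the loop. The same reader pattern works for any 64-bit value published under a seqlock. In this hedged sketch the lock and counter names are invented, not taken from the diff:

    /* Sketch only -- illustrative seqlock reader, names are made up. */
    #include <linux/seqlock.h>

    static seqlock_t sample_lock = SEQLOCK_UNLOCKED;
    static u64 sample_counter;                  /* updated by a writer under the lock */

    static u64 read_sample_counter(void)
    {
            unsigned long seq;
            u64 val;

            do {
                    seq = read_seqbegin(&sample_lock);      /* open read section */
                    val = sample_counter;                   /* may race with a writer */
            } while (read_seqretry(&sample_lock, seq));     /* retry if it did */
            return val;
    }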
kernel/user.c

@@ -116,6 +116,23 @@ struct user_struct * alloc_uid(uid_t uid)
        return up;
 }
 
+void switch_uid(struct user_struct *new_user)
+{
+       struct user_struct *old_user;
+
+       /* What if a process setreuid()'s and this brings the
+        * new uid over his NPROC rlimit?  We can check this now
+        * cheaply with the new uid cache, so if it matters
+        * we should be checking for it.  -DaveM
+        */
+       old_user = current->user;
+       atomic_inc(&new_user->__count);
+       atomic_inc(&new_user->processes);
+       atomic_dec(&old_user->processes);
+       current->user = new_user;
+       free_uid(old_user);
+}
+
 static int __init uid_cache_init(void)
 {
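Note: switch_uid(), added to kernel/user.c above, centralises the uid-switch bookkeeping (take a reference on the new user_struct, move the process count, drop the old reference). The kernel/exit.c, kernel/kmod.c and kernel/sys.c hunks in this same commit convert their open-coded versions to it; roughly, the before/after at a call site looks like this (sketch assembled from those hunks, not a new API):

    /* Before: manual refcount and process-count juggling at each call site. */
    old_user = current->user;
    atomic_inc(&INIT_USER->__count);
    atomic_inc(&INIT_USER->processes);
    atomic_dec(&old_user->processes);
    current->user = INIT_USER;
    free_uid(old_user);

    /* After: one helper does the same accounting. */
    switch_uid(INIT_USER);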
lib/radix-tree.c

@@ -154,8 +154,7 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
 
                /* Increase the height.  */
                node->slots[0] = root->rnode;
-               if (root->rnode)
-                       node->count = 1;
+               node->count = 1;
                root->rnode = node;
                root->height++;
        } while (height > root->height);
mm/mmap.c

@@ -801,6 +801,13 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
                        return -ENOMEM;
                if (addr & ~PAGE_MASK)
                        return -EINVAL;
+               if (is_file_hugepages(file)) {
+                       unsigned long ret;
+
+                       ret = is_aligned_hugepage_range(addr, len);
+                       if (ret)
+                               return ret;
+               }
                return addr;
        }

@@ -1224,8 +1231,10 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
        /* we have start < mpnt->vm_end  */
 
        if (is_vm_hugetlb_page(mpnt)) {
-               if ((start & ~HPAGE_MASK) || (len & ~HPAGE_MASK))
-                       return -EINVAL;
+               int ret = is_aligned_hugepage_range(start, len);
+
+               if (ret)
+                       return ret;
        }
 
        /* if it doesn't overlap, we have nothing.. */