Kirill Smelkov / linux / Commits / a97c3b80

Commit a97c3b80 authored Feb 06, 2002 by Linus Torvalds
Merge penguin:v2.5/linux
into athlon.transmeta.com:/home/torvalds/v2.5/linux

Parents: 8e018235 748261c0
Showing 11 changed files with 125 additions and 54 deletions.
drivers/scsi/scsi.c    +8   -9
fs/bio.c               +13  -13
fs/dcache.c            +1   -0
fs/file_table.c        +14  -0
fs/namespace.c         +3   -9
fs/nfs/inode.c         +2   -0
fs/ntfs/fs.c           +1   -1
fs/udf/super.c         +1   -1
include/linux/fs.h     +9   -2
mm/bootmem.c           +13  -2
mm/slab.c              +60  -17
drivers/scsi/scsi.c

@@ -87,13 +87,16 @@ static void scsi_dump_status(int level);
 struct scsi_host_sg_pool {
 	int size;
+	char *name;
 	kmem_cache_t *slab;
 	mempool_t *pool;
 };
 
-static const int scsi_host_sg_pool_sizes[SG_MEMPOOL_NR] = { 8, 16, 32, 64, MAX_PHYS_SEGMENTS };
-struct scsi_host_sg_pool scsi_sg_pools[SG_MEMPOOL_NR];
+#define SP(x) { x, "sgpool-" #x }
+struct scsi_host_sg_pool scsi_sg_pools[SG_MEMPOOL_NR] = {
+	SP(8), SP(16), SP(32), SP(64), SP(MAX_PHYS_SEGMENTS)
+};
+#undef SP
 
 /*
 static const char RCSid[] = "$Header: /vger/u4/cvs/linux/drivers/scsi/scsi.c,v 1.38 1997/01/19 23:07:18 davem Exp $";
 */

@@ -2489,7 +2492,6 @@ void scsi_free_sgtable(struct scatterlist *sgl, int index)
 static int __init init_scsi(void)
 {
 	struct proc_dir_entry *generic;
-	char name[16];
 	int i;
 
 	printk(KERN_INFO "SCSI subsystem driver " REVISION "\n");

@@ -2499,18 +2501,15 @@ static int __init init_scsi(void)
 	 */
 	for (i = 0; i < SG_MEMPOOL_NR; i++) {
 		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
-		int size = scsi_host_sg_pool_sizes[i] * sizeof(struct scatterlist);
+		int size = sgp->size * sizeof(struct scatterlist);
 
-		snprintf(name, sizeof(name) - 1, "sgpool-%d", scsi_host_sg_pool_sizes[i]);
-		sgp->slab = kmem_cache_create(name, size, 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+		sgp->slab = kmem_cache_create(sgp->name, size, 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
 		if (!sgp->slab)
 			panic("SCSI: can't init sg slab\n");
 
 		sgp->pool = mempool_create(SG_MEMPOOL_SIZE, scsi_pool_alloc, scsi_pool_free, sgp->slab);
 		if (!sgp->pool)
 			panic("SCSI: can't init sg mempool\n");
 
 		sgp->size = size;
 	}
 
 	/*
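Aside: the pattern these scsi.c hunks switch to (a stringizing macro generates each pool's size and its printable name together, so the old snprintf() into a 16-byte stack buffer is no longer needed) can be sketched as stand-alone user-space C. The struct sg_pool, pools array and the 128 entry below are illustrative, not taken from the kernel:

    /* User-space sketch of the SP() initializer pattern, not kernel code. */
    #include <stdio.h>

    struct sg_pool {
        int size;
        const char *name;
    };

    #define SP(x) { x, "sgpool-" #x }   /* #x turns 8 into "8" at compile time */
    static const struct sg_pool pools[] = {
        SP(8), SP(16), SP(32), SP(64), SP(128)
    };
    #undef SP

    int main(void)
    {
        unsigned int i;

        /* each entry already carries the name that used to be snprintf()ed */
        for (i = 0; i < sizeof(pools) / sizeof(pools[0]); i++)
            printf("%s: %d entries\n", pools[i].name, pools[i].size);
        return 0;
    }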
fs/bio.c

@@ -33,20 +33,24 @@ static kmem_cache_t *bio_slab;
 struct biovec_pool {
 	int size;
+	char *name;
 	kmem_cache_t *slab;
 	mempool_t *pool;
 };
 
-static struct biovec_pool bvec_array[BIOVEC_NR_POOLS];
-
 /*
  * if you change this list, also change bvec_alloc or things will
  * break badly! cannot be bigger than what you can fit into an
  * unsigned short
  */
-static const int bvec_pool_sizes[BIOVEC_NR_POOLS] = { 1, 4, 16, 64, 128, 256 };
-
-#define BIO_MAX_PAGES	(bvec_pool_sizes[BIOVEC_NR_POOLS - 1])
+#define BV(x) { x, "biovec-" #x }
+static struct biovec_pool bvec_array[BIOVEC_NR_POOLS] = {
+	BV(1), BV(4), BV(16), BV(64), BV(128), BV(256)
+};
+#undef BV
+
+#define BIO_MAX_PAGES	(bvec_array[BIOVEC_NR_POOLS - 1].size)
 
 static void *slab_pool_alloc(int gfp_mask, void *data)
 {

@@ -64,7 +68,7 @@ static inline struct bio_vec *bvec_alloc(int gfp_mask, int nr, int *idx)
 	struct bio_vec *bvl;
 
 	/*
-	 * see comment near bvec_pool_sizes define!
+	 * see comment near bvec_array define!
 	 */
 	switch (nr) {
 		case 1:
 			*idx = 0;
 			break;

@@ -452,21 +456,17 @@ int bio_endio(struct bio *bio, int uptodate, int nr_sectors)
 static void __init biovec_init_pool(void)
 {
-	char name[16];
 	int i, size;
 
-	memset(&bvec_array, 0, sizeof(bvec_array));
-
 	for (i = 0; i < BIOVEC_NR_POOLS; i++) {
 		struct biovec_pool *bp = bvec_array + i;
 
-		size = bvec_pool_sizes[i] * sizeof(struct bio_vec);
+		size = bp->size * sizeof(struct bio_vec);
 
 		printk("biovec: init pool %d, %d entries, %d bytes\n", i,
-			bvec_pool_sizes[i], size);
+			bp->size, size);
 
-		snprintf(name, sizeof(name) - 1, "biovec-%d", bvec_pool_sizes[i]);
-		bp->slab = kmem_cache_create(name, size, 0,
+		bp->slab = kmem_cache_create(bp->name, size, 0,
 			SLAB_HWCACHE_ALIGN, NULL, NULL);
 		if (!bp->slab)
 			panic("biovec: can't init slab cache\n");
fs/dcache.c

@@ -1283,6 +1283,7 @@ void __init vfs_caches_init(unsigned long mempages)
 	dcache_init(mempages);
 	inode_init(mempages);
+	files_init(mempages);
 	mnt_init(mempages);
 	bdev_cache_init();
 	cdev_cache_init();
fs/file_table.c

@@ -186,3 +186,17 @@ int fs_may_remount_ro(struct super_block *sb)
 	file_list_unlock();
 	return 0;
 }
+
+void __init files_init(unsigned long mempages)
+{
+	int n;
+	/* One file with associated inode and dcache is very roughly 1K.
+	 * Per default don't use more than 10% of our memory for files.
+	 */
+
+	n = (mempages * (PAGE_SIZE / 1024)) / 10;
+	files_stat.max_files = n;
+	if (files_stat.max_files < NR_FILE)
+		files_stat.max_files = NR_FILE;
+}
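The sizing heuristic files_init() adds (roughly 1 KiB per open file, at most about 10% of memory, never below NR_FILE) is easy to check by hand. A user-space sketch with made-up numbers, 128 MiB of 4 KiB pages and 8192 standing in for NR_FILE:

    /* User-space sketch of the files_init() sizing heuristic above. */
    #include <stdio.h>

    int main(void)
    {
        unsigned long mempages = 32768;   /* 128 MiB worth of 4 KiB pages */
        unsigned long page_size = 4096;
        unsigned long nr_file = 8192;     /* illustrative stand-in for NR_FILE */
        unsigned long n;

        /* ~1 KiB per file, use at most ~10% of memory for files */
        n = (mempages * (page_size / 1024)) / 10;
        if (n < nr_file)
            n = nr_file;

        printf("default max_files = %lu\n", n);   /* prints 13107 here */
        return 0;
    }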
fs/namespace.c

@@ -1048,15 +1048,9 @@ void __init mnt_init(unsigned long mempages)
 	if (!mnt_cache)
 		panic("Cannot create vfsmount cache");
 
-	mempages >>= (16 - PAGE_SHIFT);
-	mempages *= sizeof(struct list_head);
-	for (order = 0; ((1UL << order) << PAGE_SHIFT) < mempages; order++)
-		;
-
-	do {
-		mount_hashtable = (struct list_head *)
-			__get_free_pages(GFP_ATOMIC, order);
-	} while (mount_hashtable == NULL && --order >= 0);
+	order = 0;
+	mount_hashtable = (struct list_head *)
+		__get_free_pages(GFP_ATOMIC, order);
 
 	if (!mount_hashtable)
 		panic("Failed to allocate mount hash table\n");
fs/nfs/inode.c

@@ -455,6 +455,8 @@ nfs_read_super(struct super_block *sb, void *raw_data, int silent)
 	server->namelen = maxlen;
 
 	sb->s_maxbytes = fsinfo.maxfilesize;
+	if (sb->s_maxbytes > MAX_LFS_FILESIZE)
+		sb->s_maxbytes = MAX_LFS_FILESIZE;
 
 	/* Fire up the writeback cache */
 	if (nfs_reqlist_alloc(server) < 0) {
fs/ntfs/fs.c

@@ -1130,7 +1130,7 @@ struct super_block *ntfs_read_super(struct super_block *sb, void *options,
 	/* Inform the kernel about which super operations are available. */
 	sb->s_op = &ntfs_super_operations;
 	sb->s_magic = NTFS_SUPER_MAGIC;
-	sb->s_maxbytes = ~0ULL >> 1;
+	sb->s_maxbytes = MAX_LFS_FILESIZE;
 	ntfs_debug(DEBUG_OTHER, "Reading special files\n");
 	if (ntfs_load_special_files(vol)) {
 		ntfs_error("Error loading special files\n");
fs/udf/super.c

@@ -1544,7 +1544,7 @@ udf_read_super(struct super_block *sb, void *options, int silent)
 		iput(inode);
 		goto error_out;
 	}
-	sb->s_maxbytes = ~0ULL;
+	sb->s_maxbytes = MAX_LFS_FILESIZE;
 	return sb;
 
 error_out:
include/linux/fs.h

@@ -207,6 +207,7 @@ extern void update_atime (struct inode *);
 extern void buffer_init(unsigned long);
 extern void inode_init(unsigned long);
 extern void mnt_init(unsigned long);
+extern void files_init(unsigned long);
 
 /* bh state bits */
 enum bh_state_bits {

@@ -518,6 +519,14 @@ extern int init_private_file(struct file *, struct dentry *, int);
 #define MAX_NON_LFS	((1UL<<31) - 1)
 
+/* Page cache limit. The filesystems should put that into their s_maxbytes
+   limits, otherwise bad things can happen in VM. */
+#if BITS_PER_LONG==32
+#define MAX_LFS_FILESIZE	(((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
+#elif BITS_PER_LONG==64
+#define MAX_LFS_FILESIZE	0x7fffffffffffffff
+#endif
+
 #define FL_POSIX	1
 #define FL_FLOCK	2
 #define FL_BROKEN	4	/* broken flock() emulation */

@@ -1512,8 +1521,6 @@ static inline int is_mounted(kdev_t dev)
 	}
 	return 0;
 }
 
-unsigned long generate_cluster(kdev_t, int b[], int);
-unsigned long generate_cluster_swab32(kdev_t, int b[], int);
 extern kdev_t ROOT_DEV;
 extern char root_device_name[];
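On 32-bit builds the new MAX_LFS_FILESIZE is the largest byte offset whose page-cache index still fits in an unsigned long. A quick user-space check of what that evaluates to, assuming the usual 4 KiB PAGE_CACHE_SIZE:

    /* User-space check of the 32-bit MAX_LFS_FILESIZE arithmetic above. */
    #include <stdio.h>

    int main(void)
    {
        unsigned long long page_cache_size = 4096;   /* assumed 4 KiB pages */
        int bits_per_long = 32;
        unsigned long long max_lfs =
            (page_cache_size << (bits_per_long - 1)) - 1;

        /* 4096 * 2^31 - 1 = 2^43 - 1 = 8796093022207 bytes, i.e. 8 TiB - 1 */
        printf("MAX_LFS_FILESIZE = %llu\n", max_lfs);
        return 0;
    }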
mm/bootmem.c

@@ -247,18 +247,29 @@ static unsigned long __init free_all_bootmem_core(pg_data_t *pgdat)
 	bootmem_data_t *bdata = pgdat->bdata;
 	unsigned long i, count, total = 0;
 	unsigned long idx;
+	unsigned long *map;
 
 	if (!bdata->node_bootmem_map) BUG();
 
 	count = 0;
 	idx = bdata->node_low_pfn - (bdata->node_boot_start >> PAGE_SHIFT);
-	for (i = 0; i < idx; i++, page++) {
-		if (!test_bit(i, bdata->node_bootmem_map)) {
+	map = bdata->node_bootmem_map;
+	for (i = 0; i < idx; ) {
+		unsigned long v = ~map[i / BITS_PER_LONG];
+		if (v) {
+			unsigned long m;
+			for (m = 1; m && i < idx; m <<= 1, page++, i++) {
+				if (v & m) {
 					count++;
 					ClearPageReserved(page);
 					set_page_count(page, 1);
 					__free_page(page);
 				}
+			}
+		} else {
+			i += BITS_PER_LONG;
+			page += BITS_PER_LONG;
+		}
 	}
 	total += count;
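The rewritten loop scans the bootmem bitmap a word at a time: it complements a whole word, skips words in which every page is still reserved, and only walks bit by bit through words that contain at least one free page. A user-space sketch of the same scan over an illustrative bitmap:

    /* User-space sketch of the word-at-a-time bitmap scan used above. */
    #include <stdio.h>

    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    int main(void)
    {
        /* bit set = page reserved, bit clear = page may be freed */
        unsigned long map[] = { ~0UL, ~0xF0UL, ~0UL };   /* 4 free pages */
        unsigned long idx = sizeof(map) / sizeof(map[0]) * BITS_PER_LONG;
        unsigned long i = 0, count = 0;

        while (i < idx) {
            unsigned long v = ~map[i / BITS_PER_LONG];
            if (v) {
                unsigned long m;
                /* walk this word bit by bit, as the inner loop above does */
                for (m = 1; m && i < idx; m <<= 1, i++)
                    if (v & m)
                        count++;             /* kernel would __free_page() here */
            } else {
                i += BITS_PER_LONG;          /* whole word reserved: skip it */
            }
        }
        printf("pages freed: %lu\n", count); /* prints 4 for the map above */
        return 0;
    }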
mm/slab.c

@@ -186,8 +186,6 @@ typedef struct cpucache_s {
  * manages a cache.
  */
 
-#define CACHE_NAMELEN	20	/* max name length for a slab cache */
-
 struct kmem_cache_s {
 /* 1) each alloc & free */
 	/* full, partial first, then free */

@@ -225,7 +223,7 @@ struct kmem_cache_s {
 	unsigned long		failures;
 
 /* 3) cache creation/removal */
-	char			name[CACHE_NAMELEN];
+	const char		*name;
 	struct list_head	next;
 #ifdef CONFIG_SMP
 /* 4) per-cpu data */

@@ -335,6 +333,7 @@ typedef struct cache_sizes {
 	kmem_cache_t	*cs_dmacachep;
 } cache_sizes_t;
 
+/* These are the default caches for kmalloc. Custom caches can have other sizes. */
 static cache_sizes_t cache_sizes[] = {
 #if PAGE_SIZE == 4096
 	{    32,	NULL, NULL},

@@ -353,6 +352,29 @@ static cache_sizes_t cache_sizes[] = {
 	{131072,	NULL, NULL},
 	{     0,	NULL, NULL}
 };
+/* Must match cache_sizes above. Out of line to keep cache footprint low. */
+#define CN(x) { x, x " (DMA)" }
+static struct {
+	char *name;
+	char *name_dma;
+} cache_names[] = {
+#if PAGE_SIZE == 4096
+	CN("size-32"),
+#endif
+	CN("size-64"),
+	CN("size-128"),
+	CN("size-256"),
+	CN("size-512"),
+	CN("size-1024"),
+	CN("size-2048"),
+	CN("size-4096"),
+	CN("size-8192"),
+	CN("size-16384"),
+	CN("size-32768"),
+	CN("size-65536"),
+	CN("size-131072")
+};
+#undef CN
 
 /* internal cache of cache description objs */
 static kmem_cache_t cache_cache = {

@@ -437,7 +459,6 @@ void __init kmem_cache_init(void)
 void __init kmem_cache_sizes_init(void)
 {
 	cache_sizes_t *sizes = cache_sizes;
-	char name[20];
 	/*
 	 * Fragmentation resistance on low memory - only use bigger
 	 * page orders on machines with more than 32MB of memory.

@@ -450,9 +471,9 @@ void __init kmem_cache_sizes_init(void)
 		 * eliminates "false sharing".
 		 * Note for systems short on memory removing the alignment will
 		 * allow tighter packing of the smaller caches. */
-		sprintf(name, "size-%Zd", sizes->cs_size);
 		if (!(sizes->cs_cachep =
-			kmem_cache_create(name, sizes->cs_size,
+			kmem_cache_create(cache_names[sizes-cache_sizes].name,
+				sizes->cs_size,
 				0, SLAB_HWCACHE_ALIGN, NULL, NULL))) {
 			BUG();
 		}

@@ -462,8 +483,9 @@ void __init kmem_cache_sizes_init(void)
 			offslab_limit = sizes->cs_size-sizeof(slab_t);
 			offslab_limit /= 2;
 		}
-		sprintf(name, "size-%Zd(DMA)", sizes->cs_size);
-		sizes->cs_dmacachep = kmem_cache_create(name, sizes->cs_size, 0,
+		sizes->cs_dmacachep = kmem_cache_create(
+			cache_names[sizes-cache_sizes].name_dma, sizes->cs_size, 0,
 			SLAB_CACHE_DMA|SLAB_HWCACHE_ALIGN, NULL, NULL);
 		if (!sizes->cs_dmacachep)
 			BUG();

@@ -604,6 +626,11 @@ static void kmem_slab_destroy (kmem_cache_t *cachep, slab_t *slabp)
 * Cannot be called within a int, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache
 * and the @dtor is run before the pages are handed back.
+*
+* @name must be valid until the cache is destroyed. This implies that
+* the module calling this has to destroy the cache before getting
+* unloaded.
+*
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)

@@ -632,7 +659,6 @@ kmem_cache_create (const char *name, size_t size, size_t offset,
 	 * Sanity checks... these are all serious usage bugs.
 	 */
 	if ((!name) ||
-		((strlen(name) >= CACHE_NAMELEN - 1)) ||
 		in_interrupt() ||
 		(size < BYTES_PER_WORD) ||
 		(size > (1<<MAX_OBJ_ORDER)*PAGE_SIZE) ||

@@ -797,8 +823,7 @@ kmem_cache_create (const char *name, size_t size, size_t offset,
 	cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0);
 	cachep->ctor = ctor;
 	cachep->dtor = dtor;
-	/* Copy name over so we don't have problems with unloaded modules */
-	strcpy(cachep->name, name);
+	cachep->name = name;
 
 #ifdef CONFIG_SMP
 	if (g_cpucache_up)

@@ -811,12 +836,22 @@ kmem_cache_create (const char *name, size_t size, size_t offset,
 	list_for_each(p, &cache_chain) {
 		kmem_cache_t *pc = list_entry(p, kmem_cache_t, next);
+		char tmp;
 
-		/* The name field is constant - no lock needed. */
-		if (!strcmp(pc->name, name))
+		/* This happens when the module gets unloaded and doesn't
+		   destroy its slab cache and noone else reuses the vmalloc
+		   area of the module. Print a warning. */
+		if (__get_user(tmp, pc->name)) {
+			printk("SLAB: cache with size %d has lost its name\n",
+				pc->objsize);
+			continue;
+		}
+
+		if (!strcmp(pc->name, name)) {
 			printk("kmem_cache_create: duplicate cache %s\n", name);
+			up(&cache_chain_sem);
 			BUG();
+		}
 	}
 }
 
 /* There is no reason to lock our new cache before we
  * link it in - no one knows about it yet...

@@ -1878,6 +1913,7 @@ static int proc_getdata (char*page, char**start, off_t off, int count)
 		unsigned long num_objs;
 		unsigned long active_slabs = 0;
 		unsigned long num_slabs;
+		const char *name;
 		cachep = list_entry(p, kmem_cache_t, next);
 
 		spin_lock_irq(&cachep->spinlock);

@@ -1906,8 +1942,15 @@ static int proc_getdata (char*page, char**start, off_t off, int count)
 		num_slabs += active_slabs;
 		num_objs = num_slabs*cachep->num;
 
+		name = cachep->name;
+		{
+		char tmp;
+		if (__get_user(tmp, name))
+			name = "broken";
+		}
+
 		len += sprintf(page+len, "%-17s %6lu %6lu %6u %4lu %4lu %4u",
-			cachep->name, active_objs, num_objs, cachep->objsize,
+			name, active_objs, num_objs, cachep->objsize,
 			active_slabs, num_slabs, (1<<cachep->gfporder));
 #if STATS
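The slab.c changes shift name ownership to the caller: kmem_cache_create() now stores the caller's pointer instead of strcpy()ing into a fixed CACHE_NAMELEN buffer, which drops the length limit but requires the string to outlive the cache (hence the new doc comment, and the __get_user() probe that spots names lost when a module is unloaded without destroying its cache). A user-space sketch of just the ownership change, with illustrative types; the unmapped-pointer probe itself is kernel-only and not reproduced here:

    /* User-space sketch of the name-ownership change in mm/slab.c above. */
    #include <stdio.h>

    struct cache {
        const char *name;   /* was: char name[20]; filled in with strcpy() */
    };

    static void cache_create(struct cache *c, const char *name)
    {
        c->name = name;     /* no length limit, but also no copy: the string
                               must stay valid until the cache is destroyed */
    }

    int main(void)
    {
        static const char cache_name[] = "biovec-256";  /* static: outlives the cache */
        struct cache c;

        cache_create(&c, cache_name);
        printf("cache name: %s\n", c.name);
        return 0;
    }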