Commit 2a9162d3
authored Mar 25, 2003 by lenz@mysql.com

Merge lgrimmer@bk-internal.mysql.com:/home/bk/mysql-4.1
into mysql.com:/suse80/my/mysql-4.1

Parents: e93fc8de, 1c5dd1f5

Showing 9 changed files with 528 additions and 468 deletions
include/my_sys.h                +3    -3
myisam/mi_test2.c               +5    -5
myisam/sp_test.c                +1    -1
mysys/mf_keycache.c             +494  -443
sql/mysqld.cc                   +7    -7
sql/share/charsets/Index.xml    +12   -3
sql/share/charsets/latin7.xml   +3    -3
sql/sql_test.cc                 +2    -2
strings/ctype-extra.c           +1    -1
include/my_sys.h

@@ -219,9 +219,9 @@ extern my_bool my_charset_same(CHARSET_INFO *cs1, CHARSET_INFO *cs2);
 
   /* statistics */
-extern ulong _my_cache_w_requests, _my_cache_write, _my_cache_r_requests,
-             _my_cache_read;
-extern ulong _my_blocks_used, _my_blocks_changed;
+extern ulong my_cache_w_requests, my_cache_write, my_cache_r_requests,
+             my_cache_read;
+extern ulong my_blocks_used, my_blocks_changed;
 extern uint key_cache_block_size;
 extern ulong my_file_opened, my_stream_opened, my_tmp_file_created;
 extern my_bool key_cache_inited, my_init_done;
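All of the C changes in this merge flow from this header: the exported key cache statistics lose their leading underscore. A likely reason (not stated in the commit itself) is that C reserves file-scope identifiers beginning with an underscore for the implementation, so the unprefixed spellings are the portable choice:

    /* Illustration only, not code from this commit. */
    unsigned long my_cache_read;    /* ordinary identifier: fine                 */
    unsigned long _my_cache_read;   /* leading '_' at file scope: reserved
                                       for the C implementation (C99 7.1.3)      */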
myisam/mi_test2.c

@@ -646,13 +646,13 @@ int main(int argc, char *argv[])
          (long) range_records > (long) records*14/10+2)
       {
        printf("mi_records_range for key: %d returned %ld; Should be about %ld\n",
-              i, range_records, records);
+              i, (long) range_records, (long) records);
        goto end;
       }
       if (verbose && records)
       {
        printf("mi_records_range returned %ld; Exact is %ld (diff: %4.2g %%)\n",
-              range_records, records,
+              (long) range_records, (long) records,
               labs((long) range_records - (long) records)*100.0/records);
       }

@@ -667,7 +667,7 @@ int main(int argc, char *argv[])
   {
     puts("Wrong info from mi_info");
     printf("Got: records: %ld  delete: %ld  i_keys: %d\n",
-          info.records, info.deleted, info.keys);
+          (long) info.records, (long) info.deleted, info.keys);
   }
   if (verbose)
   {

@@ -822,8 +822,8 @@ w_requests: %10lu\n\
 writes:     %10lu\n\
 r_requests: %10lu\n\
 reads:      %10lu\n",
-        _my_blocks_used, _my_cache_w_requests, _my_cache_write,
-        _my_cache_r_requests, _my_cache_read);
+        my_blocks_used, my_cache_w_requests, my_cache_write,
+        my_cache_r_requests, my_cache_read);
   }
   end_key_cache();
   if (blob_buffer)
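The test changes share one pattern: counts coming back as ha_rows / my_off_t are cast to (long) before being handed to a %ld conversion. A minimal sketch of why the cast matters (the wide type below is only a stand-in for ha_rows):

    #include <stdio.h>

    int main(void)
    {
      /* stand-in for ha_rows, which may be wider than long */
      unsigned long long range_records= 123456;

      /* Passing a 64-bit value through varargs to %ld is undefined when
         long is 32 bits; casting first keeps the printf call well defined. */
      printf("mi_records_range returned %ld\n", (long) range_records);
      return 0;
    }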
myisam/sp_test.c

@@ -272,7 +272,7 @@ int run_test(const char *filename)
   create_key(key, nrecords*upd);
   print_key(key, "  INTERSECT\n");
   hrows= mi_records_in_range(file, 0, key, 0, HA_READ_MBR_INTERSECT,
                              record+1, 0, 0);
-  printf("     %ld rows\n", hrows);
+  printf("     %ld rows\n", (long) hrows);
 
   if (mi_close(file)) goto err;
mysys/mf_keycache.c

@@ -84,19 +84,23 @@
 #define COND_FOR_READERS 2
 
 typedef pthread_cond_t KEYCACHE_CONDVAR;
 
+/* info about requests in a waiting queue */
 typedef struct st_keycache_wqueue
-{                        /* info about requests in a waiting queue */
+{
   struct st_my_thread_var *last_thread;  /* circular list of waiting threads */
 } KEYCACHE_WQUEUE;
 
+/* descriptor of the page in the key cache block buffer */
 typedef struct st_keycache_page
-{                        /* descriptor of the page in the key cache block buffer */
+{
   int file;              /* file to which the page belongs to */
   my_off_t filepos;      /* position of the page in the file  */
 } KEYCACHE_PAGE;
 
+/* element in the chain of a hash table bucket */
 typedef struct st_hash_link
-{                        /* element in the chain of a hash table bucket */
+{
   struct st_hash_link *next, **prev;  /* to connect links in the same bucket  */
   struct st_block_link *block;        /* reference to the block for the page: */
   File file;                          /* from such a file                     */

@@ -117,8 +121,9 @@ typedef struct st_hash_link
 #define PAGE_TO_BE_READ       1
 #define PAGE_WAIT_TO_BE_READ  2
 
+/* key cache block */
 typedef struct st_block_link
-{                        /* key cache block */
+{
   struct st_block_link *next_used, **prev_used; /* to connect links in the LRU chain (ring) */
   struct st_block_link

@@ -148,35 +153,31 @@ static KEYCACHE_WQUEUE
 static KEYCACHE_WQUEUE waiting_for_block; /* queue of requests waiting for a free block */
-static HASH_LINK **_my_hash_root;       /* arr. of entries into hash table buckets */
-static uint _my_hash_entries;           /* max number of entries in the hash table */
-static HASH_LINK *_my_hash_link_root;   /* memory for hash table links             */
-static int _my_hash_links;              /* max number of hash links                */
-static int _my_hash_links_used;         /* number of hash links currently used     */
-static HASH_LINK *_my_free_hash_list;   /* list of free hash links                 */
-static BLOCK_LINK *_my_block_root;      /* memory for block links                  */
-static int _my_disk_blocks;             /* max number of blocks in the cache       */
-static byte HUGE_PTR *_my_block_mem;    /* memory for block buffers                */
-static BLOCK_LINK *_my_used_last;       /* ptr to the last block of the LRU chain  */
-ulong _my_blocks_used,                  /* number of currently used blocks         */
-      _my_blocks_changed;               /* number of currently dirty blocks        */
+static HASH_LINK **my_hash_root;        /* arr. of entries into hash table buckets */
+static uint my_hash_entries;            /* max number of entries in the hash table */
+static HASH_LINK *my_hash_link_root;    /* memory for hash table links             */
+static int my_hash_links;               /* max number of hash links                */
+static int my_hash_links_used;          /* number of hash links currently used     */
+static HASH_LINK *my_free_hash_list;    /* list of free hash links                 */
+static BLOCK_LINK *my_block_root;       /* memory for block links                  */
+static int my_disk_blocks;              /* max number of blocks in the cache       */
+static byte HUGE_PTR *my_block_mem;     /* memory for block buffers                */
+static BLOCK_LINK *my_used_last;        /* ptr to the last block of the LRU chain  */
+ulong my_blocks_used,                   /* number of currently used blocks         */
+      my_blocks_changed;                /* number of currently dirty blocks        */
 #if defined(KEYCACHE_DEBUG)
-static ulong _my_blocks_available;      /* number of blocks available in the LRU chain */
+ulong my_blocks_available;              /* number of blocks available in the LRU chain */
 #endif /* defined(KEYCACHE_DEBUG) */
-ulong _my_cache_w_requests, _my_cache_write,   /* counters       */
-      _my_cache_r_requests, _my_cache_read;    /* for statistics */
+ulong my_cache_w_requests, my_cache_write,     /* counters       */
+      my_cache_r_requests, my_cache_read;      /* for statistics */
 static BLOCK_LINK *changed_blocks[CHANGED_BLOCKS_HASH]; /* hash table for file dirty blocks */
 static BLOCK_LINK *file_blocks[CHANGED_BLOCKS_HASH];    /* hash table for other file blocks */
                                                         /* that are not free                */
 #ifndef DBUG_OFF
 static my_bool _my_printed;
 #endif
 
 #define KEYCACHE_HASH(f, pos) \
-(((ulong) ((pos) >> key_cache_shift)+(ulong) (f)) & (_my_hash_entries-1))
+(((ulong) ((pos) >> key_cache_shift)+(ulong) (f)) & (my_hash_entries-1))
 #define FILE_HASH(f) ((uint) (f) & (CHANGED_BLOCKS_HASH-1))
 
 #define DEFAULT_KEYCACHE_DEBUG_LOG  "keycache_debug.log"

@@ -230,9 +231,9 @@ static long keycache_thread_id;
 #endif /* defined(KEYCACHE_DEBUG) || !defined(DBUG_OFF) */
 
 #define BLOCK_NUMBER(b) \
-((uint) (((char*)(b) - (char *) _my_block_root) / sizeof(BLOCK_LINK)))
+((uint) (((char*)(b) - (char *) my_block_root) / sizeof(BLOCK_LINK)))
 #define HASH_LINK_NUMBER(h) \
-((uint) (((char*)(h) - (char *) _my_hash_link_root) / sizeof(HASH_LINK)))
+((uint) (((char*)(h) - (char *) my_hash_link_root) / sizeof(HASH_LINK)))
 
 #if (defined(KEYCACHE_TIMEOUT) && !defined(__WIN__)) || defined(KEYCACHE_DEBUG)
 static int keycache_pthread_cond_wait(pthread_cond_t *cond,
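KEYCACHE_HASH itself only loses the underscore; it still depends on my_hash_entries being a power of two (init_key_cache() rounds it up via next_power()), which is what makes the & (my_hash_entries-1) mask a cheap modulo. A stand-alone sketch of that idea, with illustrative names and values:

    #include <stdio.h>

    /* Toy version of KEYCACHE_HASH: 'entries' plays the role of
       my_hash_entries and must be a power of two for the mask to
       cover 0..entries-1. */
    static unsigned long toy_keycache_hash(unsigned long pos, int file,
                                           unsigned int shift,
                                           unsigned long entries)
    {
      return ((pos >> shift) + (unsigned long) file) & (entries - 1);
    }

    int main(void)
    {
      printf("%lu\n", toy_keycache_hash(4096 * 17, 3, 12, 512));
      return 0;
    }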
@@ -269,6 +270,7 @@ static uint next_power(uint value)
   Initialize the key cache,
   return number of blocks in it
 */
 int init_key_cache(ulong use_mem)
 {
   uint blocks, hash_links, length;

@@ -277,7 +279,7 @@ int init_key_cache(ulong use_mem)
   DBUG_ENTER("init_key_cache");
 
   KEYCACHE_DEBUG_OPEN;
-  if (key_cache_inited && _my_disk_blocks > 0)
+  if (key_cache_inited && my_disk_blocks > 0)
   {
     DBUG_PRINT("warning",("key cache already in use"));
     DBUG_RETURN(0);

@@ -285,51 +287,48 @@ int init_key_cache(ulong use_mem)
   if (! key_cache_inited)
   {
     key_cache_inited= TRUE;
-    _my_disk_blocks= -1;
+    my_disk_blocks= -1;
     key_cache_shift= my_bit_log2(key_cache_block_size);
     DBUG_PRINT("info",("key_cache_block_size: %u", key_cache_block_size));
 #ifndef DBUG_OFF
     _my_printed= 0;
 #endif
   }
 
-  _my_cache_w_requests= _my_cache_r_requests= _my_cache_read= _my_cache_write= 0;
+  my_cache_w_requests= my_cache_r_requests= my_cache_read= my_cache_write= 0;
 
-  _my_block_mem= NULL;
-  _my_block_root= NULL;
+  my_block_mem= NULL;
+  my_block_root= NULL;
 
   blocks= (uint) (use_mem/(sizeof(BLOCK_LINK)+2*sizeof(HASH_LINK)+
                            sizeof(HASH_LINK*)*5/4+key_cache_block_size));
   /* It doesn't make sense to have too few blocks (less than 8) */
-  if (blocks >= 8 && _my_disk_blocks < 0)
+  if (blocks >= 8 && my_disk_blocks < 0)
   {
     for (;;)
     {
-      /* Set _my_hash_entries to the next bigger 2 power */
-      if ((_my_hash_entries= next_power(blocks)) < blocks*5/4)
-        _my_hash_entries<<= 1;
+      /* Set my_hash_entries to the next bigger 2 power */
+      if ((my_hash_entries= next_power(blocks)) < blocks*5/4)
+        my_hash_entries<<= 1;
       hash_links= 2*blocks;
 #if defined(MAX_THREADS)
       if (hash_links < MAX_THREADS + blocks - 1)
        hash_links= MAX_THREADS + blocks - 1;
 #endif
-      while ((length= blocks*sizeof(BLOCK_LINK)+hash_links*sizeof(HASH_LINK)+
-                      sizeof(HASH_LINK*)*_my_hash_entries)+
-             ((ulong) blocks << key_cache_shift) > use_mem)
+      while ((length= (ALIGN_SIZE(blocks*sizeof(BLOCK_LINK))+
+                       ALIGN_SIZE(hash_links*sizeof(HASH_LINK))+
+                       ALIGN_SIZE(sizeof(HASH_LINK*)*my_hash_entries)))+
+             ((ulong) blocks << key_cache_shift) > use_mem)
        blocks--;
      /* Allocate memory for cache page buffers */
-      if ((_my_block_mem= my_malloc_lock((ulong) blocks*key_cache_block_size,
-                                         MYF(0))))
+      if ((my_block_mem= my_malloc_lock((ulong) blocks*key_cache_block_size,
+                                        MYF(0))))
      {
        /*
          Allocate memory for blocks, hash_links and hash entries;
          For each block 2 hash links are allocated
        */
-        if ((_my_block_root= (BLOCK_LINK*) my_malloc((uint) length, MYF(0))))
+        if ((my_block_root= (BLOCK_LINK*) my_malloc((uint) length, MYF(0))))
          break;
-        my_free_lock(_my_block_mem, MYF(0));
+        my_free_lock(my_block_mem, MYF(0));
      }
      if (blocks < 8)
      {

@@ -338,29 +337,32 @@ int init_key_cache(ulong use_mem)
     }
     blocks= blocks / 4*3;
   }
-  _my_disk_blocks= (int) blocks;
-  _my_hash_links= hash_links;
-  _my_hash_root= (HASH_LINK**) (_my_block_root+blocks);
-  _my_hash_link_root= (HASH_LINK*) (_my_hash_root+_my_hash_entries);
-  bzero((byte*) _my_block_root, _my_disk_blocks*sizeof(BLOCK_LINK));
-  bzero((byte*) _my_hash_root, _my_hash_entries*sizeof(HASH_LINK*));
-  bzero((byte*) _my_hash_link_root, _my_hash_links*sizeof(HASH_LINK));
-  _my_hash_links_used= 0;
-  _my_free_hash_list= NULL;
-  _my_blocks_used= _my_blocks_changed= 0;
+  my_disk_blocks= (int) blocks;
+  my_hash_links= hash_links;
+  my_hash_root= (HASH_LINK**) ((char*) my_block_root +
+                               ALIGN_SIZE(blocks*sizeof(BLOCK_LINK)));
+  my_hash_link_root= (HASH_LINK*) ((char*) my_hash_root +
+                                   ALIGN_SIZE((sizeof(HASH_LINK*) *
+                                               my_hash_entries)));
+  bzero((byte*) my_block_root, my_disk_blocks*sizeof(BLOCK_LINK));
+  bzero((byte*) my_hash_root, my_hash_entries*sizeof(HASH_LINK*));
+  bzero((byte*) my_hash_link_root, my_hash_links*sizeof(HASH_LINK));
+  my_hash_links_used= 0;
+  my_free_hash_list= NULL;
+  my_blocks_used= my_blocks_changed= 0;
 #if defined(KEYCACHE_DEBUG)
-  _my_blocks_available= 0;
+  my_blocks_available= 0;
 #endif
   /* The LRU chain is empty after initialization */
-  _my_used_last= NULL;
+  my_used_last= NULL;
 
   waiting_for_hash_link.last_thread= NULL;
   waiting_for_block.last_thread= NULL;
   DBUG_PRINT("exit",
      ("disk_blocks: %d  block_root: %lx  hash_entries: %d  hash_root: %lx \
 hash_links: %d hash_link_root %lx",
-      _my_disk_blocks, _my_block_root, _my_hash_entries,
-      _my_hash_root, _my_hash_links, _my_hash_link_root));
+      my_disk_blocks, my_block_root, my_hash_entries,
+      my_hash_root, my_hash_links, my_hash_link_root));
   }
   bzero((gptr) changed_blocks, sizeof(changed_blocks[0])*CHANGED_BLOCKS_HASH);
   bzero((gptr) file_blocks, sizeof(file_blocks[0])*CHANGED_BLOCKS_HASH);
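Besides the rename, init_key_cache() now sizes and carves its single allocation with ALIGN_SIZE(), so my_hash_root and my_hash_link_root start at aligned byte offsets instead of being derived by plain pointer arithmetic on the preceding arrays. A self-contained sketch of that carving pattern (ALIGN_SIZE here is a local stand-in for the MySQL macro, and the structs are dummies):

    #include <stdlib.h>
    #include <string.h>

    #define ALIGN_TO 8
    #define ALIGN_SIZE(n) (((n) + ALIGN_TO - 1) & ~(size_t)(ALIGN_TO - 1))

    typedef struct { void *p; int n; } BLOCK_LINK;   /* dummy */
    typedef struct { void *q; }        HASH_LINK;    /* dummy */

    int main(void)
    {
      size_t blocks= 64, hash_links= 128, hash_entries= 128;
      size_t length= ALIGN_SIZE(blocks*sizeof(BLOCK_LINK)) +
                     ALIGN_SIZE(hash_links*sizeof(HASH_LINK)) +
                     ALIGN_SIZE(sizeof(HASH_LINK*)*hash_entries);
      char *base= malloc(length);
      if (!base)
        return 1;

      /* Each array begins on an aligned offset inside one allocation,
         mirroring my_block_root / my_hash_root / my_hash_link_root. */
      BLOCK_LINK *block_root= (BLOCK_LINK*) base;
      HASH_LINK **hash_root= (HASH_LINK**)
        (base + ALIGN_SIZE(blocks*sizeof(BLOCK_LINK)));
      HASH_LINK *hash_link_root= (HASH_LINK*)
        ((char*) hash_root + ALIGN_SIZE(sizeof(HASH_LINK*)*hash_entries));

      memset(block_root, 0, blocks*sizeof(BLOCK_LINK));
      memset(hash_root, 0, hash_entries*sizeof(HASH_LINK*));
      memset(hash_link_root, 0, hash_links*sizeof(HASH_LINK));

      free(base);
      return 0;
    }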
@@ -369,10 +371,10 @@ int init_key_cache(ulong use_mem)
 err:
   error= my_errno;
-  if (_my_block_mem)
-    my_free_lock((gptr) _my_block_mem, MYF(0));
-  if (_my_block_mem)
-    my_free((gptr) _my_block_root, MYF(0));
+  if (my_block_mem)
+    my_free_lock((gptr) my_block_mem, MYF(0));
+  if (my_block_mem)
+    my_free((gptr) my_block_root, MYF(0));
   my_errno= error;
   DBUG_RETURN(0);
 }

@@ -402,25 +404,26 @@ int resize_key_cache(ulong use_mem)
 /*
   Remove key_cache from memory
 */
 void end_key_cache(void)
 {
   DBUG_ENTER("end_key_cache");
-  if (_my_disk_blocks > 0)
+  if (my_disk_blocks > 0)
   {
-    if (_my_block_mem)
+    if (my_block_mem)
     {
-      my_free_lock((gptr) _my_block_mem, MYF(0));
-      my_free((gptr) _my_block_root, MYF(0));
+      my_free_lock((gptr) my_block_mem, MYF(0));
+      my_free((gptr) my_block_root, MYF(0));
     }
-    _my_disk_blocks= -1;
+    my_disk_blocks= -1;
   }
   KEYCACHE_DEBUG_CLOSE;
   key_cache_inited= 0;
   DBUG_PRINT("status",
      ("used: %d  changed: %d  w_requests: %ld  \
 writes: %ld  r_requests: %ld  reads: %ld",
-      _my_blocks_used, _my_blocks_changed, _my_cache_w_requests,
-      _my_cache_write, _my_cache_r_requests, _my_cache_read));
+      my_blocks_used, my_blocks_changed, my_cache_w_requests,
+      my_cache_write, my_cache_r_requests, my_cache_read));
   DBUG_VOID_RETURN;
 } /* end_key_cache */

@@ -428,12 +431,14 @@ void end_key_cache(void)
 /*
   Link a thread into double-linked queue of waiting threads
 */
 static inline void link_into_queue(KEYCACHE_WQUEUE *wqueue,
                                    struct st_my_thread_var *thread)
 {
   struct st_my_thread_var *last;
   if (! (last= wqueue->last_thread))
-  {                        /* Queue is empty */
+  {
+    /* Queue is empty */
     thread->next= thread;
     thread->prev= &thread->next;
   }

@@ -450,6 +455,7 @@ static inline void link_into_queue(KEYCACHE_WQUEUE *wqueue,
 /*
   Unlink a thread from double-linked queue of waiting threads
 */
 static inline void unlink_from_queue(KEYCACHE_WQUEUE *wqueue,
                                      struct st_my_thread_var *thread)
 {

@@ -472,6 +478,7 @@ static inline void unlink_from_queue(KEYCACHE_WQUEUE *wqueue,
 /*
   Add a thread to single-linked queue of waiting threads
 */
 static inline void add_to_queue(KEYCACHE_WQUEUE *wqueue,
                                 struct st_my_thread_var *thread)
 {

@@ -490,7 +497,8 @@ static inline void add_to_queue(KEYCACHE_WQUEUE *wqueue,
 /*
   Remove all threads from queue signaling them to proceed
 */
-static inline void release_queue(KEYCACHE_WQUEUE *wqueue)
+static void release_queue(KEYCACHE_WQUEUE *wqueue)
 {
   struct st_my_thread_var *last= wqueue->last_thread;
   struct st_my_thread_var *next= last->next;

@@ -511,6 +519,7 @@ static inline void release_queue(KEYCACHE_WQUEUE *wqueue)
 /*
   Unlink a block from the chain of dirty/clean blocks
 */
 static inline void unlink_changed(BLOCK_LINK *block)
 {
   if (block->next_changed)

@@ -522,6 +531,7 @@ static inline void unlink_changed(BLOCK_LINK *block)
 /*
   Link a block into the chain of dirty/clean blocks
 */
 static inline void link_changed(BLOCK_LINK *block, BLOCK_LINK **phead)
 {
   block->prev_changed= phead;

@@ -535,7 +545,8 @@ static inline void link_changed(BLOCK_LINK *block, BLOCK_LINK **phead)
   Unlink a block from the chain of dirty/clean blocks, if it's asked for,
   and link it to the chain of clean blocks for the specified file
 */
-static inline void link_to_file_list(BLOCK_LINK *block, int file,
+static void link_to_file_list(BLOCK_LINK *block, int file,
                               my_bool unlink)
 {
   if (unlink)

@@ -544,7 +555,7 @@ static inline void link_to_file_list(BLOCK_LINK *block,int file,
   if (block->status & BLOCK_CHANGED)
   {
     block->status&= ~BLOCK_CHANGED;
-    _my_blocks_changed--;
+    my_blocks_changed--;
   }
 }

@@ -553,18 +564,20 @@ static inline void link_to_file_list(BLOCK_LINK *block,int file,
   Unlink a block from the chain of clean blocks for the specified
   file and link it to the chain of dirty blocks for this file
 */
 static inline void link_to_changed_list(BLOCK_LINK *block)
 {
   unlink_changed(block);
   link_changed(block, &changed_blocks[FILE_HASH(block->hash_link->file)]);
   block->status|= BLOCK_CHANGED;
-  _my_blocks_changed++;
+  my_blocks_changed++;
 }
 
 /*
   Link a block to the LRU chain at the beginning or at the end
 */
 static void link_block(BLOCK_LINK *block, my_bool at_end)
 {
   KEYCACHE_DBUG_ASSERT(! (block->hash_link && block->hash_link->requests));

@@ -597,33 +610,33 @@ static void link_block(BLOCK_LINK *block, my_bool at_end)
     KEYCACHE_DBUG_PRINT("link_block",
        ("linked,unlinked block %u  status=%x  #requests=%u  #available=%u",
         BLOCK_NUMBER(block), block->status,
-        block->requests, _my_blocks_available));
+        block->requests, my_blocks_available));
 #endif
     return;
   }
-  if (_my_used_last)
+  if (my_used_last)
   {
-    _my_used_last->next_used->prev_used= &block->next_used;
-    block->next_used= _my_used_last->next_used;
-    block->prev_used= &_my_used_last->next_used;
-    _my_used_last->next_used= block;
+    my_used_last->next_used->prev_used= &block->next_used;
+    block->next_used= my_used_last->next_used;
+    block->prev_used= &my_used_last->next_used;
+    my_used_last->next_used= block;
     if (at_end)
-      _my_used_last= block;
+      my_used_last= block;
   }
   else
   {
     /* The LRU chain is empty */
-    _my_used_last= block->next_used= block;
+    my_used_last= block->next_used= block;
     block->prev_used= &block->next_used;
   }
   KEYCACHE_THREAD_TRACE("link_block");
 #if defined(KEYCACHE_DEBUG)
-  _my_blocks_available++;
+  my_blocks_available++;
   KEYCACHE_DBUG_PRINT("link_block",
      ("linked block %u:%1u  status=%x  #requests=%u  #available=%u",
       BLOCK_NUMBER(block), at_end, block->status,
-      block->requests, _my_blocks_available));
-  KEYCACHE_DBUG_ASSERT(_my_blocks_available <= _my_blocks_used);
+      block->requests, my_blocks_available));
+  KEYCACHE_DBUG_ASSERT(my_blocks_available <= my_blocks_used);
 #endif
 }
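link_block() above keeps the circular LRU bookkeeping intact and only renames my_used_last / my_blocks_available. Because the **prev_used pointer-to-pointer idiom is easy to misread, here is a toy, self-contained version of the same insertion logic (names are illustrative; in the real code find_key_block() takes its reuse candidate from my_used_last->next_used, so at_end decides which end of the ring a block lands on):

    #include <stdio.h>

    /* Toy LRU ring: next_used is circular, prev_used points at the
       predecessor's next_used field, used_last marks the tail. */
    typedef struct toy_block
    {
      struct toy_block *next_used, **prev_used;
      int id;
    } TOY_BLOCK;

    static TOY_BLOCK *used_last;

    static void toy_link_block(TOY_BLOCK *block, int at_end)
    {
      if (used_last)
      {
        used_last->next_used->prev_used= &block->next_used;
        block->next_used= used_last->next_used;
        block->prev_used= &used_last->next_used;
        used_last->next_used= block;
        if (at_end)
          used_last= block;
      }
      else
      {
        /* The ring is empty: the block becomes its own successor */
        used_last= block->next_used= block;
        block->prev_used= &block->next_used;
      }
    }

    int main(void)
    {
      TOY_BLOCK a= {0,0,1}, b= {0,0,2}, c= {0,0,3};
      toy_link_block(&a, 1);
      toy_link_block(&b, 1);
      toy_link_block(&c, 0);      /* goes in right after used_last */
      TOY_BLOCK *p= used_last;
      do
      {
        p= p->next_used;
        printf("block %d\n", p->id);
      } while (p != used_last);
      return 0;
    }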
@@ -631,28 +644,29 @@ static void link_block(BLOCK_LINK *block, my_bool at_end)
 /*
   Unlink a block from the LRU chain
 */
-static inline void unlink_block(BLOCK_LINK *block)
+static void unlink_block(BLOCK_LINK *block)
 {
   if (block->next_used == block)
     /* The list contains only one member */
-    _my_used_last= NULL;
+    my_used_last= NULL;
   else
   {
     block->next_used->prev_used= block->prev_used;
     *block->prev_used= block->next_used;
-    if (_my_used_last == block)
-      _my_used_last= STRUCT_PTR(BLOCK_LINK, next_used, block->prev_used);
+    if (my_used_last == block)
+      my_used_last= STRUCT_PTR(BLOCK_LINK, next_used, block->prev_used);
   }
   block->next_used= NULL;
 
   KEYCACHE_THREAD_TRACE("unlink_block");
 #if defined(KEYCACHE_DEBUG)
-  _my_blocks_available--;
+  my_blocks_available--;
   KEYCACHE_DBUG_PRINT("unlink_block",
      ("unlinked block %u  status=%x  #requests=%u  #available=%u",
       BLOCK_NUMBER(block), block->status,
-      block->requests, _my_blocks_available));
-  KEYCACHE_DBUG_ASSERT(_my_blocks_available >= 0);
+      block->requests, my_blocks_available));
+  KEYCACHE_DBUG_ASSERT(my_blocks_available >= 0);
 #endif
 }

@@ -660,7 +674,7 @@ static inline void unlink_block(BLOCK_LINK *block)
 /*
   Register requests for a block
 */
-static inline void reg_requests(BLOCK_LINK *block, int count)
+static void reg_requests(BLOCK_LINK *block, int count)
 {
   if (! block->requests)
     /* First request for the block unlinks it */

@@ -673,6 +687,7 @@ static inline void reg_requests(BLOCK_LINK *block, int count)
   Unregister request for a block
   linking it to the LRU chain if it's the last request
 */
 static inline void unreg_request(BLOCK_LINK *block, int at_end)
 {
   if (! --block->requests)

@@ -682,6 +697,7 @@ static inline void unreg_request(BLOCK_LINK *block, int at_end)
 /*
   Remove a reader of the page in block
 */
 static inline void remove_reader(BLOCK_LINK *block)
 {
   if (! --block->hash_link->requests && block->condvar)

@@ -706,8 +722,9 @@ static inline void wait_for_readers(BLOCK_LINK *block)
 /*
-  add a hash link to a bucket in the hash_table
+  Add a hash link to a bucket in the hash_table
 */
 static inline void link_hash(HASH_LINK **start, HASH_LINK *hash_link)
 {
   if (*start)

@@ -721,7 +738,8 @@ static inline void link_hash(HASH_LINK **start, HASH_LINK *hash_link)
 /*
   Remove a hash link from the hash table
 */
-static inline void unlink_hash(HASH_LINK *hash_link)
+static void unlink_hash(HASH_LINK *hash_link)
 {
   KEYCACHE_DBUG_PRINT("unlink_hash", ("file %u, filepos %lu  #requests=%u",
      (uint) hash_link->file, (ulong) hash_link->diskpos, hash_link->requests));

@@ -730,7 +748,8 @@ static inline void unlink_hash(HASH_LINK *hash_link)
   hash_link->next->prev= hash_link->prev;
   hash_link->block= NULL;
   if (waiting_for_hash_link.last_thread)
-  {                        /* Signal that A free hash link appeared */
+  {
+    /* Signal that A free hash link appeared */
     struct st_my_thread_var *last_thread= waiting_for_hash_link.last_thread;
     struct st_my_thread_var *first_thread= last_thread->next;
     struct st_my_thread_var *next_thread= first_thread;

@@ -756,18 +775,20 @@ static inline void unlink_hash(HASH_LINK *hash_link)
     }
     while (thread != last_thread);
-    link_hash(&_my_hash_root[KEYCACHE_HASH(hash_link->file,
+    link_hash(&my_hash_root[KEYCACHE_HASH(hash_link->file,
                                            hash_link->diskpos)],
              hash_link);
     return;
   }
-  hash_link->next= _my_free_hash_list;
-  _my_free_hash_list= hash_link;
+  hash_link->next= my_free_hash_list;
+  my_free_hash_list= hash_link;
 }
 
 /*
   Get the hash link for a page
 */
-static inline HASH_LINK *get_hash_link(int file, my_off_t filepos)
+static HASH_LINK *get_hash_link(int file, my_off_t filepos)
 {
   reg1 HASH_LINK *hash_link, **start;
   KEYCACHE_PAGE page;

@@ -784,7 +805,7 @@ restart:
     start contains the head of the bucket list,
     hash_link points to the first member of the list
   */
-  hash_link= *(start= &_my_hash_root[KEYCACHE_HASH(file, filepos)]);
+  hash_link= *(start= &my_hash_root[KEYCACHE_HASH(file, filepos)]);
 #if defined(KEYCACHE_DEBUG)
   cnt= 0;
 #endif

@@ -795,7 +816,7 @@ restart:
     hash_link= hash_link->next;
 #if defined(KEYCACHE_DEBUG)
     cnt++;
-    if (! (cnt <= _my_hash_links_used))
+    if (! (cnt <= my_hash_links_used))
     {
       int i;
       for (i=0, hash_link= *start;

@@ -805,22 +826,24 @@ restart:
              (uint) hash_link->file, (ulong) hash_link->diskpos));
     }
-    KEYCACHE_DBUG_ASSERT(n <= _my_hash_links_used);
+    KEYCACHE_DBUG_ASSERT(n <= my_hash_links_used);
 #endif
   }
   if (! hash_link)
-  {                        /* There is no hash link in the hash table for the pair (file, filepos) */
-    if (_my_free_hash_list)
-    {
-      hash_link= _my_free_hash_list;
-      _my_free_hash_list= hash_link->next;
+  {
+    /* There is no hash link in the hash table for the pair (file, filepos) */
+    if (my_free_hash_list)
+    {
+      hash_link= my_free_hash_list;
+      my_free_hash_list= hash_link->next;
     }
-    else if (_my_hash_links_used < _my_hash_links)
+    else if (my_hash_links_used < my_hash_links)
     {
-      hash_link= &_my_hash_link_root[_my_hash_links_used++];
+      hash_link= &my_hash_link_root[my_hash_links_used++];
     }
     else
-    {                      /* Wait for a free hash link */
+    {
+      /* Wait for a free hash link */
      struct st_my_thread_var *thread= my_thread_var;
      KEYCACHE_DBUG_PRINT("get_hash_link", ("waiting"));
      page.file= file;
      page.filepos= filepos;

@@ -846,6 +869,7 @@ restart:
   If the page is not in the cache return a free block, if there is none
   return the lru block after saving its buffer if the page is dirty
 */
 static BLOCK_LINK *find_key_block(int file, my_off_t filepos,
                                   int wrmode, int *page_st)
 {

@@ -874,8 +898,8 @@ restart:
     page_status= PAGE_READ;
   if (page_status == PAGE_READ && (block->status & BLOCK_IN_SWITCH))
-  {                        /* This is a request for a page to be removed from cache */
+  {
+    /* This is a request for a page to be removed from cache */
     KEYCACHE_DBUG_PRINT("find_key_block",
              ("request for old page in block %u", BLOCK_NUMBER(block)));
     /*

@@ -907,20 +931,23 @@ restart:
     }
   }
   else
-  {                        /* This is a request for a new page or for a page not to be removed */
+  {
+    /* This is a request for a new page or for a page not to be removed */
     if (! block)
-    {                      /* No block is assigned for the page yet */
-      if (_my_blocks_used < (uint) _my_disk_blocks)
-      {                    /* There are some never used blocks, take first of them */
-        hash_link->block= block= &_my_block_root[_my_blocks_used];
-        block->buffer= ADD_TO_PTR(_my_block_mem,
-                                  ((ulong) _my_blocks_used*key_cache_block_size),
+    {
+      /* No block is assigned for the page yet */
+      if (my_blocks_used < (uint) my_disk_blocks)
+      {
+        /* There are some never used blocks, take first of them */
+        hash_link->block= block= &my_block_root[my_blocks_used];
+        block->buffer= ADD_TO_PTR(my_block_mem,
+                                  ((ulong) my_blocks_used*key_cache_block_size),
                                   byte*);
        block->status= 0;
        block->length= 0;
        block->offset= key_cache_block_size;
        block->requests= 1;
-        _my_blocks_used++;
+        my_blocks_used++;
        link_to_file_list(block, file, 0);
        block->hash_link= hash_link;
        page_status= PAGE_TO_BE_READ;

@@ -928,14 +955,15 @@ restart:
                  ("got never used block %u", BLOCK_NUMBER(block)));
       }
       else
-      {                    /* There are no never used blocks, use a block from the LRU chain */
+      {
+        /* There are no never used blocks, use a block from the LRU chain */
         /*
           Wait until a new block is added to the LRU chain;
           several threads might wait here for the same page,
          all of them must get the same block
        */
-        if (! _my_used_last)
+        if (! my_used_last)
        {
          struct st_my_thread_var *thread= my_thread_var;
          thread->opt_info= (void *) hash_link;

@@ -954,20 +982,23 @@ restart:
          Take the first block from the LRU chain
          unlinking it from the chain
         */
-        block= _my_used_last->next_used;
+        block= my_used_last->next_used;
         reg_requests(block,1);
         hash_link->block= block;
       }
 
-      if (block->hash_link != hash_link &&
-          ! (block->status & BLOCK_IN_SWITCH) )
-      {                    /* this is a primary request for a new page */
+      if (block->hash_link != hash_link &&
+          ! (block->status & BLOCK_IN_SWITCH) )
+      {
+        /* this is a primary request for a new page */
         block->status|= BLOCK_IN_SWITCH;
 
         KEYCACHE_DBUG_PRINT("find_key_block",
                        ("got block %u for new page", BLOCK_NUMBER(block)));
 
         if (block->status & BLOCK_CHANGED)
-        {                  /* The block contains a dirty page - push it out of the cache */
+        {
+          /* The block contains a dirty page - push it out of the cache */
           KEYCACHE_DBUG_PRINT("find_key_block", ("block is dirty"));

@@ -980,7 +1011,7 @@ restart:
                             block->length, block->hash_link->diskpos,
                             MYF(MY_NABP | MY_WAIT_IF_FULL));
           keycache_pthread_mutex_lock(&THR_LOCK_keycache);
-          _my_cache_write++;
+          my_cache_write++;
         }
 
         block->status|= BLOCK_REASSIGNED;

@@ -1019,7 +1050,7 @@ restart:
       }
     }
-    _my_cache_read++;
+    my_cache_read++;
   }
   else
   {

@@ -1046,6 +1077,7 @@ restart:
     do not to report error when the size of successfully read
     portion is less than read_length, but not less than min_length
 */
 static void read_block(BLOCK_LINK *block, uint read_length,
                        uint min_length, my_bool primary)
 {

@@ -1055,7 +1087,8 @@ static void read_block(BLOCK_LINK *block, uint read_length,
   KEYCACHE_THREAD_TRACE("read_block");
   if (primary)
-  {                        /*
+  {
+    /*
       This code is executed only by threads
       that submitted primary requests
     */

@@ -1082,7 +1115,8 @@ static void read_block(BLOCK_LINK *block, uint read_length,
     release_queue(&block->wqueue[COND_FOR_REQUESTED]);
   }
   else
-  {                        /*
+  {
+    /*
       This code is executed only by threads
       that submitted secondary requests
     */

@@ -1122,8 +1156,9 @@ byte *key_cache_read(File file, my_off_t filepos, byte *buff, uint length,
   DBUG_PRINT("enter", ("file %u, filepos %lu, length %u",
              (uint) file, (ulong) filepos, length));
 
-  if (_my_disk_blocks > 0)
-  {                        /* Key cache is used */
+  if (my_disk_blocks > 0)
+  {
+    /* Key cache is used */
     reg1 BLOCK_LINK *block;
     uint offset= (uint) (filepos & (key_cache_block_size-1));
     byte *start= buff;

@@ -1144,7 +1179,7 @@ byte *key_cache_read(File file, my_off_t filepos, byte *buff, uint length,
                   key_cache_block_size : length;
       KEYCACHE_DBUG_ASSERT(read_length > 0);
       keycache_pthread_mutex_lock(&THR_LOCK_keycache);
-      _my_cache_r_requests++;
+      my_cache_r_requests++;
       block= find_key_block(file, filepos, 0, &page_st);
       if (page_st != PAGE_READ)
       {

@@ -1212,8 +1247,8 @@ byte *key_cache_read(File file, my_off_t filepos, byte *buff, uint length,
   }
 
   /* Key cache is not used */
-  statistic_increment(_my_cache_r_requests, &THR_LOCK_keycache);
-  statistic_increment(_my_cache_read, &THR_LOCK_keycache);
+  statistic_increment(my_cache_r_requests, &THR_LOCK_keycache);
+  statistic_increment(my_cache_read, &THR_LOCK_keycache);
   if (my_pread(file, (byte*) buff, length, filepos, MYF(MY_NABP)))
     error= 1;
   DBUG_RETURN(error ? (byte*) 0 : buff);
@@ -1227,6 +1262,7 @@ byte *key_cache_read(File file, my_off_t filepos, byte *buff, uint length,
   if !dont_write then all dirty pages involved in writing should
   have been flushed from key cache before the function starts
 */
 int key_cache_write(File file, my_off_t filepos, byte *buff, uint length,
                     uint block_length  __attribute__((unused)),
                     int dont_write)

@@ -1239,8 +1275,9 @@ int key_cache_write(File file, my_off_t filepos, byte *buff, uint length,
              (uint) file, (ulong) filepos, length, block_length));
 
   if (! dont_write)
-  {                        /* Force writing from buff into disk */
-    statistic_increment(_my_cache_write, &THR_LOCK_keycache);
+  {
+    /* Force writing from buff into disk */
+    statistic_increment(my_cache_write, &THR_LOCK_keycache);
     if (my_pwrite(file, buff, length, filepos, MYF(MY_NABP | MY_WAIT_IF_FULL)))
       DBUG_RETURN(1);
   }

@@ -1249,8 +1286,9 @@ int key_cache_write(File file, my_off_t filepos, byte *buff, uint length,
   DBUG_EXECUTE("check_keycache", test_key_cache("start of key_cache_write", 1););
 #endif
 
-  if (_my_disk_blocks > 0)
-  {                        /* Key cache is used */
+  if (my_disk_blocks > 0)
+  {
+    /* Key cache is used */
     uint read_length;
     uint offset= (uint) (filepos & (key_cache_block_size-1));
     int page_st;

@@ -1263,7 +1301,7 @@ int key_cache_write(File file, my_off_t filepos, byte *buff, uint length,
                   key_cache_block_size : length;
       KEYCACHE_DBUG_ASSERT(read_length > 0);
       keycache_pthread_mutex_lock(&THR_LOCK_keycache);
-      _my_cache_w_requests++;
+      my_cache_w_requests++;
       block= find_key_block(file, filepos, 1, &page_st);
       if (page_st != PAGE_READ &&
          (offset || read_length < key_cache_block_size))

@@ -1273,7 +1311,8 @@ int key_cache_write(File file, my_off_t filepos, byte *buff, uint length,
                    offset, (my_bool)(page_st == PAGE_TO_BE_READ));
 
       if (! dont_write)
-      {                    /* buff has been written to disk at start */
+      {
+        /* buff has been written to disk at start */
        if ((block->status & BLOCK_CHANGED) &&
            (! offset && read_length >= key_cache_block_size))
             link_to_file_list(block, block->hash_link->file, 1);

@@ -1318,8 +1357,8 @@ int key_cache_write(File file, my_off_t filepos, byte *buff, uint length,
   /* Key cache is not used */
   if (dont_write)
   {
-    statistic_increment(_my_cache_w_requests, &THR_LOCK_keycache);
-    statistic_increment(_my_cache_write, &THR_LOCK_keycache);
+    statistic_increment(my_cache_w_requests, &THR_LOCK_keycache);
+    statistic_increment(my_cache_write, &THR_LOCK_keycache);
     if (my_pwrite(file, (byte*) buff, length, filepos,
                  MYF(MY_NABP | MY_WAIT_IF_FULL)))
      error=1;
   }

@@ -1337,6 +1376,7 @@ int key_cache_write(File file, my_off_t filepos, byte *buff, uint length,
   remove it from the chain file of dirty/clean blocks
   and add it at the beginning of the LRU chain
 */
 static void free_block(BLOCK_LINK *block)
 {
   KEYCACHE_THREAD_TRACE("free block");

@@ -1372,6 +1412,7 @@ static int cmp_sec_link(BLOCK_LINK **a, BLOCK_LINK **b)
   Flush a portion of changed blocks to disk,
   free used blocks if requested
 */
 static int flush_cached_blocks(File file, BLOCK_LINK **cache,
                                BLOCK_LINK **end, enum flush_type type)

@@ -1399,7 +1440,7 @@ static int flush_cached_blocks(File file, BLOCK_LINK **cache,
     error= my_pwrite(file, block->buffer+block->offset, block->length,
                      block->hash_link->diskpos, MYF(MY_NABP | MY_WAIT_IF_FULL));
     keycache_pthread_mutex_lock(&THR_LOCK_keycache);
-    _my_cache_write++;
+    my_cache_write++;
     if (error)
     {
       block->status|= BLOCK_ERROR;

@@ -1409,7 +1450,7 @@ static int flush_cached_blocks(File file, BLOCK_LINK **cache,
     /* type will never be FLUSH_IGNORE_CHANGED here */
     if (! (type == FLUSH_KEEP || type == FLUSH_FORCE_WRITE))
     {
-      _my_blocks_changed--;
+      my_blocks_changed--;
      free_block(block);
     }
     else

@@ -1427,13 +1468,14 @@ static int flush_cached_blocks(File file, BLOCK_LINK **cache,
 /*
   Flush all blocks for a file to disk
 */
 int flush_key_blocks(File file, enum flush_type type)
 {
   int last_errno= 0;
   BLOCK_LINK *cache_buff[FLUSH_CACHE],**cache;
   DBUG_ENTER("flush_key_blocks");
   DBUG_PRINT("enter",("file: %d  blocks_used: %d  blocks_changed: %d",
-             file, _my_blocks_used, _my_blocks_changed));
+             file, my_blocks_used, my_blocks_changed));
 
 #if !defined(DBUG_OFF) && defined(EXTRA_DEBUG)
   DBUG_EXECUTE("check_keycache", test_key_cache("start of flush_key_blocks", 0););

@@ -1442,9 +1484,10 @@ int flush_key_blocks(File file, enum flush_type type)
   keycache_pthread_mutex_lock(&THR_LOCK_keycache);
 
   cache= cache_buff;
-  if (_my_disk_blocks > 0 &&
+  if (my_disk_blocks > 0 &&
       (! my_disable_flush_key_blocks || type != FLUSH_KEEP))
-  {                        /* Key cache exists and flush is not disabled */
+  {
+    /* Key cache exists and flush is not disabled */
     int error= 0;
     uint count= 0;
     BLOCK_LINK **pos, **end;

@@ -1467,7 +1510,7 @@ int flush_key_blocks(File file, enum flush_type type)
      if (block->hash_link->file == file)
      {
        count++;
-        KEYCACHE_DBUG_ASSERT(count <= _my_blocks_used);
+        KEYCACHE_DBUG_ASSERT(count <= my_blocks_used);
      }
     }
     /* Allocate a new buffer only if its bigger than the one we have */

@@ -1488,7 +1531,7 @@ restart:
     {
 #if defined(KEYCACHE_DEBUG)
      cnt++;
-      KEYCACHE_DBUG_ASSERT(cnt <= _my_blocks_used);
+      KEYCACHE_DBUG_ASSERT(cnt <= my_blocks_used);
 #endif
      next= block->next_changed;
      if (block->hash_link->file == file)

@@ -1501,15 +1544,18 @@ restart:
        block->status|= BLOCK_IN_FLUSH;
 
        if (! (block->status & BLOCK_IN_SWITCH))
-        {                  /*
+        {
+          /*
            We care only for the blocks for which flushing was not
            initiated by other threads as a result of page swapping
          */
          reg_requests(block, 1);
          if (type != FLUSH_IGNORE_CHANGED)
-          {                /* It's not a temporary file */
+          {
+            /* It's not a temporary file */
            if (pos == end)
-            {              /*
+            {
+              /*
                This happens only if there is not enough
                memory for the big block
              */

@@ -1527,12 +1573,13 @@ restart:
          else
          {
            /* It's a temporary file */
-            _my_blocks_changed--;
+            my_blocks_changed--;
            free_block(block);
          }
        }
        else
-        {                  /* Link the block into a list of blocks 'in switch' */
+        {
+          /* Link the block into a list of blocks 'in switch' */
          unlink_changed(block);
          link_changed(block, &first_in_switch);
        }

@@ -1561,7 +1608,7 @@ restart:
      }
 #if defined(KEYCACHE_DEBUG)
      cnt++;
-      KEYCACHE_DBUG_ASSERT(cnt <= _my_blocks_used);
+      KEYCACHE_DBUG_ASSERT(cnt <= my_blocks_used);
 #endif
     }
     /* The following happens very seldom */

@@ -1576,7 +1623,7 @@ restart:
     {
 #if defined(KEYCACHE_DEBUG)
      cnt++;
-      KEYCACHE_DBUG_ASSERT(cnt <= _my_blocks_used);
+      KEYCACHE_DBUG_ASSERT(cnt <= my_blocks_used);
 #endif
      next= block->next_changed;
      if (block->hash_link->file == file &&

@@ -1607,27 +1654,28 @@ restart:
 /*
   Flush all blocks in the key cache to disk
 */
 static int flush_all_key_blocks()
 {
 #if defined(KEYCACHE_DEBUG)
   uint cnt= 0;
 #endif
-  while (_my_blocks_changed > 0)
+  while (my_blocks_changed > 0)
   {
     BLOCK_LINK *block;
-    for (block= _my_used_last->next_used; ; block= block->next_used)
+    for (block= my_used_last->next_used; ; block= block->next_used)
     {
      if (block->hash_link)
      {
 #if defined(KEYCACHE_DEBUG)
        cnt++;
-        KEYCACHE_DBUG_ASSERT(cnt <= _my_blocks_used);
+        KEYCACHE_DBUG_ASSERT(cnt <= my_blocks_used);
 #endif
        if (flush_key_blocks(block->hash_link->file, FLUSH_RELEASE))
          return 1;
        break;
      }
-      if (block == _my_used_last)
+      if (block == my_used_last)
        break;
     }
   }

@@ -1698,11 +1746,11 @@ static void keycache_dump()
   }
   while (thread != last);
 
-  for (i=0 ; i < _my_blocks_used ; i++)
+  for (i=0 ; i < my_blocks_used ; i++)
   {
     int j;
-    block= &_my_block_root[i];
+    block= &my_block_root[i];
     hash_link= block->hash_link;
     fprintf(keycache_dump_file,
            "block:%u hash_link:%d status:%x #requests=%u waiting_for_readers:%d\n",
            i, (int) (hash_link ? HASH_LINK_NUMBER(hash_link) : -1),

@@ -1713,6 +1761,7 @@ static void keycache_dump()
     thread= last= wqueue->last_thread;
     fprintf(keycache_dump_file, "queue #%d\n", j);
     if (thread)
     {
      do
      {
        thread= thread->next;

@@ -1724,16 +1773,19 @@ static void keycache_dump()
      while (thread != last);
     }
   }
   fprintf(keycache_dump_file, "LRU chain:");
-  block= _my_used_last;
+  block= my_used_last;
   if (block)
   {
     do
     {
      block= block->next_used;
      fprintf(keycache_dump_file, "block:%u, ", BLOCK_NUMBER(block));
     }
-    while (block != _my_used_last);
+    while (block != my_used_last);
   }
   fprintf(keycache_dump_file, "\n");
   fclose(keycache_dump_file);

@@ -1867,4 +1919,3 @@ void keycache_debug_log_close(void)
 #endif /* defined(KEYCACHE_DEBUG_LOG) */
 #endif /* defined(KEYCACHE_DEBUG) */
sql/mysqld.cc

@@ -4415,13 +4415,13 @@ struct show_var_st status_vars[]= {
   {"Handler_rollback",         (char*) &ha_rollback_count,     SHOW_LONG},
   {"Handler_update",           (char*) &ha_update_count,       SHOW_LONG},
   {"Handler_write",            (char*) &ha_write_count,        SHOW_LONG},
-  {"Key_blocks_used",          (char*) &_my_blocks_used,       SHOW_LONG_CONST},
-  {"Key_read_requests",        (char*) &_my_cache_r_requests,  SHOW_LONG},
-  {"Key_reads",                (char*) &_my_cache_read,        SHOW_LONG},
-  {"Key_write_requests",       (char*) &_my_cache_w_requests,  SHOW_LONG},
-  {"Key_writes",               (char*) &_my_cache_write,       SHOW_LONG},
+  {"Key_blocks_used",          (char*) &my_blocks_used,        SHOW_LONG_CONST},
+  {"Key_read_requests",        (char*) &my_cache_r_requests,   SHOW_LONG},
+  {"Key_reads",                (char*) &my_cache_read,         SHOW_LONG},
+  {"Key_write_requests",       (char*) &my_cache_w_requests,   SHOW_LONG},
+  {"Key_writes",               (char*) &my_cache_write,        SHOW_LONG},
   {"Max_used_connections",     (char*) &max_used_connections,  SHOW_LONG},
-  {"Not_flushed_key_blocks",   (char*) &_my_blocks_changed,    SHOW_LONG_CONST},
+  {"Not_flushed_key_blocks",   (char*) &my_blocks_changed,     SHOW_LONG_CONST},
   {"Not_flushed_delayed_rows", (char*) &delayed_rows_in_use,   SHOW_LONG_CONST},
   {"Open_tables",              (char*) 0,                      SHOW_OPENTABLES},
   {"Open_files",               (char*) &my_file_opened,        SHOW_LONG_CONST},
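The status_vars[] rows only repoint SHOW STATUS at the renamed counters; each entry is a status name plus a pointer to the ulong that backs it. A stripped-down sketch of that table-of-pointers pattern (struct and loop are illustrative, not the server's actual definitions):

    #include <stdio.h>

    static unsigned long my_cache_read, my_cache_write;  /* backing counters */

    struct toy_show_var { const char *name; unsigned long *value; };

    static struct toy_show_var toy_status_vars[]=
    {
      {"Key_reads",  &my_cache_read},
      {"Key_writes", &my_cache_write},
      {0, 0}
    };

    int main(void)
    {
      my_cache_read= 42;
      for (struct toy_show_var *v= toy_status_vars; v->name; v++)
        printf("%-12s %lu\n", v->name, *v->value);
      return 0;
    }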
sql/share/charsets/Index.xml

@@ -240,9 +240,18 @@ To make maintaining easier please:
 <alias>BalticRim</alias>
 <alias>iso-8859-13</alias>
 <alias>l7</alias>
-<collation name="estonia"  id="20" order="Estonian" flag="primary"/>
-<collation name="latvian"  id="41" order="Latvian"/>
-<collation name="latvian1" id="42" order="Latvian"/>
+<collation name="latin7_estonian_ci_as" id="20">
+  <order>Estonian</order>
+  <flag>primary</flag>
+</collation>
+<collation name="latin7_ci_as" id="41">
+  <order>Latvian</order>
+  <order>Lithuanian</order>
+</collation>
+<collation name="latin7_cs_as" id="42">
+  <order>Latvian</order>
+  <order>Lithuanian</order>
+</collation>
 <collation name="latin7_bin" id="79" order="Binary" flag="binary"/>
 </charset>
sql/share/charsets/latin7.xml

@@ -93,7 +93,7 @@
 </unicode>
 
-<collation name="estonia">
+<collation name="latin7_estonian_ci_as">
 <map>
 00 02 03 04 05 06 07 08 09 2E 2F 30 31 32 0A 0B
 0C 0D 0E 0F 10 11 12 13 14 15 16 17 18 19 1A 1B

@@ -115,7 +115,7 @@
 </collation>
 
-<collation name="latvian">
+<collation name="latin7_ci_as">
 <!-- Created for case-sensitive record search -->
 <!-- by Andis Grasis & Rihards Grasis e-mail:andis@cata.lv -->
 <map>

@@ -139,7 +139,7 @@
 </collation>
 
-<collation name="latvian1">
+<collation name="latin7_cs_as">
 <!-- Created for case-insensitive record search -->
 <!-- Created by Andis & Rihards -->
 <map>
sql/sql_test.cc

@@ -321,8 +321,8 @@ w_requests: %10lu\n\
 writes:     %10lu\n\
 r_requests: %10lu\n\
 reads:      %10lu\n",
-        _my_blocks_used, _my_blocks_changed, _my_cache_w_requests,
-        _my_cache_write, _my_cache_r_requests, _my_cache_read);
+        my_blocks_used, my_blocks_changed, my_cache_w_requests,
+        my_cache_write, my_cache_r_requests, my_cache_read);
   pthread_mutex_unlock(&THR_LOCK_keycache);
   if (thd)
strings/ctype-extra.c

@@ -3169,7 +3169,7 @@ CHARSET_INFO compiled_charsets[] = {
     20,0,0,                    /* number  */
     MY_CS_COMPILED,            /* state   */
     "latin7",                  /* cs name */
-    "estonia",                 /* name    */
+    "latin7_estonian_ci_as",   /* name    */
     "",                        /* comment */
     ctype_estonia,
     to_lower_estonia,