mariadb · Commit 15e2fdcf
Authored Feb 15, 2006 by unknown

Merge mysql.com:/home/mydev/mysql-5.1
into mysql.com:/home/mydev/mysql-5.1-bug8841

Parents: 5c504d11 098ae02a
Showing 4 changed files with 1269 additions and 18 deletions:

  mysql-test/r/archive.result  +1217    -0
  mysql-test/t/archive.test       +4    -2
  sql/ha_archive.cc              +40   -14
  sql/ha_archive.h                +8    -2
mysql-test/r/archive.result  (+1217 -0)
(diff collapsed: expected-results file, 1217 added lines not shown)
mysql-test/t/archive.test  (+4 -2)

@@ -1344,9 +1344,11 @@ CHECK TABLE t2;
 SELECT * FROM t2;
-# Just test syntax, we will never know if the output is right or wrong
-# Must be the last test
+# We won't know exactly about what is going on internally,
+# but we will see if the row makes it in!!
 INSERT DELAYED INTO t2 VALUES (4, 011403, 37, 'intercepted', 'audiology', 'tinily', '');
+FLUSH TABLE t2;
+SELECT * FROM t2;
 # Adding test for alter table
 ALTER TABLE t2 DROP COLUMN fld6;
...
sql/ha_archive.cc  (+40 -14)

@@ -126,10 +126,10 @@ static HASH archive_open_tables;
 #define ARN ".ARN"               // Files used during an optimize call
 #define ARM ".ARM"               // Meta file
 /*
-  uchar + uchar + ulonglong + ulonglong + ulonglong + uchar
+  uchar + uchar + ulonglong + ulonglong + ulonglong + ulonglong + uchar
 */
 #define META_BUFFER_SIZE sizeof(uchar) + sizeof(uchar) + sizeof(ulonglong) \
-  + sizeof(ulonglong) + sizeof(ulonglong) + sizeof(uchar)
+  + sizeof(ulonglong) + sizeof(ulonglong) + sizeof(ulonglong) + sizeof(uchar)
 /*
   uchar + uchar
...
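For orientation: the meta buffer that META_BUFFER_SIZE describes now carries
seven fields, in the order the read/write functions below walk them. A minimal
standalone sketch of the implied layout, assuming the little-endian 8-byte
encoding of MySQL's int8store()/uint8korr() macros (the offsets and names are
derived here for illustration, not taken from the source):

#include <cstddef>
#include <cstdio>

// Derived offsets for the archive meta buffer after this commit:
// uchar check, uchar version, four little-endian ulonglongs
// (rows, check_point, auto_increment, forced_flushes), uchar dirty.
enum MetaOffset : std::size_t
{
  META_CHECK          = 0,
  META_VERSION        = 1,
  META_ROWS           = 2,
  META_CHECK_POINT    = 10,
  META_AUTO_INCREMENT = 18,
  META_FORCED_FLUSHES = 26,   // the field this commit adds
  META_DIRTY          = 34,
  META_TOTAL_SIZE     = 35
};

int main()
{
  // Sanity check against the field-by-field sum in the diff's
  // META_BUFFER_SIZE expression: 1 + 1 + 8 + 8 + 8 + 8 + 1.
  std::size_t sum= 1 + 1 + 8 + 8 + 8 + 8 + 1;
  std::printf("meta buffer: %zu bytes\n", sum);
  return sum == (std::size_t)META_TOTAL_SIZE ? 0 : 1;
}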
@@ -313,7 +313,8 @@ error:
   *rows will contain the current number of rows in the data file upon success.
 */
 int ha_archive::read_meta_file(File meta_file, ha_rows *rows,
-                               ulonglong *auto_increment)
+                               ulonglong *auto_increment,
+                               ulonglong *forced_flushes)
 {
   uchar meta_buffer[META_BUFFER_SIZE];
   uchar *ptr= meta_buffer;
...
@@ -336,12 +337,15 @@ int ha_archive::read_meta_file(File meta_file, ha_rows *rows,
   ptr+= sizeof(ulonglong); // Move past check_point
   *auto_increment= uint8korr(ptr);
   ptr+= sizeof(ulonglong); // Move past auto_increment
+  *forced_flushes= uint8korr(ptr);
+  ptr+= sizeof(ulonglong); // Move past forced_flush
   DBUG_PRINT("ha_archive::read_meta_file", ("Check %d", (uint)meta_buffer[0]));
   DBUG_PRINT("ha_archive::read_meta_file", ("Version %d", (uint)meta_buffer[1]));
   DBUG_PRINT("ha_archive::read_meta_file", ("Rows %llu", *rows));
   DBUG_PRINT("ha_archive::read_meta_file", ("Checkpoint %llu", check_point));
   DBUG_PRINT("ha_archive::read_meta_file", ("Auto-Increment %llu", *auto_increment));
+  DBUG_PRINT("ha_archive::read_meta_file", ("Forced Flushes %llu", *forced_flushes));
   DBUG_PRINT("ha_archive::read_meta_file", ("Dirty %d", (int)(*ptr)));
   if ((meta_buffer[0] != (uchar)ARCHIVE_CHECK_HEADER) ||
...
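uint8korr() above is MySQL's macro for reading an 8-byte integer stored
little-endian in the file regardless of host byte order; int8store(), used by
write_meta_file() below, is the writing counterpart. A standalone illustrative
equivalent (store8/korr8 are hypothetical helpers, not the MySQL macros
themselves):

#include <cstdint>

// Fixed little-endian 8-byte store/load, independent of host CPU order.
static void store8(unsigned char *ptr, uint64_t value)   // like int8store
{
  for (int i= 0; i < 8; i++)
    ptr[i]= (unsigned char)(value >> (8 * i));
}

static uint64_t korr8(const unsigned char *ptr)          // like uint8korr
{
  uint64_t value= 0;
  for (int i= 0; i < 8; i++)
    value|= (uint64_t)ptr[i] << (8 * i);
  return value;
}

Pinning the byte order in the file is what lets a meta file written on one
architecture be read correctly on another.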
@@ -359,7 +363,9 @@ int ha_archive::read_meta_file(File meta_file, ha_rows *rows,
   Upon ::open() we set to dirty, and upon ::close() we set to clean.
 */
 int ha_archive::write_meta_file(File meta_file, ha_rows rows,
-                                ulonglong auto_increment, bool dirty)
+                                ulonglong auto_increment,
+                                ulonglong forced_flushes,
+                                bool dirty)
 {
   uchar meta_buffer[META_BUFFER_SIZE];
   uchar *ptr= meta_buffer;
...
@@ -377,6 +383,8 @@ int ha_archive::write_meta_file(File meta_file, ha_rows rows,
   ptr+= sizeof(ulonglong);
   int8store(ptr, auto_increment);
   ptr+= sizeof(ulonglong);
+  int8store(ptr, forced_flushes);
+  ptr+= sizeof(ulonglong);
   *ptr= (uchar)dirty;
   DBUG_PRINT("ha_archive::write_meta_file", ("Check %d",
                                              (uint)ARCHIVE_CHECK_HEADER));
...
@@ -386,6 +394,8 @@ int ha_archive::write_meta_file(File meta_file, ha_rows rows,
   DBUG_PRINT("ha_archive::write_meta_file", ("Checkpoint %llu", check_point));
   DBUG_PRINT("ha_archive::write_meta_file", ("Auto Increment %llu",
                                              auto_increment));
+  DBUG_PRINT("ha_archive::write_meta_file", ("Forced Flushes %llu",
+                                             forced_flushes));
   DBUG_PRINT("ha_archive::write_meta_file", ("Dirty %d", (uint)dirty));
   VOID(my_seek(meta_file, 0, MY_SEEK_SET, MYF(0)));
...
@@ -451,11 +461,14 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name,
     leave it up to the user to fix.
   */
   if (read_meta_file(share->meta_file, &share->rows_recorded,
-                     &share->auto_increment_value))
+                     &share->auto_increment_value,
+                     &share->forced_flushes))
     share->crashed= TRUE;
   else
     (void)write_meta_file(share->meta_file, share->rows_recorded,
-                          share->auto_increment_value, TRUE);
+                          share->auto_increment_value,
+                          share->forced_flushes,
+                          TRUE);
   /*
     It is expensive to open and close the data files and since you can't have
     a gzip file that can be both read and written we keep a writer open
...
@@ -500,12 +513,18 @@ int ha_archive::free_share(ARCHIVE_SHARE *share)
     hash_delete(&archive_open_tables, (byte*) share);
     thr_lock_delete(&share->lock);
     VOID(pthread_mutex_destroy(&share->mutex));
-    if (share->crashed)
-      (void)write_meta_file(share->meta_file, share->rows_recorded,
-                            share->auto_increment_value, TRUE);
-    else
-      (void)write_meta_file(share->meta_file, share->rows_recorded,
-                            share->auto_increment_value, FALSE);
+    /*
+      We need to make sure we don't reset the crashed state.
+      If we open a crashed file, we need to close it as crashed unless
+      it has been repaired.
+      Since we will close the data down after this, we go on and count
+      the flush on close;
+    */
+    share->forced_flushes++;
+    (void)write_meta_file(share->meta_file, share->rows_recorded,
+                          share->auto_increment_value,
+                          share->forced_flushes,
+                          share->crashed ? TRUE : FALSE);
     if (azclose(&(share->archive_write)))
       rc= 1;
     if (my_close(share->meta_file, MYF(0)))
...
@@ -657,7 +676,7 @@ int ha_archive::create(const char *name, TABLE *table_arg,
     }
   }
-  write_meta_file(create_file, 0, auto_increment_value, FALSE);
+  write_meta_file(create_file, 0, auto_increment_value, 0, FALSE);
   my_close(create_file, MYF(0));
   /*
...
@@ -800,6 +819,7 @@ int ha_archive::write_row(byte *buf)
     data
   */
   azflush(&(share->archive_write), Z_SYNC_FLUSH);
+  share->forced_flushes++;
   /*
     Set the position of the local read thread to the beginning position.
   */
...
@@ -897,6 +917,7 @@ int ha_archive::index_read_idx(byte *buf, uint index, const byte *key,
   */
   pthread_mutex_lock(&share->mutex);
   azflush(&(share->archive_write), Z_SYNC_FLUSH);
+  share->forced_flushes++;
   pthread_mutex_unlock(&share->mutex);
   /*
...
@@ -974,6 +995,7 @@ int ha_archive::rnd_init(bool scan)
     {
       DBUG_PRINT("info", ("archive flushing out rows for scan"));
       azflush(&(share->archive_write), Z_SYNC_FLUSH);
+      share->forced_flushes++;
       share->dirty= FALSE;
     }
   pthread_mutex_unlock(&share->mutex);
...
@@ -1149,6 +1171,7 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
   /* Flush any waiting data */
   azflush(&(share->archive_write), Z_SYNC_FLUSH);
+  share->forced_flushes++;
   /* Lets create a file to contain the new data */
   fn_format(writer_filename, share->table_name, "", ARN,
...
@@ -1233,13 +1256,15 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
       goto error;
     }
-    while ((read= azread(&archive, block, IO_SIZE)))
+    while ((read= azread(&archive, block, IO_SIZE)) > 0)
       azwrite(&writer, block, read);
   }
   azclose(&writer);
   share->dirty= FALSE;
+  share->forced_flushes= 0;
   azclose(&(share->archive_write));
+  DBUG_PRINT("info", ("Reopening archive data file"));
   if (!(azopen(&(share->archive_write), share->data_file_name,
                O_WRONLY|O_APPEND|O_BINARY)))
   {
...
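The while-condition change above is a real fix, not just style: if azread()
follows zlib's gzread() convention of returning a negative value on error,
the old truthiness test treats that error as "more data" and hands a negative
length to azwrite(). The same pitfall, illustrated with plain POSIX
read()/write() (copy_blocks is a hypothetical helper, not from the source):

#include <unistd.h>

// The "> 0" comparison stops on both end-of-file (0) and error (-1);
// a bare truthiness test would keep looping on -1 with a bogus length.
static int copy_blocks(int src_fd, int dst_fd)
{
  char block[4096];
  ssize_t nread;
  while ((nread= read(src_fd, block, sizeof(block))) > 0)
    if (write(dst_fd, block, (size_t)nread) != nread)
      return 1;                      // short or failed write
  return nread < 0 ? 1 : 0;          // distinguish read error from clean EOF
}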
@@ -1421,6 +1446,7 @@ int ha_archive::check(THD* thd, HA_CHECK_OPT* check_opt)
   thd->proc_info= "Checking table";
   /* Flush any waiting data */
   azflush(&(share->archive_write), Z_SYNC_FLUSH);
+  share->forced_flushes++;
   /*
     First we create a buffer that we can use for reading rows, and can pass
...
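Taken together, the ha_archive.cc changes apply one pattern: every
Z_SYNC_FLUSH of the shared writer (in write_row(), index_read_idx(),
rnd_init(), optimize(), and check()) is paired with a share->forced_flushes++,
the counter is persisted through the meta file, and optimize() resets it to 0
after rewriting the data file. A reduced sketch of that bookkeeping
(ShareSketch and flush_and_count are illustrative names, not from the source):

#include <pthread.h>
#include <cstdint>

struct ShareSketch
{
  pthread_mutex_t mutex;
  uint64_t forced_flushes;   // persisted via the meta file in the real code
};

static void flush_and_count(ShareSketch *share)
{
  pthread_mutex_lock(&share->mutex);
  // ... the real code calls azflush(&share->archive_write, Z_SYNC_FLUSH) ...
  share->forced_flushes++;   // counted exactly once per forced flush
  pthread_mutex_unlock(&share->mutex);
}

Within this diff the counter is only maintained and logged; no consumer of the
value appears yet.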
sql/ha_archive.h  (+8 -2)

@@ -39,6 +39,8 @@ typedef struct st_archive_share {
   bool crashed;             /* Meta file is crashed */
   ha_rows rows_recorded;    /* Number of rows in tables */
   ulonglong auto_increment_value;
+  ulonglong forced_flushes;
+  ulonglong mean_rec_length;
 } ARCHIVE_SHARE;
 /*
...
@@ -98,9 +100,13 @@ public:
   int rnd_next(byte *buf);
   int rnd_pos(byte * buf, byte *pos);
   int get_row(azio_stream *file_to_read, byte *buf);
-  int read_meta_file(File meta_file, ha_rows *rows, ulonglong *auto_increment);
+  int read_meta_file(File meta_file, ha_rows *rows,
+                     ulonglong *auto_increment,
+                     ulonglong *forced_flushes);
   int write_meta_file(File meta_file, ha_rows rows,
-                      ulonglong auto_increment, bool dirty);
+                      ulonglong auto_increment,
+                      ulonglong forced_flushes,
+                      bool dirty);
   ARCHIVE_SHARE *get_share(const char *table_name, TABLE *table, int *rc);
   int free_share(ARCHIVE_SHARE *share);
   bool auto_repair() const { return 1; }      // For the moment we just do this
...