nexedi / MariaDB
Commit ba1139d5, authored Dec 16, 2004 by unknown
Merge parents: f9b1e2de, b76b2e68
Showing 4 changed files with 1276 additions and 40 deletions (+1276 -40)
mysql-test/r/archive.result   +1208 -0
mysql-test/t/archive.test     +2 -0
sql/examples/ha_archive.cc    +63 -39
sql/examples/ha_archive.h     +3 -1
mysql-test/r/archive.result
This diff is collapsed (+1208 -0).
mysql-test/t/archive.test
@@ -1299,6 +1299,8 @@ INSERT INTO t2 VALUES (4,011403,37,'intercepted','audiology','tinily','');
 SELECT * FROM t2;
 OPTIMIZE TABLE t2;
 SELECT * FROM t2;
+REPAIR TABLE t2;
+SELECT * FROM t2;
 #
 # Test bulk inserts
sql/examples/ha_archive.cc
@@ -22,6 +22,7 @@
 #ifdef HAVE_ARCHIVE_DB
 #include "ha_archive.h"
+#include <my_dir.h>

 /*
   First, if you want to understand storage engines you should look at
@@ -227,8 +228,7 @@ int ha_archive::read_meta_file(File meta_file, ulonglong *rows)
 /*
   This method writes out the header of a meta file and returns whether or not it was successful.
   By setting dirty you say whether or not the file represents the actual state of the data file.
-  Upon ::open() we set to dirty, and upon ::close() we set to clean. If we determine during
-  a read that the file was dirty we will force a rebuild of this file.
+  Upon ::open() we set to dirty, and upon ::close() we set to clean.
 */
 int ha_archive::write_meta_file(File meta_file, ulonglong rows, bool dirty)
 {
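For readers new to the dirty-flag convention the comment above describes, here is a minimal, self-contained C++ sketch of the protocol under assumed names and an invented on-disk layout (the real meta file stores more than this struct): the flag is set while a writer has the file open and cleared on an orderly close, so finding it set at open time signals a crash.

// Minimal sketch of the dirty-flag protocol (hypothetical names/format).
#include <cstdio>
#include <cstdint>

struct Meta { uint64_t rows; bool dirty; };

static bool store(const char *path, const Meta &m)
{
  FILE *f= std::fopen(path, "wb");
  if (!f) return false;
  bool ok= std::fwrite(&m, sizeof(m), 1, f) == 1;
  std::fclose(f);
  return ok;
}

static bool load(const char *path, Meta *m)
{
  FILE *f= std::fopen(path, "rb");
  if (!f) return false;
  bool ok= std::fread(m, sizeof(*m), 1, f) == 1;
  std::fclose(f);
  return ok;
}

int main()
{
  const char *path= "demo.ARM";            // hypothetical meta file name
  Meta m= { 42, false };
  store(path, m);                          // clean state on disk

  Meta seen;
  if (load(path, &seen) && seen.dirty)     // dirty at open => prior crash
    std::puts("meta file did not close cleanly; treat as crashed");

  m.dirty= true;  store(path, m);          // ::open(): mark dirty
  /* ... rows are written here ... */
  m.dirty= false; store(path, m);          // ::close(): mark clean again
  return 0;
}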
@@ -305,6 +305,7 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, TABLE *table)
     share->use_count= 0;
     share->table_name_length= length;
     share->table_name= tmp_name;
+    share->crashed= FALSE;
     fn_format(share->data_file_name, table_name, "", ARZ, MY_REPLACE_EXT|MY_UNPACK_FILENAME);
     fn_format(meta_file_name, table_name, "", ARM, MY_REPLACE_EXT|MY_UNPACK_FILENAME);
     strmov(share->table_name, table_name);
@@ -315,24 +316,15 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, TABLE *table)
     if ((share->meta_file= my_open(meta_file_name, O_RDWR, MYF(0))) == -1)
       goto error;
-    if (read_meta_file(share->meta_file, &share->rows_recorded))
-    {
-      /*
-        The problem here is that for some reason, probably a crash, the meta
-        file has been corrupted. So what do we do? Well we try to rebuild it
-        ourself. Once that happens, we reread it, but if that fails we just
-        call it quits and return an error.
-      */
-      if (rebuild_meta_file(share->table_name, share->meta_file))
-        goto error;
-      if (read_meta_file(share->meta_file, &share->rows_recorded))
-        goto error;
-    }
     /*
       After we read, we set the file to dirty. When we close, we will do the
-      opposite.
+      opposite. If the meta file will not open we assume it is crashed and
+      leave it up to the user to fix.
     */
-    (void)write_meta_file(share->meta_file, share->rows_recorded, TRUE);
+    if (read_meta_file(share->meta_file, &share->rows_recorded))
+      share->crashed= TRUE;
+    else
+      (void)write_meta_file(share->meta_file, share->rows_recorded, TRUE);
     /*
       It is expensive to open and close the data files and since you can't have
       a gzip file that can be both read and written we keep a writer open
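The net effect of this hunk, together with the crashed checks added below and auto_repair() in the header file, is a guard pattern: once the meta file fails to load, every entry point refuses work until a repair pass clears the flag. A rough, self-contained C++ illustration follows; the error value and names are stand-ins, not the real handler interface.

// Guard-pattern sketch: hypothetical stand-ins for the share flag and
// HA_ERR_CRASHED_ON_USAGE.
#include <cstdio>

enum { OK= 0, ERR_CRASHED_ON_USAGE= 126 };   // error value is illustrative

struct Share { bool crashed; };

static int write_row(Share &s)
{
  if (s.crashed)
    return ERR_CRASHED_ON_USAGE;             // same early return as the hunks below
  /* ... compress and append the row ... */
  return OK;
}

static int repair(Share &s)
{
  /* walk the data file and rewrite the meta file (elided) */
  s.crashed= false;                          // cleared only after a good rebuild
  return OK;
}

int main()
{
  Share share= { true };                     // meta file failed to load at open
  if (write_row(share) == ERR_CRASHED_ON_USAGE)
    std::puts("marked crashed; user must run REPAIR TABLE");
  repair(share);
  std::printf("write after repair: %d\n", write_row(share));
  return 0;
}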
@@ -408,7 +400,7 @@ int ha_archive::open(const char *name, int mode, uint test_if_locked)
   DBUG_ENTER("ha_archive::open");

   if (!(share= get_share(name, table)))
-    DBUG_RETURN(1);
+    DBUG_RETURN(-1);
   thr_lock_data_init(&share->lock, &lock, NULL);

   if ((archive= gzopen(share->data_file_name, "rb")) == NULL)
@@ -530,6 +522,9 @@ int ha_archive::write_row(byte * buf)
   z_off_t written;
   DBUG_ENTER("ha_archive::write_row");

+  if (share->crashed)
+    DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
+
   statistic_increment(table->in_use->status_var.ha_write_count, &LOCK_status);
   if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
     table->timestamp_field->set_time();
@@ -578,6 +573,9 @@ int ha_archive::rnd_init(bool scan)
 {
   DBUG_ENTER("ha_archive::rnd_init");
   int read;  // gzread() returns int, and we use this to check the header

+  if (share->crashed)
+    DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
+
   /* We rewind the file so that we can read from the beginning if scan */
   if (scan)
@@ -672,6 +670,9 @@ int ha_archive::rnd_next(byte *buf)
   int rc;
   DBUG_ENTER("ha_archive::rnd_next");

+  if (share->crashed)
+    DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
+
   if (!scan_rows)
     DBUG_RETURN(HA_ERR_END_OF_FILE);
   scan_rows--;
@@ -722,22 +723,23 @@ int ha_archive::rnd_pos(byte * buf, byte *pos)
 }

 /*
-  This method rebuilds the meta file. It does this by walking the datafile and
+  This method repairs the meta file. It does this by walking the datafile and
   rewriting the meta file.
 */
-int ha_archive::rebuild_meta_file(char *table_name, File meta_file)
+int ha_archive::repair(THD* thd, HA_CHECK_OPT* check_opt)
 {
   int rc;
   byte *buf;
   ulonglong rows_recorded= 0;
-  gzFile rebuild_file;            /* Archive file we are working with */
+  gzFile rebuild_file;            // Archive file we are working with
+  File meta_file;                 // Meta file we use
   char data_file_name[FN_REFLEN];
-  DBUG_ENTER("ha_archive::rebuild_meta_file");
+  DBUG_ENTER("ha_archive::repair");

   /*
     Open up the meta file to recreate it.
   */
-  fn_format(data_file_name, table_name, "", ARZ,
+  fn_format(data_file_name, share->table_name, "", ARZ,
             MY_REPLACE_EXT|MY_UNPACK_FILENAME);
   if ((rebuild_file= gzopen(data_file_name, "rb")) == NULL)
     DBUG_RETURN(errno ? errno : -1);
@@ -767,11 +769,18 @@ int ha_archive::rebuild_meta_file(char *table_name, File meta_file)
   */
   if (rc == HA_ERR_END_OF_FILE)
   {
-    (void)write_meta_file(meta_file, rows_recorded, FALSE);
+    fn_format(data_file_name, share->table_name, "", ARM,
+              MY_REPLACE_EXT|MY_UNPACK_FILENAME);
+    if ((meta_file= my_open(data_file_name, O_RDWR, MYF(0))) == -1)
+    {
+      rc= HA_ERR_CRASHED_ON_USAGE;
+      goto error;
+    }
+    (void)write_meta_file(meta_file, rows_recorded, TRUE);
     rc= 0;
   }
   my_free((gptr) buf, MYF(0));
+  share->crashed= FALSE;
 error:
   gzclose(rebuild_file);
@@ -790,13 +799,14 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
   char block[IO_SIZE];
   char writer_filename[FN_REFLEN];

+  /* Closing will cause all data waiting to be flushed, to be flushed */
+  gzclose(share->archive_write);
+  share->archive_write= NULL;
+
   /* Lets create a file to contain the new data */
   fn_format(writer_filename, share->table_name, "", ARN,
             MY_REPLACE_EXT|MY_UNPACK_FILENAME);

-  /* Closing will cause all data waiting to be flushed, to be flushed */
-  gzclose(share->archive_write);
-
   if ((reader= gzopen(share->data_file_name, "rb")) == NULL)
     DBUG_RETURN(-1);
@@ -814,16 +824,6 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
   my_rename(writer_filename, share->data_file_name, MYF(0));

-  /*
-    We reopen the file in case some IO is waiting to go through.
-    In theory the table is closed right after this operation,
-    but it is possible for IO to still happen.
-    I may be being a bit too paranoid right here.
-  */
-  if ((share->archive_write= gzopen(share->data_file_name, "ab")) == NULL)
-    DBUG_RETURN(errno ? errno : -1);
-  share->dirty= FALSE;
-
   DBUG_RETURN(0);
 }
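optimize() now closes the shared writer before the rewrite starts, so no buffered compressed data is lost to the copy. The overall shape is a rewrite-then-rename pass over the gzip data file; below is a standalone sketch of that pattern using plain zlib, with hypothetical file names and 4096 standing in for IO_SIZE (link with -lz).

// Rewrite-then-rename sketch over a gzip file (zlib only, illustrative).
#include <cstdio>
#include <zlib.h>

int rewrite_archive(const char *old_path, const char *new_path)
{
  gzFile reader= gzopen(old_path, "rb");
  if (reader == NULL) return -1;
  gzFile writer= gzopen(new_path, "wb");
  if (writer == NULL) { gzclose(reader); return -1; }

  char block[4096];                          // stands in for IO_SIZE
  int read;
  while ((read= gzread(reader, block, sizeof(block))) > 0)
    gzwrite(writer, block, (unsigned) read); // recompresses as it copies

  gzclose(reader);
  gzclose(writer);                           // flushes pending compressed data
  // Replace the old data file with the rewritten one (atomic on POSIX
  // when both paths are on the same filesystem).
  return std::rename(new_path, old_path);
}

int main()
{
  // usage: rewrite_archive("t1.ARZ", "t1.ARN");
  return 0;
}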
@@ -880,13 +880,36 @@ THR_LOCK_DATA **ha_archive::store_lock(THD *thd,
 void ha_archive::info(uint flag)
 {
   DBUG_ENTER("ha_archive::info");

   /*
     This should be an accurate number now, though bulk and delayed inserts can
     cause the number to be inaccurate.
   */
   records= share->rows_recorded;
   deleted= 0;
+  /* Costs quite a bit more to get all information */
+  if (flag & HA_STATUS_TIME)
+  {
+    uint32 alloced_length_mean= 0;
+    uint number_of_blobs= 0;
+    MY_STAT file_stat;  // Stat information for the data file
+
+    VOID(my_stat(share->data_file_name, &file_stat, MYF(MY_WME)));
+
+    for (Field_blob **field= table->blob_field; *field; field++)
+      number_of_blobs++;
+
+    if (number_of_blobs)
+      alloced_length_mean= buffer.alloced_length() / number_of_blobs;
+
+    mean_rec_length= table->reclength + alloced_length_mean;
+    data_file_length= file_stat.st_size;  // Its not worth calling stat to find out
+    create_time= file_stat.st_ctime;
+    update_time= file_stat.st_mtime;
+    max_data_file_length= share->rows_recorded * (table->reclength + alloced_length_mean);
+  }
+  delete_length= 0;
+  index_file_length= 0;
+
   DBUG_VOID_RETURN;
 }
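The new statistics are cheap arithmetic once the data file has been stat()ed: the blob buffer's allocated length is averaged over the blob columns and added to the fixed record length. A worked example with made-up numbers:

// Worked example of the record-length estimate in the hunk above.
#include <cstdio>
#include <cstdint>

int main()
{
  uint32_t reclength= 64;            // fixed part of the row (illustrative)
  uint32_t alloced_length= 3000;     // buffer.alloced_length() (illustrative)
  unsigned number_of_blobs= 2;

  uint32_t alloced_length_mean= 0;
  if (number_of_blobs)
    alloced_length_mean= alloced_length / number_of_blobs;        // 1500

  uint32_t mean_rec_length= reclength + alloced_length_mean;      // 1564
  uint64_t rows_recorded= 1000;
  // same formula the hunk uses for max_data_file_length
  uint64_t max_data_file_length=
      rows_recorded * (uint64_t)(reclength + alloced_length_mean);

  std::printf("mean_rec_length=%u max_data_file_length=%llu\n",
              mean_rec_length, (unsigned long long) max_data_file_length);
  return 0;
}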
@@ -900,7 +923,7 @@ void ha_archive::info(uint flag)
 */
 void ha_archive::start_bulk_insert(ha_rows rows)
 {
-  DBUG_ENTER("ha_archive::info");
+  DBUG_ENTER("ha_archive::start_bulk_insert");
   bulk_insert= TRUE;
   DBUG_VOID_RETURN;
 }
@@ -912,6 +935,7 @@ void ha_archive::start_bulk_insert(ha_rows rows)
 */
 int ha_archive::end_bulk_insert()
 {
+  DBUG_ENTER("ha_archive::end_bulk_insert");
   bulk_insert= FALSE;
   share->dirty= TRUE;
   DBUG_RETURN(0);
sql/examples/ha_archive.h
@@ -35,6 +35,7 @@ typedef struct st_archive_share {
   File meta_file;                   /* Meta file we use */
   gzFile archive_write;             /* Archive file we are working with */
   bool dirty;                       /* Flag for if a flush should occur */
+  bool crashed;                     /* Meta file is crashed */
   ulonglong rows_recorded;          /* Number of rows in tables */
 } ARCHIVE_SHARE;
@@ -91,13 +92,14 @@ class ha_archive: public handler
   int write_meta_file(File meta_file, ulonglong rows, bool dirty);
   ARCHIVE_SHARE *get_share(const char *table_name, TABLE *table);
   int free_share(ARCHIVE_SHARE *share);
-  int rebuild_meta_file(char *table_name, File meta_file);
+  bool auto_repair() const { return 1; }  // For the moment we just do this
   int read_data_header(gzFile file_to_read);
   int write_data_header(gzFile file_to_write);
   void position(const byte *record);
   void info(uint);
   int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info);
   int optimize(THD* thd, HA_CHECK_OPT* check_opt);
+  int repair(THD* thd, HA_CHECK_OPT* check_opt);
   void start_bulk_insert(ha_rows rows);
   int end_bulk_insert();
   THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,