nexedi / MariaDB · Commits

Commit 59c30342, authored Apr 26, 2006 by unknown
Merge baker@bk-internal.mysql.com:/home/bk/mysql-5.0
into zim.(none):/home/brian/mysql/mysql-5.0

Parents: 51cb9016, 55b91f3a
Showing 2 changed files with 34 additions and 11 deletions:

  sql/ha_archive.cc   +32  -11
  sql/ha_archive.h     +2   -0
sql/ha_archive.cc
...
@@ -396,6 +396,7 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name,
     share->table_name_length= length;
     share->table_name= tmp_name;
     share->crashed= FALSE;
+    share->archive_write_open= FALSE;
     fn_format(share->data_file_name, table_name, "", ARZ, MY_REPLACE_EXT | MY_UNPACK_FILENAME);
     fn_format(meta_file_name, table_name, "", ARM, MY_REPLACE_EXT | MY_UNPACK_FILENAME);
     strmov(share->table_name, table_name);
...
@@ -413,16 +414,7 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name,
     */
     if (read_meta_file(share->meta_file, &share->rows_recorded))
       share->crashed= TRUE;
-    else
-      (void)write_meta_file(share->meta_file, share->rows_recorded, TRUE);
-    /*
-      It is expensive to open and close the data files and since you can't have
-      a gzip file that can be both read and written we keep a writer open
-      that is shared amoung all open tables.
-    */
-    if ((share->archive_write= gzopen(share->data_file_name, "ab")) == NULL)
-      share->crashed= TRUE;
 
     VOID(my_hash_insert(&archive_open_tables, (byte*) share));
     thr_lock_init(&share->lock);
   }
...
@@ -460,8 +452,9 @@ int ha_archive::free_share(ARCHIVE_SHARE *share)
       (void)write_meta_file(share->meta_file, share->rows_recorded, TRUE);
     else
       (void)write_meta_file(share->meta_file, share->rows_recorded, FALSE);
-    if (gzclose(share->archive_write) == Z_ERRNO)
-      rc= 1;
+    if (share->archive_write_open)
+      if (gzclose(share->archive_write) == Z_ERRNO)
+        rc= 1;
     if (my_close(share->meta_file, MYF(0)))
       rc= 1;
     my_free((gptr) share, MYF(0));
...
@@ -471,6 +464,26 @@ int ha_archive::free_share(ARCHIVE_SHARE *share)
   DBUG_RETURN(rc);
 }
 
+int ha_archive::init_archive_writer()
+{
+  DBUG_ENTER("ha_archive::init_archive_writer");
+  (void)write_meta_file(share->meta_file, share->rows_recorded, TRUE);
+
+  /*
+    It is expensive to open and close the data files and since you can't have
+    a gzip file that can be both read and written we keep a writer open
+    that is shared amoung all open tables.
+  */
+  if ((share->archive_write= gzopen(share->data_file_name, "ab")) == NULL)
+  {
+    share->crashed= TRUE;
+    DBUG_RETURN(1);
+  }
+  share->archive_write_open= TRUE;
+
+  DBUG_RETURN(0);
+}
+
 /*
   We just implement one additional file extension.
...
@@ -693,6 +706,10 @@ int ha_archive::write_row(byte *buf)
   if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
     table->timestamp_field->set_time();
   pthread_mutex_lock(&share->mutex);
+
+  if (!share->archive_write_open)
+    if (init_archive_writer())
+      DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
   share->rows_recorded++;
   rc= real_write_row(buf, share->archive_write);
   pthread_mutex_unlock(&share->mutex);
...
@@ -893,6 +910,10 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
   gzFile writer;
   char writer_filename[FN_REFLEN];
 
+  /* Open up the writer if we haven't yet */
+  if (!share->archive_write_open)
+    init_archive_writer();
+
   /* Flush any waiting data */
   gzflush(share->archive_write, Z_SYNC_FLUSH);
...
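
What the ha_archive.cc side of this merge does, in short: the gzip writer that used to be opened eagerly in get_share() is now opened lazily, on the first write, via the new init_archive_writer() and the archive_write_open flag. Below is a minimal standalone sketch of that pattern, assuming plain pthreads and zlib in place of MySQL's wrappers; the names archive_share_sketch, init_writer_sketch, and write_row_sketch are hypothetical illustrations, not part of this commit.

/* Illustrative sketch only, not MySQL source. */
#include <zlib.h>
#include <pthread.h>

struct archive_share_sketch
{
  pthread_mutex_t mutex;        /* serializes writers, like share->mutex */
  gzFile archive_write;         /* shared gzip append handle */
  bool archive_write_open;      /* has the writer been opened yet? */
  const char *data_file_name;   /* path to the data file */
};

/* Open the shared writer on first use; 0 on success, 1 on failure. */
static int init_writer_sketch(archive_share_sketch *share)
{
  if ((share->archive_write= gzopen(share->data_file_name, "ab")) == NULL)
    return 1;                   /* caller would mark the share as crashed */
  share->archive_write_open= true;
  return 0;
}

/* Write path: test the flag under the mutex, open lazily, then write. */
static int write_row_sketch(archive_share_sketch *share,
                            const void *buf, unsigned len)
{
  int rc= 0;
  pthread_mutex_lock(&share->mutex);
  if (!share->archive_write_open && init_writer_sketch(share))
    rc= 1;                      /* analogous to HA_ERR_CRASHED_ON_USAGE */
  else if (gzwrite(share->archive_write, buf, len) == 0)
    rc= 1;
  pthread_mutex_unlock(&share->mutex);
  return rc;
}

Note that in the write_row() hunk above the archive_write_open test happens after pthread_mutex_lock(&share->mutex), so two sessions racing on the first insert cannot both gzopen() the same file; the sketch keeps that ordering.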
sql/ha_archive.h
...
@@ -34,6 +34,7 @@ typedef struct st_archive_share {
   THR_LOCK lock;
   File meta_file;                   /* Meta file we use */
   gzFile archive_write;             /* Archive file we are working with */
+  bool archive_write_open;
   bool dirty;                       /* Flag for if a flush should occur */
   bool crashed;                     /* Meta file is crashed */
   ha_rows rows_recorded;            /* Number of rows in tables */
...
@@ -87,6 +88,7 @@ class ha_archive: public handler
   int write_meta_file(File meta_file, ha_rows rows, bool dirty);
   ARCHIVE_SHARE *get_share(const char *table_name, TABLE *table, int *rc);
   int free_share(ARCHIVE_SHARE *share);
+  int init_archive_writer();
   bool auto_repair() const { return 1; } // For the moment we just do this
   int read_data_header(gzFile file_to_read);
   int write_data_header(gzFile file_to_write);
...
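
On the ha_archive.h side, the share struct gains the archive_write_open flag and the handler gains the init_archive_writer() declaration used by the .cc hunks. The flip side of lazy opening is that teardown must not close a handle that was never opened, which is why free_share() now guards its gzclose() call. A matching sketch of that teardown, reusing the hypothetical archive_share_sketch type from the sketch above:

/* Illustrative sketch only, not MySQL source: close the writer only if it
   was actually opened. */
static int free_share_sketch(archive_share_sketch *share)
{
  int rc= 0;
  if (share->archive_write_open)
  {
    if (gzclose(share->archive_write) == Z_ERRNO)
      rc= 1;                    /* surface the close failure to the caller */
    share->archive_write_open= false;
  }
  return rc;
}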