Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
M
MariaDB
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Analytics
Analytics
CI / CD
Repository
Value Stream
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
nexedi
MariaDB
Commits
8ab2897e
Commit
8ab2897e
authored
Apr 29, 2006
by
brian@zim.(none)
Browse files
Options
Browse Files
Download
Plain Diff
Merge zim.(none):/home/brian/mysql/tmp_merge
into zim.(none):/home/brian/mysql/merge-5.1
parents
7c4ddc8b
11ec75e3
Changes
2
Hide whitespace changes
Inline
Side-by-side
Showing
2 changed files
with
41 additions
and
24 deletions
+41
-24
sql/ha_archive.cc
sql/ha_archive.cc
+39
-24
sql/ha_archive.h
sql/ha_archive.h
+2
-0
No files found.
sql/ha_archive.cc
View file @
8ab2897e
...
...
@@ -459,12 +459,11 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name,
share
->
table_name_length
=
length
;
share
->
table_name
=
tmp_name
;
share
->
crashed
=
FALSE
;
share
->
archive_write_open
=
FALSE
;
fn_format
(
share
->
data_file_name
,
table_name
,
""
,
ARZ
,
MY_REPLACE_EXT
|
MY_UNPACK_FILENAME
);
fn_format
(
meta_file_name
,
table_name
,
""
,
ARM
,
MY_REPLACE_EXT
|
MY_UNPACK_FILENAME
);
DBUG_PRINT
(
"info"
,
(
"archive opening (1) up write at %s"
,
share
->
data_file_name
));
strmov
(
share
->
table_name
,
table_name
);
/*
We will use this lock for rows.
...
...
@@ -476,38 +475,20 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name,
share
->
data_file_name
));
/*
After we read, we set the file to dirty. When we close, we will do the
opposite. If the meta file will not open we assume it is crashed and
leave it up to the user to fix.
We read the meta file, but do not mark it dirty unless we actually do
a write.
*/
if
(
read_meta_file
(
share
->
meta_file
,
&
share
->
rows_recorded
,
&
share
->
auto_increment_value
,
&
share
->
forced_flushes
,
share
->
real_path
))
share
->
crashed
=
TRUE
;
else
(
void
)
write_meta_file
(
share
->
meta_file
,
share
->
rows_recorded
,
share
->
auto_increment_value
,
share
->
forced_flushes
,
share
->
real_path
,
TRUE
);
/*
Since we now possibly have no real_path, we will use it instead if it exists.
*/
if
(
*
share
->
real_path
)
fn_format
(
share
->
data_file_name
,
share
->
real_path
,
""
,
ARZ
,
MY_REPLACE_EXT
|
MY_UNPACK_FILENAME
);
/*
It is expensive to open and close the data files and since you can't have
a gzip file that can be both read and written we keep a writer open
that is shared among all open tables.
*/
if
(
!
(
azopen
(
&
(
share
->
archive_write
),
share
->
data_file_name
,
O_WRONLY
|
O_APPEND
|
O_BINARY
)))
{
DBUG_PRINT
(
"info"
,
(
"Could not open archive write file"
));
share
->
crashed
=
TRUE
;
}
VOID
(
my_hash_insert
(
&
archive_open_tables
,
(
byte
*
)
share
));
thr_lock_init
(
&
share
->
lock
);
}
...
...
@@ -554,8 +535,9 @@ int ha_archive::free_share(ARCHIVE_SHARE *share)
share
->
forced_flushes
,
share
->
real_path
,
share
->
crashed
?
TRUE
:
FALSE
);
if
(
azclose
(
&
(
share
->
archive_write
)))
rc
=
1
;
if
(
share
->
archive_write_open
)
if
(
azclose
(
&
(
share
->
archive_write
)))
rc
=
1
;
if
(
my_close
(
share
->
meta_file
,
MYF
(
0
)))
rc
=
1
;
my_free
((
gptr
)
share
,
MYF
(
0
));
...
...
@@ -565,6 +547,32 @@ int ha_archive::free_share(ARCHIVE_SHARE *share)
DBUG_RETURN
(
rc
);
}
int ha_archive::init_archive_writer()
{
  DBUG_ENTER("ha_archive::init_archive_writer");

  /*
    Rewrite the meta file with the dirty flag set (last argument TRUE):
    a writer is about to start modifying the data file. The return value
    is deliberately ignored, as in the read path.
  */
  (void) write_meta_file(share->meta_file,
                         share->rows_recorded,
                         share->auto_increment_value,
                         share->forced_flushes,
                         share->real_path,
                         TRUE);

  /*
    Opening and closing the data file is expensive, and a gzip stream
    cannot be both read and written at once, so one append-mode writer
    is kept open and shared among all open tables.
  */
  if (azopen(&(share->archive_write), share->data_file_name,
             O_WRONLY | O_APPEND | O_BINARY) == 0)
  {
    /* Could not open the writer: flag the share as crashed and fail. */
    DBUG_PRINT("info", ("Could not open archive write file"));
    share->crashed= TRUE;
    DBUG_RETURN(1);
  }

  /* Remember that the shared writer is live so it gets closed later. */
  share->archive_write_open= TRUE;
  DBUG_RETURN(0);
}
/*
We just implement one additional file extension.
...
...
@@ -910,6 +918,9 @@ int ha_archive::write_row(byte *buf)
Notice that the global auto_increment has been increased.
In case of a failed row write, we will never try to reuse the value.
*/
if
(
!
share
->
archive_write_open
)
if
(
init_archive_writer
())
DBUG_RETURN
(
HA_ERR_CRASHED_ON_USAGE
);
share
->
rows_recorded
++
;
rc
=
real_write_row
(
buf
,
&
(
share
->
archive_write
));
...
...
@@ -1221,6 +1232,10 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
azio_stream
writer
;
char
writer_filename
[
FN_REFLEN
];
/* Open up the writer if we haven't yet */
if
(
!
share
->
archive_write_open
)
init_archive_writer
();
/* Flush any waiting data */
azflush
(
&
(
share
->
archive_write
),
Z_SYNC_FLUSH
);
share
->
forced_flushes
++
;
...
...
sql/ha_archive.h
View file @
8ab2897e
...
...
@@ -35,6 +35,7 @@ typedef struct st_archive_share {
THR_LOCK
lock
;
File
meta_file
;
/* Meta file we use */
azio_stream
archive_write
;
/* Archive file we are working with */
bool
archive_write_open
;
bool
dirty
;
/* Flag for if a flush should occur */
bool
crashed
;
/* Meta file is crashed */
ha_rows
rows_recorded
;
/* Number of rows in tables */
...
...
@@ -112,6 +113,7 @@ class ha_archive: public handler
bool
dirty
);
ARCHIVE_SHARE
*
get_share
(
const
char
*
table_name
,
TABLE
*
table
,
int
*
rc
);
int
free_share
(
ARCHIVE_SHARE
*
share
);
int
init_archive_writer
();
bool
auto_repair
()
const
{
return
1
;
}
// For the moment we just do this
int
read_data_header
(
azio_stream
*
file_to_read
);
int
write_data_header
(
azio_stream
*
file_to_write
);
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment