nexedi / MariaDB · Commits · 30c54408

Commit 30c54408 authored Nov 29, 2004 by unknown
Merge of parents f52e64cc and dc80fc2d
Showing 2 changed files with 26 additions and 10 deletions.
sql/examples/ha_archive.cc   +18 -4
sql/examples/ha_archive.h    +8  -6
sql/examples/ha_archive.cc
@@ -305,6 +305,7 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, TABLE *table)
   share->use_count= 0;
   share->table_name_length= length;
   share->table_name= tmp_name;
+  share->delayed= FALSE;
   fn_format(share->data_file_name,table_name,"",ARZ,MY_REPLACE_EXT|MY_UNPACK_FILENAME);
   fn_format(meta_file_name,table_name,"",ARM,MY_REPLACE_EXT|MY_UNPACK_FILENAME);
   strmov(share->table_name,table_name);
@@ -536,7 +537,11 @@ int ha_archive::write_row(byte * buf)
   pthread_mutex_lock(&share->mutex);
   written= gzwrite(share->archive_write, buf, table->reclength);
   DBUG_PRINT("ha_archive::get_row", ("Wrote %d bytes expected %d", written, table->reclength));
-  share->dirty= TRUE;
+  if (!delayed_insert)
+    share->dirty= TRUE;
+  else
+    share->delayed= TRUE;
+
   if (written != table->reclength)
     goto error;
   /*
@@ -594,6 +599,7 @@ int ha_archive::rnd_init(bool scan)
   {
     gzflush(share->archive_write, Z_SYNC_FLUSH);
     share->dirty= FALSE;
+    share->delayed= FALSE;
   }
   pthread_mutex_unlock(&share->mutex);
 }
@@ -628,9 +634,12 @@ int ha_archive::get_row(gzFile file_to_read, byte *buf)
   if (read == 0)
     DBUG_RETURN(HA_ERR_END_OF_FILE);

-  /* If the record is the wrong size, the file is probably damaged */
+  /* If the record is the wrong size, the file is probably damaged, unless
+     we are dealing with a delayed insert. In that case we can assume the file is ok,
+     but our row count doesn't match our data since the file has not been flushed.
+  */
   if ((ulong) read != table->reclength)
-    DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
+    DBUG_RETURN(share->delayed ? HA_ERR_END_OF_FILE : HA_ERR_CRASHED_ON_USAGE);

   /* Calculate blob length, we use this for our buffer */
   for (field= table->blob_field; *field; field++)
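Note on the hunk above: the rewritten comment explains why a short read from gzread() is no longer always treated as corruption. The sketch below is a minimal, self-contained model of that decision only; it is not the handler code itself, and the enum values and the decide_read_result() helper are illustrative stand-ins for the real handler error codes.

// Model of the short-read handling introduced above: a truncated record is
// end-of-file while a delayed insert is pending, and corruption otherwise.
#include <cstdio>

enum read_result { ROW_OK, ERR_END_OF_FILE, ERR_CRASHED_ON_USAGE };

// bytes_read: what gzread() returned; reclength: the fixed row size;
// delayed: the share's "a delayed insert has happened since open" flag.
read_result decide_read_result(long bytes_read, long reclength, bool delayed)
{
  if (bytes_read == 0)
    return ERR_END_OF_FILE;                 /* clean end of the archive */
  if (bytes_read != reclength)
    return delayed ? ERR_END_OF_FILE        /* unflushed delayed rows: stop the scan */
                   : ERR_CRASHED_ON_USAGE;  /* otherwise the file is damaged */
  return ROW_OK;
}

int main()
{
  /* A partial trailing record while delayed inserts are pending is benign. */
  std::printf("%d\n", decide_read_result(10, 24, true));   /* ERR_END_OF_FILE */
  /* The same partial record without delayed inserts signals corruption. */
  std::printf("%d\n", decide_read_result(10, 24, false));  /* ERR_CRASHED_ON_USAGE */
  return 0;
}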
@@ -648,7 +657,7 @@ int ha_archive::get_row(gzFile file_to_read, byte *buf)
   {
     read= gzread(file_to_read, last, size);
     if ((size_t) read != size)
-      DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
+      DBUG_RETURN(share->delayed ? HA_ERR_END_OF_FILE : HA_ERR_CRASHED_ON_USAGE);
     (*field)->set_ptr(size, last);
     last += size;
   }
@@ -839,6 +848,11 @@ THR_LOCK_DATA **ha_archive::store_lock(THD *thd,
                                        THR_LOCK_DATA **to,
                                        enum thr_lock_type lock_type)
 {
+  if (lock_type == TL_WRITE_DELAYED)
+    delayed_insert= TRUE;
+  else
+    delayed_insert= FALSE;
+
   if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK)
   {
     /*
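Taken together, the ha_archive.cc hunks wire a per-statement delayed_insert flag, set in store_lock() when the requested lock type is TL_WRITE_DELAYED, into the share-level dirty/delayed bookkeeping. Below is a compact model of that lifecycle, written under the assumption that rnd_init() only flushes when the share is dirty, as the hunk at line 594 suggests; the *_model types are stand-ins for the real handler and share structures, and gzflush() appears only in a comment.

// Sketch of the delayed-insert flag flow (a model, not the ARCHIVE handler):
//  - store_lock() records whether the statement is an INSERT DELAYED,
//  - write_row() marks the share "dirty" for normal inserts but only
//    "delayed" for delayed ones, so no flush is forced on the next scan,
//  - rnd_init() flushes the compressed file and clears both flags.
#include <cstdio>

struct archive_share_model {
  bool dirty;    /* a flush should occur before the next scan */
  bool delayed;  /* a delayed insert has happened since open */
};

struct archive_handler_model {
  archive_share_model *share;
  bool delayed_insert;  /* set per statement from the requested lock type */

  void store_lock(bool write_delayed)  /* TL_WRITE_DELAYED or not */
  { delayed_insert= write_delayed; }

  void write_row()                     /* the row is written either way */
  {
    if (!delayed_insert)
      share->dirty= true;              /* normal insert: flush before next scan */
    else
      share->delayed= true;            /* delayed insert: tolerate short reads */
  }

  void rnd_init()                      /* scan start: flush if dirty */
  {
    if (share->dirty)
    {
      /* gzflush(share->archive_write, Z_SYNC_FLUSH) in the real handler */
      share->dirty= false;
      share->delayed= false;
    }
  }
};

int main()
{
  archive_share_model share= {false, false};
  archive_handler_model h= {&share, false};

  h.store_lock(true);   /* INSERT DELAYED */
  h.write_row();
  std::printf("dirty=%d delayed=%d\n", share.dirty, share.delayed);  /* 0 1 */

  h.store_lock(false);  /* plain INSERT */
  h.write_row();
  h.rnd_init();         /* scan flushes and clears both flags */
  std::printf("dirty=%d delayed=%d\n", share.dirty, share.delayed);  /* 0 0 */
  return 0;
}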
sql/examples/ha_archive.h
@@ -32,10 +32,11 @@ typedef struct st_archive_share {
   uint table_name_length,use_count;
   pthread_mutex_t mutex;
   THR_LOCK lock;
-  File meta_file;              /* Meta file we use */
-  gzFile archive_write;        /* Archive file we are working with */
-  bool dirty;                  /* Flag for if a flush should occur */
-  ulonglong rows_recorded;     /* Number of rows in tables */
+  File meta_file;              /* Meta file we use */
+  gzFile archive_write;        /* Archive file we are working with */
+  bool dirty;                  /* Flag for if a flush should occur */
+  ulonglong rows_recorded;     /* Number of rows in tables */
+  bool delayed;                /* If a delayed insert has happened since opena */
 } ARCHIVE_SHARE;

 /*
@@ -53,9 +54,10 @@ class ha_archive: public handler
   byte byte_buffer[IO_SIZE];   /* Initial buffer for our string */
   String buffer;               /* Buffer used for blob storage */
   ulonglong scan_rows;         /* Number of rows left in scan */
+  bool delayed_insert;         /* If the insert is delayed */
 public:
-  ha_archive(TABLE *table): handler(table)
+  ha_archive(TABLE *table): handler(table), delayed_insert(0)
   {
     /* Set our original buffer from pre-allocated memory */
     buffer.set(byte_buffer, IO_SIZE, system_charset_info);
@@ -72,7 +74,7 @@ class ha_archive: public handler
   ulong table_flags() const
   {
     return (HA_REC_NOT_IN_SEQ | HA_NOT_EXACT_COUNT | HA_NO_AUTO_INCREMENT |
-            HA_FILE_BASED);
+            HA_FILE_BASED | HA_CAN_INSERT_DELAYED);
   }
   ulong index_flags(uint idx, uint part, bool all_parts) const
   {
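The table_flags() change advertises HA_CAN_INSERT_DELAYED, which is what allows the server to accept a statement such as INSERT DELAYED INTO t VALUES (...) for an ARCHIVE table. A tiny sketch of composing and testing such a capability bit follows; the numeric flag values are placeholders, not the real server constants.

// Model of advertising and checking the delayed-insert capability bit.
#include <cstdio>

enum : unsigned long {
  HA_REC_NOT_IN_SEQ     = 1UL << 0,  /* placeholder values, not the real ones */
  HA_NOT_EXACT_COUNT    = 1UL << 1,
  HA_NO_AUTO_INCREMENT  = 1UL << 2,
  HA_FILE_BASED         = 1UL << 3,
  HA_CAN_INSERT_DELAYED = 1UL << 4,
};

unsigned long table_flags()
{
  return (HA_REC_NOT_IN_SEQ | HA_NOT_EXACT_COUNT | HA_NO_AUTO_INCREMENT |
          HA_FILE_BASED | HA_CAN_INSERT_DELAYED);
}

int main()
{
  /* The server would test the capability bit before routing a statement
     through the delayed-insert path for this engine. */
  std::printf("delayed supported: %d\n",
              (table_flags() & HA_CAN_INSERT_DELAYED) != 0);
  return 0;
}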