Commit 38c8adaa authored by unknown

This updates archive to use the new header information in az files and makes way for combining the metadata file with the AZ file.


storage/archive/azio.c:
  This removes the default gzip header and now uses a custom az header.
storage/archive/azlib.h:
  Additions for the custom header and future metadata.
storage/archive/ha_archive.cc:
  Removed the data header in favor of the new header system.
storage/archive/ha_archive.h:
  Removes data_version
parent 020d52e6
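For orientation before the hunks: az_open() now writes a fixed 16-byte header (AZHEADER_SIZE) in place of the old 10-byte gzip header — two magic bytes (0xfe 0x03), a byte reserved for block size, a compression-type byte, and three 4-byte little-endian offsets (FRM block, meta block, start of the data block index). Below is a minimal parsing sketch of that layout; the struct and field names are illustrative, not part of the patch.

#include <stdint.h>

/* Hypothetical view of the 16-byte az header; only the byte layout
   is taken from the az_open() hunk below. */
typedef struct az_header_view {
  uint8_t  magic[2];     /* az_magic: 0xfe, 0x03 */
  uint8_t  block_size;   /* reserved for block size, written as 0 */
  uint8_t  compression;  /* compression type, written as 0 */
  uint32_t frm_block;    /* offset of the FRM block, written as 0 */
  uint32_t meta_block;   /* offset of the meta block, written as 0 */
  uint32_t data_start;   /* start of the data block index (AZHEADER_SIZE) */
} az_header_view;

/* int4store() writes the least-significant byte first, so read back little-endian. */
static uint32_t az_read_le32(const unsigned char *p)
{
  return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
         ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static az_header_view az_parse_header(const unsigned char buf[16])
{
  az_header_view h;
  h.magic[0]    = buf[0];
  h.magic[1]    = buf[1];
  h.block_size  = buf[2];
  h.compression = buf[3];
  h.frm_block   = az_read_le32(buf + 4);
  h.meta_block  = az_read_le32(buf + 8);
  h.data_start  = az_read_le32(buf + 12);
  return h;
}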
storage/archive/azio.c
@@ -18,6 +18,7 @@
#include <string.h>
static int const gz_magic[2] = {0x1f, 0x8b}; /* gzip magic header */
static int const az_magic[2] = {0xfe, 0x03}; /* az magic header */
/* gzip flag byte */
#define ASCII_FLAG 0x01 /* bit 0 set: file probably ascii text */
@@ -66,6 +67,7 @@ int az_open (azio_stream *s, const char *path, int Flags, File fd)
s->crc = crc32(0L, Z_NULL, 0);
s->transparent = 0;
s->mode = 'r';
s->version = (unsigned char)az_magic[1]; /* this needs to be a define to version */
if (Flags & O_WRONLY || Flags & O_APPEND)
s->mode = 'w';
@@ -112,20 +114,24 @@ int az_open (azio_stream *s, const char *path, int Flags, File fd)
return Z_NULL;
}
if (s->mode == 'w') {
-    char buffer[10];
-    /* Write a very simple .gz header:
-     */
-    buffer[0] = gz_magic[0];
-    buffer[1] = gz_magic[1];
-    buffer[2] = Z_DEFLATED;
-    buffer[3] = 0 /*flags*/;
-    buffer[4] = 0;
-    buffer[5] = 0;
-    buffer[6] = 0;
-    buffer[7] = 0 /*time*/;
-    buffer[8] = 0 /*xflags*/;
-    buffer[9] = 0x03;
-    s->start = 10L;
+    char buffer[AZHEADER_SIZE];
+    char *ptr;
+    /* Write a very simple .gz header: */
+    bzero(buffer, AZHEADER_SIZE);
+    buffer[0] = az_magic[0];
+    buffer[1] = az_magic[1];
+    buffer[2] = (unsigned char)0; /* Reserved for block size */
+    buffer[3] = (unsigned char)0; /* Compression Type */
+    ptr= buffer + 4;
+    int4store(ptr, 0LL); /* FRM Block */
+    ptr+= sizeof(unsigned long);
+    int4store(ptr, 0LL); /* Meta Block */
+    ptr+= sizeof(unsigned long);
+    int4store(ptr, (unsigned long)AZHEADER_SIZE); /* Start of Data Block Index Block */
+    ptr+= sizeof(unsigned long);
+    s->start = AZHEADER_SIZE;
+    s->version = (unsigned char)az_magic[1];
+    my_write(s->file, buffer, (uint)s->start, MYF(0));
/* We use 10L instead of ftell(s->file) to because ftell causes an
* fflush on some systems. This version of the library doesn't use
@@ -218,41 +224,53 @@ void check_header(azio_stream *s)
}
/* Peek ahead to check the gzip magic header */
-  if (s->stream.next_in[0] != gz_magic[0] ||
-      s->stream.next_in[1] != gz_magic[1]) {
-    s->transparent = 1;
-    return;
-  }
-  s->stream.avail_in -= 2;
-  s->stream.next_in += 2;
-  /* Check the rest of the gzip header */
-  method = get_byte(s);
-  flags = get_byte(s);
-  if (method != Z_DEFLATED || (flags & RESERVED) != 0) {
-    s->z_err = Z_DATA_ERROR;
-    return;
-  }
-  /* Discard time, xflags and OS code: */
-  for (len = 0; len < 6; len++) (void)get_byte(s);
-  if ((flags & EXTRA_FIELD) != 0) { /* skip the extra field */
-    len = (uInt)get_byte(s);
-    len += ((uInt)get_byte(s))<<8;
-    /* len is garbage if EOF but the loop below will quit anyway */
-    while (len-- != 0 && get_byte(s) != EOF) ;
-  }
-  if ((flags & ORIG_NAME) != 0) { /* skip the original file name */
-    while ((c = get_byte(s)) != 0 && c != EOF) ;
-  }
-  if ((flags & COMMENT) != 0) { /* skip the .gz file comment */
-    while ((c = get_byte(s)) != 0 && c != EOF) ;
-  }
-  if ((flags & HEAD_CRC) != 0) { /* skip the header crc */
-    for (len = 0; len < 2; len++) (void)get_byte(s);
-  }
-  s->z_err = s->z_eof ? Z_DATA_ERROR : Z_OK;
+  if ( s->stream.next_in[0] == gz_magic[0] && s->stream.next_in[1] == gz_magic[1])
+  {
+    s->stream.avail_in -= 2;
+    s->stream.next_in += 2;
+    s->version= (unsigned char)2;
+    /* Check the rest of the gzip header */
+    method = get_byte(s);
+    flags = get_byte(s);
+    if (method != Z_DEFLATED || (flags & RESERVED) != 0) {
+      s->z_err = Z_DATA_ERROR;
+      return;
+    }
+    /* Discard time, xflags and OS code: */
+    for (len = 0; len < 6; len++) (void)get_byte(s);
+    if ((flags & EXTRA_FIELD) != 0) { /* skip the extra field */
+      len = (uInt)get_byte(s);
+      len += ((uInt)get_byte(s))<<8;
+      /* len is garbage if EOF but the loop below will quit anyway */
+      while (len-- != 0 && get_byte(s) != EOF) ;
+    }
+    if ((flags & ORIG_NAME) != 0) { /* skip the original file name */
+      while ((c = get_byte(s)) != 0 && c != EOF) ;
+    }
+    if ((flags & COMMENT) != 0) { /* skip the .gz file comment */
+      while ((c = get_byte(s)) != 0 && c != EOF) ;
+    }
+    if ((flags & HEAD_CRC) != 0) { /* skip the header crc */
+      for (len = 0; len < 2; len++) (void)get_byte(s);
+    }
+    s->z_err = s->z_eof ? Z_DATA_ERROR : Z_OK;
+  }
+  else if ( s->stream.next_in[0] == az_magic[0] && s->stream.next_in[1] == az_magic[1])
+  {
+    s->stream.avail_in -= 2;
+    s->stream.next_in += 2;
+    for (len = 0; len < (AZHEADER_SIZE-2); len++) (void)get_byte(s);
+    s->z_err = s->z_eof ? Z_DATA_ERROR : Z_OK;
+  }
+  else
+  {
+    s->transparent = 1;
+    s->version = (unsigned char)0;
+    return;
+  }
}
/* ===========================================================================
@@ -668,10 +686,12 @@ int azclose (azio_stream *s)
if (s == NULL) return Z_STREAM_ERROR;
if (s->mode == 'w') {
#ifdef NO_GZCOMPRESS
return Z_STREAM_ERROR;
#else
if (do_flush (s, Z_FINISH) != Z_OK)
return destroy(s);
@@ -681,3 +701,42 @@ int azclose (azio_stream *s)
}
return destroy(s);
}
/*
This function reads the header of meta block and returns whether or not it was successful.
*rows will contain the current number of rows in the data file upon success.
*/
int az_read_meta_block(char *meta_start, unsigned long *rows,
unsigned long long *auto_increment,
unsigned long long *forced_flushes)
{
unsigned char *ptr= meta_start;
ulonglong check_point;
DBUG_ENTER("ha_archive::read_meta_file");
/*
Parse out the meta data, we ignore version at the moment
*/
*rows= (unsigned long long)uint8korr(ptr);
ptr+= sizeof(unsigned long long); // Move past rows
check_point= uint8korr(ptr);
ptr+= sizeof(unsigned long long); // Move past check_point
*auto_increment= uint8korr(ptr);
ptr+= sizeof(unsigned long long); // Move past auto_increment
*forced_flushes= uint8korr(ptr);
ptr+= sizeof(unsigned long long); // Move past forced_flush
DBUG_PRINT("ha_archive::read_meta_file", ("Rows %llu",
(long long unsigned)*rows));
DBUG_PRINT("ha_archive::read_meta_file", ("Checkpoint %llu",
(long long unsigned) check_point));
DBUG_PRINT("ha_archive::read_meta_file", ("Auto-Increment %llu",
(long long unsigned)*auto_increment));
DBUG_PRINT("ha_archive::read_meta_file", ("Forced Flushes %llu",
(long long unsigned)*forced_flushes));
DBUG_PRINT("ha_archive::read_meta_file", ("Dirty %d", (int)(*ptr)));
DBUG_RETURN(0);
}
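The meta block that az_read_meta_block() walks is four 64-bit counters (rows, check point, auto-increment, forced flushes) followed by a one-byte dirty flag — the layout AZMETA_BUFFER_SIZE in azlib.h describes. A standalone sketch of that walk, with a plain little-endian read standing in for uint8korr (the helper names are mine, not from the patch):

#include <stdint.h>

/* Little-endian 8-byte read, substituting for MySQL's uint8korr(). */
static uint64_t meta_read_le64(const unsigned char *p)
{
  uint64_t v = 0;
  for (int i = 7; i >= 0; i--)
    v = (v << 8) | p[i];
  return v;
}

/* Walk the meta block in the same order as az_read_meta_block() above. */
static void meta_block_walk(const unsigned char *meta,
                            uint64_t *rows, uint64_t *auto_increment,
                            uint64_t *forced_flushes, unsigned char *dirty)
{
  *rows           = meta_read_le64(meta);  meta += 8;
  /* check point is parsed but only logged by the function above */
  (void)meta_read_le64(meta);              meta += 8;
  *auto_increment = meta_read_le64(meta);  meta += 8;
  *forced_flushes = meta_read_le64(meta);  meta += 8;
  *dirty          = *meta;                 /* final byte: dirty flag */
}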
storage/archive/azlib.h
/*
This libary has been modified for use by the MySQL Archive Engine.
-Brian Aker
*/
/* zlib.h -- interface of the 'zlib' general purpose compression library
version 1.2.3, July 18th, 2005
@@ -38,6 +40,16 @@
#ifdef __cplusplus
extern "C" {
#endif
/* Start of MySQL Specific Information */
/*
ulonglong + ulonglong + ulonglong + ulonglong + uchar
*/
#define AZMETA_BUFFER_SIZE sizeof(ulonglong) \
+ sizeof(ulonglong) + sizeof(ulonglong) + sizeof(ulonglong) \
+ sizeof(uchar)
#define AZHEADER_SIZE 16
/*
The 'zlib' compression library provides in-memory compression and
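On a typical build where ulonglong is 8 bytes and uchar is 1, AZMETA_BUFFER_SIZE above works out to 33 bytes: four 64-bit values (rows, check point, auto-increment, forced flushes) plus the dirty byte. A quick standalone check, with the MySQL typedefs approximated by fixed-width types (an assumption for the sketch, not part of the patch):

#include <assert.h>
#include <stdint.h>

typedef uint64_t ulonglong;   /* stand-in for MySQL's ulonglong */
typedef unsigned char uchar;  /* stand-in for MySQL's uchar */

#define AZMETA_BUFFER_SIZE sizeof(ulonglong) \
                           + sizeof(ulonglong) + sizeof(ulonglong) + sizeof(ulonglong) \
                           + sizeof(uchar)

int main(void)
{
  assert(AZMETA_BUFFER_SIZE == 4 * 8 + 1);  /* 33 bytes */
  return 0;
}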
@@ -171,6 +183,7 @@ typedef struct azio_stream {
my_off_t out; /* bytes out of deflate or inflate */
int back; /* one character push-back */
int last; /* true if push-back is last character */
unsigned char version; /* Version */
} azio_stream;
/* basic functions */
storage/archive/ha_archive.cc
@@ -250,11 +250,17 @@ int ha_archive::read_data_header(azio_stream *file_to_read)
if (azrewind(file_to_read) == -1)
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
if (file_to_read->version >= 3)
DBUG_RETURN(0);
/* Everything below this is just legacy to version 2< */
DBUG_PRINT("ha_archive", ("Reading legacy data header"));
ret= azread(file_to_read, data_buffer, DATA_BUFFER_SIZE, &error);
if (ret != DATA_BUFFER_SIZE)
{
DBUG_PRINT("ha_archive", ("Reading, expected %lu got %lu",
DBUG_PRINT("ha_archive", ("Reading, expected %d got %lu",
DATA_BUFFER_SIZE, ret));
DBUG_RETURN(1);
}
@@ -268,9 +274,6 @@ int ha_archive::read_data_header(azio_stream *file_to_read)
DBUG_PRINT("ha_archive", ("Check %u", data_buffer[0]));
DBUG_PRINT("ha_archive", ("Version %u", data_buffer[1]));
-  share->data_version= (uchar)data_buffer[1];
-  DBUG_PRINT("ha_archive", ("Set Version %u", share->data_version));
if ((data_buffer[0] != (uchar)ARCHIVE_CHECK_HEADER) &&
(data_buffer[1] != (uchar)ARCHIVE_VERSION))
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
@@ -278,29 +281,6 @@ int ha_archive::read_data_header(azio_stream *file_to_read)
DBUG_RETURN(0);
}
-/*
-  This method writes out the header of a datafile and returns whether or not it was successful.
-*/
-int ha_archive::write_data_header(azio_stream *file_to_write)
-{
-  uchar data_buffer[DATA_BUFFER_SIZE];
-  DBUG_ENTER("ha_archive::write_data_header");
-  data_buffer[0]= (uchar)ARCHIVE_CHECK_HEADER;
-  data_buffer[1]= (uchar)ARCHIVE_VERSION;
-  if (azwrite(file_to_write, &data_buffer, DATA_BUFFER_SIZE) !=
-      DATA_BUFFER_SIZE)
-    goto error;
-  DBUG_PRINT("ha_archive", ("Check %u", (uint)data_buffer[0]));
-  DBUG_PRINT("ha_archive", ("Version %u", (uint)data_buffer[1]));
-  DBUG_RETURN(0);
-error:
-  DBUG_PRINT("ha_archive", ("Could not write full data header"));
-  DBUG_RETURN(errno);
-}
/*
This method reads the header of a meta file and returns whether or not it was successful.
*rows will contain the current number of rows in the data file upon success.
@@ -616,6 +596,9 @@ int ha_archive::open(const char *name, int mode, uint open_options)
DBUG_RETURN(rc);
}
DBUG_ASSERT(share);
record_buffer= create_record_buffer(table->s->reclength);
if (!record_buffer)
@@ -694,6 +677,7 @@ int ha_archive::create(const char *name, TABLE *table_arg,
File create_file; // We use to create the datafile and the metafile
char name_buff[FN_REFLEN];
int error;
azio_stream create_stream; /* Archive file we are working with */
DBUG_ENTER("ha_archive::create");
stats.auto_increment_value= (create_info->auto_increment_value ?
@@ -762,18 +746,13 @@ int ha_archive::create(const char *name, TABLE *table_arg,
goto error;
}
}
-  if (!azdopen(&archive, create_file, O_WRONLY|O_BINARY))
+  if (!azdopen(&create_stream, create_file, O_WRONLY|O_BINARY))
  {
    error= errno;
    goto error2;
  }
-  if (write_data_header(&archive))
-  {
-    error= errno;
-    goto error3;
-  }
-  if (azclose(&archive))
+  if (azclose(&create_stream))
{
error= errno;
goto error2;
@@ -781,9 +760,6 @@ int ha_archive::create(const char *name, TABLE *table_arg,
DBUG_RETURN(0);
-error3:
-  /* We already have an error, so ignore results of azclose. */
-  (void)azclose(&archive);
error2:
my_close(create_file, MYF(0));
delete_table(name);
@@ -1140,8 +1116,9 @@ int ha_archive::get_row(azio_stream *file_to_read, byte *buf)
int rc;
DBUG_ENTER("ha_archive::get_row");
DBUG_PRINT("ha_archive", ("Picking version for get_row() %d -> %d",
-                             share->data_version, ARCHIVE_VERSION));
-  if (share->data_version == ARCHIVE_VERSION)
+                             (uchar)file_to_read->version,
+                             ARCHIVE_VERSION));
+  if (file_to_read->version == ARCHIVE_VERSION)
rc= get_row_version3(file_to_read, buf);
else
rc= get_row_version2(file_to_read, buf);
@@ -1436,13 +1413,6 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
start of the file.
*/
rc= read_data_header(&archive);
-  /*
-    Assuming now error from rewinding the archive file, we now write out the
-    new header for out data file.
-  */
-  if (!rc)
-    rc= write_data_header(&writer);
/*
On success of writing out the new header, we now fetch each row and
storage/archive/ha_archive.h
@@ -50,7 +50,6 @@ typedef struct st_archive_share {
ulonglong mean_rec_length;
char real_path[FN_REFLEN];
uint meta_version;
-  uint data_version;
} ARCHIVE_SHARE;
/*
@@ -137,7 +136,6 @@ class ha_archive: public handler
int init_archive_writer();
bool auto_repair() const { return 1; } // For the moment we just do this
int read_data_header(azio_stream *file_to_read);
-  int write_data_header(azio_stream *file_to_write);
void position(const byte *record);
int info(uint);
void update_create_info(HA_CREATE_INFO *create_info);