Commit ced66cec authored by Zardosht Kasheff's avatar Zardosht Kasheff Committed by Yoni Fogel

addresses #1705

isolate blob unpacking into a function

git-svn-id: file:///svn/mysql/tokudb-engine/src@11421 c7de825b-a66e-492c-adef-691d508d4ae1
parent f4bcdbf4
...@@ -778,6 +778,8 @@ ha_tokudb::ha_tokudb(handlerton * hton, TABLE_SHARE * table_arg):handler(hton, t ...@@ -778,6 +778,8 @@ ha_tokudb::ha_tokudb(handlerton * hton, TABLE_SHARE * table_arg):handler(hton, t
num_added_rows_in_stmt = 0; num_added_rows_in_stmt = 0;
num_deleted_rows_in_stmt = 0; num_deleted_rows_in_stmt = 0;
num_updated_rows_in_stmt = 0; num_updated_rows_in_stmt = 0;
blob_buff = NULL;
num_blob_bytes = 0;
} }
// //
...@@ -1498,6 +1500,28 @@ cleanup: ...@@ -1498,6 +1500,28 @@ cleanup:
return r; return r;
} }
//
// Unpacks the blob fields of a row from TokuDB's packed format into the
// MySQL row buffer.
// Parameters:
//      [out]   record - the MySQL row buffer that blob fields are written to
//      [in]    from_tokudb_blob - start of the packed blob data in the
//                  TokuDB row
//              num_blob_bytes - total size in bytes of the packed blob data
//                  (currently unused here; retained for the caller's contract)
//
void ha_tokudb::unpack_blobs(
    uchar* record,
    const uchar* from_tokudb_blob,
    u_int32_t num_blob_bytes
    )
{
    //
    // walk the packed blob region, unpacking one blob field at a time;
    // Field::unpack returns the position just past the data it consumed
    //
    const uchar* curr_pos = from_tokudb_blob;
    uint curr_blob = 0;
    while (curr_blob < share->num_blobs) {
        Field* curr_field = table->field[share->blob_fields[curr_blob]];
        curr_pos = curr_field->unpack(
            record + field_offset(curr_field, table),
            curr_pos
            );
        curr_blob++;
    }
}
// //
// take the row passed in as a DBT*, and convert it into a row in MySQL format in record // take the row passed in as a DBT*, and convert it into a row in MySQL format in record
// Parameters: // Parameters:
...@@ -1582,15 +1606,13 @@ void ha_tokudb::unpack_row( ...@@ -1582,15 +1606,13 @@ void ha_tokudb::unpack_row(
var_field_offset_ptr += share->num_offset_bytes; var_field_offset_ptr += share->num_offset_bytes;
var_field_data_ptr += data_end_offset - last_offset; var_field_data_ptr += data_end_offset - last_offset;
last_offset = data_end_offset; last_offset = data_end_offset;
} }
}
for (uint i = 0; i < share->num_blobs; i++) {
Field* field = table->field[share->blob_fields[i]];
var_field_data_ptr = field->unpack(
record + field_offset(field, table),
var_field_data_ptr
);
} }
unpack_blobs(
record,
var_field_data_ptr,
row->size - (u_int32_t)(var_field_data_ptr - (const uchar *)row->data)
);
} }
// //
// in this case, we unpack only what is specified // in this case, we unpack only what is specified
...@@ -1656,35 +1678,36 @@ void ha_tokudb::unpack_row( ...@@ -1656,35 +1678,36 @@ void ha_tokudb::unpack_row(
); );
} }
// if (read_blobs) {
// now the blobs //
// // now the blobs
//
// //
// need to set var_field_data_ptr to point to beginning of blobs, which // need to set var_field_data_ptr to point to beginning of blobs, which
// is at the end of the var stuff (if they exist), if var stuff does not exist // is at the end of the var stuff (if they exist), if var stuff does not exist
// then the bottom variable will be 0, and var_field_data_ptr is already // then the bottom variable will be 0, and var_field_data_ptr is already
// set correctly // set correctly
// //
if (share->mcp_info[index].len_of_offsets) { if (share->mcp_info[index].len_of_offsets) {
switch (share->num_offset_bytes) { switch (share->num_offset_bytes) {
case (1): case (1):
data_end_offset = (var_field_data_ptr - 1)[0]; data_end_offset = (var_field_data_ptr - 1)[0];
break; break;
case (2): case (2):
data_end_offset = uint2korr(var_field_data_ptr - 2); data_end_offset = uint2korr(var_field_data_ptr - 2);
break; break;
default: default:
assert(false); assert(false);
break; break;
}
var_field_data_ptr += data_end_offset;
} }
var_field_data_ptr += data_end_offset;
} unpack_blobs(
for (uint i = 0; i < share->num_blobs; i++) { record,
Field* field = table->field[share->blob_fields[i]]; var_field_data_ptr,
var_field_data_ptr = field->unpack( row->size - (u_int32_t)(var_field_data_ptr - (const uchar *)row->data)
record + field_offset(field, table),
var_field_data_ptr
); );
} }
} }
......
...@@ -142,6 +142,15 @@ private: ...@@ -142,6 +142,15 @@ private:
// //
uchar *primary_key_buff; uchar *primary_key_buff;
//
// when unpacking blobs, we need to store it in a temporary
// buffer that will persist because MySQL just gets a pointer to the
// blob data, a pointer we need to ensure is valid until the next
// query
//
uchar* blob_buff;
u_int32_t num_blob_bytes;
bool unpack_entire_row; bool unpack_entire_row;
// //
...@@ -394,6 +403,11 @@ public: ...@@ -394,6 +403,11 @@ public:
void read_key_only(uchar * buf, uint keynr, DBT const *row, DBT const *found_key); void read_key_only(uchar * buf, uint keynr, DBT const *row, DBT const *found_key);
void read_primary_key(uchar * buf, uint keynr, DBT const *row, DBT const *found_key); void read_primary_key(uchar * buf, uint keynr, DBT const *row, DBT const *found_key);
int read_row(uchar * buf, uint keynr, DBT const *row, DBT const *found_key); int read_row(uchar * buf, uint keynr, DBT const *row, DBT const *found_key);
// Unpacks blob fields from the packed TokuDB row into the MySQL record.
// NOTE: a member declaration inside the class body must not carry the
// ha_tokudb:: qualification ("extra qualification" — ill-formed C++,
// rejected by GCC; only the out-of-class definition is qualified).
void unpack_blobs(
    uchar* record,
    const uchar* from_tokudb_blob,
    u_int32_t num_blob_bytes
    );
void unpack_row( void unpack_row(
uchar* record, uchar* record,
DBT const *row, DBT const *row,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment