Commit ced66cec authored by Zardosht Kasheff's avatar Zardosht Kasheff Committed by Yoni Fogel

addresses #1705

isolate blob unpacking into a function

git-svn-id: file:///svn/mysql/tokudb-engine/src@11421 c7de825b-a66e-492c-adef-691d508d4ae1
parent f4bcdbf4
...@@ -778,6 +778,8 @@ ha_tokudb::ha_tokudb(handlerton * hton, TABLE_SHARE * table_arg):handler(hton, t ...@@ -778,6 +778,8 @@ ha_tokudb::ha_tokudb(handlerton * hton, TABLE_SHARE * table_arg):handler(hton, t
num_added_rows_in_stmt = 0; num_added_rows_in_stmt = 0;
num_deleted_rows_in_stmt = 0; num_deleted_rows_in_stmt = 0;
num_updated_rows_in_stmt = 0; num_updated_rows_in_stmt = 0;
blob_buff = NULL;
num_blob_bytes = 0;
} }
// //
...@@ -1498,6 +1500,28 @@ cleanup: ...@@ -1498,6 +1500,28 @@ cleanup:
return r; return r;
} }
//
// Unpack the blob columns of a row from TokuDB's packed representation
// into MySQL's row format.
// Parameters:
//      [out]   record - MySQL row buffer that receives the blob fields
//      [in]    from_tokudb_blob - start of the packed blob data
//      [in]    num_blob_bytes - byte length of the packed blob data
//              (not read here; presumably kept for the interface — TODO confirm)
//
void ha_tokudb::unpack_blobs(
    uchar* record,
    const uchar* from_tokudb_blob,
    u_int32_t num_blob_bytes
    )
{
    //
    // walk each blob column and let MySQL's Field::unpack place its
    // data into the row buffer; unpack returns the position just past
    // the bytes it consumed
    //
    const uchar* curr_pos = from_tokudb_blob;
    uint curr_blob = 0;
    while (curr_blob < share->num_blobs) {
        Field* blob_field = table->field[share->blob_fields[curr_blob]];
        uchar* dest = record + field_offset(blob_field, table);
        curr_pos = blob_field->unpack(dest, curr_pos);
        curr_blob++;
    }
}
// //
// take the row passed in as a DBT*, and convert it into a row in MySQL format in record // take the row passed in as a DBT*, and convert it into a row in MySQL format in record
// Parameters: // Parameters:
...@@ -1584,14 +1608,12 @@ void ha_tokudb::unpack_row( ...@@ -1584,14 +1608,12 @@ void ha_tokudb::unpack_row(
last_offset = data_end_offset; last_offset = data_end_offset;
} }
} }
for (uint i = 0; i < share->num_blobs; i++) { unpack_blobs(
Field* field = table->field[share->blob_fields[i]]; record,
var_field_data_ptr = field->unpack( var_field_data_ptr,
record + field_offset(field, table), row->size - (u_int32_t)(var_field_data_ptr - (const uchar *)row->data)
var_field_data_ptr
); );
} }
}
// //
// in this case, we unpack only what is specified // in this case, we unpack only what is specified
// in fixed_cols_for_query and var_cols_for_query // in fixed_cols_for_query and var_cols_for_query
...@@ -1656,6 +1678,7 @@ void ha_tokudb::unpack_row( ...@@ -1656,6 +1678,7 @@ void ha_tokudb::unpack_row(
); );
} }
if (read_blobs) {
// //
// now the blobs // now the blobs
// //
...@@ -1680,11 +1703,11 @@ void ha_tokudb::unpack_row( ...@@ -1680,11 +1703,11 @@ void ha_tokudb::unpack_row(
} }
var_field_data_ptr += data_end_offset; var_field_data_ptr += data_end_offset;
} }
for (uint i = 0; i < share->num_blobs; i++) {
Field* field = table->field[share->blob_fields[i]]; unpack_blobs(
var_field_data_ptr = field->unpack( record,
record + field_offset(field, table), var_field_data_ptr,
var_field_data_ptr row->size - (u_int32_t)(var_field_data_ptr - (const uchar *)row->data)
); );
} }
} }
......
...@@ -142,6 +142,15 @@ private: ...@@ -142,6 +142,15 @@ private:
// //
uchar *primary_key_buff; uchar *primary_key_buff;
//
// when unpacking blobs, we need to store it in a temporary
// buffer that will persist because MySQL just gets a pointer to the
// blob data, a pointer we need to ensure is valid until the next
// query
//
uchar* blob_buff;
u_int32_t num_blob_bytes;
bool unpack_entire_row; bool unpack_entire_row;
// //
...@@ -394,6 +403,11 @@ public: ...@@ -394,6 +403,11 @@ public:
void read_key_only(uchar * buf, uint keynr, DBT const *row, DBT const *found_key); void read_key_only(uchar * buf, uint keynr, DBT const *row, DBT const *found_key);
void read_primary_key(uchar * buf, uint keynr, DBT const *row, DBT const *found_key); void read_primary_key(uchar * buf, uint keynr, DBT const *row, DBT const *found_key);
int read_row(uchar * buf, uint keynr, DBT const *row, DBT const *found_key); int read_row(uchar * buf, uint keynr, DBT const *row, DBT const *found_key);
void ha_tokudb::unpack_blobs(
uchar* record,
const uchar* from_tokudb_blob,
u_int32_t num_blob_bytes
);
void unpack_row( void unpack_row(
uchar* record, uchar* record,
DBT const *row, DBT const *row,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment