BUG#22240 Upgrading from cluster 5.0 to 5.1 does not resize VARCHARS as...

BUG#22240 Upgrading from cluster 5.0 to 5.1 does not resize VARCHARS as expected. With the --no-upgrade (-u) option, the user can choose to skip the upgrade; the default action is to upgrade them.
parent e1971cfa
...@@ -840,6 +840,21 @@ BackupRestore::table(const TableS & table){ ...@@ -840,6 +840,21 @@ BackupRestore::table(const TableS & table){
copy.setMaxRows(table.getNoOfRecords()); copy.setMaxRows(table.getNoOfRecords());
} }
NdbTableImpl &tableImpl = NdbTableImpl::getImpl(copy);
if (table.getBackupVersion() < MAKE_VERSION(5,1,0) && !m_no_upgrade){
for(int i= 0; i < copy.getNoOfColumns(); i++)
{
NdbDictionary::Column::Type t = copy.getColumn(i)->getType();
if (t == NdbDictionary::Column::Varchar ||
t == NdbDictionary::Column::Varbinary)
tableImpl.getColumn(i)->setArrayType(NdbDictionary::Column::ArrayTypeShortVar);
if (t == NdbDictionary::Column::Longvarchar ||
t == NdbDictionary::Column::Longvarbinary)
tableImpl.getColumn(i)->setArrayType(NdbDictionary::Column::ArrayTypeMediumVar);
}
}
if (dict->createTable(copy) == -1) if (dict->createTable(copy) == -1)
{ {
err << "Create table " << table.getTableName() << " failed: " err << "Create table " << table.getTableName() << " failed: "
...@@ -1080,8 +1095,22 @@ void BackupRestore::tuple_a(restore_callback_t *cb) ...@@ -1080,8 +1095,22 @@ void BackupRestore::tuple_a(restore_callback_t *cb)
int size = attr_desc->size; int size = attr_desc->size;
int arraySize = attr_desc->arraySize; int arraySize = attr_desc->arraySize;
char * dataPtr = attr_data->string_value; char * dataPtr = attr_data->string_value;
Uint32 length = attr_data->size; Uint32 length = 0;
const unsigned char * src = (const unsigned char *)dataPtr;
switch(attr_desc->m_column->getType()){
case NdbDictionary::Column::Varchar:
case NdbDictionary::Column::Varbinary:
length = src[0] + 1;
break;
case NdbDictionary::Column::Longvarchar:
case NdbDictionary::Column::Longvarbinary:
length = src[0] + (src[1] << 8) + 2;
break;
default:
length = attr_data->size;
break;
}
if (j == 0 && tup.getTable()->have_auto_inc(i)) if (j == 0 && tup.getTable()->have_auto_inc(i))
tup.getTable()->update_max_auto_val(dataPtr,size); tup.getTable()->update_max_auto_val(dataPtr,size);
...@@ -1101,7 +1130,7 @@ void BackupRestore::tuple_a(restore_callback_t *cb) ...@@ -1101,7 +1130,7 @@ void BackupRestore::tuple_a(restore_callback_t *cb)
if (ret < 0) { if (ret < 0) {
ndbout_c("Column: %d type %d %d %d %d",i, ndbout_c("Column: %d type %d %d %d %d",i,
attr_desc->m_column->getType(), attr_desc->m_column->getType(),
size, arraySize, attr_data->size); size, arraySize, length);
break; break;
} }
} }
......
...@@ -51,6 +51,7 @@ public: ...@@ -51,6 +51,7 @@ public:
m_callback = 0; m_callback = 0;
m_free_callback = 0; m_free_callback = 0;
m_temp_error = false; m_temp_error = false;
m_no_upgrade = false;
m_transactions = 0; m_transactions = 0;
m_cache.m_old_table = 0; m_cache.m_old_table = 0;
} }
...@@ -91,6 +92,7 @@ public: ...@@ -91,6 +92,7 @@ public:
bool m_restore_meta; bool m_restore_meta;
bool m_no_restore_disk; bool m_no_restore_disk;
bool m_restore_epoch; bool m_restore_epoch;
bool m_no_upgrade; // for upgrade ArrayType from 5.0 backup file.
Uint32 m_logCount; Uint32 m_logCount;
Uint32 m_dataCount; Uint32 m_dataCount;
......
...@@ -33,6 +33,7 @@ static int ga_nodeId = 0; ...@@ -33,6 +33,7 @@ static int ga_nodeId = 0;
static int ga_nParallelism = 128; static int ga_nParallelism = 128;
static int ga_backupId = 0; static int ga_backupId = 0;
static bool ga_dont_ignore_systab_0 = false; static bool ga_dont_ignore_systab_0 = false;
static bool ga_no_upgrade = false;
static Vector<class BackupConsumer *> g_consumers; static Vector<class BackupConsumer *> g_consumers;
static const char* ga_backupPath = "." DIR_SEPARATOR; static const char* ga_backupPath = "." DIR_SEPARATOR;
...@@ -82,6 +83,10 @@ static struct my_option my_long_options[] = ...@@ -82,6 +83,10 @@ static struct my_option my_long_options[] =
"Restore meta data into NDB Cluster using NDBAPI", "Restore meta data into NDB Cluster using NDBAPI",
(gptr*) &_restore_meta, (gptr*) &_restore_meta, 0, (gptr*) &_restore_meta, (gptr*) &_restore_meta, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "no-upgrade", 'u',
"Don't upgrade array type for var attributes, which don't resize VAR data and don't change column attributes",
(gptr*) &ga_no_upgrade, (gptr*) &ga_no_upgrade, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "no-restore-disk-objects", 'd', { "no-restore-disk-objects", 'd',
"Dont restore disk objects (tablespace/logfilegroups etc)", "Dont restore disk objects (tablespace/logfilegroups etc)",
(gptr*) &_no_restore_disk, (gptr*) &_no_restore_disk, 0, (gptr*) &_no_restore_disk, (gptr*) &_no_restore_disk, 0,
...@@ -388,6 +393,11 @@ o verify nodegroup mapping ...@@ -388,6 +393,11 @@ o verify nodegroup mapping
restore->m_no_restore_disk = true; restore->m_no_restore_disk = true;
} }
if (ga_no_upgrade)
{
restore->m_no_upgrade = true;
}
if (ga_restore_epoch) if (ga_restore_epoch)
{ {
restore->m_restore_epoch = true; restore->m_restore_epoch = true;
...@@ -463,6 +473,8 @@ main(int argc, char** argv) ...@@ -463,6 +473,8 @@ main(int argc, char** argv)
g_options.appfmt(" -n %d", ga_nodeId); g_options.appfmt(" -n %d", ga_nodeId);
if (_restore_meta) if (_restore_meta)
g_options.appfmt(" -m"); g_options.appfmt(" -m");
if (ga_no_upgrade)
g_options.appfmt(" -u");
if (ga_skip_table_check) if (ga_skip_table_check)
g_options.appfmt(" -s"); g_options.appfmt(" -s");
if (_restore_data) if (_restore_data)
...@@ -474,7 +486,6 @@ main(int argc, char** argv) ...@@ -474,7 +486,6 @@ main(int argc, char** argv)
g_options.appfmt(" -p %d", ga_nParallelism); g_options.appfmt(" -p %d", ga_nParallelism);
g_connect_string = opt_connect_str; g_connect_string = opt_connect_str;
/** /**
* we must always load meta data, even if we will only print it to stdout * we must always load meta data, even if we will only print it to stdout
*/ */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment