Commit b819e8e0 authored by lars@mysql.com

Merge mysql.com:/users/lthalmann/bkroot/mysql-5.1-new

into  mysql.com:/users/lthalmann/bk/mysql-5.1-new
parents caf1fb4b 67835c13
@@ -9618,13 +9618,16 @@ static int ndbcluster_fill_files_table(THD *thd, TABLE_LIST *tables, COND *cond)
   NdbDictionary::Dictionary* dict= ndb->getDictionary();
   NdbDictionary::Dictionary::List dflist;
   NdbError ndberr;
+  unsigned i;
 
+  DBUG_ENTER("ndbcluster_fill_files_table");
+
   dict->listObjects(dflist, NdbDictionary::Object::Datafile);
   ndberr= dict->getNdbError();
-  if (ndberr.classification != ndberror_cl_none)
-    return 0;
+  if (ndberr.classification != NdbError::NoError)
+    ERR_RETURN(ndberr);
 
-  for (unsigned i= 0; i < dflist.count; i++)
+  for (i= 0; i < dflist.count; i++)
   {
     NdbDictionary::Dictionary::List::Element& elt = dflist.elements[i];
     Ndb_cluster_connection_node_iter iter;
@@ -9632,16 +9635,24 @@ static int ndbcluster_fill_files_table(THD *thd, TABLE_LIST *tables, COND *cond)
     g_ndb_cluster_connection->init_get_next_node(iter);
 
-    while (id= g_ndb_cluster_connection->get_next_node(iter))
+    while ((id= g_ndb_cluster_connection->get_next_node(iter)))
     {
       NdbDictionary::Datafile df= dict->getDatafile(id, elt.name);
       ndberr= dict->getNdbError();
-      if(ndberr.classification != ndberror_cl_none)
+      if(ndberr.classification != NdbError::NoError)
+      {
+        if (ndberr.classification == NdbError::SchemaError)
          continue;
+        ERR_RETURN(ndberr);
+      }
       NdbDictionary::Tablespace ts= dict->getTablespace(df.getTablespace());
       ndberr= dict->getNdbError();
-      if (ndberr.classification != ndberror_cl_none)
+      if (ndberr.classification != NdbError::NoError)
+      {
+        if (ndberr.classification == NdbError::SchemaError)
          continue;
+        ERR_RETURN(ndberr);
+      }
       int c= 0;
       table->field[c++]->set_null(); // FILE_ID
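
The hunk above changes the per-node datafile lookup so that a schema error (the file is simply not known on that node) is skipped, while any other error classification now aborts the INFORMATION_SCHEMA fill through ERR_RETURN instead of silently producing an empty result. A self-contained sketch of that skip-versus-fail pattern, using invented stand-in types and names rather than the real NdbDictionary API:

// Sketch only: models the control flow of the diff above, not NDB itself.
#include <cstdio>
#include <vector>

enum class ErrClass { NoError, SchemaError, OtherError };

struct Lookup { int node; ErrClass err; };   // result of one per-node lookup

// Returns 0 on success, -1 on a hard error; schema errors are skipped,
// which plays the role of `continue`, while any other error plays the
// role of ERR_RETURN(ndberr).
static int scan_nodes(const std::vector<Lookup>& lookups)
{
  for (const Lookup& l : lookups)
  {
    if (l.err != ErrClass::NoError)
    {
      if (l.err == ErrClass::SchemaError)
        continue;                            // object unknown on this node
      std::fprintf(stderr, "hard error on node %d\n", l.node);
      return -1;                             // abort the whole scan
    }
    std::printf("storing one FILES row for node %d\n", l.node);
  }
  return 0;
}

int main()
{
  // Node 2 reports a schema error and is skipped; the others are stored.
  return scan_nodes({{1, ErrClass::NoError},
                     {2, ErrClass::SchemaError},
                     {3, ErrClass::NoError}});
}
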
@@ -9706,10 +9717,10 @@ static int ndbcluster_fill_files_table(THD *thd, TABLE_LIST *tables, COND *cond)
   dict->listObjects(dflist, NdbDictionary::Object::Undofile);
   ndberr= dict->getNdbError();
-  if (ndberr.classification != ndberror_cl_none)
-    return 0;
+  if (ndberr.classification != NdbError::NoError)
+    ERR_RETURN(ndberr);
 
-  for (unsigned i= 0; i < dflist.count; i++)
+  for (i= 0; i < dflist.count; i++)
   {
     NdbDictionary::Dictionary::List::Element& elt= dflist.elements[i];
     Ndb_cluster_connection_node_iter iter;
@@ -9717,17 +9728,25 @@ static int ndbcluster_fill_files_table(THD *thd, TABLE_LIST *tables, COND *cond)
     g_ndb_cluster_connection->init_get_next_node(iter);
 
-    while (id= g_ndb_cluster_connection->get_next_node(iter))
+    while ((id= g_ndb_cluster_connection->get_next_node(iter)))
     {
       NdbDictionary::Undofile uf= dict->getUndofile(id, elt.name);
       ndberr= dict->getNdbError();
-      if (ndberr.classification != ndberror_cl_none)
+      if (ndberr.classification != NdbError::NoError)
+      {
+        if (ndberr.classification == NdbError::SchemaError)
          continue;
+        ERR_RETURN(ndberr);
+      }
       NdbDictionary::LogfileGroup lfg=
         dict->getLogfileGroup(uf.getLogfileGroup());
       ndberr= dict->getNdbError();
-      if (ndberr.classification != ndberror_cl_none)
+      if (ndberr.classification != NdbError::NoError)
+      {
+        if (ndberr.classification == NdbError::SchemaError)
          continue;
+        ERR_RETURN(ndberr);
+      }
       int c= 0;
       table->field[c++]->set_null(); // FILE_ID
@@ -9788,5 +9807,5 @@ static int ndbcluster_fill_files_table(THD *thd, TABLE_LIST *tables, COND *cond)
       schema_table_store_record(thd, table);
     }
   }
-  return 0;
+  DBUG_RETURN(0);
 }
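
The first hunk of this file adds DBUG_ENTER at the top of ndbcluster_fill_files_table, which is why the final return 0 becomes DBUG_RETURN(0): MySQL's dbug trace macros maintain a call-nesting stack, and every DBUG_ENTER must be matched by DBUG_RETURN (or DBUG_VOID_RETURN) on every exit path. A minimal illustration of the convention, with an invented function body and assuming the usual MySQL build environment for my_dbug.h:

#include "my_dbug.h"                  /* MySQL debug-trace macros */

static int fill_something(int n)
{
  DBUG_ENTER("fill_something");       /* push this frame onto the trace stack */
  if (n < 0)
    DBUG_RETURN(-1);                  /* every exit path must pop the frame;  */
  DBUG_RETURN(0);                     /* a bare `return` would unbalance it   */
}
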
@@ -1114,9 +1114,12 @@ Dbtup::prepare_initial_insert(KeyReqStruct *req_struct,
     ptr= ALIGN_WORD(dst->m_data_ptr+regTabPtr->m_offsets[MM].m_max_var_offset);
     order += regTabPtr->m_attributes[MM].m_no_of_fixsize;
     Uint32 pos= 0;
+    Uint16 *pos_ptr = req_struct->var_pos_array;
+    Uint16 *len_ptr = pos_ptr + cnt1;
     for(Uint32 i= 0; i<cnt1; i++)
     {
-      dst->m_offset_array_ptr[i]= pos;
+      * pos_ptr++ = pos;
+      * len_ptr++ = pos;
       pos += AttributeDescriptor::getSizeInBytes(tab_descr[*order++].tabDescr);
     }
   }
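
In the Dbtup hunk the loop no longer writes into dst->m_offset_array_ptr; instead req_struct->var_pos_array is treated as two back-to-back Uint16 arrays of cnt1 entries each, addressed through pos_ptr (first half) and len_ptr = pos_ptr + cnt1 (second half), and both slots for each variable-size attribute are primed with its running start offset (the second half is presumably overwritten with the real end position once the data is stored). A small self-contained sketch of that split-array layout; the element count, sizes and output are invented, only the indexing scheme mirrors the diff:

#include <cstdio>

typedef unsigned short Uint16;
typedef unsigned int   Uint32;

int main()
{
  const Uint32 cnt1 = 3;                     /* number of varsize attributes  */
  const Uint32 sizes[cnt1] = { 8, 12, 4 };   /* stand-in for tabDescr sizes   */
  Uint16 var_pos_array[2 * cnt1];            /* as assumed for KeyReqStruct   */

  Uint16 *pos_ptr = var_pos_array;           /* first half: start offsets     */
  Uint16 *len_ptr = pos_ptr + cnt1;          /* second half: end-offset slots */
  Uint32 pos = 0;
  for (Uint32 i = 0; i < cnt1; i++)
  {
    *pos_ptr++ = (Uint16)pos;                /* start of attribute i          */
    *len_ptr++ = (Uint16)pos;                /* primed; fixed up when stored  */
    pos += sizes[i];
  }

  for (Uint32 i = 0; i < cnt1; i++)
    std::printf("attr %u: start=%u end-slot=%u\n",
                (unsigned)i, (unsigned)var_pos_array[i],
                (unsigned)var_pos_array[cnt1 + i]);
  return 0;
}
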