Commit ad92514d in nexedi/MariaDB
Authored Apr 10, 2005 by pekka@mysql.com

    ndb - wl-2451: Increase max schema object to > 1600

parent 4c1e9238

Showing 10 changed files with 605 additions and 247 deletions (+605 -247)
mysql-test/Makefile.am                               +3    -0
ndb/include/debugger/SignalLoggerManager.hpp         +1    -1
ndb/include/kernel/ndb_limits.h                      +1    -1
ndb/src/common/debugger/SignalLoggerManager.cpp      +7    -2
ndb/src/kernel/blocks/dbdict/Dbdict.cpp              +313  -115
ndb/src/kernel/blocks/dbdict/Dbdict.hpp              +43   -12
ndb/src/kernel/blocks/dbdict/SchemaFile.hpp          +37   -5
ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp     +147  -56
ndb/src/ndbapi/NdbDictionaryImpl.cpp                 +2    -0
ndb/test/ndbapi/testDict.cpp                         +51   -55
mysql-test/Makefile.am

@@ -59,6 +59,9 @@ dist-hook:
 	$(INSTALL_DATA) $(srcdir)/std_data/*.frm $(distdir)/std_data
 
+install-data-local:
+	true
+
 x-install-data-local:
 	$(mkinstalldirs) \
 		$(DESTDIR)$(testdir)/t \
 		$(DESTDIR)$(testdir)/r \
ndb/include/debugger/SignalLoggerManager.hpp

@@ -87,7 +87,7 @@ public:
   /**
    * Generic messages in the signal log
    */
-  void log(BlockNumber bno, const char * msg);
+  void log(BlockNumber bno, const char * msg, ...);
 
   /**
    * LogModes
ndb/include/kernel/ndb_limits.h

@@ -50,7 +50,7 @@
  **/
 #define MAX_TUPLES_PER_PAGE 8191
 #define MAX_TUPLES_BITS 13		/* 13 bits = 8191 tuples per page */
-#define MAX_TABLES 1600
+#define MAX_TABLES 20320                /* SchemaFile.hpp */
 #define MAX_TAB_NAME_SIZE 128
 #define MAX_ATTR_NAME_SIZE 32
 #define MAX_ATTR_DEFAULT_VALUE_SIZE 128
ndb/src/common/debugger/SignalLoggerManager.cpp

@@ -383,7 +383,7 @@ SignalLoggerManager::sendSignalWithDelay(Uint32 delayInMilliSeconds,
  * Generic messages in the signal log
  */
 void
-SignalLoggerManager::log(BlockNumber bno, const char * msg)
+SignalLoggerManager::log(BlockNumber bno, const char * msg, ...)
 {
   // Normalise blocknumber for use in logModes array
   const BlockNumber bno2 = bno - MIN_BLOCK_NO;
@@ -391,7 +391,12 @@ SignalLoggerManager::log(BlockNumber bno, const char * msg)
   if(outputStream != 0 && logModes[bno2] != LogOff){
-    fprintf(outputStream, "%s: %s\n", getBlockName(bno, "API"), msg);
+    va_list ap;
+    va_start(ap, msg);
+    fprintf(outputStream, "%s: ", getBlockName(bno, "API"));
+    vfprintf(outputStream, msg, ap);
+    fprintf(outputStream, "\n", msg);
+    va_end(ap);
   }
 }
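The patch above turns log() into a printf-style variadic function: the block-name prefix is printed first, then the caller's format string is expanded with vfprintf. Below is a minimal standalone sketch of the same pattern, assuming nothing about the rest of the NDB logging API (block_log and its arguments are invented for illustration):

    #include <cstdarg>
    #include <cstdio>

    // Hypothetical helper with the same shape as the patched
    // SignalLoggerManager::log(): fixed prefix, then the caller's format.
    static void block_log(FILE * out, const char * prefix, const char * fmt, ...)
    {
      va_list ap;
      va_start(ap, fmt);
      fprintf(out, "%s: ", prefix);
      vfprintf(out, fmt, ap);
      fprintf(out, "\n");
      va_end(ap);
    }

    int main()
    {
      block_log(stdout, "DBDICT", "schema file has %u pages", 160u);
      return 0;
    }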
ndb/src/kernel/blocks/dbdict/Dbdict.cpp

@@ -634,7 +634,7 @@ void Dbdict::writeTableFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr)
   FsReadWriteReq::setSyncFlag(fsRWReq->operationFlag, 1);
   FsReadWriteReq::setFormatFlag(fsRWReq->operationFlag,
                                 FsReadWriteReq::fsFormatArrayOfPages);
-  fsRWReq->varIndex = ZALLOCATE;
+  fsRWReq->varIndex = ZBAT_TABLE_FILE;
   fsRWReq->numberOfPages = c_writeTableRecord.noOfPages;
   fsRWReq->data.arrayOfPages.varIndex = c_writeTableRecord.pageId;
   fsRWReq->data.arrayOfPages.fileOffset = 0; // Write to file page 0
@@ -711,7 +711,7 @@ void Dbdict::readTableFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr)
   FsReadWriteReq::setSyncFlag(fsRWReq->operationFlag, 0);
   FsReadWriteReq::setFormatFlag(fsRWReq->operationFlag,
                                 FsReadWriteReq::fsFormatArrayOfPages);
-  fsRWReq->varIndex = ZALLOCATE;
+  fsRWReq->varIndex = ZBAT_TABLE_FILE;
   fsRWReq->numberOfPages = c_readTableRecord.noOfPages;
   fsRWReq->data.arrayOfPages.varIndex = c_readTableRecord.pageId;
   fsRWReq->data.arrayOfPages.fileOffset = 0; // Write to file page 0
@@ -777,11 +777,9 @@ Dbdict::updateSchemaState(Signal* signal, Uint32 tableId,
                           SchemaFile::TableEntry* te, Callback* callback){
   jam();
-  PageRecordPtr pagePtr;
-  c_pageRecordArray.getPtr(pagePtr, c_schemaRecord.schemaPage);
 
   ndbrequire(tableId < c_tableRecordPool.getSize());
-  SchemaFile::TableEntry * tableEntry = getTableEntry(pagePtr.p, tableId);
+  XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
+  SchemaFile::TableEntry * tableEntry = getTableEntry(xsf, tableId);
 
   SchemaFile::TableState newState = (SchemaFile::TableState)te->m_tableState;
@@ -828,12 +826,15 @@ Dbdict::updateSchemaState(Signal* signal, Uint32 tableId,
   ndbrequire(ok);
 
   * tableEntry = * te;
-  computeChecksum((SchemaFile*)pagePtr.p);
+  computeChecksum(xsf, tableId / NDB_SF_PAGE_ENTRIES);
 
   ndbrequire(c_writeSchemaRecord.inUse == false);
   c_writeSchemaRecord.inUse = true;
 
   c_writeSchemaRecord.pageId = c_schemaRecord.schemaPage;
+  c_writeSchemaRecord.newFile = false;
+  c_writeSchemaRecord.firstPage = tableId / NDB_SF_PAGE_ENTRIES;
   c_writeSchemaRecord.noOfPages = 1;
   c_writeSchemaRecord.m_callback = * callback;
 
   startWriteSchemaFile(signal);
@@ -844,14 +845,15 @@ void Dbdict::startWriteSchemaFile(Signal* signal)
   FsConnectRecordPtr fsPtr;
   c_fsConnectRecordPool.getPtr(fsPtr, getFsConnRecord());
   fsPtr.p->fsState = FsConnectRecord::OPEN_WRITE_SCHEMA;
-  openSchemaFile(signal, 0, fsPtr.i, true);
+  openSchemaFile(signal, 0, fsPtr.i, true, c_writeSchemaRecord.newFile);
   c_writeSchemaRecord.noOfSchemaFilesHandled = 0;
 }//Dbdict::startWriteSchemaFile()
 
 void Dbdict::openSchemaFile(Signal* signal,
                             Uint32 fileNo,
                             Uint32 fsConPtr,
-                            bool writeFlag)
+                            bool writeFlag,
+                            bool newFile)
 {
   FsOpenReq * const fsOpenReq = (FsOpenReq *)&signal->theData[0];
 
   fsOpenReq->userReference = reference();
@@ -860,9 +862,11 @@ void Dbdict::openSchemaFile(Signal* signal,
     jam();
     fsOpenReq->fileFlags =
       FsOpenReq::OM_WRITEONLY |
-      FsOpenReq::OM_TRUNCATE |
-      FsOpenReq::OM_CREATE |
       FsOpenReq::OM_SYNC;
+    if (newFile)
+      fsOpenReq->fileFlags |= FsOpenReq::OM_TRUNCATE | FsOpenReq::OM_CREATE;
   } else {
     jam();
     fsOpenReq->fileFlags = FsOpenReq::OM_READONLY;
@@ -887,6 +891,12 @@ void Dbdict::writeSchemaFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr)
 {
   FsReadWriteReq * const fsRWReq = (FsReadWriteReq *)&signal->theData[0];
 
+  // check write record
+  WriteSchemaRecord & wr = c_writeSchemaRecord;
+  ndbrequire(wr.pageId == (wr.pageId != 0) * NDB_SF_MAX_PAGES);
+  ndbrequire(wr.noOfPages != 0);
+  ndbrequire(wr.firstPage + wr.noOfPages <= NDB_SF_MAX_PAGES);
+
   fsRWReq->filePointer = filePtr;
   fsRWReq->userReference = reference();
   fsRWReq->userPointer = fsConPtr;
@@ -894,11 +904,11 @@ void Dbdict::writeSchemaFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr)
   FsReadWriteReq::setSyncFlag(fsRWReq->operationFlag, 1);
   FsReadWriteReq::setFormatFlag(fsRWReq->operationFlag,
                                 FsReadWriteReq::fsFormatArrayOfPages);
-  fsRWReq->varIndex = ZALLOCATE;
-  fsRWReq->numberOfPages = 1;
-  // Write from memory page
-  fsRWReq->data.arrayOfPages.varIndex = c_writeSchemaRecord.pageId;
-  fsRWReq->data.arrayOfPages.fileOffset = 0; // Write to file page 0
+  fsRWReq->varIndex = ZBAT_SCHEMA_FILE;
+  fsRWReq->numberOfPages = wr.noOfPages;
+  // Write from memory page
+  fsRWReq->data.arrayOfPages.varIndex = wr.pageId + wr.firstPage;
+  fsRWReq->data.arrayOfPages.fileOffset = wr.firstPage;
 
   sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 8, JBA);
 }//writeSchemaFile()
@@ -928,7 +938,7 @@ void Dbdict::closeWriteSchemaConf(Signal* signal,
   if (c_writeSchemaRecord.noOfSchemaFilesHandled < 2) {
     jam();
     fsPtr.p->fsState = FsConnectRecord::OPEN_WRITE_SCHEMA;
-    openSchemaFile(signal, 1, fsPtr.i, true);
+    openSchemaFile(signal, 1, fsPtr.i, true, c_writeSchemaRecord.newFile);
     return;
   }
   ndbrequire(c_writeSchemaRecord.noOfSchemaFilesHandled == 2);
@@ -946,20 +956,26 @@ void Dbdict::startReadSchemaFile(Signal* signal)
   FsConnectRecordPtr fsPtr;
   c_fsConnectRecordPool.getPtr(fsPtr, getFsConnRecord());
   fsPtr.p->fsState = FsConnectRecord::OPEN_READ_SCHEMA1;
-  openSchemaFile(signal, 0, fsPtr.i, false);
+  openSchemaFile(signal, 0, fsPtr.i, false, false);
 }//Dbdict::startReadSchemaFile()
 
 void Dbdict::openReadSchemaRef(Signal* signal,
                                FsConnectRecordPtr fsPtr)
 {
   fsPtr.p->fsState = FsConnectRecord::OPEN_READ_SCHEMA2;
-  openSchemaFile(signal, 1, fsPtr.i, false);
+  openSchemaFile(signal, 1, fsPtr.i, false, false);
 }//Dbdict::openReadSchemaRef()
 
 void Dbdict::readSchemaFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr)
 {
   FsReadWriteReq * const fsRWReq = (FsReadWriteReq *)&signal->theData[0];
 
+  // check read record
+  ReadSchemaRecord & rr = c_readSchemaRecord;
+  ndbrequire(rr.pageId == (rr.pageId != 0) * NDB_SF_MAX_PAGES);
+  ndbrequire(rr.noOfPages != 0);
+  ndbrequire(rr.firstPage + rr.noOfPages <= NDB_SF_MAX_PAGES);
+
   fsRWReq->filePointer = filePtr;
   fsRWReq->userReference = reference();
   fsRWReq->userPointer = fsConPtr;
@@ -967,10 +983,10 @@ void Dbdict::readSchemaFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr)
   FsReadWriteReq::setSyncFlag(fsRWReq->operationFlag, 0);
   FsReadWriteReq::setFormatFlag(fsRWReq->operationFlag,
                                 FsReadWriteReq::fsFormatArrayOfPages);
-  fsRWReq->varIndex = ZALLOCATE;
-  fsRWReq->numberOfPages = 1;
-  fsRWReq->data.arrayOfPages.varIndex = c_readSchemaRecord.pageId;
-  fsRWReq->data.arrayOfPages.fileOffset = 0;
+  fsRWReq->varIndex = ZBAT_SCHEMA_FILE;
+  fsRWReq->numberOfPages = rr.noOfPages;
+  fsRWReq->data.arrayOfPages.varIndex = rr.pageId + rr.firstPage;
+  fsRWReq->data.arrayOfPages.fileOffset = rr.firstPage;
 
   sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 8, JBA);
 }//readSchemaFile()
@@ -988,20 +1004,61 @@ void Dbdict::readSchemaConf(Signal* signal,
     jam();
     crashInd = true;
   }//if
 
-  PageRecordPtr tmpPagePtr;
-  c_pageRecordArray.getPtr(tmpPagePtr, c_readSchemaRecord.pageId);
-  Uint32 sz = ZSIZE_OF_PAGES_IN_WORDS;
-  Uint32 chk = computeChecksum((const Uint32*)tmpPagePtr.p, sz);
-  ndbrequire((chk == 0) || !crashInd);
-  if (chk != 0){
+  ReadSchemaRecord & rr = c_readSchemaRecord;
+  XSchemaFile * xsf = &c_schemaFile[rr.pageId != 0];
+
+  if (rr.schemaReadState == ReadSchemaRecord::INITIAL_READ_HEAD) {
     jam();
+    ndbrequire(rr.firstPage == 0);
+    SchemaFile * sf = &xsf->schemaPage[0];
+    Uint32 noOfPages;
+    if (sf->NdbVersion < NDB_SF_VERSION_5_0_5) {
+      jam();
+      const Uint32 pageSize_old = 32 * 1024;
+      noOfPages = pageSize_old / NDB_SF_PAGE_SIZE - 1;
+    } else {
+      noOfPages = sf->FileSize / NDB_SF_PAGE_SIZE - 1;
+    }
+    rr.schemaReadState = ReadSchemaRecord::INITIAL_READ;
+    if (noOfPages != 0) {
+      rr.firstPage = 1;
+      rr.noOfPages = noOfPages;
+      readSchemaFile(signal, fsPtr.p->filePtr, fsPtr.i);
+      return;
+    }
+  }
+
+  SchemaFile * sf0 = &xsf->schemaPage[0];
+  xsf->noOfPages = sf0->FileSize / NDB_SF_PAGE_SIZE;
+
+  if (sf0->NdbVersion < NDB_SF_VERSION_5_0_5 &&
+      ! convertSchemaFileTo_5_0_5(xsf)) {
+    jam();
+    ndbrequire(! crashInd);
     ndbrequire(fsPtr.p->fsState == FsConnectRecord::READ_SCHEMA1);
     readSchemaRef(signal, fsPtr);
     return;
   }//if
 
+  for (Uint32 n = 0; n < xsf->noOfPages; n++) {
+    SchemaFile * sf = &xsf->schemaPage[n];
+    bool ok =
+      memcmp(sf->Magic, NDB_SF_MAGIC, sizeof(sf->Magic)) == 0 &&
+      sf->FileSize != 0 &&
+      sf->FileSize % NDB_SF_PAGE_SIZE == 0 &&
+      sf->FileSize == sf0->FileSize &&
+      sf->PageNumber == n &&
+      computeChecksum((Uint32*)sf, NDB_SF_PAGE_SIZE_IN_WORDS) == 0;
+    ndbrequire(ok || !crashInd);
+    if (! ok) {
+      jam();
+      ndbrequire(fsPtr.p->fsState == FsConnectRecord::READ_SCHEMA1);
+      readSchemaRef(signal, fsPtr);
+      return;
+    }
+  }
+
   fsPtr.p->fsState = FsConnectRecord::CLOSE_READ_SCHEMA;
   closeFile(signal, fsPtr.p->filePtr, fsPtr.i);
   return;
@@ -1011,7 +1068,7 @@ void Dbdict::readSchemaRef(Signal* signal,
                            FsConnectRecordPtr fsPtr)
 {
   fsPtr.p->fsState = FsConnectRecord::OPEN_READ_SCHEMA2;
-  openSchemaFile(signal, 1, fsPtr.i, false);
+  openSchemaFile(signal, 1, fsPtr.i, false, false);
   return;
 }//Dbdict::readSchemaRef()
@@ -1025,7 +1082,27 @@ void Dbdict::closeReadSchemaConf(Signal* signal,
   switch(state) {
   case ReadSchemaRecord::INITIAL_READ :
     jam();
-    sendNDB_STTORRY(signal);
+    {
+      // write back both copies
+
+      ndbrequire(c_writeSchemaRecord.inUse == false);
+      XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.oldSchemaPage != 0 ];
+      Uint32 noOfPages =
+        (c_tableRecordPool.getSize() + NDB_SF_PAGE_ENTRIES - 1) /
+        NDB_SF_PAGE_ENTRIES;
+      resizeSchemaFile(xsf, noOfPages);
+
+      c_writeSchemaRecord.inUse = true;
+      c_writeSchemaRecord.pageId = c_schemaRecord.oldSchemaPage;
+      c_writeSchemaRecord.newFile = true;
+      c_writeSchemaRecord.firstPage = 0;
+      c_writeSchemaRecord.noOfPages = xsf->noOfPages;
+
+      c_writeSchemaRecord.m_callback.m_callbackFunction =
+        safe_cast(&Dbdict::initSchemaFile_conf);
+
+      startWriteSchemaFile(signal);
+    }
     break;
 
   default :
@@ -1035,6 +1112,51 @@ void Dbdict::closeReadSchemaConf(Signal* signal,
   }//switch
 }//Dbdict::closeReadSchemaConf()
 
+bool
+Dbdict::convertSchemaFileTo_5_0_5(XSchemaFile * xsf)
+{
+  const Uint32 pageSize_old = 32 * 1024;
+  Uint32 page_old[pageSize_old >> 2];
+  SchemaFile * sf_old = (SchemaFile *)page_old;
+
+  if (xsf->noOfPages * NDB_SF_PAGE_SIZE != pageSize_old)
+    return false;
+  SchemaFile * sf0 = &xsf->schemaPage[0];
+  memcpy(sf_old, sf0, pageSize_old);
+
+  // init max number new pages needed
+  xsf->noOfPages =
+    (sf_old->NoOfTableEntries + NDB_SF_PAGE_ENTRIES - 1) / NDB_SF_PAGE_ENTRIES;
+  initSchemaFile(xsf, 0, xsf->noOfPages, true);
+
+  Uint32 noOfPages = 1;
+  Uint32 n, i, j;
+  for (n = 0; n < xsf->noOfPages; n++) {
+    for (i = 0; i < NDB_SF_PAGE_ENTRIES; i++) {
+      j = n * NDB_SF_PAGE_ENTRIES + i;
+      if (j >= sf_old->NoOfTableEntries)
+        continue;
+      const SchemaFile::TableEntry_old & te_old = sf_old->TableEntries_old[j];
+      if (te_old.m_tableState == SchemaFile::INIT ||
+          te_old.m_tableState == SchemaFile::DROP_TABLE_COMMITTED)
+        continue;
+      SchemaFile * sf = &xsf->schemaPage[n];
+      SchemaFile::TableEntry & te = sf->TableEntries[i];
+      te.m_tableState = te_old.m_tableState;
+      te.m_tableVersion = te_old.m_tableVersion;
+      te.m_tableType = te_old.m_tableType;
+      te.m_noOfPages = te_old.m_noOfPages;
+      te.m_gcp = te_old.m_gcp;
+      if (noOfPages < n)
+        noOfPages = n;
+    }
+  }
+  xsf->noOfPages = noOfPages;
+  initSchemaFile(xsf, 0, xsf->noOfPages, false);
+
+  return true;
+}
+
 /* **************************************************************** */
 /* ---------------------------------------------------------------- */
 /* MODULE:          INITIALISATION MODULE ------------------------- */
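The new readSchemaConf() path reads the schema file in two steps: INITIAL_READ_HEAD fetches page 0, the FileSize word in its header tells DBDICT how many 4 KB pages follow, and INITIAL_READ then fetches the rest (pre-5.0.5 files have no such header, so the code falls back to the fixed 32 KB size). A rough standalone sketch of that control flow, with plain fread() standing in for the FSREADREQ/FSREADCONF signalling; the header offset is assumed from the SchemaFile.hpp layout and a little-endian host:

    #include <cstdio>
    #include <cstring>
    #include <vector>

    static const unsigned kPageSize = 4096;   // NDB_SF_PAGE_SIZE, restated

    // Read page 0, derive the total size from the FileSize header word
    // (byte offset 16 in the new layout), then read the remaining pages.
    static bool read_schema_file(FILE * f, std::vector<unsigned char> & buf)
    {
      buf.resize(kPageSize);
      if (fread(&buf[0], 1, kPageSize, f) != kPageSize)   // INITIAL_READ_HEAD
        return false;
      unsigned fileSize = 0;
      memcpy(&fileSize, &buf[16], 4);
      if (fileSize < kPageSize || fileSize % kPageSize != 0)
        return false;
      unsigned rest = fileSize - kPageSize;                // pages 1 .. n-1
      buf.resize(fileSize);
      return rest == 0 ||
             fread(&buf[kPageSize], 1, rest, f) == rest;   // INITIAL_READ
    }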
@@ -1306,6 +1428,7 @@ void Dbdict::initRetrieveRecord(Signal* signal, Uint32 i, Uint32 returnCode)
 void Dbdict::initSchemaRecord()
 {
   c_schemaRecord.schemaPage = RNIL;
+  c_schemaRecord.oldSchemaPage = RNIL;
 }//Dbdict::initSchemaRecord()
 
 void Dbdict::initRestartRecord()
@@ -1327,10 +1450,10 @@ void Dbdict::initNodeRecords()
 void Dbdict::initPageRecords()
 {
-  c_schemaRecord.schemaPage = ZMAX_PAGES_OF_TABLE_DEFINITION;
-  c_schemaRecord.oldSchemaPage = ZMAX_PAGES_OF_TABLE_DEFINITION + 1;
-  c_retrieveRecord.retrievePage = ZMAX_PAGES_OF_TABLE_DEFINITION + 2;
-  ndbrequire(ZNUMBER_OF_PAGES >= (2 * ZMAX_PAGES_OF_TABLE_DEFINITION + 2));
+  c_retrieveRecord.retrievePage = ZMAX_PAGES_OF_TABLE_DEFINITION;
+  ndbrequire(ZNUMBER_OF_PAGES >= (ZMAX_PAGES_OF_TABLE_DEFINITION + 1));
+  c_schemaRecord.schemaPage = 0;
+  c_schemaRecord.oldSchemaPage = NDB_SF_MAX_PAGES;
 }//Dbdict::initPageRecords()
 
 void Dbdict::initTableRecords()
@@ -1598,6 +1721,7 @@ void Dbdict::execREAD_CONFIG_REQ(Signal* signal)
   c_fsConnectRecordPool.setSize(ZFS_CONNECT_SIZE);
   c_nodes.setSize(MAX_NODES);
   c_pageRecordArray.setSize(ZNUMBER_OF_PAGES);
+  c_schemaPageRecordArray.setSize(2 * NDB_SF_MAX_PAGES);
   c_tableRecordPool.setSize(tablerecSize);
   c_tableRecordHash.setSize(tablerecSize);
   c_triggerRecordPool.setSize(c_maxNoOfTriggers);
@@ -1616,12 +1740,23 @@ void Dbdict::execREAD_CONFIG_REQ(Signal* signal)
   c_opCreateTrigger.setSize(8);
   c_opDropTrigger.setSize(8);
   c_opAlterTrigger.setSize(8);
 
+  // Initialize schema file copies
+  c_schemaFile[0].schemaPage =
+    (SchemaFile*)c_schemaPageRecordArray.getPtr(0 * NDB_SF_MAX_PAGES);
+  c_schemaFile[0].noOfPages = 0;
+  c_schemaFile[1].schemaPage =
+    (SchemaFile*)c_schemaPageRecordArray.getPtr(1 * NDB_SF_MAX_PAGES);
+  c_schemaFile[1].noOfPages = 0;
+
   // Initialize BAT for interface to file system
-  PageRecordPtr pageRecPtr;
-  c_pageRecordArray.getPtr(pageRecPtr, 0);
   NewVARIABLE* bat = allocateBat(2);
-  bat[1].WA = &pageRecPtr.p->word[0];
+  bat[0].WA = &c_schemaPageRecordArray.getPtr(0)->word[0];
+  bat[0].nrr = 2 * NDB_SF_MAX_PAGES;
+  bat[0].ClusterSize = NDB_SF_PAGE_SIZE;
+  bat[0].bits.q = NDB_SF_PAGE_SIZE_IN_WORDS_LOG2;
+  bat[0].bits.v = 5;  // 32 bits per element
+  bat[1].WA = &c_pageRecordArray.getPtr(0)->word[0];
   bat[1].nrr = ZNUMBER_OF_PAGES;
   bat[1].ClusterSize = ZSIZE_OF_PAGES_IN_WORDS * 4;
   bat[1].bits.q = ZLOG_SIZE_OF_PAGES_IN_WORDS; // 2**13 = 8192 elements
@@ -1766,16 +1901,23 @@ void Dbdict::execHOT_SPAREREP(Signal* signal)
 void Dbdict::initSchemaFile(Signal* signal)
 {
-  PageRecordPtr pagePtr;
-  c_pageRecordArray.getPtr(pagePtr, c_schemaRecord.schemaPage);
-  SchemaFile * schemaFile = (SchemaFile *)pagePtr.p;
-  initSchemaFile(schemaFile, 4 * ZSIZE_OF_PAGES_IN_WORDS);
+  XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
+  xsf->noOfPages =
+    (c_tableRecordPool.getSize() + NDB_SF_PAGE_ENTRIES - 1) / NDB_SF_PAGE_ENTRIES;
+  initSchemaFile(xsf, 0, xsf->noOfPages, true);
+  // init alt copy too for INR
+  XSchemaFile * oldxsf = &c_schemaFile[c_schemaRecord.oldSchemaPage != 0];
+  oldxsf->noOfPages = xsf->noOfPages;
+  memcpy(&oldxsf->schemaPage[0], &xsf->schemaPage[0],
+         xsf->schemaPage[0].FileSize);
 
   if (c_initialStart || c_initialNodeRestart) {
     jam();
     ndbrequire(c_writeSchemaRecord.inUse == false);
     c_writeSchemaRecord.inUse = true;
 
     c_writeSchemaRecord.pageId = c_schemaRecord.schemaPage;
+    c_writeSchemaRecord.newFile = true;
+    c_writeSchemaRecord.firstPage = 0;
+    c_writeSchemaRecord.noOfPages = xsf->noOfPages;
 
     c_writeSchemaRecord.m_callback.m_callbackFunction =
       safe_cast(&Dbdict::initSchemaFile_conf);
@@ -1785,7 +1927,9 @@ void Dbdict::initSchemaFile(Signal* signal)
     jam();
     ndbrequire(c_readSchemaRecord.schemaReadState == ReadSchemaRecord::IDLE);
     c_readSchemaRecord.pageId = c_schemaRecord.oldSchemaPage;
-    c_readSchemaRecord.schemaReadState = ReadSchemaRecord::INITIAL_READ;
+    c_readSchemaRecord.firstPage = 0;
+    c_readSchemaRecord.noOfPages = 1;
+    c_readSchemaRecord.schemaReadState = ReadSchemaRecord::INITIAL_READ_HEAD;
     startReadSchemaFile(signal);
   } else {
     ndbrequire(false);
@@ -1924,7 +2068,7 @@ void Dbdict::execDICTSTARTREQ(Signal* signal)
     safe_cast(&Dbdict::masterRestart_checkSchemaStatusComplete);
 
   c_restartRecord.activeTable = 0;
-  c_schemaRecord.schemaPage = c_schemaRecord.oldSchemaPage;
+  c_schemaRecord.schemaPage = c_schemaRecord.oldSchemaPage; // ugly
   checkSchemaStatus(signal);
 }//execDICTSTARTREQ()
@@ -1933,15 +2077,13 @@ Dbdict::masterRestart_checkSchemaStatusComplete(Signal* signal,
                                                 Uint32 callbackData,
                                                 Uint32 returnCode){
-  c_schemaRecord.schemaPage = ZMAX_PAGES_OF_TABLE_DEFINITION;
+  c_schemaRecord.schemaPage = 0; // ugly
+  XSchemaFile * oldxsf = &c_schemaFile[c_schemaRecord.oldSchemaPage != 0];
+  ndbrequire(oldxsf->noOfPages != 0);
 
   LinearSectionPtr ptr[3];
-  PageRecordPtr pagePtr;
-  c_pageRecordArray.getPtr(pagePtr, c_schemaRecord.oldSchemaPage);
-  ptr[0].p = &pagePtr.p->word[0];
-  ptr[0].sz = ZSIZE_OF_PAGES_IN_WORDS;
+  ptr[0].p = (Uint32*)&oldxsf->schemaPage[0];
+  ptr[0].sz = oldxsf->noOfPages * NDB_SF_PAGE_SIZE_IN_WORDS;
 
   c_sendSchemaRecord.m_SCHEMAINFO_Counter = c_aliveNodes;
   NodeReceiverGroup rg(DBDICT, c_aliveNodes);
@@ -1957,10 +2099,10 @@ Dbdict::masterRestart_checkSchemaStatusComplete(Signal* signal,
                        1,
                        c);
 
-  PageRecordPtr newPagePtr;
-  c_pageRecordArray.getPtr(newPagePtr, c_schemaRecord.schemaPage);
-  memcpy(&newPagePtr.p->word[0], &pagePtr.p->word[0],
-         4 * ZSIZE_OF_PAGES_IN_WORDS);
+  XSchemaFile * newxsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
+  newxsf->noOfPages = oldxsf->noOfPages;
+  memcpy(&newxsf->schemaPage[0], &oldxsf->schemaPage[0],
+         oldxsf->noOfPages * NDB_SF_PAGE_SIZE);
 
   signal->theData[0] = getOwnNodeId();
   sendSignal(reference(), GSN_SCHEMA_INFOCONF, signal, 1, JBB);
@@ -1977,11 +2119,11 @@ Dbdict::execGET_SCHEMA_INFOREQ(Signal* signal){
   LinearSectionPtr ptr[3];
 
-  PageRecordPtr pagePtr;
-  c_pageRecordArray.getPtr(pagePtr, c_schemaRecord.schemaPage);
+  XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
+  ndbrequire(xsf->noOfPages != 0);
 
-  ptr[0].p = &pagePtr.p->word[0];
-  ptr[0].sz = ZSIZE_OF_PAGES_IN_WORDS;
+  ptr[0].p = (Uint32*)&xsf->schemaPage[0];
+  ptr[0].sz = xsf->noOfPages * NDB_SF_PAGE_SIZE_IN_WORDS;
 
   Callback c = { safe_cast(&Dbdict::sendSchemaComplete), 0 };
   sendFragmentedSignal(ref,
@@ -2023,12 +2165,22 @@ void Dbdict::execSCHEMA_INFO(Signal* signal)
   SegmentedSectionPtr schemaDataPtr;
   signal->getSection(schemaDataPtr, 0);
 
-  PageRecordPtr pagePtr;
-  c_pageRecordArray.getPtr(pagePtr, c_schemaRecord.schemaPage);
-  copy(&pagePtr.p->word[0], schemaDataPtr);
+  XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
+  ndbrequire(schemaDataPtr.sz % NDB_SF_PAGE_SIZE_IN_WORDS == 0);
+  xsf->noOfPages = schemaDataPtr.sz / NDB_SF_PAGE_SIZE_IN_WORDS;
+  copy((Uint32*)&xsf->schemaPage[0], schemaDataPtr);
   releaseSections(signal);
 
+  SchemaFile * sf0 = &xsf->schemaPage[0];
+  if (sf0->NdbVersion < NDB_SF_VERSION_5_0_5) {
+    bool ok = convertSchemaFileTo_5_0_5(xsf);
+    ndbrequire(ok);
+  }
+
-  validateChecksum((SchemaFile*)pagePtr.p);
+  validateChecksum(xsf);
+
+  XSchemaFile * oldxsf = &c_schemaFile[c_schemaRecord.oldSchemaPage != 0];
+  resizeSchemaFile(xsf, oldxsf->noOfPages);
 
   ndbrequire(signal->getSendersBlockRef() != reference());
@@ -2053,7 +2205,11 @@ Dbdict::restart_checkSchemaStatusComplete(Signal * signal,
   ndbrequire(c_writeSchemaRecord.inUse == false);
   c_writeSchemaRecord.inUse = true;
 
+  XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
   c_writeSchemaRecord.pageId = c_schemaRecord.schemaPage;
+  c_writeSchemaRecord.newFile = true;
+  c_writeSchemaRecord.firstPage = 0;
+  c_writeSchemaRecord.noOfPages = xsf->noOfPages;
   c_writeSchemaRecord.m_callback.m_callbackData = 0;
   c_writeSchemaRecord.m_callback.m_callbackFunction =
     safe_cast(&Dbdict::restart_writeSchemaConf);
@@ -2102,20 +2258,18 @@ void Dbdict::execSCHEMA_INFOCONF(Signal* signal)
 void Dbdict::checkSchemaStatus(Signal* signal)
 {
-  PageRecordPtr pagePtr;
-  c_pageRecordArray.getPtr(pagePtr, c_schemaRecord.schemaPage);
+  XSchemaFile * newxsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
+  XSchemaFile * oldxsf = &c_schemaFile[c_schemaRecord.oldSchemaPage != 0];
+  ndbrequire(newxsf->noOfPages == oldxsf->noOfPages);
+  const Uint32 noOfEntries = newxsf->noOfPages * NDB_SF_PAGE_ENTRIES;
 
-  PageRecordPtr oldPagePtr;
-  c_pageRecordArray.getPtr(oldPagePtr, c_schemaRecord.oldSchemaPage);
-
-  for (; c_restartRecord.activeTable < MAX_TABLES;
+  for (; c_restartRecord.activeTable < noOfEntries;
        c_restartRecord.activeTable++) {
     jam();
 
     Uint32 tableId = c_restartRecord.activeTable;
-    SchemaFile::TableEntry *newEntry = getTableEntry(pagePtr.p, tableId);
-    SchemaFile::TableEntry *oldEntry = getTableEntry(oldPagePtr.p, tableId, true);
+    SchemaFile::TableEntry *newEntry = getTableEntry(newxsf, tableId);
+    SchemaFile::TableEntry *oldEntry = getTableEntry(oldxsf, tableId);
     SchemaFile::TableState schemaState =
       (SchemaFile::TableState)newEntry->m_tableState;
     SchemaFile::TableState oldSchemaState =
@@ -3139,6 +3293,7 @@ Dbdict::execALTER_TAB_REQ(Signal * signal)
   tabEntry.m_gcp = gci;
   tabEntry.m_noOfPages =
     DIV(tabInfoPtr.sz + ZPAGE_HEADER_SIZE, ZSIZE_OF_PAGES_IN_WORDS);
+  memset(tabEntry.m_unused, 0, sizeof(tabEntry.m_unused));
 
   Callback callback;
   callback.m_callbackData = senderData;
@@ -3652,9 +3807,8 @@ Dbdict::execCREATE_FRAGMENTATION_CONF(Signal* signal){
   /**
    * Update table version
   */
-  PageRecordPtr pagePtr;
-  c_pageRecordArray.getPtr(pagePtr, c_schemaRecord.schemaPage);
-  SchemaFile::TableEntry * tabEntry = getTableEntry(pagePtr.p, tabPtr.i);
+  XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
+  SchemaFile::TableEntry * tabEntry = getTableEntry(xsf, tabPtr.i);
 
   tabPtr.p->tableVersion = tabEntry->m_tableVersion + 1;
@@ -3962,6 +4116,7 @@ Dbdict::createTab_prepare(Signal* signal, CreateTabReq * req){
   tabEntry.m_gcp = gci;
   tabEntry.m_noOfPages =
     DIV(tabInfoPtr.sz + ZPAGE_HEADER_SIZE, ZSIZE_OF_PAGES_IN_WORDS);
+  memset(tabEntry.m_unused, 0, sizeof(tabEntry.m_unused));
 
   Callback callback;
   callback.m_callbackData = createTabPtr.p->key;
@@ -4418,6 +4573,7 @@ Dbdict::createTab_commit(Signal * signal, CreateTabReq * req){
   tabEntry.m_gcp = tabPtr.p->gciTableCreated;
   tabEntry.m_noOfPages =
     DIV(tabPtr.p->packedSize + ZPAGE_HEADER_SIZE, ZSIZE_OF_PAGES_IN_WORDS);
+  memset(tabEntry.m_unused, 0, sizeof(tabEntry.m_unused));
 
   Callback callback;
   callback.m_callbackData = createTabPtr.p->key;
@@ -4518,10 +4674,9 @@ Dbdict::createTab_dropComplete(Signal* signal,
   c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI);
   releaseTableObject(tabPtr.i);
 
-  PageRecordPtr pagePtr;
-  c_pageRecordArray.getPtr(pagePtr, c_schemaRecord.schemaPage);
-  SchemaFile::TableEntry * tableEntry = getTableEntry(pagePtr.p, tabPtr.i);
+  XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
+  SchemaFile::TableEntry * tableEntry = getTableEntry(xsf, tabPtr.i);
   tableEntry->m_tableState = SchemaFile::DROP_TABLE_COMMITTED;
 
   //@todo check error
@@ -5353,21 +5508,22 @@ Dbdict::execPREP_DROP_TAB_REQ(Signal* signal){
   /**
    * Modify schema
   */
-  PageRecordPtr pagePtr;
-  c_pageRecordArray.getPtr(pagePtr, c_schemaRecord.schemaPage);
-  SchemaFile::TableEntry * tableEntry = getTableEntry(pagePtr.p, tablePtr.i);
+  XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
+  SchemaFile::TableEntry * tableEntry = getTableEntry(xsf, tablePtr.i);
   SchemaFile::TableState tabState =
     (SchemaFile::TableState)tableEntry->m_tableState;
   ndbrequire(tabState == SchemaFile::TABLE_ADD_COMMITTED ||
             tabState == SchemaFile::ALTER_TABLE_COMMITTED);
   tableEntry->m_tableState = SchemaFile::DROP_TABLE_STARTED;
-  computeChecksum((SchemaFile*)pagePtr.p);
+  computeChecksum(xsf, tablePtr.i / NDB_SF_PAGE_ENTRIES);
 
   ndbrequire(c_writeSchemaRecord.inUse == false);
   c_writeSchemaRecord.inUse = true;
 
   c_writeSchemaRecord.pageId = c_schemaRecord.schemaPage;
+  c_writeSchemaRecord.newFile = false;
+  c_writeSchemaRecord.firstPage = tablePtr.i / NDB_SF_PAGE_ENTRIES;
   c_writeSchemaRecord.noOfPages = 1;
   c_writeSchemaRecord.m_callback.m_callbackData = dropTabPtr.p->key;
   c_writeSchemaRecord.m_callback.m_callbackFunction =
     safe_cast(&Dbdict::prepDropTab_writeSchemaConf);
@@ -5528,20 +5684,20 @@ Dbdict::dropTab_complete(Signal* signal,
   /**
    * Write to schema file
   */
-  PageRecordPtr pagePtr;
-  c_pageRecordArray.getPtr(pagePtr, c_schemaRecord.schemaPage);
-  SchemaFile::TableEntry * tableEntry = getTableEntry(pagePtr.p, tableId);
+  XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
+  SchemaFile::TableEntry * tableEntry = getTableEntry(xsf, tableId);
   SchemaFile::TableState tabState =
     (SchemaFile::TableState)tableEntry->m_tableState;
   ndbrequire(tabState == SchemaFile::DROP_TABLE_STARTED);
   tableEntry->m_tableState = SchemaFile::DROP_TABLE_COMMITTED;
-  computeChecksum((SchemaFile*)pagePtr.p);
+  computeChecksum(xsf, tableId / NDB_SF_PAGE_ENTRIES);
 
   ndbrequire(c_writeSchemaRecord.inUse == false);
   c_writeSchemaRecord.inUse = true;
 
   c_writeSchemaRecord.pageId = c_schemaRecord.schemaPage;
+  c_writeSchemaRecord.firstPage = tableId / NDB_SF_PAGE_ENTRIES;
   c_writeSchemaRecord.noOfPages = 1;
   c_writeSchemaRecord.m_callback.m_callbackData = dropTabPtr.p->key;
   c_writeSchemaRecord.m_callback.m_callbackFunction =
     safe_cast(&Dbdict::dropTab_writeSchemaConf);
@@ -11734,36 +11890,75 @@ Dbdict::getIndexAttrMask(TableRecordPtr indexPtr, AttributeMask& mask)
 /* **************************************************************** */
 
 void
-Dbdict::initSchemaFile(SchemaFile * sf, Uint32 fileSz){
-  memcpy(sf->Magic, "NDBSCHMA", sizeof(sf->Magic));
-  sf->ByteOrder = 0x12345678;
-  sf->NdbVersion = NDB_VERSION;
-  sf->FileSize = fileSz;
-  sf->CheckSum = 0;
-
-  Uint32 headSz = (sizeof(SchemaFile) - sizeof(SchemaFile::TableEntry));
-  Uint32 noEntries = (fileSz - headSz) / sizeof(SchemaFile::TableEntry);
-  Uint32 slack = (fileSz - headSz) - noEntries * sizeof(SchemaFile::TableEntry);
-
-  ndbrequire(noEntries > MAX_TABLES);
-
-  sf->NoOfTableEntries = noEntries;
-  memset(sf->TableEntries, 0, noEntries * sizeof(SchemaFile::TableEntry));
-  memset(&(sf->TableEntries[noEntries]), 0, slack);
-  computeChecksum(sf);
-}
+Dbdict::initSchemaFile(XSchemaFile * xsf, Uint32 firstPage, Uint32 lastPage,
+                       bool initEntries)
+{
+  ndbrequire(lastPage <= xsf->noOfPages);
+  for (Uint32 n = firstPage; n < lastPage; n++) {
+    SchemaFile * sf = &xsf->schemaPage[n];
+    if (initEntries)
+      memset(sf, 0, NDB_SF_PAGE_SIZE);
+
+    Uint32 ndb_version = NDB_VERSION;
+    if (ndb_version < NDB_SF_VERSION_5_0_5)
+      ndb_version = NDB_SF_VERSION_5_0_5;
+
+    memcpy(sf->Magic, NDB_SF_MAGIC, sizeof(sf->Magic));
+    sf->ByteOrder = 0x12345678;
+    sf->NdbVersion = ndb_version;
+    sf->FileSize = xsf->noOfPages * NDB_SF_PAGE_SIZE;
+    sf->PageNumber = n;
+    sf->CheckSum = 0;
+    sf->NoOfTableEntries = NDB_SF_PAGE_ENTRIES;
+
+    computeChecksum(xsf, n);
+  }
+}
+
+void
+Dbdict::resizeSchemaFile(XSchemaFile * xsf, Uint32 noOfPages)
+{
+  ndbrequire(noOfPages <= NDB_SF_MAX_PAGES);
+  if (xsf->noOfPages < noOfPages) {
+    jam();
+    Uint32 firstPage = xsf->noOfPages;
+    xsf->noOfPages = noOfPages;
+    initSchemaFile(xsf, 0, firstPage, false);
+    initSchemaFile(xsf, firstPage, xsf->noOfPages, true);
+  }
+  if (xsf->noOfPages > noOfPages) {
+    jam();
+    Uint32 tableId = noOfPages * NDB_SF_PAGE_ENTRIES;
+    while (tableId < xsf->noOfPages * NDB_SF_PAGE_ENTRIES) {
+      SchemaFile::TableEntry * te = getTableEntry(xsf, tableId);
+      if (te->m_tableState != SchemaFile::INIT &&
+          te->m_tableState != SchemaFile::DROP_TABLE_COMMITTED) {
+        ndbrequire(false);
+      }
+      tableId++;
+    }
+    xsf->noOfPages = noOfPages;
+    initSchemaFile(xsf, 0, xsf->noOfPages, false);
+  }
+}
 
 void
-Dbdict::computeChecksum(SchemaFile * sf){
+Dbdict::computeChecksum(XSchemaFile * xsf, Uint32 pageNo){
+  SchemaFile * sf = &xsf->schemaPage[pageNo];
   sf->CheckSum = 0;
-  sf->CheckSum = computeChecksum((const Uint32*)sf, sf->FileSize/4);
+  sf->CheckSum = computeChecksum((Uint32*)sf, NDB_SF_PAGE_SIZE_IN_WORDS);
 }
 
 bool
-Dbdict::validateChecksum(const SchemaFile * sf){
-
-  Uint32 c = computeChecksum((const Uint32*)sf, sf->FileSize/4);
-  return c == 0;
+Dbdict::validateChecksum(const XSchemaFile * xsf){
+
+  for (Uint32 n = 0; n < xsf->noOfPages; n++) {
+    SchemaFile * sf = &xsf->schemaPage[n];
+    Uint32 c = computeChecksum((Uint32*)sf, NDB_SF_PAGE_SIZE_IN_WORDS);
+    if (c != 0)
+      return false;
+  }
+  return true;
 }
 
 Uint32
@@ -11775,11 +11970,14 @@ Dbdict::computeChecksum(const Uint32 * src, Uint32 len){
 }
 
 SchemaFile::TableEntry *
-Dbdict::getTableEntry(void * p, Uint32 tableId, bool allowTooBig){
-  SchemaFile * sf = (SchemaFile*)p;
-  ndbrequire(allowTooBig || tableId < sf->NoOfTableEntries);
-  return &sf->TableEntries[tableId];
+Dbdict::getTableEntry(XSchemaFile * xsf, Uint32 tableId)
+{
+  Uint32 n = tableId / NDB_SF_PAGE_ENTRIES;
+  Uint32 i = tableId % NDB_SF_PAGE_ENTRIES;
+  ndbrequire(n < xsf->noOfPages);
+
+  SchemaFile * sf = &xsf->schemaPage[n];
+  return &sf->TableEntries[i];
 }
 
 // global metadata support
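With 127 entries per 4 KB page, the new getTableEntry() locates a schema object with simple div/mod arithmetic, and callers such as updateSchemaState() and the drop-table paths above re-checksum and write back only the page that changed (firstPage = tableId / NDB_SF_PAGE_ENTRIES, noOfPages = 1). A small self-contained illustration of the same mapping; pageOf and slotOf are invented helper names, not part of Dbdict:

    #include <cassert>
    #include <cstdio>

    static const unsigned kEntriesPerPage = 127;   // NDB_SF_PAGE_ENTRIES

    // Mirror of the div/mod lookup in Dbdict::getTableEntry().
    static unsigned pageOf(unsigned tableId) { return tableId / kEntriesPerPage; }
    static unsigned slotOf(unsigned tableId) { return tableId % kEntriesPerPage; }

    int main()
    {
      // Table 300 lives in slot 46 of page 2, so updating it means
      // re-checksumming and writing back page 2 only.
      assert(pageOf(300) == 2 && slotOf(300) == 46);
      printf("table 300 -> page %u, slot %u\n", pageOf(300), slotOf(300));
      return 0;
    }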
ndb/src/kernel/blocks/dbdict/Dbdict.hpp

@@ -78,7 +78,8 @@
 /*--------------------------------------------------------------*/
 // Page constants
 /*--------------------------------------------------------------*/
-#define ZALLOCATE 1 //Variable number of page for NDBFS
+#define ZBAT_SCHEMA_FILE 0 //Variable number of page for NDBFS
+#define ZBAT_TABLE_FILE 1 //Variable number of page for NDBFS
 #define ZPAGE_HEADER_SIZE 32
 #define ZPOS_PAGE_SIZE 16
 #define ZPOS_CHECKSUM 17
@@ -92,7 +93,7 @@
 #define ZSIZE_OF_PAGES_IN_WORDS 8192
 #define ZLOG_SIZE_OF_PAGES_IN_WORDS 13
 #define ZMAX_PAGES_OF_TABLE_DEFINITION 8
-#define ZNUMBER_OF_PAGES (2 * ZMAX_PAGES_OF_TABLE_DEFINITION + 2)
+#define ZNUMBER_OF_PAGES (ZMAX_PAGES_OF_TABLE_DEFINITION + 1)
 #define ZNO_OF_FRAGRECORD 5
 /*--------------------------------------------------------------*/
@@ -429,6 +430,12 @@ public:
   typedef Ptr<PageRecord> PageRecordPtr;
   CArray<PageRecord> c_pageRecordArray;
 
+  struct SchemaPageRecord {
+    Uint32 word[NDB_SF_PAGE_SIZE_IN_WORDS];
+  };
+
+  CArray<SchemaPageRecord> c_schemaPageRecordArray;
+
   /**
    * A page for create index table signal.
    */
@@ -655,16 +662,20 @@ private:
   struct ReadSchemaRecord {
     /** Page Id of schema page */
     Uint32 pageId;
+    /** First page to read */
+    Uint32 firstPage;
+    /** Number of pages to read */
+    Uint32 noOfPages;
     /** State, indicates from where it was called */
     enum SchemaReadState {
       IDLE = 0,
-      INITIAL_READ = 1
+      INITIAL_READ_HEAD = 1,
+      INITIAL_READ = 2
     };
     SchemaReadState schemaReadState;
   };
   ReadSchemaRecord c_readSchemaRecord;
 
 private:
   /**
    * This record stores all the state needed
    * when a schema file is being written to disk
@@ -672,6 +683,12 @@ private:
   struct WriteSchemaRecord {
     /** Page Id of schema page */
     Uint32 pageId;
+    /** Rewrite entire file */
+    Uint32 newFile;
+    /** First page to write */
+    Uint32 firstPage;
+    /** Number of pages to write */
+    Uint32 noOfPages;
     /** Schema Files Handled, local state variable */
     Uint32 noOfSchemaFilesHandled;
@@ -752,21 +769,33 @@ private:
    *       Word 4: Currently zero
    ****************************************************************************/
   struct SchemaRecord {
-    /**    Schema page     */
+    /**    Schema file first page (0)   */
     Uint32 schemaPage;
 
-    /**    Old Schema page (used at node restart)    */
+    /**    Old Schema file first page (used at node restart)    */
     Uint32 oldSchemaPage;
 
     Callback m_callback;
   };
   SchemaRecord c_schemaRecord;
 
-  void initSchemaFile(SchemaFile *, Uint32 sz);
-  void computeChecksum(SchemaFile *);
-  bool validateChecksum(const SchemaFile *);
-  SchemaFile::TableEntry * getTableEntry(void * buf, Uint32 tableId,
-                                         bool allowTooBig = false);
+  /*
+   * Schema file, list of schema pages.  Use an array until a pool
+   * exists and NDBFS interface can use it.
+   */
+  struct XSchemaFile {
+    SchemaFile* schemaPage;
+    Uint32 noOfPages;
+  };
+  // 0-normal 1-old
+  XSchemaFile c_schemaFile[2];
+
+  void initSchemaFile(XSchemaFile *, Uint32 firstPage, Uint32 lastPage,
+                      bool initEntries);
+  void resizeSchemaFile(XSchemaFile * xsf, Uint32 noOfPages);
+  void computeChecksum(XSchemaFile *, Uint32 pageNo);
+  bool validateChecksum(const XSchemaFile *);
+  SchemaFile::TableEntry * getTableEntry(XSchemaFile *, Uint32 tableId);
 
   Uint32 computeChecksum(const Uint32 * src, Uint32 len);
@@ -1631,7 +1660,8 @@ private:
   void openSchemaFile(Signal* signal,
                       Uint32 fileNo,
                       Uint32 fsPtr,
-                      bool writeFlag);
+                      bool writeFlag,
+                      bool newFile);
   void writeSchemaFile(Signal* signal, Uint32 filePtr, Uint32 fsPtr);
   void writeSchemaConf(Signal* signal,
                        FsConnectRecordPtr fsPtr);
@@ -1673,6 +1703,7 @@ private:
   void readSchemaRef(Signal* signal, FsConnectRecordPtr fsPtr);
   void closeReadSchemaConf(Signal* signal,
                            FsConnectRecordPtr fsPtr);
+  bool convertSchemaFileTo_5_0_5(XSchemaFile*);
 
   /* ------------------------------------------------------------ */
   // Get table definitions
ndb/src/kernel/blocks/dbdict/SchemaFile.hpp

@@ -18,16 +18,35 @@
 #define DBDICT_SCHEMA_FILE_HPP
 
 #include <ndb_types.h>
 #include <ndb_version.h>
+#include <string.h>
+
+#define NDB_SF_MAGIC "NDBSCHMA"
+
+// page size 4k
+#define NDB_SF_PAGE_SIZE_IN_WORDS_LOG2 10
+#define NDB_SF_PAGE_SIZE_IN_WORDS (1 << NDB_SF_PAGE_SIZE_IN_WORDS_LOG2)
+#define NDB_SF_PAGE_SIZE (NDB_SF_PAGE_SIZE_IN_WORDS << 2)
+
+// 4k = (1 + 127) * 32
+#define NDB_SF_PAGE_ENTRIES 127
+
+// 160 pages = 20320 objects
+#define NDB_SF_MAX_PAGES 160
+
+// versions where format changed
+#define NDB_SF_VERSION_5_0_5 MAKE_VERSION(5, 0, 5)
+
+// One page in schema file.
 struct SchemaFile {
+  // header size 32 bytes
   char Magic[8];
   Uint32 ByteOrder;
   Uint32 NdbVersion;
   Uint32 FileSize; // In bytes
-  Uint32 Unused;
-  Uint32 CheckSum;
+  Uint32 PageNumber;
+  Uint32 CheckSum; // Of this page
+  Uint32 NoOfTableEntries; // On this page (NDB_SF_PAGE_ENTRIES)
 
   enum TableState {
     INIT = 0,
@@ -38,20 +57,33 @@ struct SchemaFile {
     ALTER_TABLE_COMMITTED = 5
   };
 
+  // entry size 32 bytes
   struct TableEntry {
     Uint32 m_tableState;
     Uint32 m_tableVersion;
     Uint32 m_tableType;
     Uint32 m_noOfPages;
     Uint32 m_gcp;
+    Uint32 m_unused[3];
 
     bool operator==(const TableEntry& o) const {
       return memcmp(this, &o, sizeof(* this)) == 0;
     }
   };
 
+  // pre-5.0.5
+  struct TableEntry_old {
+    Uint32 m_tableState;
+    Uint32 m_tableVersion;
+    Uint32 m_tableType;
+    Uint32 m_noOfPages;
+    Uint32 m_gcp;
+  };
+
-  Uint32 NoOfTableEntries;
-  TableEntry TableEntries[1];
+  union {
+    TableEntry TableEntries[NDB_SF_PAGE_ENTRIES];
+    TableEntry_old TableEntries_old[1];
+  };
 };
 
 #endif
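The constants above pin down the new layout: a 4 KB page is a 32-byte header plus 127 entries of 32 bytes, and a schema file can grow to 160 such pages. A short standalone check of that arithmetic, together with the old single-page capacity implied by the removed initSchemaFile() (one 32 KB page, 32-byte header, 20-byte entries), which is where the "> 1600" in the commit message comes from; treat the old figure as an estimate reconstructed from that code:

    #include <cstdio>

    int main()
    {
      // New format, per SchemaFile.hpp above.
      const unsigned pageSize    = 4096;   // NDB_SF_PAGE_SIZE
      const unsigned entriesPage = 127;    // NDB_SF_PAGE_ENTRIES
      const unsigned maxPages    = 160;    // NDB_SF_MAX_PAGES
      printf("page bytes   = %u (expected %u)\n", 32 + entriesPage * 32, pageSize);
      printf("max objects  = %u\n", maxPages * entriesPage);        // 20320

      // Old format estimate: (32768 - 32) / 20 = 1636 entries,
      // just above the previous MAX_TABLES of 1600.
      printf("old capacity ~ %u\n", (32 * 1024 - 32) / 20);
      return 0;
    }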
ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp

 #if 0
 make -f Makefile -f - printSchemaFile <<'_eof_'
-printSchemaFile: printSchemaFile.cpp
+printSchemaFile: printSchemaFile.cpp SchemaFile.hpp
 	$(CXXCOMPILE) -o $@ $@.cpp -L../../../common/util/.libs -lgeneral
 _eof_
 exit $?
@@ -24,19 +24,28 @@ exit $?
 #include <ndb_global.h>
 #include <ndb_version.h>
 
 #include <NdbMain.h>
 #include <NdbOut.hpp>
 #include <SchemaFile.hpp>
 
-void
-usage(const char * prg){
-  ndbout << "Usage " << prg << " P0.SchemaLog" << endl;
+static const char * progname = 0;
+static bool allflag = false;
+static bool checkonly = false;
+static int xitcode = 0;
+
+static void
+usage()
+{
+  ndbout << "Usage " << progname << " [-ac]" << " P0.SchemaLog" << endl;
 }
 
-void
-fill(const char * buf, int mod){
+static void
+fill(const char * buf, int mod)
+{
   int len = strlen(buf) + 1;
   ndbout << buf << " ";
   while ((len % mod) != 0){
@@ -45,19 +54,34 @@ fill(const char * buf, int mod){
   }
 }
 
-void
-print(const char * filename, const SchemaFile * file){
-  ndbout << "----- Schemafile: " << filename << " -----" << endl;
-  ndbout_c("Magic: %.*s ByteOrder: %.8x NdbVersion: %d FileSize: %d",
-           sizeof(file->Magic), file->Magic, file->ByteOrder,
-           file->NdbVersion, file->FileSize);
-
-  for (Uint32 i = 0; i < file->NoOfTableEntries; i++) {
-    SchemaFile::TableEntry te = file->TableEntries[i];
-    if (te.m_tableState != SchemaFile::INIT){
-      ndbout << "Table " << i << ": State = " << te.m_tableState
+static void
+print_head(const char * filename, const SchemaFile * sf)
+{
+  if (! checkonly) {
+    ndbout << "----- Schemafile: " << filename << " -----" << endl;
+    ndbout_c("Magic: %.*s ByteOrder: %.8x NdbVersion: %d.%d.%d FileSize: %d",
+             sizeof(sf->Magic), sf->Magic, sf->ByteOrder,
+             sf->NdbVersion >> 16,
+             (sf->NdbVersion >> 8) & 0xFF,
+             sf->NdbVersion & 0xFF,
+             sf->FileSize);
+  }
+}
+
+static void
+print_old(const char * filename, const SchemaFile * sf)
+{
+  print_head(filename, sf);
+
+  for (Uint32 i = 0; i < sf->NoOfTableEntries; i++) {
+    SchemaFile::TableEntry_old te = sf->TableEntries_old[i];
+    if (allflag ||
+        (te.m_tableState != SchemaFile::INIT &&
+         te.m_tableState != SchemaFile::DROP_TABLE_COMMITTED)) {
+      ndbout << "Table " << i << ":"
+             << " State = " << te.m_tableState
+             << " version = " << te.m_tableVersion
+             << " type = " << te.m_tableType
+             << " noOfPages = " << te.m_noOfPages
@@ -66,47 +90,114 @@ print(const char * filename, const SchemaFile * file){
   }
 }
 
-NDB_COMMAND(printSchemafile,
-	    "printSchemafile", "printSchemafile", "Prints a schemafile", 16384){
-  if (argc < 2){
-    usage(argv[0]);
-    return 0;
-  }
-
-  const char * filename = argv[1];
-
-  struct stat sbuf;
-  const int res = stat(filename, &sbuf);
-  if (res != 0){
-    ndbout << "Could not find file: \"" << filename << "\"" << endl;
-    return 0;
-  }
-  const Uint32 bytes = sbuf.st_size;
-
-  Uint32 * buf = new Uint32[bytes/4+1];
-
-  FILE * f = fopen(filename, "rb");
-  if (f == 0){
-    ndbout << "Failed to open file" << endl;
-    delete [] buf;
-    return 0;
-  }
-  Uint32 sz = fread(buf, 1, bytes, f);
-  fclose(f);
-  if (sz != bytes){
-    ndbout << "Failure while reading file" << endl;
-    delete [] buf;
-    return 0;
-  }
-
-  print(filename, (SchemaFile *)&buf[0]);
-
-  Uint32 chk = 0, i;
-  for (i = 0; i < bytes/4; i++)
-    chk ^= buf[i];
-
-  if (chk != 0)
-    ndbout << "Invalid checksum!" << endl;
-
-  delete [] buf;
-  return 0;
-}
+static void
+print(const char * filename, const SchemaFile * xsf, Uint32 sz)
+{
+  int retcode = 0;
+
+  print_head(filename, xsf);
+
+  assert(sizeof(SchemaFile) == NDB_SF_PAGE_SIZE);
+  if (xsf->FileSize != sz || xsf->FileSize % NDB_SF_PAGE_SIZE != 0) {
+    ndbout << "***** invalid FileSize " << xsf->FileSize << endl;
+    retcode = 1;
+  }
+  Uint32 noOfPages = xsf->FileSize / NDB_SF_PAGE_SIZE;
+  for (Uint32 n = 0; n < noOfPages; n++) {
+    if (! checkonly) {
+      ndbout << "----- Page: " << n << " (" << noOfPages << ") -----" << endl;
+    }
+    const SchemaFile * sf = &xsf[n];
+    if (sf->FileSize != xsf->FileSize) {
+      ndbout << "***** page " << n << " FileSize changed to "
+             << sf->FileSize << "!=" << xsf->FileSize << endl;
+      retcode = 1;
+    }
+    Uint32 cs = 0;
+    for (Uint32 j = 0; j < NDB_SF_PAGE_SIZE_IN_WORDS; j++)
+      cs ^= ((const Uint32*)sf)[j];
+    if (cs != 0) {
+      ndbout << "***** page " << n << " invalid CheckSum" << endl;
+      retcode = 1;
+    }
+    if (sf->NoOfTableEntries != NDB_SF_PAGE_ENTRIES) {
+      ndbout << "***** page " << n << " invalid NoOfTableEntries "
+             << sf->NoOfTableEntries << endl;
+      retcode = 1;
+    }
+    for (Uint32 i = 0; i < NDB_SF_PAGE_ENTRIES; i++) {
+      SchemaFile::TableEntry te = sf->TableEntries[i];
+      Uint32 j = n * NDB_SF_PAGE_ENTRIES + i;
+      if (allflag ||
+          (te.m_tableState != SchemaFile::INIT &&
+           te.m_tableState != SchemaFile::DROP_TABLE_COMMITTED)) {
+        if (! checkonly)
+          ndbout << "Table " << j << ":"
+                 << " State = " << te.m_tableState
+                 << " version = " << te.m_tableVersion
+                 << " type = " << te.m_tableType
+                 << " noOfPages = " << te.m_noOfPages
+                 << " gcp: " << te.m_gcp << endl;
+      }
+      if (te.m_unused[0] != 0 || te.m_unused[1] != 0 || te.m_unused[2] != 0) {
+        ndbout << "***** entry " << j << " garbage in m_unused[3]" << endl;
+        retcode = 1;
+      }
+    }
+  }
+
+  if (retcode != 0)
+    xitcode = 1;
+  else if (checkonly)
+    ndbout << "ok: " << filename << endl;
+}
+
+NDB_COMMAND(printSchemafile,
+	    "printSchemafile", "printSchemafile", "Prints a schemafile", 16384)
+{
+  progname = argv[0];
+
+  while (argv[1][0] == '-') {
+    if (strchr(argv[1], 'a') != 0)
+      allflag = true;
+    if (strchr(argv[1], 'c') != 0)
+      checkonly = true;
+    argc--, argv++;
+  }
+
+  while (argc > 1) {
+    const char * filename = argv[1];
+    argc--, argv++;
+
+    struct stat sbuf;
+    const int res = stat(filename, &sbuf);
+    if (res != 0) {
+      ndbout << "Could not find file: \"" << filename << "\"" << endl;
+      return 1;
+    }
+    const Uint32 bytes = sbuf.st_size;
+
+    Uint32 * buf = new Uint32[bytes/4+1];
+
+    FILE * f = fopen(filename, "rb");
+    if (f == 0) {
+      ndbout << "Failed to open file" << endl;
+      delete [] buf;
+      return 1;
+    }
+    Uint32 sz = fread(buf, 1, bytes, f);
+    fclose(f);
+    if (sz != bytes) {
+      ndbout << "Failure while reading file" << endl;
+      delete [] buf;
+      return 1;
+    }
+
+    SchemaFile * sf = (SchemaFile *)&buf[0];
+    if (sf->NdbVersion < NDB_SF_VERSION_5_0_5)
+      print_old(filename, sf);
+    else
+      print(filename, sf, sz);
+    delete [] buf;
+  }
+
+  return xitcode;
+}
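Judging from the new option handling above, the tool now takes an optional flag group followed by any number of schema files, for example printSchemaFile -c ndb_1_fs/D1/DBDICT/P0.SchemaLog (path shown for illustration only): -a also lists unused entries, -c suppresses the listing and prints "ok: <file>" for each file that passes, and the process exits non-zero if any page fails the size, checksum, NoOfTableEntries or m_unused checks.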
ndb/src/ndbapi/NdbDictionaryImpl.cpp

@@ -208,11 +208,13 @@ NdbColumnImpl::equal(const NdbColumnImpl& col) const
   if(m_nullable != col.m_nullable){
     DBUG_RETURN(false);
   }
+#ifdef ndb_dictionary_dkey_fixed
   if(m_pk){
     if(m_distributionKey != col.m_distributionKey){
       DBUG_RETURN(false);
     }
   }
+#endif
   if (m_precision != col.m_precision ||
       m_scale != col.m_scale ||
       m_length != col.m_length ||
ndb/test/ndbapi/testDict.cpp

@@ -428,103 +428,99 @@ int runUseTableUntilStopped(NDBT_Context* ctx, NDBT_Step* step){
 }
 
-int runCreateMaxTables(NDBT_Context* ctx, NDBT_Step* step){
-  int failures = 0;
-  char tabName[256];
-  int numTables = ctx->getProperty("tables", 1000);
-  Ndb* pNdb = GETNDB(step);
-
-  for (int i = 0; i < numTables && failures < 5; i++){
-    BaseString::snprintf(tabName, 256, "MAXTAB%d", i);
-    if (pNdb->waitUntilReady(30) != 0){
-      // Db is not ready, return with failure
-      return NDBT_FAILED;
-    }
-    const NdbDictionary::Table* pTab = ctx->getTab();
-    ndbout << "|- " << tabName << endl;
-    // Set new name for T1
-    NdbDictionary::Table newTab(* pTab);
-    newTab.setName(tabName);
-    // Try to create table in db
-    if (newTab.createTableInDb(pNdb) != 0){
-      ndbout << tabName << " coult not be created" << endl;
-      failures++;
-      continue;
-    }
-    // Verify that table exists in db
-    const NdbDictionary::Table* pTab3 =
-      NDBT_Table::discoverTableFromDb(pNdb, tabName) ;
-    if (pTab3 == NULL){
-      ndbout << tabName << " was not found in DB" << endl;
-      failures++;
-      continue;
-    }
-    if (pTab->equal(*pTab3) == false){
-      ndbout << "It was not equal" << endl;
-      failures++;
-    }
-    int records = 1000;
-    HugoTransactions hugoTrans(*pTab3);
-    if (hugoTrans.loadTable(pNdb, records) != 0){
-      ndbout << "It can NOT be loaded" << endl;
-    } else {
-      ndbout << "It can be loaded" << endl;
-      UtilTransactions utilTrans(*pTab3);
-      if (utilTrans.clearTable(pNdb, records, 64) != 0){
-        ndbout << "It can NOT be cleared" << endl;
-      } else {
-        ndbout << "It can be cleared" << endl;
-      }
-    }
-  }
-  if (pNdb->waitUntilReady(30) != 0){
-    // Db is not ready, return with failure
-    return NDBT_FAILED;
-  }
-  // HURRAAA!
-  return NDBT_OK;
-}
+int runCreateMaxTables(NDBT_Context* ctx, NDBT_Step* step)
+{
+  char tabName[256];
+  int numTables = ctx->getProperty("tables", 1000);
+  Ndb* pNdb = GETNDB(step);
+  NdbDictionary::Dictionary* pDic = pNdb->getDictionary();
+  int i = 0;
+  for (i = 0; i < numTables; i++) {
+    BaseString::snprintf(tabName, 256, "MAXTAB%d", i);
+    if (pNdb->waitUntilReady(30) != 0) {
+      // Db is not ready, return with failure
+      return NDBT_FAILED;
+    }
+    const NdbDictionary::Table* pTab = ctx->getTab();
+    //ndbout << "|- " << tabName << endl;
+    // Set new name for T1
+    NdbDictionary::Table newTab(* pTab);
+    newTab.setName(tabName);
+    // Drop any old (or try to)
+    (void)pDic->dropTable(newTab.getName());
+    // Try to create table in db
+    if (newTab.createTableInDb(pNdb) != 0) {
+      ndbout << tabName << " could not be created: "
+             << pDic->getNdbError() << endl;
+      if (pDic->getNdbError().code == 707 ||
+          pDic->getNdbError().code == 708 ||
+          pDic->getNdbError().code == 826 ||
+          pDic->getNdbError().code == 827)
+        break;
+      return NDBT_FAILED;
+    }
+    // Verify that table exists in db
+    const NdbDictionary::Table* pTab3 =
+      NDBT_Table::discoverTableFromDb(pNdb, tabName) ;
+    if (pTab3 == NULL){
+      ndbout << tabName << " was not found in DB: "
+             << pDic->getNdbError() << endl;
+      return NDBT_FAILED;
+    }
+    if (! newTab.equal(*pTab3)) {
+      ndbout << "It was not equal" << endl;
+      abort();
+      return NDBT_FAILED;
+    }
+    int records = ctx->getNumRecords();
+    HugoTransactions hugoTrans(*pTab3);
+    if (hugoTrans.loadTable(pNdb, records) != 0) {
+      ndbout << "It can NOT be loaded" << endl;
+      return NDBT_FAILED;
+    }
+    UtilTransactions utilTrans(*pTab3);
+    if (utilTrans.clearTable(pNdb, records, 64) != 0) {
+      ndbout << "It can NOT be cleared" << endl;
+      return NDBT_FAILED;
+    }
+  }
+  if (pNdb->waitUntilReady(30) != 0) {
+    // Db is not ready, return with failure
+    return NDBT_FAILED;
+  }
+  ctx->setProperty("maxtables", i);
+  // HURRAAA!
+  return NDBT_OK;
+}
 
-int runDropMaxTables(NDBT_Context* ctx, NDBT_Step* step){
-  int result = NDBT_OK;
-  char tabName[256];
-  int numTables = ctx->getProperty("tables", 1000);
-  Ndb* pNdb = GETNDB(step);
-
-  for (int i = 0; i < numTables; i++){
-    BaseString::snprintf(tabName, 256, "MAXTAB%d", i);
-    if (pNdb->waitUntilReady(30) != 0){
-      // Db is not ready, return with failure
-      return NDBT_FAILED;
-    }
-    // Verify that table exists in db
-    const NdbDictionary::Table* pTab3 =
-      NDBT_Table::discoverTableFromDb(pNdb, tabName) ;
-    if (pTab3 == NULL){
-      ndbout << tabName << " was not found in DB" << endl;
-      continue;
-    }
-    // Try to drop table in db
-    if (pNdb->getDictionary()->dropTable(pTab3->getName()) != 0){
-      ndbout << tabName << " coult not be dropped" << endl;
-      result = NDBT_FAILED;
-    }
-  }
-  return result;
-}
+int runDropMaxTables(NDBT_Context* ctx, NDBT_Step* step)
+{
+  char tabName[256];
+  int numTables = ctx->getProperty("maxtables", (Uint32)0);
+  Ndb* pNdb = GETNDB(step);
+  NdbDictionary::Dictionary* pDic = pNdb->getDictionary();
+  for (int i = 0; i < numTables; i++) {
+    BaseString::snprintf(tabName, 256, "MAXTAB%d", i);
+    if (pNdb->waitUntilReady(30) != 0) {
+      // Db is not ready, return with failure
+      return NDBT_FAILED;
+    }
+    // Verify that table exists in db
+    const NdbDictionary::Table* pTab3 =
+      NDBT_Table::discoverTableFromDb(pNdb, tabName) ;
+    if (pTab3 == NULL) {
+      ndbout << tabName << " was not found in DB: "
+             << pDic->getNdbError() << endl;
+      return NDBT_FAILED;
+    }
+    // Try to drop table in db
+    if (pDic->dropTable(pTab3->getName()) != 0) {
+      ndbout << tabName << " could not be dropped: "
+             << pDic->getNdbError() << endl;
+      return NDBT_FAILED;
+    }
+  }
+  return NDBT_OK;
+}
 
 int runTestFragmentTypes(NDBT_Context* ctx, NDBT_Step* step){
@@ -1622,7 +1618,7 @@ TESTCASE("CreateMaxTables",
	 "Create tables until db says that it can't create any more\n"){
   TC_PROPERTY("tables", 1000);
   INITIALIZER(runCreateMaxTables);
-  FINALIZER(runDropMaxTables);
+  INITIALIZER(runDropMaxTables);
 }
 TESTCASE("PkSizes",
	 "Create tables with all different primary key sizes.\n"\