Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
M
mariadb
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Milestones
Merge Requests
0
Merge Requests
0
Analytics
Analytics
Repository
Value Stream
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Commits
Issue Boards
Open sidebar
Kirill Smelkov
mariadb
Commits
1192517a
Commit
1192517a
authored
Aug 26, 2004
by
tomas@poseidon.(none)
Browse files
Options
Browse Files
Download
Plain Diff
Merge tulin@bk-internal.mysql.com:/home/bk/mysql-4.1-ndb
into poseidon.(none):/home/tomas/mysql-4.1-ndb
parents
e97e08b2
eaff9908
Changes
5
Hide whitespace changes
Inline
Side-by-side
Showing
5 changed files
with
54 additions
and
29 deletions
+54
-29
ndb/src/kernel/blocks/ERROR_codes.txt
ndb/src/kernel/blocks/ERROR_codes.txt
+6
-1
ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
+13
-10
ndb/src/kernel/vm/Configuration.cpp
ndb/src/kernel/vm/Configuration.cpp
+6
-2
ndb/src/kernel/vm/pc.hpp
ndb/src/kernel/vm/pc.hpp
+7
-0
ndb/test/ndbapi/testDict.cpp
ndb/test/ndbapi/testDict.cpp
+22
-16
No files found.
ndb/src/kernel/blocks/ERROR_codes.txt
View file @
1192517a
...
...
@@ -5,7 +5,7 @@ Next DBACC 3001
Next DBTUP 4007
Next DBLQH 5040
Next DBDICT 6006
Next DBDIH 7173
Next DBDIH 7174
Next DBTC 8035
Next CMVMI 9000
Next BACKUP 10022
...
...
@@ -387,6 +387,11 @@ Backup Stuff:
5028: Crash when receiving LQHKEYREQ (in non-master)
Failed Create Table:
--------------------
7173: Create table failed due to not sufficient number of fragment or
replica records.
Drop Table/Index:
-----------------
4001: Crash on REL_TABMEMREQ in TUP
...
...
ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
View file @
1192517a
...
...
@@ -6425,6 +6425,10 @@ void Dbdih::execDIADDTABREQ(Signal* signal)
tabPtr
.
p
->
totalfragments
=
noFragments
;
ndbrequire
(
noReplicas
==
cnoReplicas
);
// Only allowed
if
(
ERROR_INSERTED
(
7173
))
{
addtabrefuseLab
(
signal
,
connectPtr
,
ZREPLERROR1
);
return
;
}
if
((
noReplicas
*
noFragments
)
>
cnoFreeReplicaRec
)
{
jam
();
addtabrefuseLab
(
signal
,
connectPtr
,
ZREPLERROR1
);
...
...
@@ -6736,13 +6740,15 @@ void Dbdih::tableDeleteLab(Signal* signal, FileRecordPtr filePtr)
void
Dbdih
::
releaseTable
(
TabRecordPtr
tabPtr
)
{
FragmentstorePtr
fragPtr
;
for
(
Uint32
fragId
=
0
;
fragId
<
tabPtr
.
p
->
totalfragments
;
fragId
++
)
{
jam
();
getFragstore
(
tabPtr
.
p
,
fragId
,
fragPtr
);
releaseReplicas
(
fragPtr
.
p
->
storedReplicas
);
releaseReplicas
(
fragPtr
.
p
->
oldStoredReplicas
);
}
//for
releaseFragments
(
tabPtr
);
if
(
tabPtr
.
p
->
noOfFragChunks
>
0
)
{
for
(
Uint32
fragId
=
0
;
fragId
<
tabPtr
.
p
->
totalfragments
;
fragId
++
)
{
jam
();
getFragstore
(
tabPtr
.
p
,
fragId
,
fragPtr
);
releaseReplicas
(
fragPtr
.
p
->
storedReplicas
);
releaseReplicas
(
fragPtr
.
p
->
oldStoredReplicas
);
}
//for
releaseFragments
(
tabPtr
);
}
if
(
tabPtr
.
p
->
tabFile
[
0
]
!=
RNIL
)
{
jam
();
releaseFile
(
tabPtr
.
p
->
tabFile
[
0
]);
...
...
@@ -6875,9 +6881,6 @@ Uint32 Dbdih::extractNodeInfo(const Fragmentstore * fragPtr, Uint32 nodes[])
return
nodeCount
;
}
//Dbdih::extractNodeInfo()
#define NO_OF_FRAGS_PER_CHUNK 16
#define LOG_NO_OF_FRAGS_PER_CHUNK 4
void
Dbdih
::
getFragstore
(
TabRecord
*
tab
,
//In parameter
Uint32
fragNo
,
//In parameter
...
...
ndb/src/kernel/vm/Configuration.cpp
View file @
1192517a
...
...
@@ -643,8 +643,12 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
cfg
.
put
(
CFG_DIH_CONNECT
,
noOfOperations
+
noOfTransactions
+
46
);
Uint32
noFragPerTable
=
((
noOfDBNodes
+
NO_OF_FRAGS_PER_CHUNK
-
1
)
>>
LOG_NO_OF_FRAGS_PER_CHUNK
)
<<
LOG_NO_OF_FRAGS_PER_CHUNK
;
cfg
.
put
(
CFG_DIH_FRAG_CONNECT
,
NO_OF_FRAG_PER_NODE
*
noOfMetaTables
*
noOfDBNodes
);
noFragPerTable
*
noOfMetaTables
);
int
temp
;
temp
=
noOfReplicas
-
2
;
...
...
@@ -655,7 +659,7 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
cfg
.
put
(
CFG_DIH_MORE_NODES
,
temp
*
NO_OF_FRAG_PER_NODE
*
noOfMetaTables
*
noOfDBNodes
);
cfg
.
put
(
CFG_DIH_REPLICAS
,
NO_OF_FRAG_PER_NODE
*
noOfMetaTables
*
noOfDBNodes
*
noOfReplicas
);
...
...
ndb/src/kernel/vm/pc.hpp
View file @
1192517a
...
...
@@ -150,6 +150,13 @@
#define NO_OF_FRAG_PER_NODE 1
#define MAX_FRAG_PER_NODE 8
/**
* DIH allocates fragments in chunk for fast find of fragment record.
* These parameters define chunk size and log of chunk size.
*/
#define NO_OF_FRAGS_PER_CHUNK 8
#define LOG_NO_OF_FRAGS_PER_CHUNK 3
/* ---------------------------------------------------------------- */
// To avoid synching too big chunks at a time we synch after writing
// a certain number of data/UNDO pages. (e.g. 2 MBytes).
...
...
ndb/test/ndbapi/testDict.cpp
View file @
1192517a
...
...
@@ -1002,11 +1002,13 @@ int runGetPrimaryKey(NDBT_Context* ctx, NDBT_Step* step){
return
result
;
}
int
struct
ErrorCodes
{
int
error_id
;
bool
crash
;};
ErrorCodes
NF_codes
[]
=
{
6003
,
6004
//,6005
{
6003
,
true
},
{
6004
,
true
},
//,6005, true,
{
7173
,
false
}
};
int
...
...
@@ -1042,7 +1044,9 @@ runNF1(NDBT_Context* ctx, NDBT_Step* step){
for
(
int
i
=
0
;
i
<
sz
;
i
++
){
int
rand
=
myRandom48
(
restarter
.
getNumDbNodes
());
int
nodeId
=
restarter
.
getRandomNotMasterNodeId
(
rand
);
int
error
=
NF_codes
[
i
];
struct
ErrorCodes
err_struct
=
NF_codes
[
i
];
int
error
=
err_struct
.
error_id
;
bool
crash
=
err_struct
.
crash
;
g_info
<<
"NF1: node = "
<<
nodeId
<<
" error code = "
<<
error
<<
endl
;
...
...
@@ -1057,31 +1061,33 @@ runNF1(NDBT_Context* ctx, NDBT_Step* step){
CHECK2
(
dict
->
createTable
(
*
pTab
)
==
0
,
"failed to create table"
);
CHECK2
(
restarter
.
waitNodesNoStart
(
&
nodeId
,
1
)
==
0
,
if
(
crash
)
{
CHECK2
(
restarter
.
waitNodesNoStart
(
&
nodeId
,
1
)
==
0
,
"waitNodesNoStart failed"
);
if
(
myRandom48
(
100
)
>
50
){
CHECK2
(
restarter
.
startNodes
(
&
nodeId
,
1
)
==
0
,
if
(
myRandom48
(
100
)
>
50
){
CHECK2
(
restarter
.
startNodes
(
&
nodeId
,
1
)
==
0
,
"failed to start node"
);
CHECK2
(
restarter
.
waitClusterStarted
()
==
0
,
CHECK2
(
restarter
.
waitClusterStarted
()
==
0
,
"waitClusterStarted failed"
);
CHECK2
(
dict
->
dropTable
(
pTab
->
getName
())
==
0
,
CHECK2
(
dict
->
dropTable
(
pTab
->
getName
())
==
0
,
"drop table failed"
);
}
else
{
CHECK2
(
dict
->
dropTable
(
pTab
->
getName
())
==
0
,
}
else
{
CHECK2
(
dict
->
dropTable
(
pTab
->
getName
())
==
0
,
"drop table failed"
);
CHECK2
(
restarter
.
startNodes
(
&
nodeId
,
1
)
==
0
,
CHECK2
(
restarter
.
startNodes
(
&
nodeId
,
1
)
==
0
,
"failed to start node"
);
CHECK2
(
restarter
.
waitClusterStarted
()
==
0
,
CHECK2
(
restarter
.
waitClusterStarted
()
==
0
,
"waitClusterStarted failed"
);
}
}
CHECK2
(
restarter
.
dumpStateOneNode
(
nodeId
,
&
val
,
1
)
==
0
,
CHECK2
(
restarter
.
dumpStateOneNode
(
nodeId
,
&
val
,
1
)
==
0
,
"Failed to set LCP to min value"
);
}
}
}
end:
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment