Commit 16f4f1b4
Authored Oct 28, 2004 by unknown
Merge tulin@bk-internal.mysql.com:/home/bk/mysql-4.1
into poseidon.ndb.mysql.com:/home/tomas/mysql-4.1-ndb
Parents: 5be6c328, 9d07242b
Showing 10 changed files with 457 additions and 52 deletions (+457 -52)
mysql-test/r/ndb_basic.result              +147  -0
mysql-test/t/ndb_basic.test                +151  -0
ndb/include/kernel/ndb_limits.h              +5  -0
ndb/include/ndbapi/ndbapi_limits.h           +3  -2
ndb/src/kernel/blocks/dbdict/Dbdict.cpp      +2  -3
ndb/src/kernel/vm/LongSignal.hpp             +1  -1
ndb/src/ndbapi/NdbScanOperation.cpp          +2  -2
ndb/src/ndbapi/TransporterFacade.cpp       +124  -35
ndb/src/ndbapi/TransporterFacade.hpp         +4  -6
sql/ha_ndbcluster.cc                        +18  -3
mysql-test/r/ndb_basic.result
@@ -414,3 +414,150 @@ select * from t1 where b IS NOT NULL;
a b
1
drop table t1;
create table t1 (
c1 int,
c2 int,
c3 int,
c4 int,
c5 int,
c6 int,
c7 int,
c8 int,
c9 int,
c10 int,
c11 int,
c12 int,
c13 int,
c14 int,
c15 int,
c16 int,
c17 int,
c18 int,
c19 int,
c20 int,
c21 int,
c22 int,
c23 int,
c24 int,
c25 int,
c26 int,
c27 int,
c28 int,
c29 int,
c30 int,
c31 int,
c32 int,
c33 int,
c34 int,
c35 int,
c36 int,
c37 int,
c38 int,
c39 int,
c40 int,
c41 int,
c42 int,
c43 int,
c44 int,
c45 int,
c46 int,
c47 int,
c48 int,
c49 int,
c50 int,
c51 int,
c52 int,
c53 int,
c54 int,
c55 int,
c56 int,
c57 int,
c58 int,
c59 int,
c60 int,
c61 int,
c62 int,
c63 int,
c64 int,
c65 int,
c66 int,
c67 int,
c68 int,
c69 int,
c70 int,
c71 int,
c72 int,
c73 int,
c74 int,
c75 int,
c76 int,
c77 int,
c78 int,
c79 int,
c80 int,
c81 int,
c82 int,
c83 int,
c84 int,
c85 int,
c86 int,
c87 int,
c88 int,
c89 int,
c90 int,
c91 int,
c92 int,
c93 int,
c94 int,
c95 int,
c96 int,
c97 int,
c98 int,
c99 int,
c100 int,
c101 int,
c102 int,
c103 int,
c104 int,
c105 int,
c106 int,
c107 int,
c108 int,
c109 int,
c110 int,
c111 int,
c112 int,
c113 int,
c114 int,
c115 int,
c116 int,
c117 int,
c118 int,
c119 int,
c120 int,
c121 int,
c122 int,
c123 int,
c124 int,
c125 int,
c126 int,
c127 int,
c128 int,
primary key(c1)) engine=ndb;
drop table t1;
create table t1 (
a1234567890123456789012345678901234567890 int primary key,
a12345678901234567890123456789a1234567890 int,
index(a12345678901234567890123456789a1234567890)
) engine=ndb;
show tables;
Tables_in_test
t1
insert into t1 values (1,1),(2,1),(3,1),(4,1),(5,2),(6,1),(7,1);
explain select * from t1 where a12345678901234567890123456789a1234567890=2;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref a12345678901234567890123456789a1234567890 a12345678901234567890123456789a1234567890 5 const 10 Using where
select * from t1 where a12345678901234567890123456789a1234567890=2;
a1234567890123456789012345678901234567890 a12345678901234567890123456789a1234567890
5 2
drop table t1;
mysql-test/t/ndb_basic.test
@@ -371,3 +371,154 @@ select * from t1 order by b;
select * from t1 where b IS NULL;
select * from t1 where b IS NOT NULL;
drop table t1;

#
# test the limit of no of attributes in one table
#
create table t1 (
c1 int,
c2 int,
c3 int,
c4 int,
c5 int,
c6 int,
c7 int,
c8 int,
c9 int,
c10 int,
c11 int,
c12 int,
c13 int,
c14 int,
c15 int,
c16 int,
c17 int,
c18 int,
c19 int,
c20 int,
c21 int,
c22 int,
c23 int,
c24 int,
c25 int,
c26 int,
c27 int,
c28 int,
c29 int,
c30 int,
c31 int,
c32 int,
c33 int,
c34 int,
c35 int,
c36 int,
c37 int,
c38 int,
c39 int,
c40 int,
c41 int,
c42 int,
c43 int,
c44 int,
c45 int,
c46 int,
c47 int,
c48 int,
c49 int,
c50 int,
c51 int,
c52 int,
c53 int,
c54 int,
c55 int,
c56 int,
c57 int,
c58 int,
c59 int,
c60 int,
c61 int,
c62 int,
c63 int,
c64 int,
c65 int,
c66 int,
c67 int,
c68 int,
c69 int,
c70 int,
c71 int,
c72 int,
c73 int,
c74 int,
c75 int,
c76 int,
c77 int,
c78 int,
c79 int,
c80 int,
c81 int,
c82 int,
c83 int,
c84 int,
c85 int,
c86 int,
c87 int,
c88 int,
c89 int,
c90 int,
c91 int,
c92 int,
c93 int,
c94 int,
c95 int,
c96 int,
c97 int,
c98 int,
c99 int,
c100 int,
c101 int,
c102 int,
c103 int,
c104 int,
c105 int,
c106 int,
c107 int,
c108 int,
c109 int,
c110 int,
c111 int,
c112 int,
c113 int,
c114 int,
c115 int,
c116 int,
c117 int,
c118 int,
c119 int,
c120 int,
c121 int,
c122 int,
c123 int,
c124 int,
c125 int,
c126 int,
c127 int,
c128 int,
primary key(c1)) engine=ndb;

drop table t1;

#
# test max size of attribute name and truncation
#
create table t1 (
a1234567890123456789012345678901234567890 int primary key,
a12345678901234567890123456789a1234567890 int,
index(a12345678901234567890123456789a1234567890)
) engine=ndb;

show tables;

insert into t1 values (1,1),(2,1),(3,1),(4,1),(5,2),(6,1),(7,1);
explain select * from t1 where a12345678901234567890123456789a1234567890=2;
select * from t1 where a12345678901234567890123456789a1234567890=2;

drop table t1;
ndb/include/kernel/ndb_limits.h
@@ -117,4 +117,9 @@
*/
#define NDB_BLOB_HEAD_SIZE 2 /* sizeof(NdbBlob::Head) >> 2 */

+/*
+ * Long signals
+ */
+#define NDB_SECTION_SEGMENT_SZ 60

#endif
ndb/include/ndbapi/ndbapi_limits.h
@@ -22,9 +22,10 @@
#define NDB_MAX_DATABASE_NAME_SIZE 128
#define NDB_MAX_SCHEMA_NAME_SIZE 128
#define NDB_MAX_TAB_NAME_SIZE 128
-#define NDB_MAX_ATTRIBUTES_IN_TABLE 91
+#define NDB_MAX_ATTR_NAME_SIZE 32
+#define NDB_MAX_ATTRIBUTES_IN_TABLE 128
-#define NDB_MAX_TUPLE_SIZE_IN_WORDS 1023
+#define NDB_MAX_TUPLE_SIZE_IN_WORDS 2013
#define NDB_MAX_KEYSIZE_IN_WORDS 1023
#define NDB_MAX_KEY_SIZE NDB_MAX_KEYSIZE_IN_WORDS*sizeof(Uint32)
#define NDB_MAX_TUPLE_SIZE NDB_MAX_TUPLE_SIZE_IN_WORDS*sizeof(uint32)
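
Note: taken together, the new values raise the per-table attribute limit to 128 and the per-tuple budget to 2013 32-bit words. A minimal stand-alone sketch of that arithmetic (macro values copied from the hunk above; NDB's Uint32 is modelled here with std::uint32_t, which is an assumption of this sketch, not something taken from the diff):

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Values copied from the new ndbapi_limits.h hunk above.
#define NDB_MAX_ATTRIBUTES_IN_TABLE 128
#define NDB_MAX_TUPLE_SIZE_IN_WORDS 2013

int main()
{
  // NDB_MAX_TUPLE_SIZE is words * sizeof(Uint32), i.e. 2013 * 4 = 8052 bytes.
  const std::size_t tuple_bytes =
      NDB_MAX_TUPLE_SIZE_IN_WORDS * sizeof(std::uint32_t);
  std::printf("max attributes per table: %d\n", NDB_MAX_ATTRIBUTES_IN_TABLE);
  std::printf("max tuple size: %zu bytes\n", tuple_bytes);
  return 0;
}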
ndb/src/kernel/blocks/dbdict/Dbdict.cpp
@@ -3661,9 +3661,8 @@ Dbdict::execCREATE_FRAGMENTATION_CONF(Signal* signal){
  req->tableId = tabPtr.i;
  req->tableVersion = tabEntry->m_tableVersion + 1;
-  sendSignal(rg, GSN_CREATE_TAB_REQ, signal,
-             CreateTabReq::SignalLength, JBB);
+  sendFragmentedSignal(rg, GSN_CREATE_TAB_REQ, signal,
+                       CreateTabReq::SignalLength, JBB);
  return;
}
ndb/src/kernel/vm/LongSignal.hpp
@@ -25,7 +25,7 @@
 */
struct SectionSegment {
-  STATIC_CONST( DataLength = 60 );
+  STATIC_CONST( DataLength = NDB_SECTION_SEGMENT_SZ );
  Uint32 m_ownerRef;
  Uint32 m_sz;
ndb/src/ndbapi/NdbScanOperation.cpp
@@ -612,7 +612,7 @@ NdbScanOperation::send_next_scan(Uint32 cnt, bool stopScanFlag){
    LinearSectionPtr ptr[3];
    ptr[0].p = prep_array;
    ptr[0].sz = cnt;
-    ret = tp->sendFragmentedSignal(&tSignal, nodeId, ptr, 1);
+    ret = tp->sendSignal(&tSignal, nodeId, ptr, 1);
  } else {
    tSignal.setLength(4+cnt);
    ret = tp->sendSignal(&tSignal, nodeId);
@@ -803,7 +803,7 @@ NdbScanOperation::doSendScan(int aProcessorId)
    LinearSectionPtr ptr[3];
    ptr[0].p = m_prepared_receivers;
    ptr[0].sz = theParallelism;
-    if (tp->sendFragmentedSignal(tSignal, aProcessorId, ptr, 1) == -1) {
+    if (tp->sendSignal(tSignal, aProcessorId, ptr, 1) == -1) {
      setErrorCode(4002);
      return -1;
    }
ndb/src/ndbapi/TransporterFacade.cpp
@@ -34,6 +34,7 @@
#include <NdbConfig.h>
#include <ndb_version.h>
#include <SignalLoggerManager.hpp>
+#include <kernel/ndb_limits.h>
//#define REPORT_TRANSPORTER
//#define API_TRACE;
@@ -475,7 +476,8 @@ TransporterFacade::TransporterFacade() :
  theTransporterRegistry(0),
  theStopReceive(0),
  theSendThread(NULL),
-  theReceiveThread(NULL)
+  theReceiveThread(NULL),
+  m_fragmented_signal_id(0)
{
  theOwnId = 0;
@@ -833,9 +835,129 @@ TransporterFacade::sendSignalUnCond(NdbApiSignal * aSignal, NodeId aNode){
  return (ss == SEND_OK ? 0 : -1);
}

#define CHUNK_SZ NDB_SECTION_SEGMENT_SZ*64 // related to MAX_MESSAGE_SIZE

int
TransporterFacade::sendFragmentedSignal(NdbApiSignal* aSignal, NodeId aNode,
                                        LinearSectionPtr ptr[3], Uint32 secs)
{
  if(getIsNodeSendable(aNode) != true)
    return -1;

#ifdef API_TRACE
  if(setSignalLog() && TRACE_GSN(aSignal->theVerId_signalNumber)){
    Uint32 tmp = aSignal->theSendersBlockRef;
    aSignal->theSendersBlockRef = numberToRef(tmp, theOwnId);
    signalLogger.sendSignal(* aSignal,
                            1,
                            aSignal->getDataPtrSend(),
                            aNode, ptr, secs);
    aSignal->theSendersBlockRef = tmp;
  }
#endif

  NdbApiSignal tmp_signal(*(SignalHeader*)aSignal);
  LinearSectionPtr tmp_ptr[3];
  Uint32 unique_id = m_fragmented_signal_id++; // next unique id
  unsigned i;
  for (i = 0; i < secs; i++)
    tmp_ptr[i] = ptr[i];

  unsigned start_i = 0;
  unsigned chunk_sz = 0;
  unsigned fragment_info = 0;
  Uint32 *tmp_data = tmp_signal.getDataPtrSend();
  for (i = 0; i < secs;) {
    unsigned save_sz = tmp_ptr[i].sz;
    tmp_data[i-start_i] = i;
    if (chunk_sz + save_sz > CHUNK_SZ) {
      // truncate
      unsigned send_sz = CHUNK_SZ - chunk_sz;
      if (i != start_i) // first piece of a new section has to be a multiple of NDB_SECTION_SEGMENT_SZ
      {
        send_sz =
          NDB_SECTION_SEGMENT_SZ
          *(send_sz+NDB_SECTION_SEGMENT_SZ-1)
          /NDB_SECTION_SEGMENT_SZ;
        if (send_sz > save_sz)
          send_sz = save_sz;
      }
      tmp_ptr[i].sz = send_sz;

      if (fragment_info < 2) // 1 = first fragment, 2 = middle fragments
        fragment_info++;

      // send tmp_signal
      tmp_data[i-start_i+1] = unique_id;
      tmp_signal.setLength(i-start_i+2);
      tmp_signal.m_fragmentInfo = fragment_info;
      tmp_signal.m_noOfSections = i-start_i+1;
      // do prepare send
      {
        SendStatus ss = theTransporterRegistry->prepareSend
          (&tmp_signal,
           1, /*JBB*/
           tmp_data,
           aNode,
           &tmp_ptr[start_i]);
        assert(ss != SEND_MESSAGE_TOO_BIG);
        if (ss != SEND_OK) return -1;
      }
      // setup variables for next signal
      start_i = i;
      chunk_sz = 0;
      tmp_ptr[i].sz = save_sz-send_sz;
      tmp_ptr[i].p += send_sz;
      if (tmp_ptr[i].sz == 0)
        i++;
    }
    else
    {
      chunk_sz += save_sz;
      i++;
    }
  }

  unsigned a_sz = aSignal->getLength();

  if (fragment_info > 0) {
    // update the original signal to include section info
    Uint32 *a_data = aSignal->getDataPtrSend();
    unsigned tmp_sz = i-start_i;
    memcpy(a_data+a_sz,
           tmp_data,
           tmp_sz*sizeof(Uint32));
    a_data[a_sz+tmp_sz] = unique_id;
    aSignal->setLength(a_sz+tmp_sz+1);

    // send last fragment
    aSignal->m_fragmentInfo = 3; // 3 = last fragment
    aSignal->m_noOfSections = i-start_i;
  } else {
    aSignal->m_noOfSections = secs;
  }

  // send aSignal
  int ret;
  {
    SendStatus ss = theTransporterRegistry->prepareSend
      (aSignal,
       1/*JBB*/,
       aSignal->getDataPtrSend(),
       aNode,
       &tmp_ptr[start_i]);
    assert(ss != SEND_MESSAGE_TOO_BIG);
    ret = (ss == SEND_OK ? 0 : -1);
  }
  aSignal->m_noOfSections = 0;
  aSignal->m_fragmentInfo = 0;
  aSignal->setLength(a_sz);
  return ret;
}

int
TransporterFacade::sendSignal(NdbApiSignal* aSignal, NodeId aNode,
                              LinearSectionPtr ptr[3], Uint32 secs){
  aSignal->m_noOfSections = secs;
  if(getIsNodeSendable(aNode) == true){
#ifdef API_TRACE
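Note: the new TransporterFacade::sendFragmentedSignal above splits the attached sections into fragments of at most CHUNK_SZ (NDB_SECTION_SEGMENT_SZ*64 = 3840) words, and when a section is continued into the next fragment the piece kept in the current one is rounded to a multiple of NDB_SECTION_SEGMENT_SZ and capped at the section size. A minimal stand-alone sketch of just that size calculation (constants copied from the diff; the helper name and signature are invented for illustration and are not part of the NDB API):

#include <algorithm>
#include <cassert>

// Constants copied from ndb_limits.h / TransporterFacade.cpp in this commit.
static const unsigned NDB_SECTION_SEGMENT_SZ = 60;
static const unsigned CHUNK_SZ = NDB_SECTION_SEGMENT_SZ * 64; // 3840 words per fragment

// Hypothetical helper: given that chunk_sz words are already queued for the
// current fragment, return how many words of a section of save_sz words go
// into this fragment. first_in_fragment corresponds to i == start_i in the
// real loop above.
unsigned words_for_this_fragment(unsigned chunk_sz, unsigned save_sz,
                                 bool first_in_fragment)
{
  assert(chunk_sz <= CHUNK_SZ);
  if (chunk_sz + save_sz <= CHUNK_SZ)
    return save_sz;                        // whole section fits, no split
  unsigned send_sz = CHUNK_SZ - chunk_sz;  // space left in this fragment
  if (!first_in_fragment) {
    // A continued section is cut at a multiple of NDB_SECTION_SEGMENT_SZ,
    // mirroring send_sz = SEG * (send_sz + SEG - 1) / SEG in the diff.
    send_sz = NDB_SECTION_SEGMENT_SZ *
              ((send_sz + NDB_SECTION_SEGMENT_SZ - 1) / NDB_SECTION_SEGMENT_SZ);
    send_sz = std::min(send_sz, save_sz);
  }
  return send_sz;
}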
@@ -865,39 +987,6 @@ TransporterFacade::sendFragmentedSignal(NdbApiSignal* aSignal, NodeId aNode,
  return -1;
}

-int
-TransporterFacade::sendFragmentedSignalUnCond(NdbApiSignal* aSignal, NodeId aNode,
-                                              LinearSectionPtr ptr[3], Uint32 secs){
-  aSignal->m_noOfSections = secs;
-#ifdef API_TRACE
-  if(setSignalLog() && TRACE_GSN(aSignal->theVerId_signalNumber)){
-    Uint32 tmp = aSignal->theSendersBlockRef;
-    aSignal->theSendersBlockRef = numberToRef(tmp, theOwnId);
-    signalLogger.sendSignal(* aSignal,
-                            1,
-                            aSignal->getDataPtrSend(),
-                            aNode, ptr, secs);
-    aSignal->theSendersBlockRef = tmp;
-  }
-#endif
-  SendStatus ss = theTransporterRegistry->prepareSend(aSignal,
-                                                      1, // JBB
-                                                      aSignal->getDataPtrSend(),
-                                                      aNode,
-                                                      ptr);
-  assert(ss != SEND_MESSAGE_TOO_BIG);
-  aSignal->m_noOfSections = 0;
-  return (ss == SEND_OK ? 0 : -1);
-}

/******************************************************************************
 * CONNECTION METHODS Etc
 ******************************************************************************/
ndb/src/ndbapi/TransporterFacade.hpp
@@ -69,14 +69,11 @@ public:
  // Only sends to nodes which are alive
  int sendSignal(NdbApiSignal * signal, NodeId nodeId);
+  int sendSignal(NdbApiSignal*, NodeId,
+                 LinearSectionPtr ptr[3], Uint32 secs);
  int sendFragmentedSignal(NdbApiSignal*, NodeId,
                           LinearSectionPtr ptr[3], Uint32 secs);
-  //Dirrrrty
-  int sendFragmentedSignalUnCond(NdbApiSignal*, NodeId,
-                                 LinearSectionPtr ptr[3], Uint32 secs);

  // Is node available for running transactions
  bool get_node_alive(NodeId nodeId) const;
  bool get_node_stopping(NodeId nodeId) const;
@@ -224,7 +221,8 @@ private:
  } m_threads;

  Uint32 m_max_trans_id;
+  Uint32 m_fragmented_signal_id;

  /**
   * execute function
   */
sql/ha_ndbcluster.cc
@@ -1393,8 +1393,13 @@ int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op,
  // Set bound if not cancelled via type -1
  if (p.bound_type != -1)
-    if (op->setBound(field->field_name, p.bound_type, p.bound_ptr))
+  {
+    char truncated_field_name[NDB_MAX_ATTR_NAME_SIZE];
+    strnmov(truncated_field_name, field->field_name, sizeof(truncated_field_name));
+    truncated_field_name[sizeof(truncated_field_name)-1] = '\0';
+    if (op->setBound(truncated_field_name, p.bound_type, p.bound_ptr))
      ERR_RETURN(op->getNdbError());
+  }
  }
}
@@ -3102,7 +3107,12 @@ static int create_ndb_column(NDBCOL &col,
                             HA_CREATE_INFO *info)
{
  // Set name
-  col.setName(field->field_name);
+  {
+    char truncated_field_name[NDB_MAX_ATTR_NAME_SIZE];
+    strnmov(truncated_field_name, field->field_name, sizeof(truncated_field_name));
+    truncated_field_name[sizeof(truncated_field_name)-1] = '\0';
+    col.setName(truncated_field_name);
+  }
  // Get char set
  CHARSET_INFO *cs = field->charset();
  // Set type and sizes
@@ -3430,7 +3440,12 @@ int ha_ndbcluster::create_index(const char *name,
  {
    Field *field = key_part->field;
    DBUG_PRINT("info", ("attr: %s", field->field_name));
-    ndb_index.addColumnName(field->field_name);
+    {
+      char truncated_field_name[NDB_MAX_ATTR_NAME_SIZE];
+      strnmov(truncated_field_name, field->field_name, sizeof(truncated_field_name));
+      truncated_field_name[sizeof(truncated_field_name)-1] = '\0';
+      ndb_index.addColumnName(truncated_field_name);
+    }
  }

  if (dict->createIndex(ndb_index))
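Note: the three ha_ndbcluster.cc hunks above repeat the same truncate-and-terminate pattern so that field names longer than NDB_MAX_ATTR_NAME_SIZE (32 in the new ndbapi_limits.h) are cut down before being handed to NDB. A hypothetical helper that captures the pattern (strnmov is MySQL's string routine used in the diff; standard strncpy is substituted here so the sketch stands alone):

#include <cstring>

// Value taken from the new ndbapi_limits.h above.
#define NDB_MAX_ATTR_NAME_SIZE 32

// Hypothetical helper, not from the commit: copy a field name into a fixed
// buffer, truncating and always NUL-terminating, the way each hunk above does.
static void truncate_attr_name(char (&dst)[NDB_MAX_ATTR_NAME_SIZE], const char *src)
{
  std::strncpy(dst, src, sizeof(dst));
  dst[sizeof(dst) - 1] = '\0';
}

With the long column names used in the new ndb_basic test, only the first NDB_MAX_ATTR_NAME_SIZE - 1 (31) characters of a name reach NDB, which is what the test's EXPLAIN and SELECT on the indexed column exercise.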