nexedi / MariaDB / Commits / 2f4db892

Commit 2f4db892 authored Jan 27, 2005 by tomas@poseidon.ndb.mysql.com
moved sorting and duplicate check for unique indexes from api to kernel
parent 3b1703b1

Showing 4 changed files with 40 additions and 43 deletions (+40 / -43)
  ndb/Makefile.am                                 +2   -1
  ndb/include/kernel/signaldata/CreateIndx.hpp    +1   -1
  ndb/src/kernel/blocks/dbdict/Dbdict.cpp         +37  -21
  ndb/src/ndbapi/NdbDictionaryImpl.cpp            +0   -20
ndb/Makefile.am    View file @ 2f4db892
 SUBDIRS = src tools . include @ndb_opt_subdirs@
 DIST_SUBDIRS = src tools include test docs
-EXTRA_DIST = config
+EXTRA_DIST = config ndbapi-examples
 include $(top_srcdir)/ndb/config/common.mk.am
 dist-hook:
 	-rm -rf `find $(distdir) -type d -name SCCS`
 	-rm -rf `find $(distdir) -type d -name old_files`
+	-rm -rf `find $(distdir)/ndbapi-examples -name '*.o'`
 	list='$(SUBDIRS)'; for subdir in $$list; do \
 	  if test "$$subdir" != "." -a "$$subdir" != "include"; then \
 	    files="`find $$subdir -name '*\.h'` `find $$subdir -name '*\.hpp'`"; \
...
ndb/include/kernel/signaldata/CreateIndx.hpp    View file @ 2f4db892
@@ -207,7 +207,7 @@ public:
     NotUnique = 4251,
     AllocationError = 4252,
     CreateIndexTableFailed = 4253,
-    InvalidAttributeOrder = 4255
+    DuplicateAttributes = 4258
   };
 private:
ndb/src/kernel/blocks/dbdict/Dbdict.cpp    View file @ 2f4db892
@@ -6301,6 +6301,7 @@ Dbdict::createIndex_slavePrepare(Signal* signal, OpCreateIndexPtr opPtr)
 void
 Dbdict::createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr)
 {
+  Uint32 attrid_map[MAX_ATTRIBUTES_IN_INDEX];
   Uint32 k;
   jam();
   const CreateIndxReq* const req = &opPtr.p->m_request;
@@ -6369,39 +6370,49 @@ Dbdict::createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr)
     // tree node size in words (make configurable later)
     indexPtr.p->tupKeyLength = MAX_TTREE_NODE_SIZE;
   }
-  // hash index attributes must currently be in table order
-  Uint32 prevAttrId = RNIL;
+  AttributeMask mask;
+  mask.clear();
   for (k = 0; k < opPtr.p->m_attrList.sz; k++) {
     jam();
-    bool found = false;
-    for (Uint32 tAttr = tablePtr.p->firstAttribute; tAttr != RNIL; ) {
-      AttributeRecord* aRec = c_attributeRecordPool.getPtr(tAttr);
-      tAttr = aRec->nextAttrInTable;
-      if (aRec->attributeId != opPtr.p->m_attrList.id[k])
+    unsigned current_id = opPtr.p->m_attrList.id[k];
+    AttributeRecord* aRec = NULL;
+    Uint32 tAttr = tablePtr.p->firstAttribute;
+    for (; tAttr != RNIL; tAttr = aRec->nextAttrInTable)
+    {
+      aRec = c_attributeRecordPool.getPtr(tAttr);
+      if (aRec->attributeId != current_id)
         continue;
       jam();
-      found = true;
-      const Uint32 a = aRec->attributeDescriptor;
-      if (indexPtr.p->isHashIndex()) {
-        const Uint32 s1 = AttributeDescriptor::getSize(a);
-        const Uint32 s2 = AttributeDescriptor::getArraySize(a);
-        indexPtr.p->tupKeyLength += ((1 << s1) * s2 + 31) >> 5;
-      }
       break;
     }
-    if (! found) {
+    if (tAttr == RNIL) {
       jam();
       opPtr.p->m_errorCode = CreateIndxRef::BadRequestType;
       opPtr.p->m_errorLine = __LINE__;
       return;
     }
-    if (indexPtr.p->isHashIndex() &&
-        k > 0 && prevAttrId >= opPtr.p->m_attrList.id[k]) {
+    if (mask.get(current_id))
+    {
       jam();
-      opPtr.p->m_errorCode = CreateIndxRef::InvalidAttributeOrder;
+      opPtr.p->m_errorCode = CreateIndxRef::DuplicateAttributes;
       opPtr.p->m_errorLine = __LINE__;
       return;
     }
-    prevAttrId = opPtr.p->m_attrList.id[k];
+    mask.set(current_id);
+    const Uint32 a = aRec->attributeDescriptor;
+    unsigned kk = k;
+    if (indexPtr.p->isHashIndex()) {
+      const Uint32 s1 = AttributeDescriptor::getSize(a);
+      const Uint32 s2 = AttributeDescriptor::getArraySize(a);
+      indexPtr.p->tupKeyLength += ((1 << s1) * s2 + 31) >> 5;
+      // reorder the attributes according to the tableid order
+      // for unque indexes
+      for (; kk > 0 && current_id < attrid_map[kk - 1] >> 16; kk--)
+        attrid_map[kk] = attrid_map[kk - 1];
+    }
+    attrid_map[kk] = k | (current_id << 16);
   }
   indexPtr.p->noOfPrimkey = indexPtr.p->noOfAttributes;
   // plus concatenated primary table key attribute
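The new kernel-side code above packs each index attribute as k | (current_id << 16) into attrid_map and keeps the array sorted by attribute id with an insertion step, so that for hash (unique) indexes the attributes end up in primary-table order regardless of the order the API sent them in. A minimal standalone sketch of that packing and reordering, with made-up attribute ids (illustration only, not NDB code):

  // Sketch of the attrid_map technique from Dbdict::createIndex_toCreateTable:
  // low 16 bits hold the caller's position k, high 16 bits hold the table
  // attribute id; entries are insertion-sorted by attribute id.
  #include <cstdio>
  #include <vector>

  int main() {
    const unsigned ids[] = {7, 2, 5};   // hypothetical attribute ids, in API order
    const unsigned n = sizeof(ids) / sizeof(ids[0]);
    std::vector<unsigned> attrid_map(n);

    for (unsigned k = 0; k < n; k++) {
      unsigned current_id = ids[k];
      unsigned kk = k;
      // shift entries with a larger id one slot up, then insert this one
      for (; kk > 0 && current_id < (attrid_map[kk - 1] >> 16); kk--)
        attrid_map[kk] = attrid_map[kk - 1];
      attrid_map[kk] = k | (current_id << 16);
    }

    // print the attributes in table order together with their original position
    for (unsigned k = 0; k < n; k++)
      std::printf("slot %u: attribute id %u (was position %u)\n",
                  k, attrid_map[k] >> 16, attrid_map[k] & 0xffff);
    return 0;
  }

Run on the ids above this prints attribute ids 2, 5, 7 with original positions 1, 2, 0, which is the mapping the next hunk consumes when it writes the index key attributes via attrid_map[k] & 0xffff.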
@@ -6421,12 +6432,17 @@ Dbdict::createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr)
   // write index key attributes
   AttributeRecordPtr aRecPtr;
   c_attributeRecordPool.getPtr(aRecPtr, tablePtr.p->firstAttribute);
-  for (k = 0; k < opPtr.p->m_attrList.sz; k++) {
+  for (unsigned k = 0; k < opPtr.p->m_attrList.sz; k++) {
+    // insert the attributes in the order decided above in attrid_map
+    // k is new order, current_id is in previous order
+    // ToDo: make sure "current_id" is stored with the table and
+    //       passed up to NdbDictionary
+    unsigned current_id = opPtr.p->m_attrList.id[attrid_map[k] & 0xffff];
     jam();
     for (Uint32 tAttr = tablePtr.p->firstAttribute; tAttr != RNIL; ) {
       AttributeRecord* aRec = c_attributeRecordPool.getPtr(tAttr);
       tAttr = aRec->nextAttrInTable;
-      if (aRec->attributeId != opPtr.p->m_attrList.id[k])
+      if (aRec->attributeId != current_id)
         continue;
       jam();
       const Uint32 a = aRec->attributeDescriptor;
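Duplicate attributes are now also caught in the kernel: the first Dbdict.cpp hunk clears an AttributeMask, tests mask.get(current_id) and fails with CreateIndxRef::DuplicateAttributes before setting the bit. A small sketch of that bitmask-style duplicate detection, using std::bitset as a stand-in for the NDB AttributeMask class (the 512-bit bound and function names here are assumptions for illustration):

  #include <bitset>
  #include <cstdio>

  // Returns the first repeated id, or -1 if all ids are distinct -- the same
  // idea as the mask.get()/mask.set() pair in Dbdict above, where a repeat
  // maps to the DuplicateAttributes error instead of a return value.
  static int find_duplicate(const unsigned* ids, unsigned count) {
    std::bitset<512> mask;                  // illustrative size, not the NDB limit
    for (unsigned k = 0; k < count; k++) {
      if (mask.test(ids[k]))
        return (int)ids[k];
      mask.set(ids[k]);
    }
    return -1;
  }

  int main() {
    const unsigned ok[]  = {7, 2, 5};
    const unsigned bad[] = {7, 2, 7};
    std::printf("distinct ids -> %d\n", find_duplicate(ok, 3));   // -1
    std::printf("repeated id  -> %d\n", find_duplicate(bad, 3));  //  7
    return 0;
  }

This replaces the old ordering-based rejection (InvalidAttributeOrder, 4255) with an order-independent check, which is why the error constant in CreateIndx.hpp changes to DuplicateAttributes (4258).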
ndb/src/ndbapi/NdbDictionaryImpl.cpp    View file @ 2f4db892
@@ -2148,26 +2148,6 @@ NdbDictInterface::createIndex(Ndb & ndb,
     }
     attributeList.id[i] = col->m_attrId;
   }
-  if (it == DictTabInfo::UniqueHashIndex) {
-    // Sort index attributes according to primary table (using insertion sort)
-    for (i = 1; i < attributeList.sz; i++) {
-      unsigned int temp = attributeList.id[i];
-      unsigned int j = i;
-      while ((j > 0) && (attributeList.id[j - 1] > temp)) {
-        attributeList.id[j] = attributeList.id[j - 1];
-        j--;
-      }
-      attributeList.id[j] = temp;
-    }
-    // Check for illegal duplicate attributes
-    for (i = 0; i < attributeList.sz; i++) {
-      if ((i != (attributeList.sz - 1)) &&
-          (attributeList.id[i] == attributeList.id[i + 1])) {
-        m_error.code = 4258;
-        return -1;
-      }
-    }
-  }
   LinearSectionPtr ptr[3];
   ptr[0].p = (Uint32*)&attributeList;
   ptr[0].sz = 1 + attributeList.sz;
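The block removed above is the API-side half of the move: NdbDictInterface::createIndex no longer insertion-sorts the attribute list for unique hash indexes or scans it for adjacent duplicates (error 4258), since Dbdict now does both. For reference, a standalone sketch of the removed behaviour (hypothetical helper, not part of the NDB API):

  #include <cstdio>

  // Mirror of the removed logic: insertion-sort the ids into ascending
  // (primary-table) order, then report 4258 if two equal ids are adjacent,
  // which after sorting is the only place duplicates can appear.
  static int sort_and_check(unsigned* id, unsigned sz) {
    for (unsigned i = 1; i < sz; i++) {
      unsigned temp = id[i];
      unsigned j = i;
      while (j > 0 && id[j - 1] > temp) {
        id[j] = id[j - 1];
        j--;
      }
      id[j] = temp;
    }
    for (unsigned i = 0; i + 1 < sz; i++)
      if (id[i] == id[i + 1])
        return 4258;                        // DuplicateAttributes
    return 0;
  }

  int main() {
    unsigned a[] = {7, 2, 5};
    unsigned b[] = {7, 2, 7};
    std::printf("distinct ids -> %d\n", sort_and_check(a, 3));   // 0
    std::printf("repeated id  -> %d\n", sort_and_check(b, 3));   // 4258
    return 0;
  }

After this commit a client that sends the attribute list unsorted still gets a correctly ordered unique index, because the reordering shown in the Dbdict.cpp hunks is applied on the kernel side.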