Commit c19a8c06
authored Oct 02, 2007 by unknown
Bug#25817 UPDATE IGNORE doesn't check write_set when checking unique indexes: Added checks
parent f7886540

Showing 4 changed files with 57 additions and 5 deletions (+57 −5):
mysql-test/r/ndb_update.result  +8 −0
mysql-test/t/ndb_update.test    +5 −0
sql/ha_ndbcluster.cc            +36 −4
sql/ha_ndbcluster.h             +8 −1
mysql-test/r/ndb_update.result
@@ -39,4 +39,12 @@ pk1 b c
 10 0 0
 12 2 2
 14 1 1
+create unique index ib on t1(b);
+update t1 set c = 4 where pk1 = 12;
+update ignore t1 set b = 55 where pk1 = 14;
+select * from t1 order by pk1;
+pk1 b c
+10 0 0
+12 2 4
+14 55 1
 DROP TABLE IF EXISTS t1;
mysql-test/t/ndb_update.test
@@ -33,6 +33,11 @@ UPDATE IGNORE t1 set pk1 = 1, c = 2 where pk1 = 4;
 select * from t1 order by pk1;
 UPDATE t1 set pk1 = pk1 + 10;
 select * from t1 order by pk1;
+# bug#25817
+create unique index ib on t1(b);
+update t1 set c = 4 where pk1 = 12;
+update ignore t1 set b = 55 where pk1 = 14;
+select * from t1 order by pk1;
 --disable_warnings
 DROP TABLE IF EXISTS t1;
sql/ha_ndbcluster.cc
@@ -1356,6 +1356,30 @@ int ha_ndbcluster::set_primary_key_from_record(NdbOperation *op, const byte *rec
   DBUG_RETURN(0);
 }
 
+bool ha_ndbcluster::check_index_fields_in_write_set(uint keyno)
+{
+  KEY* key_info= table->key_info + keyno;
+  KEY_PART_INFO* key_part= key_info->key_part;
+  KEY_PART_INFO* end= key_part + key_info->key_parts;
+  uint i;
+  DBUG_ENTER("check_index_fields_in_write_set");
+
+  if (m_retrieve_all_fields)
+  {
+    DBUG_RETURN(true);
+  }
+  for (i= 0; key_part != end; key_part++, i++)
+  {
+    Field* field= key_part->field;
+    if (field->query_id != current_thd->query_id)
+    {
+      DBUG_RETURN(false);
+    }
+  }
+
+  DBUG_RETURN(true);
+}
+
 int ha_ndbcluster::set_index_key_from_record(NdbOperation *op, const byte *record, uint keyno)
 {
   KEY* key_info= table->key_info + keyno;
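The new helper leans on how this server version tracks assigned columns: each Field carries the query_id of the last statement that wrote it, so an index is fully covered by the statement's write_set exactly when every key part's field matches the current query_id (m_retrieve_all_fields short-circuits to true, since then all columns are fetched and valid anyway). A minimal standalone sketch of that membership test, using hypothetical Mock* stand-ins rather than the real KEY/KEY_PART_INFO/Field structures:

  #include <cassert>
  #include <cstdint>
  #include <vector>

  // Hypothetical stand-ins for the server structures used above.
  struct MockField   { uint64_t query_id; };  // id of the last statement that wrote this column
  struct MockKeyPart { MockField *field; };
  struct MockKey     { std::vector<MockKeyPart> key_parts; };

  // Same decision as check_index_fields_in_write_set(): peek this index
  // only if every one of its key parts was assigned by the current statement.
  static bool index_fields_in_write_set(const MockKey &key, uint64_t current_query_id)
  {
    for (const MockKeyPart &part : key.key_parts)
      if (part.field->query_id != current_query_id)
        return false;  // a column this statement never wrote: skip the peek
    return true;
  }

  int main()
  {
    MockField b{42}, c{7};                      // statement 42 assigns b; c was last written earlier
    MockKey ib{{{&b}}}, ic{{{&c}}};             // one-column unique indexes on b and on c
    assert(index_fields_in_write_set(ib, 42));  // ib must still be peeked for duplicates
    assert(!index_fields_in_write_set(ic, 42)); // ic can safely be skipped
    return 0;
  }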
@@ -1643,7 +1667,8 @@ check_null_in_record(const KEY* key_info, const byte *record)
  * primary key or unique index values
  */
 
-int ha_ndbcluster::peek_indexed_rows(const byte *record, bool check_pk)
+int ha_ndbcluster::peek_indexed_rows(const byte *record,
+                                     NDB_WRITE_OP write_op)
 {
   NdbTransaction *trans= m_active_trans;
   NdbOperation *op;
@@ -1656,7 +1681,7 @@ int ha_ndbcluster::peek_indexed_rows(const byte *record, bool check_pk)
   NdbOperation::LockMode lm=
     (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type);
   first= NULL;
-  if (check_pk && table->s->primary_key != MAX_KEY)
+  if (write_op != NDB_UPDATE && table->s->primary_key != MAX_KEY)
   {
     /*
      * Fetch any row with colliding primary key
@@ -1690,6 +1715,12 @@ int ha_ndbcluster::peek_indexed_rows(const byte *record, bool check_pk)
       DBUG_PRINT("info", ("skipping check for key with NULL"));
       continue;
     }
+    if (write_op != NDB_INSERT && !check_index_fields_in_write_set(i))
+    {
+      DBUG_PRINT("info", ("skipping check for key %u not in write_set", i));
+      continue;
+    }
+
     NdbIndexOperation *iop;
     NDBINDEX *unique_index = (NDBINDEX *) m_index[i].unique_index;
     key_part= key_info->key_part;
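Together, the two write_op guards give peek_indexed_rows() a per-operation policy: the primary key is peeked unless this is a plain UPDATE (where the PK cannot change), and a unique index is always peeked on INSERT but on UPDATE/PK-UPDATE only when all of its columns are in the write_set (and, per the preceding check, contain no NULL). A condensed restatement of that control flow, not the real handler code, exercised against the Bug#25817 test statement:

  #include <cstdio>

  enum NDB_WRITE_OP { NDB_INSERT = 0, NDB_UPDATE = 1, NDB_PK_UPDATE = 2 };

  // Peek the primary key for colliding rows unless it provably cannot change.
  static bool peeks_primary_key(NDB_WRITE_OP op)
  {
    return op != NDB_UPDATE;
  }

  // Peek a unique index always on INSERT; on updates only if the statement
  // assigned every column of the index (otherwise the record values may not be valid).
  static bool peeks_unique_index(NDB_WRITE_OP op, bool index_in_write_set)
  {
    return op == NDB_INSERT || index_in_write_set;
  }

  int main()
  {
    // update ignore t1 set b = 55 where pk1 = 14: only ib(b) is in the write_set.
    std::printf("pk peeked:        %d\n", peeks_primary_key(NDB_UPDATE));         // 0
    std::printf("ib peeked:        %d\n", peeks_unique_index(NDB_UPDATE, true));  // 1
    std::printf("other idx peeked: %d\n", peeks_unique_index(NDB_UPDATE, false)); // 0
    return 0;
  }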
@@ -2268,7 +2299,7 @@ int ha_ndbcluster::write_row(byte *record)
       start_bulk_insert will set parameters to ensure that each
       write_row is committed individually
     */
-    int peek_res= peek_indexed_rows(record, true);
+    int peek_res= peek_indexed_rows(record, NDB_INSERT);
 
     if (!peek_res)
     {
@@ -2456,7 +2487,8 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
   if (m_ignore_dup_key && (thd->lex->sql_command == SQLCOM_UPDATE ||
                            thd->lex->sql_command == SQLCOM_UPDATE_MULTI))
   {
-    int peek_res= peek_indexed_rows(new_data, pk_update);
+    NDB_WRITE_OP write_op= (pk_update) ? NDB_PK_UPDATE : NDB_UPDATE;
+    int peek_res= peek_indexed_rows(new_data, write_op);
 
     if (!peek_res)
     {
sql/ha_ndbcluster.h
@@ -59,6 +59,12 @@ typedef struct ndb_index_data {
   bool null_in_unique_index;
 } NDB_INDEX_DATA;
 
+typedef enum ndb_write_op {
+  NDB_INSERT = 0,
+  NDB_UPDATE = 1,
+  NDB_PK_UPDATE = 2
+} NDB_WRITE_OP;
+
 typedef struct st_ndbcluster_share {
   THR_LOCK lock;
   pthread_mutex_t mutex;
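The header half of the change is what enables that dispatch: the old bool check_pk argument could only express two cases, while the fix distinguishes three. A small sketch of how the two call sites map onto the new enum, mirroring the write_row()/update_row() hunks above (pk_update is update_row()'s existing flag for primary-key-changing updates):

  enum NDB_WRITE_OP { NDB_INSERT = 0, NDB_UPDATE = 1, NDB_PK_UPDATE = 2 };

  // write_row():  peek_indexed_rows(record, true)        ->  peek_indexed_rows(record, NDB_INSERT)
  // update_row(): peek_indexed_rows(new_data, pk_update) ->  the mapping below
  static NDB_WRITE_OP write_op_for_update_row(bool pk_update)
  {
    return pk_update ? NDB_PK_UPDATE : NDB_UPDATE;
  }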
@@ -251,7 +257,7 @@ private:
                          const NdbOperation *first,
                          const NdbOperation *last,
                          uint errcode);
-  int peek_indexed_rows(const byte *record, bool check_pk);
+  int peek_indexed_rows(const byte *record, NDB_WRITE_OP write_op);
   int unique_index_read(const byte *key, uint key_len,
                         byte *buf);
   int ordered_index_scan(const key_range *start_key,
@@ -286,6 +292,7 @@ private:
   int get_ndb_blobs_value(NdbBlob *last_ndb_blob, my_ptrdiff_t ptrdiff);
   int set_primary_key(NdbOperation *op, const byte *key);
   int set_primary_key_from_record(NdbOperation *op, const byte *record);
+  bool check_index_fields_in_write_set(uint keyno);
   int set_index_key_from_record(NdbOperation *op, const byte *record,
                                 uint keyno);
   int set_bounds(NdbIndexScanOperation*, const key_range *keys[2], uint= 0);