nexedi / MariaDB · Commits

Commit 0153488a, authored Jan 19, 2004 by unknown

    Merge bk-internal.mysql.com:/home/bk/mysql-4.0
    into mysql.com:/home/my/mysql-4.0

Parents: 36d82d18, 45591dec

Showing 5 changed files with 155 additions and 35 deletions (+155 -35):

    myisam/mi_dynrec.c                    +95  -33
    mysql-test/r/myisam-blob.result       +27   -0
    mysql-test/t/myisam-blob-master.opt    +1   -0
    mysql-test/t/myisam-blob.test         +30   -0
    sql/sql_select.cc                      +2   -2
myisam/mi_dynrec.c
@@ -14,7 +14,15 @@
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */

-/* Functions to handle space-packed-records and blobs */
+/*
+  Functions to handle space-packed-records and blobs
+
+  A row may be stored in one or more linked blocks.
+  The block size is between MI_MIN_BLOCK_LENGTH and MI_MAX_BLOCK_LENGTH.
+  Each block is aligned on MI_DYN_ALIGN_SIZE.
+  The reason for the max block size is to not have too many different types
+  of blocks.  For the different block types, look at _mi_get_block_info()
+*/

 #include "myisamdef.h"
 #include <assert.h>
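Note: the new header comment summarizes the dynamic-row format: a row lives in one or more linked blocks, each aligned on MI_DYN_ALIGN_SIZE and sized between MI_MIN_BLOCK_LENGTH and MI_MAX_BLOCK_LENGTH. As a rough, stand-alone illustration of that align-and-clamp rule — a minimal sketch with assumed constant values (the real ones are defined in myisamdef.h), not the MyISAM code:

/*
  Sketch only: maps a requested row length to an aligned, clamped block
  length as described in the comment above.  The constants are stand-ins
  for MI_DYN_ALIGN_SIZE, MI_MIN_BLOCK_LENGTH and MI_MAX_BLOCK_LENGTH.
*/
#include <stdio.h>

#define DYN_ALIGN_SIZE   4UL                  /* assumed alignment unit    */
#define MIN_BLOCK_LENGTH 20UL                 /* assumed minimum block size */
#define MAX_BLOCK_LENGTH (16UL*1024*1024 - DYN_ALIGN_SIZE)  /* assumed max  */

/* Round up to the alignment unit, then clamp into the allowed range. */
static unsigned long block_length_for(unsigned long wanted)
{
  unsigned long len= (wanted + DYN_ALIGN_SIZE - 1) & ~(DYN_ALIGN_SIZE - 1);
  if (len < MIN_BLOCK_LENGTH)
    len= MIN_BLOCK_LENGTH;
  if (len > MAX_BLOCK_LENGTH)
    len= MAX_BLOCK_LENGTH;    /* the remainder would go into a linked block */
  return len;
}

int main(void)
{
  printf("%lu\n", block_length_for(3));               /* -> 20 (min)        */
  printf("%lu\n", block_length_for(1000));            /* -> 1000 (aligned)  */
  printf("%lu\n", block_length_for(18UL*1024*1024));  /* clamped to the max */
  return 0;
}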
@@ -264,37 +272,62 @@ static bool unlink_deleted_block(MI_INFO *info, MI_BLOCK_INFO *block_info)
   DBUG_RETURN(0);
 }

-	/* Delete datarecord from database */
-	/* info->rec_cache.seek_not_done is updated in cmp_record */
+/*
+  Add a backward link to delete block
+
+  SYNOPSIS
+    update_backward_delete_link()
+    info		MyISAM handler
+    delete_block	Position to delete block to update.
+			If this is 'HA_OFFSET_ERROR', nothing will be done
+    filepos		Position to block that 'delete_block' should point to
+
+  RETURN
+    0  ok
+    1  error.  In this case my_errno is set.
+*/

-static int delete_dynamic_record(MI_INFO *info, my_off_t filepos,
-				 uint second_read)
+static int update_backward_delete_link(MI_INFO *info, my_off_t delete_block,
+				       my_off_t filepos)
 {
-  uint length,b_type;
-  MI_BLOCK_INFO block_info,del_block;
-  int error=0;
-  my_bool remove_next_block;
-  DBUG_ENTER("delete_dynamic_record");
+  MI_BLOCK_INFO block_info;
+  DBUG_ENTER("update_backward_delete_link");

-	/* First add a link from the last block to the new one */
-  if (info->s->state.dellink != HA_OFFSET_ERROR)
+  if (delete_block != HA_OFFSET_ERROR)
   {
     block_info.second_read=0;
-    if (_mi_get_block_info(&block_info,info->dfile,info->s->state.dellink)
+    if (_mi_get_block_info(&block_info,info->dfile,delete_block)
 	& BLOCK_DELETED)
     {
       char buff[8];
       mi_sizestore(buff,filepos);
-      if (my_pwrite(info->dfile,buff,8,info->s->state.dellink+12,
-		    MYF(MY_NABP)))
-	error=1;				/* Error on write */
+      if (my_pwrite(info->dfile,buff,8,delete_block+12,
+		    MYF(MY_NABP)))
+	DBUG_RETURN(1);				/* Error on write */
     }
     else
     {
-      error=1;					/* Wrong delete link */
       my_errno=HA_ERR_WRONG_IN_RECORD;
+      DBUG_RETURN(1);				/* Wrong delete link */
     }
   }
+  return 0;
+}
+
+	/* Delete datarecord from database */
+	/* info->rec_cache.seek_not_done is updated in cmp_record */
+
+static int delete_dynamic_record(MI_INFO *info, my_off_t filepos,
+				 uint second_read)
+{
+  uint length,b_type;
+  MI_BLOCK_INFO block_info,del_block;
+  int error;
+  my_bool remove_next_block;
+  DBUG_ENTER("delete_dynamic_record");
+
+  /* First add a link from the last block to the new one */
+  error= update_backward_delete_link(info, info->s->state.dellink, filepos);

   block_info.second_read=second_read;
   do
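Note: update_backward_delete_link() centralizes the "point a deleted block back at a new neighbour on the delete chain" logic that was previously open-coded in delete_dynamic_record(); the later hunks also call it from _mi_write_part_record() and from the new overflow-split path in update_dynamic_record(). The header offsets it relies on can be read off the patch itself (a back link stored 12 bytes into a deleted block; a 20-byte header built further down). A stand-alone sketch of that layout follows — an inference from the offsets in this patch, not the authoritative MyISAM definition; store3()/store8() are mere stand-ins for mi_int3store()/mi_sizestore():

/*
  Sketch only (not MyISAM code).  Deleted-block header fields as used in
  this patch:
    byte  0       block type (0 = deleted)
    bytes 1..3    block length
    bytes 4..11   next deleted block on the chain
    bytes 12..19  previous deleted block (the backward link the new
                  helper maintains)
*/
#include <stdio.h>

typedef unsigned char uchar;
typedef unsigned long long uoff;           /* stand-in for my_off_t */

static void store3(uchar *p, unsigned long v)
{ p[0]= (uchar)(v >> 16); p[1]= (uchar)(v >> 8); p[2]= (uchar)v; }

static void store8(uchar *p, uoff v)
{ int i; for (i= 7; i >= 0; i--) { p[i]= (uchar)v; v>>= 8; } }

/* Build a 20-byte deleted-block header: length, forward link, back link. */
static void make_delete_header(uchar header[20], unsigned long length,
                               uoff next_link, uoff prev_link)
{
  header[0]= 0;                   /* block type 0 = deleted                */
  store3(header + 1, length);     /* block length                          */
  store8(header + 4, next_link);  /* next block on the delete chain        */
  store8(header + 12, prev_link); /* backward link fixed up by the helper  */
}

int main(void)
{
  uchar header[20];
  /* all-ones links play the role of "no neighbour" in this sketch */
  make_delete_header(header, 4096, ~0ULL, ~0ULL);
  printf("type=%u first length byte=%u\n", header[0], header[1]);
  return 0;
}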
@@ -518,21 +551,11 @@ int _mi_write_part_record(MI_INFO *info,
       *reclength-= (length-head_length);
       *flag=6;

-      if (del_length && next_delete_block != HA_OFFSET_ERROR)
+      if (del_length)
       {
 	/* link the next delete block to this */
-	MI_BLOCK_INFO del_block;
-	del_block.second_read=0;
-	if (!(_mi_get_block_info(&del_block,info->dfile,
-				 next_delete_block)
-	      & BLOCK_DELETED))
-	{
-	  my_errno=HA_ERR_WRONG_IN_RECORD;
-	  goto err;
-	}
-	mi_sizestore(del_block.header+12,info->s->state.dellink);
-	if (my_pwrite(info->dfile,(char*) del_block.header+12,8,
-		      next_delete_block+12,
-		      MYF(MY_NABP)))
+	if (update_backward_delete_link(info, next_delete_block,
+					info->s->state.dellink))
 	  goto err;
       }
@@ -574,6 +597,8 @@ static int update_dynamic_record(MI_INFO *info, my_off_t filepos, byte *record,
     {
       uint tmp=MY_ALIGN(reclength - length + 3 +
 			test(reclength >= 65520L),MI_DYN_ALIGN_SIZE);
+      /* Don't create a block bigger than MI_MAX_BLOCK_LENGTH */
+      tmp= min(length+tmp, MI_MAX_BLOCK_LENGTH)-length;
       /* Check if we can extend this block */
       if (block_info.filepos + block_info.block_len ==
 	  info->state->data_file_length &&
@@ -588,9 +613,15 @@ static int update_dynamic_record(MI_INFO *info, my_off_t filepos, byte *record,
 	info->update|= HA_STATE_WRITE_AT_END | HA_STATE_EXTEND_BLOCK;
 	length+= tmp;
       }
-      else
+      else if (length < MI_MAX_BLOCK_LENGTH - MI_MIN_BLOCK_LENGTH)
       {
-	/* Check if next block is a deleted block */
+	/*
+	  Check if next block is a deleted block
+	  Above we have MI_MIN_BLOCK_LENGTH to avoid the problem where
+	  the next block is so small it can't be split, which could
+	  cause problems
+	*/
 	MI_BLOCK_INFO del_block;
 	del_block.second_read=0;
 	if (_mi_get_block_info(&del_block,info->dfile,
@@ -601,7 +632,35 @@ static int update_dynamic_record(MI_INFO *info, my_off_t filepos, byte *record,
 	  DBUG_PRINT("info",("Extending current block"));
 	  if (unlink_deleted_block(info,&del_block))
 	    goto err;
-	  length+=del_block.block_len;
+	  if ((length+=del_block.block_len) > MI_MAX_BLOCK_LENGTH)
+	  {
+	    /*
+	      New block was too big, link overflow part back to
+	      delete list
+	    */
+	    my_off_t next_pos;
+	    ulong rest_length= length-MI_MAX_BLOCK_LENGTH;
+	    set_if_bigger(rest_length, MI_MIN_BLOCK_LENGTH);
+	    next_pos= del_block.filepos+ del_block.block_len - rest_length;
+
+	    if (update_backward_delete_link(info, info->s->state.dellink,
+					    next_pos))
+	      DBUG_RETURN(1);
+
+	    /* create delete link for data that didn't fit into the page */
+	    del_block.header[0]=0;
+	    mi_int3store(del_block.header+1, rest_length);
+	    mi_sizestore(del_block.header+4, info->s->state.dellink);
+	    bfill(del_block.header+12,8,255);
+	    if (my_pwrite(info->dfile,(byte*) del_block.header,20, next_pos,
+			  MYF(MY_NABP)))
+	      DBUG_RETURN(1);
+	    info->s->state.dellink= next_pos;
+	    info->s->state.split++;
+	    info->state->del++;
+	    info->state->empty+= rest_length;
+	    length-= rest_length;
+	  }
 	}
       }
     }
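Note: this new branch handles the case where absorbing the adjacent deleted block would push the row block past MI_MAX_BLOCK_LENGTH. Only the front part of the deleted block is kept; its tail (rest_length, forced up to at least MI_MIN_BLOCK_LENGTH) is written back at next_pos as a fresh deleted block and becomes the new head of the delete chain. A stand-alone arithmetic sketch of that split, with assumed constants (the real MI_MAX_BLOCK_LENGTH / MI_MIN_BLOCK_LENGTH live in myisamdef.h), not the MyISAM code:

/* Sketch only: the overflow split from the hunk above, with small numbers. */
#include <stdio.h>

#define MAX_BLOCK_LENGTH 1000UL   /* assumed, chosen for easy arithmetic */
#define MIN_BLOCK_LENGTH 20UL     /* assumed */

int main(void)
{
  unsigned long length=       900;   /* current row block                 */
  unsigned long del_len=      300;   /* adjacent deleted block            */
  unsigned long del_filepos= 5000;   /* file offset of the deleted block  */

  length+= del_len;                              /* 1200: over the limit   */
  if (length > MAX_BLOCK_LENGTH)
  {
    unsigned long rest_length= length - MAX_BLOCK_LENGTH;   /* 200         */
    if (rest_length < MIN_BLOCK_LENGTH)                      /* set_if_bigger */
      rest_length= MIN_BLOCK_LENGTH;
    /* tail of the deleted block that goes back on the delete chain */
    unsigned long next_pos= del_filepos + del_len - rest_length;   /* 5100 */
    length-= rest_length;                                    /* back to 1000 */
    printf("keep %lu bytes, new delete block of %lu at offset %lu\n",
           length, rest_length, next_pos);
  }
  return 0;
}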
@@ -615,7 +674,10 @@ static int update_dynamic_record(MI_INFO *info, my_off_t filepos, byte *record,
 			       &record,&reclength,&flag))
 	goto err;
       if ((filepos=block_info.next_filepos) == HA_OFFSET_ERROR)
+      {
+	/* Start writing data on deleted blocks */
 	filepos=info->s->state.dellink;
+      }
     }

     if (block_info.next_filepos != HA_OFFSET_ERROR)
mysql-test/r/myisam-blob.result  (new file, mode 100644)
drop table if exists t1;
CREATE TABLE t1 (data LONGBLOB) ENGINE=myisam;
INSERT INTO t1 (data) VALUES (NULL);
UPDATE t1 set data=repeat('a',18*1024*1024);
select length(data) from t1;
length(data)
18874368
delete from t1 where left(data,1)='a';
check table t1;
Table Op Msg_type Msg_text
test.t1 check status OK
truncate table t1;
INSERT INTO t1 (data) VALUES (repeat('a',1*1024*1024));
INSERT INTO t1 (data) VALUES (repeat('b',16*1024*1024-1024));
delete from t1 where left(data,1)='b';
check table t1;
Table Op Msg_type Msg_text
test.t1 check status OK
UPDATE t1 set data=repeat('c',17*1024*1024);
check table t1;
Table Op Msg_type Msg_text
test.t1 check status OK
delete from t1 where left(data,1)='c';
check table t1;
Table Op Msg_type Msg_text
test.t1 check status OK
drop table t1;
mysql-test/t/myisam-blob-master.opt  (new file, mode 100644)
--max-allowed-packet=24M --skip-innodb --key-buffer-size=1M
mysql-test/t/myisam-blob.test  (new file, mode 100644)
#
# Test bugs in the MyISAM code with blobs
#

--disable_warnings
drop table if exists t1;
--enable_warnings

# Bug #2159 (Problem with update of blob to > 16M)

CREATE TABLE t1 (data LONGBLOB) ENGINE=myisam;
INSERT INTO t1 (data) VALUES (NULL);
UPDATE t1 set data=repeat('a',18*1024*1024);
select length(data) from t1;
delete from t1 where left(data,1)='a';
check table t1;
truncate table t1;
INSERT INTO t1 (data) VALUES (repeat('a',1*1024*1024));
INSERT INTO t1 (data) VALUES (repeat('b',16*1024*1024-1024));
delete from t1 where left(data,1)='b';
check table t1;

# now we have two blocks in the table, first is a 1M record and second is
# a 16M delete block.

UPDATE t1 set data=repeat('c',17*1024*1024);
check table t1;
delete from t1 where left(data,1)='c';
check table t1;
drop table t1;
sql/sql_select.cc
@@ -1475,8 +1475,8 @@ add_key_field(KEY_FIELD **key_fields,uint and_level,
     bool optimizable=0;
     for (uint i=0; i<num_values; i++)
     {
-      used_tables|=(*value)->used_tables();
-      if (!((*value)->used_tables() & (field->table->map | RAND_TABLE_BIT)))
+      used_tables|=(value[i])->used_tables();
+      if (!((value[i])->used_tables() & (field->table->map | RAND_TABLE_BIT)))
 	optimizable=1;
     }
     if (!optimizable)
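Note: the sql_select.cc change replaces (*value), which always dereferences the first element, with value[i], so every element of the value array contributes to used_tables and optimizable. A self-contained illustration of the difference — not server code, just the (*value) vs value[i] pattern with made-up table masks:

/* Sketch only: with (*value) the loop keeps re-checking element 0, so a
   later element that avoids the forbidden table is never noticed. */
#include <stdio.h>

static int uses_forbidden_table(int tables_mask, int forbidden)
{ return (tables_mask & forbidden) != 0; }

int main(void)
{
  int value[3]= { 0x4, 0x1, 0x4 };          /* per-element table masks */
  int forbidden= 0x4;
  int optimizable_old= 0, optimizable_new= 0;
  int i;

  for (i= 0; i < 3; i++)
  {
    if (!uses_forbidden_table(*value, forbidden))    /* always checks value[0] */
      optimizable_old= 1;
    if (!uses_forbidden_table(value[i], forbidden))  /* checks each element    */
      optimizable_new= 1;
  }
  printf("old=%d new=%d\n", optimizable_old, optimizable_new);  /* old=0 new=1 */
  return 0;
}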