Commit 59f9dcd5
Authored Mar 20, 2004 by monty@mysql.com

Merge mysql.com:/home/my/mysql-4.0 into mysql.com:/home/my/mysql-4.1

Parents: b6e5c23c a8da91f9

Showing 5 changed files with 66 additions and 40 deletions

Build-tools/Do-pkg             +11  -2
innobase/btr/btr0btr.c         +30  -33
innobase/btr/btr0cur.c         +20  -4
mysys/mf_soundex.c             +4   -0
scripts/mysql_install_db.sh    +1   -1

Build-tools/Do-pkg

@@ -61,6 +61,12 @@ else
 }
 
 $PM= "/Developer/Applications/PackageMaker.app/Contents/MacOS/PackageMaker";
+# Try another location on 10.3.3
+unless ( -e "$PM") {
+  $PM= "/Developer/Applications/Utilities/PackageMaker.app/Contents/MacOS/PackageMaker";
+}
+
 $TMP= $ENV{TMPDIR};
 $TMP eq "" ? $TMP= $TMP . "/PKGBUILD.$$": $TMP= "/tmp/PKGBUILD.$$";
 $PKGROOT= "$TMP/PMROOT";

@@ -79,6 +85,9 @@ $BUILDDIR= "$PWD/$HOST";
 $SRCBASEDIR= <$BUILDDIR/mysql*-$VERSION>;
 $SUPFILEDIR= <$SRCBASEDIR/support-files/MacOSX>;
 $TAR= <$BUILDDIR/$NAME-apple-darwin*-powerpc.tar.gz>;
+$TAR =~ /.*\/$NAME(.*)\.tar\.gz$/;
+$ARCH= $1;
+$NAME= $NAME . $ARCH;
 $INFO= <$SUPFILEDIR/Info.plist>;
 $DESC= <$SUPFILEDIR/Description.plist>;
 $SI_INFO= <$SUPFILEDIR/StartupItem.Info.plist>;

@@ -167,7 +176,7 @@ unless (-f "$RESOURCE_DIR/License.txt");
 &logger("Extracting $TAR to $PKGROOT");
 &run_command("gnutar zxf $TAR -C $PKGROOT", "Unable to extract $TAR!");
 &run_command("cd $PKGROOT ; ln -s mysql* ./mysql", "Unable to create symlink!");
-&run_command("chown -R root.wheel $PKGROOT/*", "Cannot chown $PKGROOT!");
+&run_command("chown -R root:wheel $PKGROOT/*", "Cannot chown $PKGROOT!");
 
 # Now build the PGK using PackageMaker
 # The "|| true" is a nasty hack to work around a problem with Package Maker

@@ -196,7 +205,7 @@ unless ($opt_skip_si)
 	copy("$SI_PARAMS", "$PKGROOT/MySQL/")
 		or &abort("Error copying $SI_PARAMS!");
 	chmod(0644, "$PKGROOT/MySQL/" . basename("$SI_PARAMS"));
-	&run_command("chown -R root.wheel $PKGROOT/*", "Cannot chown $PKGROOT!");
+	&run_command("chown -R root:wheel $PKGROOT/*", "Cannot chown $PKGROOT!");
 	copy("$SI_POST", "$RESOURCE_DIR/postinstall")
 		or &abort("Error copying $SI_POST!");
 	chmod(0644, "$RESOURCE_DIR/postinstall");

innobase/btr/btr0btr.c

@@ -76,9 +76,6 @@ make them consecutive on disk if possible. From the other file segment
 we allocate pages for the non-leaf levels of the tree.
 */
 
-/* If this many inserts occur sequentially, it affects page split */
-#define BTR_PAGE_SEQ_INSERT_LIMIT	5
-
 /******************************************************************
 Creates a new index page to the tree (not the root, and also not
 used in page reorganization). */

@@ -1089,18 +1086,18 @@ btr_page_get_split_rec_to_left(
 	page = btr_cur_get_page(cursor);
 	insert_point = btr_cur_get_rec(cursor);
 
-	if ((page_header_get_ptr(page, PAGE_LAST_INSERT)
-				== page_rec_get_next(insert_point))
-	    && (page_header_get_field(page, PAGE_DIRECTION) == PAGE_LEFT)
-	    && ((page_header_get_field(page, PAGE_N_DIRECTION)
-					>= BTR_PAGE_SEQ_INSERT_LIMIT)
-		|| (page_header_get_field(page, PAGE_N_DIRECTION) + 1
-					>= page_get_n_recs(page)))) {
+	if (page_header_get_ptr(page, PAGE_LAST_INSERT)
+	    == page_rec_get_next(insert_point)) {
 
 		infimum = page_get_infimum_rec(page);
 
-		if ((infimum != insert_point)
-		    && (page_rec_get_next(infimum) != insert_point)) {
+		/* If the convergence is in the middle of a page, include also
+		the record immediately before the new insert to the upper
+		page. Otherwise, we could repeatedly move from page to page
+		lots of records smaller than the convergence point. */
+
+		if (infimum != insert_point
+		    && page_rec_get_next(infimum) != insert_point) {
 
 			*split_rec = insert_point;
 		} else {

@@ -1134,29 +1131,29 @@ btr_page_get_split_rec_to_right(
 	page = btr_cur_get_page(cursor);
 	insert_point = btr_cur_get_rec(cursor);
 
-	if ((page_header_get_ptr(page, PAGE_LAST_INSERT) == insert_point)
-	    && (page_header_get_field(page, PAGE_DIRECTION) == PAGE_RIGHT)
-	    && ((page_header_get_field(page, PAGE_N_DIRECTION)
-					>= BTR_PAGE_SEQ_INSERT_LIMIT)
-		|| (page_header_get_field(page, PAGE_N_DIRECTION) + 1
-					>= page_get_n_recs(page)))) {
+	/* We use eager heuristics: if the new insert would be right after
+	the previous insert on the same page, we assume that there is a
+	pattern of sequential inserts here. */
+
+	if (page_header_get_ptr(page, PAGE_LAST_INSERT) == insert_point) {
 
 		supremum = page_get_supremum_rec(page);
 
-		if ((page_rec_get_next(insert_point) != supremum)
-		    && (page_rec_get_next(page_rec_get_next(insert_point))
-							!= supremum)
-		    && (page_rec_get_next(page_rec_get_next(
-				page_rec_get_next(insert_point))) != supremum)) {
+		if (page_rec_get_next(insert_point) != supremum
+		    && page_rec_get_next(page_rec_get_next(insert_point))
+		       != supremum) {
 
-			/* If there are >= 3 user records up from the insert
-			point, split all but 2 off */
+			/* If there are >= 2 user records up from the insert
+			point, split all but 1 off. We want to keep one because
+			then sequential inserts can use the adaptive hash
+			index, as they can do the necessary checks of the right
+			search position just by looking at the records on this
+			page. */
 
-			*split_rec = page_rec_get_next(page_rec_get_next(
-					page_rec_get_next(insert_point)));
+			*split_rec = page_rec_get_next(page_rec_get_next(
+					insert_point));
 		} else {
-			/* Else split at inserted record */
+			/* Else split at the new record to insert */
 			*split_rec = NULL;
 		}

innobase/btr/btr0cur.c

@@ -2682,10 +2682,11 @@ btr_estimate_number_of_different_key_vals(
 		btr_cur_open_at_rnd_pos(index, BTR_SEARCH_LEAF, &cursor, &mtr);
 
-		/* Count the number of different key values minus one
-		for each prefix of the key on this index page: we subtract
-		one because otherwise our algorithm would give a wrong
-		estimate for an index where there is just one key value */
+		/* Count the number of different key values for each prefix of
+		the key on this index page. If the prefix does not determine
+		the index record uniquely in te B-tree, then we subtract one
+		because otherwise our algorithm would give a wrong estimate
+		for an index where there is just one key value. */
 
 		page = btr_cur_get_page(&cursor);

@@ -2707,6 +2708,9 @@ btr_estimate_number_of_different_key_vals(
 							&matched_bytes);
 
 		for (j = matched_fields + 1; j <= n_cols; j++) {
+			/* We add one if this index record has
+			a different prefix from the previous */
+
 			n_diff[j]++;
 		}

@@ -2716,6 +2720,18 @@ btr_estimate_number_of_different_key_vals(
 			rec = page_rec_get_next(rec);
 		}
 
+		if (n_cols == dict_index_get_n_unique_in_tree(index)) {
+
+			/* We add one because we know that the first record
+			on the page certainly had a different prefix than the
+			last record on the previous index page in the
+			alphabetical order. Before this fix, if there was
+			just one big record on each clustered index page, the
+			algorithm grossly underestimated the number of rows
+			in the table. */
+
+			n_diff[n_cols]++;
+		}
+
 		total_external_size +=
 			btr_rec_get_externally_stored_len(rec);
 
 		mtr_commit(&mtr);

mysys/mf_soundex.c

@@ -28,6 +28,10 @@
  *								*
  * As an extension if remove_garbage is set then all non-	*
  * alpha characters are skipped				*
+ *								*
+ * Note, that this implementation corresponds to the		*
+ * original version of the algorithm, not to the more		*
+ * popular "enhanced" version, described by Knuth.		*
  ****************************************************************/
 
 #include "mysys_priv.h"

scripts/mysql_install_db.sh

@@ -151,7 +151,7 @@ then
   if [ $? -ne 0 ]
   then
     resolved=`$bindir/resolveip localhost 2>&1`
-    if [ $? -eq 0 ]
+    if [ $? -ne 0 ]
     then
      echo "Neither host '$hostname' and 'localhost' could not be looked up with"
      echo "$bindir/resolveip"
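
For context on the one-line change above: resolveip exits with status 0 when a lookup succeeds and non-zero when it fails, so the old `-eq 0` test printed the "could not be looked up" diagnostics exactly when the fallback lookup of localhost had in fact succeeded, and stayed silent when it failed. Below is a minimal standalone sketch of the corrected control flow; the $bindir value and the simplified message are illustrative assumptions, not taken from the script.

  #!/bin/sh
  # Sketch only, not the shipped mysql_install_db.sh.
  bindir=/usr/local/mysql/bin            # assumption: where resolveip is installed
  hostname=`hostname`

  resolved=`$bindir/resolveip $hostname 2>&1`
  if [ $? -ne 0 ]                        # primary lookup failed ...
  then
    resolved=`$bindir/resolveip localhost 2>&1`
    if [ $? -ne 0 ]                      # ... and the localhost fallback failed too
    then
      echo "Neither '$hostname' nor 'localhost' could be resolved with $bindir/resolveip"
      exit 1
    fi
  fi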