Commit c9e2debf authored by unknown

Merge work:/home/bk/mysql-4.0 into hundin.mysql.fi:/my/bk/mysql-4.0


Docs/manual.texi:
  Auto merged
sql/mysqld.cc:
  Auto merged
parents ae686937 b87029d8
@@ -21,7 +21,7 @@ Created 6/2/1994 Heikki Tuuri
#include "lock0lock.h"
#include "ibuf0ibuf.h"
/**
/*
Node pointers
-------------
Leaf pages of a B-tree contain the index records stored in the
@@ -550,14 +550,15 @@ btr_page_get_father_for_rec(
ut_ad(mtr_memo_contains(mtr, dict_tree_get_lock(tree),
MTR_MEMO_X_LOCK));
ut_ad(user_rec != page_get_supremum_rec(page));
ut_ad(user_rec != page_get_infimum_rec(page));
ut_a(user_rec != page_get_supremum_rec(page));
ut_a(user_rec != page_get_infimum_rec(page));
ut_ad(dict_tree_get_page(tree) != buf_frame_get_page_no(page));
heap = mem_heap_create(100);
tuple = dict_tree_build_node_ptr(tree, user_rec, 0, heap);
tuple = dict_tree_build_node_ptr(tree, user_rec, 0, heap,
btr_page_get_level(page, mtr));
/* In the following, we choose just any index from the tree as the
first parameter for btr_cur_search_to_nth_level. */
@@ -569,7 +570,7 @@ btr_page_get_father_for_rec(
node_ptr = btr_cur_get_rec(&cursor);
ut_ad(btr_node_ptr_get_child_page_no(node_ptr) ==
ut_a(btr_node_ptr_get_child_page_no(node_ptr) ==
buf_frame_get_page_no(page));
mem_heap_free(heap);
@@ -949,8 +950,8 @@ btr_root_raise_and_insert(
/* Build the node pointer (= node key and page address) for the
child */
node_ptr = dict_tree_build_node_ptr(tree, rec, new_page_no, heap);
node_ptr = dict_tree_build_node_ptr(tree, rec, new_page_no, heap,
level);
/* Reorganize the root to get free space */
btr_page_reorganize(root, mtr);
@@ -1365,7 +1366,7 @@ btr_attach_half_pages(
half */
node_ptr_upper = dict_tree_build_node_ptr(tree, split_rec,
upper_page_no, heap);
upper_page_no, heap, level);
/* Insert it next to the pointer to the lower half. Note that this
may generate recursion leading to a split on the higher level. */
@@ -2230,7 +2231,7 @@ btr_check_node_ptr(
node_ptr_tuple = dict_tree_build_node_ptr(
tree,
page_rec_get_next(page_get_infimum_rec(page)),
0, heap);
0, heap, btr_page_get_level(page, mtr));
ut_a(cmp_dtuple_rec(node_ptr_tuple, node_ptr) == 0);
@@ -2485,10 +2486,11 @@ loop:
heap = mem_heap_create(256);
node_ptr_tuple = dict_tree_build_node_ptr(
tree,
tree,
page_rec_get_next(
page_get_infimum_rec(page)),
0, heap);
0, heap,
btr_page_get_level(page, &mtr));
if (cmp_dtuple_rec(node_ptr_tuple, node_ptr) != 0) {
@@ -2345,9 +2345,9 @@ btr_cur_pessimistic_delete(
heap = mem_heap_create(256);
node_ptr = dict_tree_build_node_ptr(
tree, page_rec_get_next(rec),
buf_frame_get_page_no(page),
heap);
tree, page_rec_get_next(rec),
buf_frame_get_page_no(page),
heap, btr_page_get_level(page, mtr));
btr_insert_on_non_leaf_level(tree,
btr_page_get_level(page, mtr) + 1,
@@ -138,15 +138,11 @@ buf_flush_ready_for_flush(
return(TRUE);
} else if ((block->old || (UT_LIST_GET_LEN(buf_pool->LRU)
< BUF_LRU_OLD_MIN_LEN))
&& (block->buf_fix_count == 0)) {
} else if (block->buf_fix_count == 0) {
/* If we are flushing the LRU list, to avoid deadlocks
we require the block not to be bufferfixed, and hence
not latched. Since LRU flushed blocks are soon moved
to the free list, it is good to flush only old blocks
from the end of the LRU list. */
not latched. */
return(TRUE);
}
@@ -560,6 +556,15 @@ buf_flush_try_neighbors(
block = buf_page_hash_get(space, i);
if (block && flush_type == BUF_FLUSH_LRU && i != offset
&& !block->old) {
/* We avoid flushing 'non-old' blocks in an LRU flush,
because the flushed blocks are soon freed */
continue;
}
if (block && buf_flush_ready_for_flush(block, flush_type)) {
mutex_exit(&(buf_pool->mutex));
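Read together, this hunk and the buf_flush_ready_for_flush() hunk before it appear to move the "flush only old blocks" restriction out of the readiness test and into the neighbor loop: any unfixed block may now be flushed, but during an LRU flush the neighbors of the requested page are skipped unless they are old. A minimal standalone sketch of that split, using hypothetical types and names rather than the real buffer-pool API:

/* Sketch only (hypothetical, not InnoDB code). */
typedef struct {
        int     old;            /* TRUE if block sits in the 'old' LRU tail */
        int     buf_fix_count;  /* > 0 while the block is bufferfixed */
} block_sketch_t;

enum { SKETCH_FLUSH_LRU, SKETCH_FLUSH_LIST };

/* After the change: readiness no longer depends on the block's age. */
int
sketch_ready_for_flush(const block_sketch_t* block)
{
        return(block->buf_fix_count == 0);
}

/* The age check survives only for neighbors in an LRU flush; the page
that was actually requested (i == offset) is considered regardless. */
int
sketch_skip_neighbor(const block_sketch_t* block, int flush_type,
                     int i, int offset)
{
        return(flush_type == SKETCH_FLUSH_LRU && i != offset && !block->old);
}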
@@ -2415,7 +2415,9 @@ dict_tree_build_node_ptr(
dict_tree_t* tree, /* in: index tree */
rec_t* rec, /* in: record for which to build node pointer */
ulint page_no,/* in: page number to put in node pointer */
mem_heap_t* heap) /* in: memory heap where pointer created */
mem_heap_t* heap, /* in: memory heap where pointer created */
ulint level) /* in: level of rec in tree: 0 means leaf
level */
{
dtuple_t* tuple;
dict_index_t* ind;
@@ -2427,9 +2429,16 @@ dict_tree_build_node_ptr(
if (tree->type & DICT_UNIVERSAL) {
/* In a universal index tree, we take the whole record as
the node pointer */
the node pointer if the record is on the leaf level;
on non-leaf levels we remove the last field, which
contains the page number of the child page */
n_unique = rec_get_n_fields(rec);
if (level > 0) {
ut_a(n_unique > 1);
n_unique--;
}
} else {
n_unique = dict_index_get_n_unique_in_tree(ind);
}
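For a universal index tree the record itself becomes the node pointer, so the new level argument decides how many fields to keep: a leaf record is copied whole, while a non-leaf record already ends with a child page number that has to be dropped before the pointer is rebuilt. A minimal sketch of that counting rule, with a hypothetical helper rather than the real dict0dict API:

/* Sketch only (hypothetical helper, not InnoDB code): number of fields
a node pointer keeps from rec in a DICT_UNIVERSAL tree. */
unsigned long
sketch_node_ptr_n_fields(unsigned long n_fields_in_rec, unsigned long level)
{
        if (level > 0) {
                /* Non-leaf record: its last field is the child page
                number, so at least two fields must be present and the
                last one is dropped (mirrors the ut_a(n_unique > 1)). */
                return(n_fields_in_rec - 1);
        }

        /* Leaf record: no page-number field yet, copy every field. */
        return(n_fields_in_rec);
}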
@@ -622,7 +622,9 @@ dict_tree_build_node_ptr(
dict_tree_t* tree, /* in: index tree */
rec_t* rec, /* in: record for which to build node pointer */
ulint page_no,/* in: page number to put in node pointer */
mem_heap_t* heap); /* in: memory heap where pointer created */
mem_heap_t* heap, /* in: memory heap where pointer created */
ulint level); /* in: level of rec in tree: 0 means leaf
level */
/**************************************************************************
Copies an initial segment of a physical record, long enough to specify an
index entry uniquely. */
@@ -68,3 +68,12 @@ select * from t2 where b="world";
a B
3 world
drop table t1,t2;
create table t1(x varchar(50) );
create table t2 select x from t1 where 1=2;
describe t1;
Field Type Null Key Default Extra
x varchar(50) YES NULL
describe t2;
Field Type Null Key Default Extra
x char(50) YES NULL
drop table t1,t2;
@@ -31,3 +31,20 @@ check table t1;
Table Op Msg_type Msg_text
test.t1 check status OK
drop table t1;
create table t1 (a int not null auto_increment, b int not null, primary key (a), index(b));
insert into t1 (b) values (1),(2),(2),(2),(2);
optimize table t1;
Table Op Msg_type Msg_text
test.t1 optimize status OK
show index from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Comment
t1 0 PRIMARY 1 a A 5 NULL NULL
t1 1 b 1 b A 1 NULL NULL
optimize table t1;
Table Op Msg_type Msg_text
test.t1 optimize status Table is already up to date
show index from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Comment
t1 0 PRIMARY 1 a A 5 NULL NULL
t1 1 b 1 b A 1 NULL NULL
drop table t1;
@@ -65,3 +65,13 @@ create table t2 (key (b)) select * from t1;
explain select * from t2 where b="world";
select * from t2 where b="world";
drop table t1,t2;
#
# Test types after CREATE ... SELECT
#
create table t1(x varchar(50) );
create table t2 select x from t1 where 1=2;
describe t1;
describe t2;
drop table t1,t2;
@@ -38,3 +38,15 @@ check table t1;
repair table t1;
check table t1;
drop table t1;
#
# Test bug: Two optimize in a row reset index cardinality
#
create table t1 (a int not null auto_increment, b int not null, primary key (a), index(b));
insert into t1 (b) values (1),(2),(2),(2),(2);
optimize table t1;
show index from t1;
optimize table t1;
show index from t1;
drop table t1;
@@ -541,7 +541,7 @@ int ha_myisam::optimize(THD* thd, HA_CHECK_OPT *check_opt)
int ha_myisam::repair(THD *thd, MI_CHECK &param, bool optimize)
{
int error=0;
uint extra_testflag=0;
uint local_testflag=param.testflag;
bool optimize_done= !optimize, statistics_done=0;
const char *old_proc_info=thd->proc_info;
char fixed_name[FN_REFLEN];
@@ -570,19 +570,18 @@ int ha_myisam::repair(THD *thd, MI_CHECK &param, bool optimize)
(!param.opt_rep_quick ||
!(share->state.changed & STATE_NOT_OPTIMIZED_KEYS))))
{
ulonglong key_map= ((param.testflag & T_CREATE_MISSING_KEYS) ?
ulonglong key_map= ((local_testflag & T_CREATE_MISSING_KEYS) ?
((ulonglong) 1L << share->base.keys)-1 :
share->state.key_map);
uint testflag=param.testflag;
if (mi_test_if_sort_rep(file,file->state->records,key_map,0) &&
(param.testflag & T_REP_BY_SORT))
(local_testflag & T_REP_BY_SORT))
{
uint testflag=param.testflag;
extra_testflag= T_STATISTICS;
local_testflag|= T_STATISTICS;
param.testflag|= T_STATISTICS; // We get this for free
thd->proc_info="Repair by sorting";
statistics_done=1;
error = mi_repair_by_sort(&param, file, fixed_name, param.opt_rep_quick);
param.testflag=testflag;
}
else
{
@@ -590,22 +589,28 @@ int ha_myisam::repair(THD *thd, MI_CHECK &param, bool optimize)
param.testflag &= ~T_REP_BY_SORT;
error= mi_repair(&param, file, fixed_name, param.opt_rep_quick);
}
param.testflag=testflag;
optimize_done=1;
}
if (!error)
{
if ((param.testflag & T_SORT_INDEX) &&
if ((local_testflag & T_SORT_INDEX) &&
(share->state.changed & STATE_NOT_SORTED_PAGES))
{
optimize_done=1;
thd->proc_info="Sorting index";
error=mi_sort_index(&param,file,fixed_name);
}
if (!statistics_done && (param.testflag & T_STATISTICS) &&
(share->state.changed & STATE_NOT_ANALYZED))
if (!statistics_done && (local_testflag & T_STATISTICS))
{
optimize_done=1;
thd->proc_info="Analyzing";
error = chk_key(&param, file);
if (share->state.changed & STATE_NOT_ANALYZED)
{
optimize_done=1;
thd->proc_info="Analyzing";
error = chk_key(&param, file);
}
else
local_testflag&= ~T_STATISTICS; // Don't update statistics
}
}
thd->proc_info="Saving state";
@@ -620,10 +625,11 @@ int ha_myisam::repair(THD *thd, MI_CHECK &param, bool optimize)
file->save_state=file->s->state.state;
if (file->s->base.auto_key)
update_auto_increment_key(&param, file, 1);
error = update_state_info(&param, file,
UPDATE_TIME | UPDATE_OPEN_COUNT |
((param.testflag | extra_testflag) &
T_STATISTICS ? UPDATE_STAT : 0));
if (optimize_done)
error = update_state_info(&param, file,
UPDATE_TIME | UPDATE_OPEN_COUNT |
(local_testflag &
T_STATISTICS ? UPDATE_STAT : 0));
info(HA_STATUS_NO_LOCK | HA_STATUS_TIME | HA_STATUS_VARIABLE |
HA_STATUS_CONST);
if (rows != file->state->records && ! (param.testflag & T_VERY_SILENT))
@@ -621,8 +621,8 @@ double Item_func_rand::val()
{
if (arg_count)
{ // Only use argument once in query
ulong tmp=((ulong) args[0]->val_int())+55555555L;
randominit(&current_thd->rand,tmp,tmp/2);
ulong tmp=((ulong) args[0]->val_int());
randominit(&current_thd->rand,tmp*0x10001L+55555555L,tmp*0x10000001L);
#ifdef DELETE_ITEMS
delete args[0];
#endif
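Presumably the point of the new seeding is to stop adjacent RAND(N) arguments from landing on nearly identical generator states: with the old formula the first seed for N and N+1 differed only by 1 and the second seed (tmp/2) differed by at most 1, being identical for N=1 and N=2, whereas the multipliers 0x10001 and 0x10000001 spread neighbouring N far apart. A small standalone sketch of the before/after arithmetic (illustration only; randominit() itself is not reproduced here):

#include <stdio.h>

int main(void)
{
        unsigned long n;

        for (n = 1; n <= 2; n++) {
                unsigned long old1 = n + 55555555UL;     /* old first seed  */
                unsigned long old2 = old1 / 2;           /* old second seed */
                unsigned long new1 = n * 0x10001UL + 55555555UL;
                unsigned long new2 = n * 0x10000001UL;

                printf("N=%lu old=(%lu,%lu) new=(%lu,%lu)\n",
                       n, old1, old2, new1, new2);
        }
        /* Prints:
           N=1 old=(55555556,27777778) new=(55621092,268435457)
           N=2 old=(55555557,27777778) new=(55686629,536870914) */
        return 0;
}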