Commit 7adae6b3 authored by unknown

Merge tulin@bk-internal.mysql.com:/home/bk/mysql-5.0

into poseidon.ndb.mysql.com:/home/tomas/mysql-5.0-ndb

parents a66165ce fe3a425a
@@ -44,7 +44,7 @@
#include <locale.h>
#endif
-const char *VER= "14.8";
+const char *VER= "14.9";
/* Don't try to make a nice table if the data is too big */
#define MAX_COLUMN_LENGTH 1024
@@ -1045,7 +1045,12 @@ static COMMANDS *find_command (char *name,char cmd_char)
{
while (my_isspace(charset_info,*name))
name++;
-if (strstr(name, delimiter) || strstr(name, "\\g"))
+/*
+  As a special case, allow a line starting with the word 'delimiter',
+  so the delimiter can be changed even when it is set to 'delimiter'.
+*/
+if (strstr(name, "\\g") || (strstr(name, delimiter) &&
+strncmp(name, "delimiter", 9)))
return ((COMMANDS *) 0);
if ((end=strcont(name," \t")))
{
......
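The rewritten condition reads: send the line to the server if it contains \g, or if it contains the current delimiter and does not start with the word "delimiter" (strncmp returns 0 on a match). A minimal stand-alone sketch of that logic; the function and its name are illustrative, not the mysql client's actual code:

#include <stdio.h>
#include <string.h>

/* Illustrative re-statement of the check above: return nonzero when a
   line should be sent to the server rather than parsed as a client
   command.  Not the mysql client's actual helper. */
static int is_server_statement(const char *line, const char *delim)
{
  while (*line == ' ' || *line == '\t')
    line++;                                   /* skip leading blanks */
  return strstr(line, "\\g") != NULL ||
         (strstr(line, delim) != NULL &&
          strncmp(line, "delimiter", 9) != 0);
}

int main(void)
{
  /* Even with the delimiter set to the word "delimiter", the special
     case lets "delimiter ;" through as a client command (prints 0). */
  printf("%d\n", is_server_statement("select 1 delimiter", "delimiter")); /* 1 */
  printf("%d\n", is_server_statement("delimiter ;", "delimiter"));        /* 0 */
  return 0;
}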
@@ -279,7 +279,6 @@ enum loglevel {
enum cache_type
{
READ_CACHE,WRITE_CACHE,
-APPEND_CACHE, /* Like WRITE_CACHE, but only append */
SEQ_READ_APPEND /* sequential read or append */,
READ_FIFO, READ_NET,WRITE_NET};
......
@@ -79,7 +79,11 @@ row_vers_build_for_consistent_read(
mtr_t* mtr, /* in: mtr holding the latch on rec; it will
also hold the latch on purge_view */
dict_index_t* index, /* in: the clustered index */
+ulint** offsets,/* in/out: offsets returned by
+rec_get_offsets(rec, index) */
read_view_t* view, /* in: the consistent read view */
+mem_heap_t** offset_heap,/* in/out: memory heap from which
+the offsets are allocated */
mem_heap_t* in_heap,/* in: memory heap from which the memory for
old_vers is allocated; memory for possible
intermediate versions is allocated and freed
......
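The two new in/out parameters follow the pattern applied throughout this commit: instead of every function creating a private heap for the offsets array returned by rec_get_offsets(), the caller passes in a reusable array and the heap backing it, so repeated calls stop churning allocations and the computed offsets stay valid after the callee returns. A minimal stand-alone sketch of that caller-owned-scratch idiom; all types and names here are illustrative, not InnoDB's:

#include <stdlib.h>

/* Caller-owned scratch: the callee may (re)allocate, the caller frees.
   Mirrors the offsets/offset_heap parameter pair in spirit only. */
typedef struct {
  unsigned long *offsets;   /* stands in for the ulint* offsets array */
  size_t         cap;       /* stands in for the backing heap */
} scratch_t;

static unsigned long *get_offsets(scratch_t *s, size_t n_fields)
{
  if (s->cap < n_fields) {  /* grow only when needed; error handling elided */
    s->offsets = realloc(s->offsets, n_fields * sizeof *s->offsets);
    s->cap = n_fields;
  }
  for (size_t i = 0; i < n_fields; i++)
    s->offsets[i] = 4 * i;                    /* pretend field offsets */
  return s->offsets;
}

int main(void)
{
  scratch_t s = { NULL, 0 };
  get_offsets(&s, 8);     /* first call allocates */
  get_offsets(&s, 5);     /* later calls reuse the same buffer */
  free(s.offsets);        /* ownership stays with the caller */
  return 0;
}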
@@ -93,7 +93,7 @@ extern ulint srv_max_n_open_files;
extern ulint srv_max_dirty_pages_pct;
extern ulint srv_force_recovery;
-extern ulint srv_thread_concurrency;
+extern ulong srv_thread_concurrency;
extern ulint srv_max_n_threads;
@@ -112,6 +112,7 @@ extern ibool srv_use_checksums;
extern ibool srv_set_thread_priorities;
extern int srv_query_thread_priority;
extern ulong srv_max_buf_pool_modified_pct;
+extern ulong srv_max_purge_lag;
extern ibool srv_use_awe;
extern ibool srv_use_adaptive_hash_indexes;
@@ -131,9 +132,9 @@ extern ibool srv_print_innodb_table_monitor;
extern ibool srv_lock_timeout_and_monitor_active;
extern ibool srv_error_monitor_active;
-extern ulint srv_n_spin_wait_rounds;
-extern ulint srv_n_free_tickets_to_enter;
-extern ulint srv_thread_sleep_delay;
+extern ulong srv_n_spin_wait_rounds;
+extern ulong srv_n_free_tickets_to_enter;
+extern ulong srv_thread_sleep_delay;
extern ulint srv_spin_wait_delay;
extern ibool srv_priority_boost;
......
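The ulint-to-ulong changes presumably line these variables up with the type the server's option and system-variable code uses to store them: if the two sides disagree about the width, a store through an "unsigned long *" into a wider variable updates only part of it on platforms where the sizes differ. A small illustration of the failure mode, illustrative only:

#include <stdio.h>

/* If generic option code writes through an "unsigned long *" but the
   variable was declared with a wider type, only part of the variable
   is updated on platforms where the widths differ (e.g. 64-bit
   Windows, where long is 32 bits).  Illustration only. */
typedef unsigned long long wide_t;       /* stand-in for a wider ulint */

static void set_ulong_option(void *var, unsigned long value)
{
  *(unsigned long *)var = value;          /* what option code would do */
}

int main(void)
{
  wide_t v = 0xffffffffffffffffULL;       /* stale bits everywhere */
  set_ulong_option(&v, 42);
  /* Prints 42 where sizeof(long) == 8; keeps stale high bits where
     sizeof(long) == 4 -- exactly the mismatch the type change avoids. */
  printf("%llu\n", (unsigned long long)v);
  return 0;
}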
@@ -510,6 +510,10 @@ row_sel_build_prev_vers(
read_view_t* read_view, /* in: read view */
plan_t* plan, /* in: plan node for table */
rec_t* rec, /* in: record in a clustered index */
+ulint** offsets, /* in/out: offsets returned by
+rec_get_offsets(rec, plan->index) */
+mem_heap_t** offset_heap, /* in/out: memory heap from which
+the offsets are allocated */
rec_t** old_vers, /* out: old version, or NULL if the
record does not exist in the view:
i.e., it was freshly inserted
@@ -525,8 +529,8 @@ row_sel_build_prev_vers(
}
err = row_vers_build_for_consistent_read(rec, mtr, plan->index,
-read_view, plan->old_vers_heap,
-old_vers);
+offsets, read_view, offset_heap,
+plan->old_vers_heap, old_vers);
return(err);
}
@@ -697,7 +701,8 @@ row_sel_get_clust_rec(
node->read_view)) {
err = row_sel_build_prev_vers(node->read_view, plan,
-clust_rec, &old_vers, mtr);
+clust_rec, &offsets, &heap,
+&old_vers, mtr);
if (err != DB_SUCCESS) {
goto err_exit;
@@ -1396,14 +1401,18 @@ rec_loop:
node->read_view)) {
err = row_sel_build_prev_vers(node->read_view,
-plan, rec, &old_vers,
-&mtr);
+plan, rec,
+&offsets, &heap,
+&old_vers, &mtr);
if (err != DB_SUCCESS) {
goto lock_wait_or_error;
}
if (old_vers == NULL) {
+offsets = rec_get_offsets(
+rec, index, offsets,
+ULINT_UNDEFINED, &heap);
row_sel_fetch_columns(index, rec,
offsets,
UT_LIST_GET_FIRST(plan->columns));
@@ -1417,8 +1426,6 @@ rec_loop:
}
rec = old_vers;
-offsets = rec_get_offsets(rec, index, offsets,
-ULINT_UNDEFINED, &heap);
}
} else if (!lock_sec_rec_cons_read_sees(rec, index,
node->read_view)) {
@@ -2535,6 +2542,10 @@ row_sel_build_prev_vers_for_mysql(
dict_index_t* clust_index, /* in: clustered index */
row_prebuilt_t* prebuilt, /* in: prebuilt struct */
rec_t* rec, /* in: record in a clustered index */
+ulint** offsets, /* in/out: offsets returned by
+rec_get_offsets(rec, clust_index) */
+mem_heap_t** offset_heap, /* in/out: memory heap from which
+the offsets are allocated */
rec_t** old_vers, /* out: old version, or NULL if the
record does not exist in the view:
i.e., it was freshly inserted
@@ -2550,8 +2561,8 @@ row_sel_build_prev_vers_for_mysql(
}
err = row_vers_build_for_consistent_read(rec, mtr, clust_index,
-read_view, prebuilt->old_vers_heap,
-old_vers);
+offsets, read_view, offset_heap,
+prebuilt->old_vers_heap, old_vers);
return(err);
}
@@ -2575,6 +2586,10 @@ row_sel_get_clust_rec_for_mysql(
it, NULL if the old version did not exist
in the read view, i.e., it was a fresh
inserted version */
+ulint** offsets,/* out: offsets returned by
+rec_get_offsets(out_rec, clust_index) */
+mem_heap_t** offset_heap,/* in/out: memory heap from which
+the offsets are allocated */
mtr_t* mtr) /* in: mtr used to get access to the
non-clustered record; the same mtr is used to
access the clustered index */
@@ -2584,9 +2599,6 @@ row_sel_get_clust_rec_for_mysql(
rec_t* old_vers;
ulint err;
trx_t* trx;
-mem_heap_t* heap = NULL;
-ulint offsets_[100] = { 100, };
-ulint* offsets = offsets_;
*out_rec = NULL;
trx = thr_get_trx(thr);
@@ -2642,8 +2654,8 @@ row_sel_get_clust_rec_for_mysql(
goto func_exit;
}
-offsets = rec_get_offsets(clust_rec, clust_index, offsets,
-ULINT_UNDEFINED, &heap);
+*offsets = rec_get_offsets(clust_rec, clust_index, *offsets,
+ULINT_UNDEFINED, offset_heap);
if (prebuilt->select_lock_type != LOCK_NONE) {
/* Try to place a lock on the index record; we are searching
if (prebuilt->select_lock_type != LOCK_NONE) {
/* Try to place a lock on the index record; we are searching
@@ -2651,7 +2663,7 @@ row_sel_get_clust_rec_for_mysql(
we set a LOCK_REC_NOT_GAP type lock */
err = lock_clust_rec_read_check_and_lock(0, clust_rec,
-clust_index, offsets,
+clust_index, *offsets,
prebuilt->select_lock_type,
LOCK_REC_NOT_GAP, thr);
if (err != DB_SUCCESS) {
@@ -2669,11 +2681,12 @@ row_sel_get_clust_rec_for_mysql(
if (trx->isolation_level > TRX_ISO_READ_UNCOMMITTED
&& !lock_clust_rec_cons_read_sees(clust_rec, clust_index,
-offsets, trx->read_view)) {
+*offsets, trx->read_view)) {
err = row_sel_build_prev_vers_for_mysql(
trx->read_view, clust_index,
prebuilt, clust_rec,
+offsets, offset_heap,
&old_vers, mtr);
if (err != DB_SUCCESS) {
@@ -2722,9 +2735,6 @@ func_exit:
err = DB_SUCCESS;
err_exit:
-if (heap) {
-mem_heap_free(heap);
-}
return(err);
}
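With offsets and offset_heap now owned by the caller, the function's private heap, and the func_exit cleanup above, would be actively harmful: anything allocated from a heap freed on exit could not be handed back. A tiny sketch of the lifetime rule, with illustrative names only, not InnoDB's heap API:

#include <stdlib.h>
#include <string.h>

/* Results that outlive a call must come from storage the caller owns. */
static char *describe_into(char **caller_buf)
{
  *caller_buf = realloc(*caller_buf, 32);   /* caller's allocation */
  strcpy(*caller_buf, "still valid after return");
  return *caller_buf;
}

int main(void)
{
  char *buf = NULL;
  char *r = describe_into(&buf);
  int ok = (r[0] == 's');                   /* safe: main owns buf */
  free(buf);
  return !ok;
}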
@@ -3671,6 +3681,7 @@ rec_loop:
err = row_sel_build_prev_vers_for_mysql(
trx->read_view, clust_index,
prebuilt, rec,
+&offsets, &heap,
&old_vers, &mtr);
if (err != DB_SUCCESS) {
@@ -3714,6 +3725,12 @@ rec_loop:
index_rec = rec;
+/* Before and after the following "if" block, "offsets" will be
+related to "rec", which may be in "index", a secondary index or
+the clustered index ("clust_index"). However, after this "if" block,
+"rec" may be pointing to "clust_rec" of "clust_index". */
+ut_ad(rec_offs_validate(rec, index, offsets));
if (index != clust_index && (cons_read_requires_clust_rec
|| prebuilt->need_to_access_clustered)) {
@@ -3723,7 +3740,8 @@ rec_loop:
mtr_has_extra_clust_latch = TRUE;
err = row_sel_get_clust_rec_for_mysql(prebuilt, index, rec,
-thr, &clust_rec, &mtr);
+thr, &clust_rec,
+&offsets, &heap, &mtr);
if (err != DB_SUCCESS) {
goto lock_wait_or_error;
@@ -3745,20 +3763,18 @@ rec_loop:
if (prebuilt->need_to_access_clustered) {
rec = clust_rec;
-}
-}
-if (prebuilt->need_to_access_clustered) {
-ut_ad(rec == clust_rec || index == clust_index);
-offsets = rec_get_offsets(rec, clust_index, offsets,
-ULINT_UNDEFINED, &heap);
-} else {
-offsets = rec_get_offsets(rec, index, offsets,
+ut_ad(rec_offs_validate(rec, clust_index, offsets));
+} else {
+offsets = rec_get_offsets(rec, index, offsets,
ULINT_UNDEFINED, &heap);
}
}
/* We found a qualifying row */
+ut_ad(rec_offs_validate(rec,
+rec == clust_rec ? clust_index : index,
+offsets));
if (prebuilt->n_rows_fetched >= MYSQL_FETCH_CACHE_THRESHOLD
&& prebuilt->select_lock_type == LOCK_NONE
&& !prebuilt->templ_contains_blob
@@ -3800,8 +3816,11 @@ rec_loop:
}
if (prebuilt->clust_index_was_generated) {
-offsets = rec_get_offsets(index_rec, index, offsets,
+if (rec != index_rec) {
+offsets = rec_get_offsets(
+index_rec, index, offsets,
ULINT_UNDEFINED, &heap);
+}
row_sel_store_row_id_to_prebuilt(prebuilt, index_rec,
index, offsets);
}
......
@@ -406,7 +406,11 @@ row_vers_build_for_consistent_read(
of this records */
mtr_t* mtr, /* in: mtr holding the latch on rec */
dict_index_t* index, /* in: the clustered index */
+ulint** offsets,/* in/out: offsets returned by
+rec_get_offsets(rec, index) */
read_view_t* view, /* in: the consistent read view */
+mem_heap_t** offset_heap,/* in/out: memory heap from which
+the offsets are allocated */
mem_heap_t* in_heap,/* in: memory heap from which the memory for
old_vers is allocated; memory for possible
intermediate versions is allocated and freed
@@ -418,11 +422,9 @@ row_vers_build_for_consistent_read(
rec_t* version;
rec_t* prev_version;
dulint prev_trx_id;
-mem_heap_t* heap;
-mem_heap_t* heap2;
+mem_heap_t* heap = NULL;
byte* buf;
ulint err;
-ulint* offsets;
ut_ad(index->type & DICT_CLUSTERED);
ut_ad(mtr_memo_contains(mtr, buf_block_align(rec), MTR_MEMO_PAGE_X_FIX)
@@ -432,22 +434,23 @@ row_vers_build_for_consistent_read(
ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */
-heap = mem_heap_create(1024);
-offsets = rec_get_offsets(rec, index, NULL, ULINT_UNDEFINED, &heap);
+ut_ad(rec_offs_validate(rec, index, *offsets));
ut_ad(!read_view_sees_trx_id(view,
-row_get_rec_trx_id(rec, index, offsets)));
+row_get_rec_trx_id(rec, index, *offsets)));
rw_lock_s_lock(&(purge_sys->latch));
version = rec;
for (;;) {
-heap2 = heap;
+mem_heap_t* heap2 = heap;
heap = mem_heap_create(1024);
err = trx_undo_prev_version_build(rec, mtr, version, index,
-offsets, heap, &prev_version);
-mem_heap_free(heap2); /* free version and offsets */
+*offsets, heap, &prev_version);
+if (heap2) {
+mem_heap_free(heap2); /* free version */
+}
if (err != DB_SUCCESS) {
break;
@@ -461,17 +464,19 @@ row_vers_build_for_consistent_read(
break;
}
-offsets = rec_get_offsets(prev_version, index, NULL,
-ULINT_UNDEFINED, &heap);
-prev_trx_id = row_get_rec_trx_id(prev_version, index, offsets);
+*offsets = rec_get_offsets(prev_version, index, *offsets,
+ULINT_UNDEFINED, offset_heap);
+prev_trx_id = row_get_rec_trx_id(prev_version, index,
+*offsets);
if (read_view_sees_trx_id(view, prev_trx_id)) {
/* The view already sees this version: we can copy
it to in_heap and return */
-buf = mem_heap_alloc(in_heap, rec_offs_size(offsets));
-*old_vers = rec_copy(buf, prev_version, offsets);
+buf = mem_heap_alloc(in_heap, rec_offs_size(*offsets));
+*old_vers = rec_copy(buf, prev_version, *offsets);
+rec_offs_make_valid(*old_vers, index, *offsets);
err = DB_SUCCESS;
break;
err = DB_SUCCESS;
break;
......
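The loop above keeps two arenas alive at once: each iteration builds the previous record version into a fresh heap, then frees the heap from the iteration before, whose contents the new version was built from. Starting with heap = NULL, as the patch now does, makes the first iteration's free a no-op instead of requiring an eager mem_heap_create(). The same shape in stand-alone C; arena_t and the buffer contents are illustrative:

#include <stdio.h>
#include <stdlib.h>

typedef struct arena { char buf[256]; } arena_t;

int main(void)
{
  arena_t *heap = NULL;                  /* was created eagerly before */
  for (int i = 0; i < 3; i++) {
    arena_t *heap2 = heap;               /* remember the previous arena */
    heap = malloc(sizeof *heap);         /* arena for this version;
                                            error handling elided */
    snprintf(heap->buf, sizeof heap->buf, "version %d", i);
    if (heap2)                           /* free the older version */
      free(heap2);
  }
  puts(heap->buf);                       /* "version 2" survives */
  free(heap);
  return 0;
}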
@@ -185,7 +185,7 @@ in the buffer pool to all database pages in the buffer pool smaller than
the following number. But it is not guaranteed that the value stays below
that during a time of heavy update/insert activity. */
-ulint srv_max_buf_pool_modified_pct = 90;
+ulong srv_max_buf_pool_modified_pct = 90;
/* variable counts amount of data read in total (in bytes) */
ulint srv_data_read = 0;
@@ -260,7 +260,7 @@ semaphore contention and convoy problems can occur without this restriction.
Value 10 should be good if there are fewer than 4 processors + 4 disks in the
computer. Bigger computers need bigger values. */
-ulint srv_thread_concurrency = 8;
+ulong srv_thread_concurrency = 8;
os_fast_mutex_t srv_conc_mutex; /* this mutex protects srv_conc data
structures */
@@ -324,9 +324,9 @@ ibool srv_use_awe = FALSE;
ibool srv_use_adaptive_hash_indexes = TRUE;
/*-------------------------------------------*/
-ulint srv_n_spin_wait_rounds = 20;
-ulint srv_n_free_tickets_to_enter = 500;
-ulint srv_thread_sleep_delay = 10000;
+ulong srv_n_spin_wait_rounds = 20;
+ulong srv_n_free_tickets_to_enter = 500;
+ulong srv_thread_sleep_delay = 10000;
ulint srv_spin_wait_delay = 5;
ibool srv_priority_boost = TRUE;
ulint srv_spin_wait_delay = 5;
ibool srv_priority_boost = TRUE;
......
@@ -175,7 +175,7 @@ static uint hash_rec_mask(HASH *hash,HASH_LINK *pos,uint buffmax,
/* for compilers which can not handle inline */
static
-#if !defined(__SUNPRO_C) && !defined(__USLC__) && !defined(__sgi)
+#if !defined(__USLC__) && !defined(__sgi)
inline
#endif
unsigned int rec_hashnr(HASH *hash,const byte *record)
......
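The guard above requests inline only from compilers known to accept it in this position; dropping __SUNPRO_C from the exclusion list means Sun's compiler now gets the inline definition too. The construct in isolation, with an illustrative function body:

#include <stdio.h>

/* Same construct as in hash.c: "inline" is compiled in only when the
   preprocessor tests pass; otherwise the function is plain static with
   identical semantics. */
static
#if !defined(__USLC__) && !defined(__sgi)
inline
#endif
unsigned int next_pow2(unsigned int v)
{
  unsigned int p = 1;
  while (p < v)
    p <<= 1;
  return p;
}

int main(void)
{
  printf("%u\n", next_pow2(40));   /* 64 */
  return 0;
}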
@@ -87,7 +87,7 @@ static void my_aiowait(my_aio_result *result);
void setup_io_cache(IO_CACHE* info)
{
/* Ensure that my_b_tell() and my_b_bytes_in_cache works */
-if (info->type == WRITE_CACHE || info->type == APPEND_CACHE)
+if (info->type == WRITE_CACHE)
{
info->current_pos= &info->write_pos;
info->current_end= &info->write_end;
@@ -247,7 +247,7 @@ int init_io_cache(IO_CACHE *info, File file, uint cachesize,
}
#endif
-if (type == WRITE_CACHE || type == APPEND_CACHE)
+if (type == WRITE_CACHE)
info->write_end=
info->buffer+info->buffer_length- (seek_offset & (IO_SIZE-1));
else
@@ -318,7 +318,6 @@ my_bool reinit_io_cache(IO_CACHE *info, enum cache_type type,
/* One can't do reinit with the following types */
DBUG_ASSERT(type != READ_NET && info->type != READ_NET &&
type != WRITE_NET && info->type != WRITE_NET &&
-type != APPEND_CACHE && info->type != APPEND_CACHE &&
type != SEQ_READ_APPEND && info->type != SEQ_READ_APPEND);
/* If the whole file is in memory, avoid flushing to disk */
@@ -1124,8 +1123,7 @@ int my_b_flush_io_cache(IO_CACHE *info, int need_append_buffer_lock)
my_off_t pos_in_file;
DBUG_ENTER("my_b_flush_io_cache");
-if (!(append_cache = (info->type == SEQ_READ_APPEND ||
-info->type == APPEND_CACHE)))
+if (!(append_cache = (info->type == SEQ_READ_APPEND)))
need_append_buffer_lock=0;
if (info->type == WRITE_CACHE || append_cache)
@@ -1172,13 +1170,7 @@ int my_b_flush_io_cache(IO_CACHE *info, int need_append_buffer_lock)
else
{
info->end_of_file+=(info->write_pos-info->append_read_pos);
-/*
-We only need to worry that info->end_of_file is really accurate
-for SEQ_READ_APPEND. For APPEND_CACHE, it is possible that the
-file is non-seekable, like a FIFO.
-*/
-DBUG_ASSERT(info->type != SEQ_READ_APPEND ||
-info->end_of_file == my_tell(info->file,MYF(0)));
+DBUG_ASSERT(info->end_of_file == my_tell(info->file,MYF(0)));
info->append_read_pos=info->write_pos=info->write_buffer;
......
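The deleted comment explained why the assertion used to be guarded: an APPEND_CACHE could sit on a non-seekable file such as a FIFO, where comparing a byte count against the file position is meaningless. With APPEND_CACHE gone, SEQ_READ_APPEND is the only append type left and its file is seekable, so the invariant end_of_file == current file position can be asserted unconditionally. A stand-alone illustration using stdio rather than the mysys API:

#include <assert.h>
#include <stdio.h>

int main(void)
{
  FILE *f = tmpfile();               /* seekable, unlike a FIFO */
  long end_of_file = 0;

  if (!f)
    return 1;
  fputs("hello", f);
  end_of_file += 5;                  /* what the cache tracks */
  fflush(f);
  assert(end_of_file == ftell(f));   /* holds for seekable files */

  fclose(f);
  return 0;
}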
@@ -356,6 +356,8 @@ public:
void clear();
bool add();
double val_real();
+// In SPs we might force the "wrong" type with SELECT ... INTO a declared variable
+longlong val_int() { return (longlong)val_real(); }
my_decimal *val_decimal(my_decimal *);
String *val_str(String *str);
void reset_field();
......
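The new val_int() converts the aggregate's double result with a plain C cast, which truncates toward zero rather than rounding; so when a stored procedure does SELECT ... INTO an integer variable, a result of 2.75 becomes 2 through this path. In isolation:

#include <stdio.h>

/* A C cast from double to an integer type truncates toward zero: */
int main(void)
{
  printf("%lld\n", (long long)2.75);   /* prints 2, not 3 */
  printf("%lld\n", (long long)-2.75);  /* prints -2, not -3 */
  return 0;
}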