Commit e967891e authored by tomas@poseidon.ndb.mysql.com

Merge tulin@bk-internal.mysql.com:/home/bk/mysql-5.0

into poseidon.ndb.mysql.com:/home/tomas/mysql-5.0-ndb
parents fa21fd56 699c8f38
@@ -44,7 +44,7 @@
 #include <locale.h>
 #endif

-const char *VER= "14.8";
+const char *VER= "14.9";

 /* Don't try to make a nice table if the data is too big */
 #define MAX_COLUMN_LENGTH       1024
@@ -1045,7 +1045,12 @@ static COMMANDS *find_command (char *name,char cmd_char)
 {
   while (my_isspace(charset_info,*name))
     name++;
-  if (strstr(name, delimiter) || strstr(name, "\\g"))
+  /*
+    As special case we allow row that starts with word delimiter
+    to be able to change delimiter if someone has delimiter 'delimiter'.
+  */
+  if (strstr(name, "\\g") || (strstr(name, delimiter) &&
+                              strncmp(name, "delimiter", 9)))
     return ((COMMANDS *) 0);
   if ((end=strcont(name," \t")))
   {
......
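The rewritten condition in find_command() hands the line to the SQL parser (returns NULL) when it contains `\g` or the current delimiter, except when the line itself starts with the word `delimiter`, so a `delimiter` command is still recognized even if the line contains the active delimiter string. A minimal standalone sketch of that check, not code from this commit (the `delimiter` variable and `skip_command_lookup` name are made up):

```cpp
#include <cstdio>
#include <cstring>

// Stand-in for the client's current statement delimiter.
static const char *delimiter = ";";

// Returns true when command lookup should be skipped and the line treated as
// ordinary SQL, false when it may still be a client command such as "delimiter //".
static bool skip_command_lookup(const char *line)
{
    return std::strstr(line, "\\g") != nullptr
        || (std::strstr(line, delimiter) != nullptr
            && std::strncmp(line, "delimiter", 9) != 0);
}

int main()
{
    std::printf("%d\n", skip_command_lookup("select 1;"));    // 1: plain SQL
    std::printf("%d\n", skip_command_lookup("delimiter ;;")); // 0: still a command
    return 0;
}
```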
@@ -279,7 +279,6 @@ enum loglevel {
 enum cache_type
 {
   READ_CACHE,WRITE_CACHE,
-  APPEND_CACHE,          /* Like WRITE_CACHE, but only append */
   SEQ_READ_APPEND        /* sequential read or append */,
   READ_FIFO, READ_NET,WRITE_NET};
......
@@ -79,7 +79,11 @@ row_vers_build_for_consistent_read(
 	mtr_t*		mtr,	/* in: mtr holding the latch on rec; it will
 				also hold the latch on purge_view */
 	dict_index_t*	index,	/* in: the clustered index */
+	ulint**		offsets,/* in/out: offsets returned by
+				rec_get_offsets(rec, index) */
 	read_view_t*	view,	/* in: the consistent read view */
+	mem_heap_t**	offset_heap,/* in/out: memory heap from which
+				the offsets are allocated */
 	mem_heap_t*	in_heap,/* in: memory heap from which the memory for
 				old_vers is allocated; memory for possible
 				intermediate versions is allocated and freed
......
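The new `offsets` and `offset_heap` parameters move ownership of the rec_get_offsets() result to the caller: the function may grow `*offsets` out of `*offset_heap`, and the caller frees that heap once at the end instead of this function creating and destroying a private heap on every call. A standalone sketch of that ownership convention, not InnoDB code (the `Heap`, `get_offsets` and `build_version` names are invented stand-ins for mem_heap_t, rec_get_offsets and the version-building callee):

```cpp
#include <cstddef>
#include <vector>

// Illustrative stand-in: InnoDB uses mem_heap_t and ulint arrays instead.
struct Heap {
    std::vector<int*> blocks;
    ~Heap() { for (int* b : blocks) delete[] b; }
};

// Reuse *offsets when it is big enough, otherwise allocate a bigger array
// from *heap (creating the heap lazily) and hand the new array back.
static int* get_offsets(std::size_t n_fields, int* offsets, Heap** heap)
{
    if (offsets == nullptr
        || static_cast<std::size_t>(offsets[0]) < n_fields + 1) {
        if (*heap == nullptr) {
            *heap = new Heap();                 // created at most once
        }
        offsets = new int[n_fields + 1];
        offsets[0] = static_cast<int>(n_fields + 1);
        (*heap)->blocks.push_back(offsets);
    }
    return offsets;
}

// Callee in the style of the new signature: it works on caller-owned offsets
// and may reallocate them from the caller-owned heap.
static void build_version(std::size_t n_fields, int** offsets, Heap** offset_heap)
{
    *offsets = get_offsets(n_fields, *offsets, offset_heap);
    // ... fill in per-field offsets for the version being built ...
}

int main()
{
    int stack_offsets[10] = { 10 };       // small on-stack buffer, like offsets_[100]
    int* offsets = stack_offsets;
    Heap* heap = nullptr;

    build_version(4, &offsets, &heap);    // fits in the stack buffer, no heap yet
    build_version(64, &offsets, &heap);   // too big: allocated from the heap

    delete heap;                          // one cleanup point in the caller
    return 0;
}
```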
@@ -93,7 +93,7 @@ extern ulint srv_max_n_open_files;
 extern ulint	srv_max_dirty_pages_pct;

 extern ulint	srv_force_recovery;
-extern ulint	srv_thread_concurrency;
+extern ulong	srv_thread_concurrency;

 extern ulint	srv_max_n_threads;
@@ -112,6 +112,7 @@ extern ibool srv_use_checksums;
 extern ibool	srv_set_thread_priorities;
 extern int	srv_query_thread_priority;

+extern ulong	srv_max_buf_pool_modified_pct;
 extern ulong	srv_max_purge_lag;
 extern ibool	srv_use_awe;
 extern ibool	srv_use_adaptive_hash_indexes;
@@ -131,9 +132,9 @@ extern ibool srv_print_innodb_table_monitor;
 extern ibool	srv_lock_timeout_and_monitor_active;
 extern ibool	srv_error_monitor_active;

-extern ulint	srv_n_spin_wait_rounds;
-extern ulint	srv_n_free_tickets_to_enter;
-extern ulint	srv_thread_sleep_delay;
+extern ulong	srv_n_spin_wait_rounds;
+extern ulong	srv_n_free_tickets_to_enter;
+extern ulong	srv_thread_sleep_delay;
 extern ulint	srv_spin_wait_delay;
 extern ibool	srv_priority_boost;
......
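The ulint-to-ulong switches (and the new srv_max_buf_pool_modified_pct declaration) presumably keep these settings the same width as the `ulong` server variables that write into them through pointers; on a platform where the two types differ in size, a width mismatch stores the wrong number of bytes. A contrived standalone illustration of that hazard, not MySQL code (the variable name, the 32-bit store helper and the seeded high half are all made up):

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

// Stand-in for a global that is 64 bits wide on this build; the high half is
// seeded with garbage only to make the effect visible.
static std::uint64_t srv_setting = 0xDEADBEEF00000000ULL;

// Stand-in for option handling that believes the target is a 32-bit ulong
// and therefore copies exactly four bytes.
static void store_as_ulong32(void* target, std::uint32_t value)
{
    std::memcpy(target, &value, sizeof(value));
}

int main()
{
    store_as_ulong32(&srv_setting, 16);
    // On a little-endian machine only the low half was updated, so the value
    // the server now sees is nowhere near the configured 16.
    std::printf("srv_setting = %llu\n",
                static_cast<unsigned long long>(srv_setting));
    return 0;
}
```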
@@ -510,6 +510,10 @@ row_sel_build_prev_vers(
 	read_view_t*	read_view,	/* in: read view */
 	plan_t*		plan,		/* in: plan node for table */
 	rec_t*		rec,		/* in: record in a clustered index */
+	ulint**		offsets,	/* in/out: offsets returned by
+					rec_get_offsets(rec, plan->index) */
+	mem_heap_t**	offset_heap,	/* in/out: memory heap from which
+					the offsets are allocated */
 	rec_t**		old_vers,	/* out: old version, or NULL if the
 					record does not exist in the view:
 					i.e., it was freshly inserted
@@ -525,8 +529,8 @@ row_sel_build_prev_vers(
 	}

 	err = row_vers_build_for_consistent_read(rec, mtr, plan->index,
-					read_view, plan->old_vers_heap,
-					old_vers);
+					offsets, read_view, offset_heap,
+					plan->old_vers_heap, old_vers);
 	return(err);
 }
@@ -697,7 +701,8 @@ row_sel_get_clust_rec(
 					       node->read_view)) {

 			err = row_sel_build_prev_vers(node->read_view, plan,
-						clust_rec, &old_vers, mtr);
+						clust_rec, &offsets, &heap,
+						&old_vers, mtr);

 			if (err != DB_SUCCESS) {
 				goto err_exit;
@@ -1396,14 +1401,18 @@ rec_loop:
 						      node->read_view)) {

 				err = row_sel_build_prev_vers(node->read_view,
-							plan, rec, &old_vers,
-							&mtr);
+							plan, rec,
+							&offsets, &heap,
+							&old_vers, &mtr);

 				if (err != DB_SUCCESS) {

 					goto lock_wait_or_error;
 				}

 				if (old_vers == NULL) {
+					offsets = rec_get_offsets(
+						rec, index, offsets,
+						ULINT_UNDEFINED, &heap);
 					row_sel_fetch_columns(index, rec,
 						offsets,
 						UT_LIST_GET_FIRST(plan->columns));
@@ -1417,8 +1426,6 @@ rec_loop:
 				}

 				rec = old_vers;
-				offsets = rec_get_offsets(rec, index, offsets,
-						ULINT_UNDEFINED, &heap);
 			}
 		} else if (!lock_sec_rec_cons_read_sees(rec, index,
 						node->read_view)) {
@@ -2535,6 +2542,10 @@ row_sel_build_prev_vers_for_mysql(
 	dict_index_t*	clust_index,	/* in: clustered index */
 	row_prebuilt_t*	prebuilt,	/* in: prebuilt struct */
 	rec_t*		rec,		/* in: record in a clustered index */
+	ulint**		offsets,	/* in/out: offsets returned by
+					rec_get_offsets(rec, clust_index) */
+	mem_heap_t**	offset_heap,	/* in/out: memory heap from which
+					the offsets are allocated */
 	rec_t**		old_vers,	/* out: old version, or NULL if the
 					record does not exist in the view:
 					i.e., it was freshly inserted
@@ -2550,8 +2561,8 @@ row_sel_build_prev_vers_for_mysql(
 	}

 	err = row_vers_build_for_consistent_read(rec, mtr, clust_index,
-					read_view, prebuilt->old_vers_heap,
-					old_vers);
+					offsets, read_view, offset_heap,
+					prebuilt->old_vers_heap, old_vers);
 	return(err);
 }
@@ -2575,6 +2586,10 @@ row_sel_get_clust_rec_for_mysql(
 					it, NULL if the old version did not exist
 					in the read view, i.e., it was a fresh
 					inserted version */
+	ulint**		offsets,/* out: offsets returned by
+				rec_get_offsets(out_rec, clust_index) */
+	mem_heap_t**	offset_heap,/* in/out: memory heap from which
+				the offsets are allocated */
 	mtr_t*		mtr)	/* in: mtr used to get access to the
 				non-clustered record; the same mtr is used to
 				access the clustered index */
@@ -2584,9 +2599,6 @@ row_sel_get_clust_rec_for_mysql(
 	rec_t*		old_vers;
 	ulint		err;
 	trx_t*		trx;
-	mem_heap_t*	heap		= NULL;
-	ulint		offsets_[100]	= { 100, };
-	ulint*		offsets		= offsets_;

 	*out_rec = NULL;
 	trx = thr_get_trx(thr);
@@ -2642,8 +2654,8 @@ row_sel_get_clust_rec_for_mysql(
 		goto func_exit;
 	}

-	offsets = rec_get_offsets(clust_rec, clust_index, offsets,
-					ULINT_UNDEFINED, &heap);
+	*offsets = rec_get_offsets(clust_rec, clust_index, *offsets,
+					ULINT_UNDEFINED, offset_heap);

 	if (prebuilt->select_lock_type != LOCK_NONE) {
 		/* Try to place a lock on the index record; we are searching
@@ -2651,7 +2663,7 @@ row_sel_get_clust_rec_for_mysql(
 		we set a LOCK_REC_NOT_GAP type lock */

 		err = lock_clust_rec_read_check_and_lock(0, clust_rec,
-					clust_index, offsets,
+					clust_index, *offsets,
 					prebuilt->select_lock_type,
 					LOCK_REC_NOT_GAP, thr);
 		if (err != DB_SUCCESS) {
@@ -2669,11 +2681,12 @@ row_sel_get_clust_rec_for_mysql(
 		if (trx->isolation_level > TRX_ISO_READ_UNCOMMITTED
 		    && !lock_clust_rec_cons_read_sees(clust_rec, clust_index,
-						offsets, trx->read_view)) {
+						*offsets, trx->read_view)) {

 			err = row_sel_build_prev_vers_for_mysql(
 						trx->read_view, clust_index,
 						prebuilt, clust_rec,
+						offsets, offset_heap,
 						&old_vers, mtr);

 			if (err != DB_SUCCESS) {
@@ -2722,9 +2735,6 @@ func_exit:
 	err = DB_SUCCESS;
 err_exit:
-	if (heap) {
-		mem_heap_free(heap);
-	}
 	return(err);
 }
@@ -3671,6 +3681,7 @@ rec_loop:
 			err = row_sel_build_prev_vers_for_mysql(
 						trx->read_view, clust_index,
 						prebuilt, rec,
+						&offsets, &heap,
 						&old_vers, &mtr);

 			if (err != DB_SUCCESS) {
@@ -3714,6 +3725,12 @@ rec_loop:
 	index_rec = rec;

+	/* Before and after the following "if" block, "offsets" will be
+	related to "rec", which may be in "index", a secondary index or
+	the clustered index ("clust_index"). However, after this "if" block,
+	"rec" may be pointing to "clust_rec" of "clust_index". */
+	ut_ad(rec_offs_validate(rec, index, offsets));
+
 	if (index != clust_index && (cons_read_requires_clust_rec
 				|| prebuilt->need_to_access_clustered)) {
@@ -3723,7 +3740,8 @@ rec_loop:
 		mtr_has_extra_clust_latch = TRUE;

 		err = row_sel_get_clust_rec_for_mysql(prebuilt, index, rec,
-						thr, &clust_rec, &mtr);
+						thr, &clust_rec,
+						&offsets, &heap, &mtr);
 		if (err != DB_SUCCESS) {

 			goto lock_wait_or_error;
@@ -3745,19 +3763,17 @@ rec_loop:
 		if (prebuilt->need_to_access_clustered) {
 			rec = clust_rec;
-		}
-	}
-
-	if (prebuilt->need_to_access_clustered) {
-		ut_ad(rec == clust_rec || index == clust_index);
-		offsets = rec_get_offsets(rec, clust_index, offsets,
-					ULINT_UNDEFINED, &heap);
+			ut_ad(rec_offs_validate(rec, clust_index, offsets));
 		} else {
 			offsets = rec_get_offsets(rec, index, offsets,
 						ULINT_UNDEFINED, &heap);
 		}
+	}

 	/* We found a qualifying row */
+	ut_ad(rec_offs_validate(rec,
+			rec == clust_rec ? clust_index : index,
+			offsets));

 	if (prebuilt->n_rows_fetched >= MYSQL_FETCH_CACHE_THRESHOLD
 	    && prebuilt->select_lock_type == LOCK_NONE
@@ -3800,8 +3816,11 @@ rec_loop:
 	}

 	if (prebuilt->clust_index_was_generated) {
-		offsets = rec_get_offsets(index_rec, index, offsets,
-					ULINT_UNDEFINED, &heap);
+		if (rec != index_rec) {
+			offsets = rec_get_offsets(
+				index_rec, index, offsets,
+				ULINT_UNDEFINED, &heap);
+		}
 		row_sel_store_row_id_to_prebuilt(prebuilt, index_rec,
 							index, offsets);
 	}
......
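With row_sel_get_clust_rec_for_mysql() now filling in the caller's `offsets` and `heap`, the offsets handed back to the search loop (the rec_loop: hunks above) must always describe whichever record is actually in use, the secondary-index record or the clustered-index record, and the new ut_ad(rec_offs_validate(...)) assertions check that pairing. A standalone sketch of that kind of debug bookkeeping, tagging an offsets object with the record and index it belongs to; the layout and names are invented, not InnoDB's:

```cpp
#include <cassert>

// Remember which record and index a set of offsets was computed for, and
// check the tag before anyone uses the offsets. Illustration only.
struct Offsets {
    const void* rec = nullptr;     // record these offsets describe
    const void* index = nullptr;   // index that record belongs to
};

static void offsets_make_valid(Offsets& o, const void* rec, const void* index)
{
    o.rec = rec;
    o.index = index;
}

static bool offsets_validate(const Offsets& o, const void* rec, const void* index)
{
    return o.rec == rec && o.index == index;
}

int main()
{
    int sec_rec = 0, clust_rec = 0, sec_index = 0, clust_index = 0;

    Offsets offsets;
    offsets_make_valid(offsets, &sec_rec, &sec_index);

    // After fetching the clustered record the cursor switches to clust_rec;
    // silently reusing the secondary-index offsets would be a bug.
    const void* rec = &clust_rec;
    assert(!offsets_validate(offsets, rec, &clust_index));

    offsets_make_valid(offsets, &clust_rec, &clust_index);   // recompute / re-tag
    assert(offsets_validate(offsets, rec, &clust_index));
    return 0;
}
```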
@@ -406,7 +406,11 @@ row_vers_build_for_consistent_read(
 				of this records */
 	mtr_t*		mtr,	/* in: mtr holding the latch on rec */
 	dict_index_t*	index,	/* in: the clustered index */
+	ulint**		offsets,/* in/out: offsets returned by
+				rec_get_offsets(rec, index) */
 	read_view_t*	view,	/* in: the consistent read view */
+	mem_heap_t**	offset_heap,/* in/out: memory heap from which
+				the offsets are allocated */
 	mem_heap_t*	in_heap,/* in: memory heap from which the memory for
 				old_vers is allocated; memory for possible
 				intermediate versions is allocated and freed
@@ -418,11 +422,9 @@ row_vers_build_for_consistent_read(
 	rec_t*		version;
 	rec_t*		prev_version;
 	dulint		prev_trx_id;
-	mem_heap_t*	heap;
-	mem_heap_t*	heap2;
+	mem_heap_t*	heap		= NULL;
 	byte*		buf;
 	ulint		err;
-	ulint*		offsets;

 	ut_ad(index->type & DICT_CLUSTERED);
 	ut_ad(mtr_memo_contains(mtr, buf_block_align(rec), MTR_MEMO_PAGE_X_FIX)
@@ -432,22 +434,23 @@ row_vers_build_for_consistent_read(
 	ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
 #endif /* UNIV_SYNC_DEBUG */

-	heap = mem_heap_create(1024);
-	offsets = rec_get_offsets(rec, index, NULL, ULINT_UNDEFINED, &heap);
+	ut_ad(rec_offs_validate(rec, index, *offsets));

 	ut_ad(!read_view_sees_trx_id(view,
-				row_get_rec_trx_id(rec, index, offsets)));
+				row_get_rec_trx_id(rec, index, *offsets)));

 	rw_lock_s_lock(&(purge_sys->latch));
 	version = rec;

 	for (;;) {
-		heap2 = heap;
+		mem_heap_t*	heap2 = heap;
 		heap = mem_heap_create(1024);

 		err = trx_undo_prev_version_build(rec, mtr, version, index,
-						offsets, heap, &prev_version);
-		mem_heap_free(heap2); /* free version and offsets */
+						*offsets, heap, &prev_version);
+		if (heap2) {
+			mem_heap_free(heap2); /* free version */
+		}

 		if (err != DB_SUCCESS) {
 			break;
@@ -461,17 +464,19 @@ row_vers_build_for_consistent_read(
 			break;
 		}

-		offsets = rec_get_offsets(prev_version, index, NULL,
-					ULINT_UNDEFINED, &heap);
-		prev_trx_id = row_get_rec_trx_id(prev_version, index, offsets);
+		*offsets = rec_get_offsets(prev_version, index, *offsets,
+					ULINT_UNDEFINED, offset_heap);
+		prev_trx_id = row_get_rec_trx_id(prev_version, index,
+						*offsets);

 		if (read_view_sees_trx_id(view, prev_trx_id)) {

 			/* The view already sees this version: we can copy
 			it to in_heap and return */

-			buf = mem_heap_alloc(in_heap, rec_offs_size(offsets));
-			*old_vers = rec_copy(buf, prev_version, offsets);
+			buf = mem_heap_alloc(in_heap, rec_offs_size(*offsets));
+			*old_vers = rec_copy(buf, prev_version, *offsets);
+			rec_offs_make_valid(*old_vers, index, *offsets);

 			err = DB_SUCCESS;
 			break;
......
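Inside the version-building loop each older version is built in a fresh heap, and the heap holding the previous version is freed only after the next one has been derived from it; because `heap` now starts out NULL, the first pass has nothing to free, hence the new `if (heap2)` guard. A standalone sketch of that heap rotation, not InnoDB code (`Arena` and `build_prev_version` are invented stand-ins for mem_heap_t and trx_undo_prev_version_build):

```cpp
#include <memory>
#include <string>
#include <vector>

// "Arena" stands in for mem_heap_t: a bag of allocations freed all at once.
using Arena = std::vector<std::string>;

// Build the previous version of a record into the given arena, or return
// nullptr when no older version exists.
static const std::string* build_prev_version(const std::string& version, Arena& arena)
{
    if (version.empty()) {
        return nullptr;
    }
    arena.push_back(version.substr(0, version.size() - 1));
    return &arena.back();
}

int main()
{
    std::string rec = "v3210";               // newest version of the record
    const std::string* version = &rec;

    std::unique_ptr<Arena> heap;             // starts out NULL, as in the patch
    for (;;) {
        std::unique_ptr<Arena> heap2 = std::move(heap);   // previous arena, may be NULL
        heap = std::make_unique<Arena>();

        const std::string* prev = build_prev_version(*version, *heap);
        heap2.reset();                        // "if (heap2) mem_heap_free(heap2)"

        if (prev == nullptr) {
            break;                            // walked past the oldest version
        }
        version = prev;                       // keep following the history
    }
    return 0;
}
```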
@@ -185,7 +185,7 @@ in the buffer pool to all database pages in the buffer pool smaller than
 the following number. But it is not guaranteed that the value stays below
 that during a time of heavy update/insert activity. */

-ulint	srv_max_buf_pool_modified_pct	= 90;
+ulong	srv_max_buf_pool_modified_pct	= 90;

 /* variable counts amount of data read in total (in bytes) */
 ulint srv_data_read = 0;
@@ -260,7 +260,7 @@ semaphore contention and convoy problems can occur withput this restriction.
 Value 10 should be good if there are less than 4 processors + 4 disks in the
 computer. Bigger computers need bigger values. */

-ulint	srv_thread_concurrency	= 8;
+ulong	srv_thread_concurrency	= 8;

 os_fast_mutex_t	srv_conc_mutex;		/* this mutex protects srv_conc data
 					structures */
@@ -324,9 +324,9 @@ ibool srv_use_awe = FALSE;
 ibool	srv_use_adaptive_hash_indexes	= TRUE;

 /*-------------------------------------------*/
-ulint	srv_n_spin_wait_rounds	= 20;
-ulint	srv_n_free_tickets_to_enter = 500;
-ulint	srv_thread_sleep_delay = 10000;
+ulong	srv_n_spin_wait_rounds	= 20;
+ulong	srv_n_free_tickets_to_enter = 500;
+ulong	srv_thread_sleep_delay = 10000;
 ulint	srv_spin_wait_delay	= 5;
 ibool	srv_priority_boost	= TRUE;
......
[Two collapsed diffs are not shown.]
@@ -175,7 +175,7 @@ static uint hash_rec_mask(HASH *hash,HASH_LINK *pos,uint buffmax,
 	/* for compilers which can not handle inline */
 static
-#if !defined(__SUNPRO_C) && !defined(__USLC__) && !defined(__sgi)
+#if !defined(__USLC__) && !defined(__sgi)
 inline
 #endif
 unsigned int rec_hashnr(HASH *hash,const byte *record)
......
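Dropping `__SUNPRO_C` from the guard means Sun's compiler now gets the `inline` keyword on rec_hashnr() like most other compilers; only USL C and the SGI compiler keep the plain `static` fallback. The same guard pattern in a self-contained form (`toy_hash` is an invented example, not hash.c's algorithm):

```cpp
#include <cstdio>

// Emit the "inline" keyword only for compilers expected to accept it here;
// after the change only USL C and the SGI compiler take the plain path.
static
#if !defined(__USLC__) && !defined(__sgi)
inline
#endif
unsigned int toy_hash(const unsigned char* key, unsigned int len)
{
    unsigned int h = 0;
    while (len--) {
        h = h * 31 + *key++;   // simple multiplicative hash for illustration
    }
    return h;
}

int main()
{
    const unsigned char key[] = "abc";
    std::printf("%u\n", toy_hash(key, 3));
    return 0;
}
```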
@@ -87,7 +87,7 @@ static void my_aiowait(my_aio_result *result);
 void setup_io_cache(IO_CACHE* info)
 {
   /* Ensure that my_b_tell() and my_b_bytes_in_cache works */
-  if (info->type == WRITE_CACHE || info->type == APPEND_CACHE)
+  if (info->type == WRITE_CACHE)
   {
     info->current_pos= &info->write_pos;
     info->current_end= &info->write_end;
@@ -247,7 +247,7 @@ int init_io_cache(IO_CACHE *info, File file, uint cachesize,
   }
 #endif
-  if (type == WRITE_CACHE || type == APPEND_CACHE)
+  if (type == WRITE_CACHE)
     info->write_end=
       info->buffer+info->buffer_length- (seek_offset & (IO_SIZE-1));
   else
@@ -318,7 +318,6 @@ my_bool reinit_io_cache(IO_CACHE *info, enum cache_type type,
   /* One can't do reinit with the following types */
   DBUG_ASSERT(type != READ_NET && info->type != READ_NET &&
 	      type != WRITE_NET && info->type != WRITE_NET &&
-	      type != APPEND_CACHE && info->type != APPEND_CACHE &&
 	      type != SEQ_READ_APPEND && info->type != SEQ_READ_APPEND);

   /* If the whole file is in memory, avoid flushing to disk */
@@ -1124,8 +1123,7 @@ int my_b_flush_io_cache(IO_CACHE *info, int need_append_buffer_lock)
   my_off_t pos_in_file;
   DBUG_ENTER("my_b_flush_io_cache");

-  if (!(append_cache = (info->type == SEQ_READ_APPEND ||
-			info->type == APPEND_CACHE)))
+  if (!(append_cache = (info->type == SEQ_READ_APPEND)))
     need_append_buffer_lock=0;

   if (info->type == WRITE_CACHE || append_cache)
@@ -1172,13 +1170,7 @@ int my_b_flush_io_cache(IO_CACHE *info, int need_append_buffer_lock)
   else
   {
     info->end_of_file+=(info->write_pos-info->append_read_pos);
-    /*
-      We only need to worry that info->end_of_file is really accurate
-      for SEQ_READ_APPEND. For APPEND_CACHE, it is possible that the
-      file is non-seekable, like a FIFO.
-    */
-    DBUG_ASSERT(info->type != SEQ_READ_APPEND ||
-		info->end_of_file == my_tell(info->file,MYF(0)));
+    DBUG_ASSERT(info->end_of_file == my_tell(info->file,MYF(0)));
   }

   info->append_read_pos=info->write_pos=info->write_buffer;
......
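With APPEND_CACHE gone, WRITE_CACHE is the only purely write-side type left, so setup_io_cache() can key its position bookkeeping on that single type, and only SEQ_READ_APPEND still needs the append-buffer lock in my_b_flush_io_cache(). A toy standalone sketch of that dispatch, not the real IO_CACHE API (`ToyCache`, `setup_toy_cache` and `needs_append_buffer_lock` are invented):

```cpp
#include <cassert>
#include <cstddef>

// Write-side caches report their position through the write pointer,
// everything else through the read pointer.
enum cache_type { READ_CACHE, WRITE_CACHE, SEQ_READ_APPEND,
                  READ_FIFO, READ_NET, WRITE_NET };

struct ToyCache {
    cache_type type;
    std::size_t read_pos;
    std::size_t write_pos;
    std::size_t* current_pos;
};

static void setup_toy_cache(ToyCache& c)
{
    // Previously: c.type == WRITE_CACHE || c.type == APPEND_CACHE
    c.current_pos = (c.type == WRITE_CACHE) ? &c.write_pos : &c.read_pos;
}

// Only SEQ_READ_APPEND still needs the append-buffer lock when flushing.
static bool needs_append_buffer_lock(cache_type type)
{
    return type == SEQ_READ_APPEND;
}

int main()
{
    ToyCache log = { WRITE_CACHE, 0, 0, nullptr };
    setup_toy_cache(log);
    log.write_pos = 128;
    assert(*log.current_pos == 128);   // a my_b_tell()-style query sees the write side

    ToyCache reader = { READ_CACHE, 0, 0, nullptr };
    setup_toy_cache(reader);
    reader.read_pos = 7;
    assert(*reader.current_pos == 7);

    assert(needs_append_buffer_lock(SEQ_READ_APPEND));
    assert(!needs_append_buffer_lock(WRITE_CACHE));
    return 0;
}
```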
@@ -356,6 +356,8 @@ public:
   void clear();
   bool add();
   double val_real();
+  // In SPs we might force the "wrong" type with select into a declare variable
+  longlong val_int() { return (longlong)val_real(); }
   my_decimal *val_decimal(my_decimal *);
   String *val_str(String *str);
   void reset_field();
......
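The new val_int() gives the aggregate an integer accessor defined in terms of its double value, so requesting an integer result (the comment mentions SELECT ... INTO a declared variable in a stored procedure) works without a separate integer code path. A minimal standalone sketch of the same pattern; `Aggregate` is an invented class, not the class being patched:

```cpp
#include <cstdio>

// The object computes a double, and the integer accessor is defined on top
// of it so callers that insist on an integer still get a sensible value.
struct Aggregate {
    double sum;
    long long count;

    Aggregate() : sum(0.0), count(0) {}
    void add(double v) { sum += v; ++count; }
    double val_real() const { return count ? sum / count : 0.0; }
    long long val_int() const { return (long long) val_real(); }  // same cast as the patch
};

int main()
{
    Aggregate avg;
    avg.add(1.0);
    avg.add(2.0);
    avg.add(4.0);
    std::printf("real=%f int=%lld\n", avg.val_real(), avg.val_int());  // 2.333... and 2
    return 0;
}
```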
@@ -293,13 +293,13 @@ public:
   {
     char buf[FN_REFLEN];
     return open(generate_name(log_name, ".log", 0, buf),
-                LOG_NORMAL, 0, APPEND_CACHE, 0, 0, 0);
+                LOG_NORMAL, 0, WRITE_CACHE, 0, 0, 0);
   }
   bool open_slow_log(const char *log_name)
   {
     char buf[FN_REFLEN];
     return open(generate_name(log_name, "-slow.log", 0, buf),
-                LOG_NORMAL, 0, APPEND_CACHE, 0, 0, 0);
+                LOG_NORMAL, 0, WRITE_CACHE, 0, 0, 0);
   }
   bool open_index_file(const char *index_file_name_arg,
                        const char *log_name);
......