Commit b631ea01 authored by unknown

merge


Docs/manual.texi:
  Auto merged
mysql-test/t/rpl000013.test:
  Auto merged
mysql-test/t/rpl000016.test:
  Auto merged
extra/resolveip.c:
  Auto merged
sql/sql_base.cc:
  Auto merged
sql/sql_insert.cc:
  Auto merged
sql/slave.cc:
  Auto merged
sql/sql_select.cc:
  Auto merged
parents 38da1f98 8c335b8a
......@@ -40563,8 +40563,7 @@ of the following flags in very special circumstances:
A @code{MYSQL*} connection handle if the connection was successful,
@code{NULL} if the connection was unsuccessful. For a successful connection,
the return value is the same as the value of the first parameter, unless you
pass @code{NULL} for that parameter.
the return value is the same as the value of the first parameter.
@subsubheading Errors
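
As a quick illustration of the return-value contract documented above, here is a minimal client-side sketch (not part of this commit; the host, user, password and database values are placeholders):

/*
  Minimal sketch of the documented mysql_real_connect() contract: on
  success it returns the handle passed as the first parameter, on
  failure it returns NULL. Connection parameters are placeholders.
*/
#include <mysql.h>
#include <stdio.h>

int main()
{
  MYSQL mysql;
  mysql_init(&mysql);

  MYSQL *conn = mysql_real_connect(&mysql, "localhost", "user", "password",
                                   "test", 0, NULL, 0);
  if (conn == NULL)                         /* unsuccessful connection */
  {
    fprintf(stderr, "connect failed: %s\n", mysql_error(&mysql));
    return 1;
  }
  /* successful connection: conn is the same pointer as &mysql */
  mysql_close(conn);
  return 0;
}
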
......@@ -46274,7 +46273,7 @@ users use this code as the rest of the code and because of this we are
not yet 100% confident in this code.
@menu
* News-3.23.46::
* News-3.23.46:: Changes in release 3.23.46
* News-3.23.45:: Changes in release 3.23.45
* News-3.23.44:: Changes in release 3.23.44
* News-3.23.43:: Changes in release 3.23.43
......@@ -46335,6 +46334,12 @@ One can now kill @code{ANALYZE},@code{REPAIR} and @code{OPTIMIZE TABLE} when
the thread is waiting to get a lock on the table.
@item
Fixed race condition in @code{ANALYZE TABLE}.
@item
Fixed bug when joining with caching (unlikely to happen).
@item
Fixed race condition when using the binary log and @code{INSERT DELAYED}
which could cause the binary log to have rows that were not yet written
to MyISAM tables.
@end itemize
@node News-3.23.45, News-3.23.44, News-3.23.46, News-3.23.x
.TH SAFE_MYSQLD 1 "19 December 2000"
.TH SAFE_MYSQLD 1 "19 December 2000" "safe_mysqld (mysql)" mysql.com
.SH NAME
.BR mysqld_safe
is the recommended way to start a mysqld daemon on Unix. mysqld_safe adds some safety features such as restarting the server when an error occurs and logging run-time information to a log file.
mysqld_safe \- start the mysqld daemon on Unix.
.SH SYNOPSIS
.B mysqld_safe
.RB [ \-\-basedir=\fP\fIpath\fP ]
......@@ -21,18 +20,19 @@ is the recommended way to start a mysqld daemon on Unix. mysqld_safe adds some s
.RB [ \-\-timezone=# ]
.RB [ \-\-user=# ]
.SH DESCRIPTION
mysqld_safe adds some safety features such as restarting the server when an
error occurs and logging run-time information to a log file.
.BR
.TP
.BR \-\-basedir=\fP\fIpath \fP
.TP
.BR \-\-core\-file\-size=#
.BR \-\-core\-file\-size=#
Size of the core file mysqld should be able to create. Passed to ulimit \-c.
.TP
.BR \-\-defaults\-extra\-file=\fP\fIpath \fP
.TP
.BR \-\-defaults\-file=\fP\fIpath \fP
.TP
.BR \-\-open\-files=#
Size of the core file mysqld should be able to create. Passed to ulimit \-c.
.TP
.BR \-\-datadir=\fP\fIpath \fP
.TP
.BR \-\-err\-log=\fP\fIpath \fP
......@@ -76,8 +76,8 @@ which1 (1),
zap (1),
.SH AUTHOR
Ver 1.0, distribution 3.23.29a
Michael (Monty) Widenius (monty@tcx.se),
TCX Datakonsult AB (http://www.tcx.se).
Michael (Monty) Widenius (monty@mysql.com),
MySQL AB (http://www.mysql.com).
This software comes with no warranty.
Manual page by L. (Kill-9) Pedersen
(kill-9@kill\-9.dk), Mercurmedia Data Model Architect /
......
......@@ -21,7 +21,7 @@ table type possible_keys key key_len ref rows Extra
t1 range a,b a 9 NULL 3 where used; Using index
explain select * from t1 where (a is null or a = 7) and b=7;
table type possible_keys key key_len ref rows Extra
t1 range a,b a 9 NULL 2 where used; Using index
t1 ref a,b b 4 const 2 where used
explain select * from t1 where (a is null and b>a) or a is null and b=7 limit 2;
table type possible_keys key key_len ref rows Extra
t1 ref a,b a 5 const 3 where used; Using index
......
......@@ -12,7 +12,7 @@ insert into t2 select * from t1;
connection master1;
create temporary table t1 (n int);
insert into t1 values (4),(5);
insert into t2 select * from t1;
insert into t2 select * from t1 as t10;
save_master_pos;
disconnect master;
connection slave;
......
......@@ -28,7 +28,7 @@ select * from t1;
connection master;
flush logs;
drop table if exists t2;
create table t2(m int not null primary key);
create table t2(m int not null auto_increment primary key);
insert into t2 values (34),(67),(123);
save_master_pos;
flush logs;
......@@ -44,7 +44,8 @@ insert into t2 values(1234);
#same value on the master
connection master;
save_master_pos;
insert into t2 values(1234);
set insert_id=1234;
insert into t2 values(NULL);
connection slave;
sync_with_master;
......
......@@ -69,7 +69,7 @@ for i in extra/comp_err extra/replace extra/perror extra/resolveip \
client/mysqlmanagerc client/mysqlmanager-pwgen tools/mysqlmanager \
client/.libs/mysql client/.libs/mysqlshow client/.libs/mysqladmin \
client/.libs/mysqldump client/.libs/mysqlimport client/.libs/mysqltest \
client/.libs/mysqlcheck \
client/.libs/mysqlcheck client/.libs/mysqlbinlog \
client/.libs/mysqlmanagerc client/.libs/mysqlmanager-pwgen \
tools/.libs/mysqlmanager
do
......
......@@ -1013,7 +1013,12 @@ static int exec_event(THD* thd, NET* net, MASTER_INFO* mi, int event_len)
mi->inc_pos(event_len, ev->log_seq);
flush_master_info(mi);
if(slave_skip_counter)
if(slave_skip_counter && /* protect against common user error of
setting the counter to 1 instead of 2
			  while recovering from a failed
auto-increment insert */
!(type_code == INTVAR_EVENT &&
slave_skip_counter == 1))
--slave_skip_counter;
delete ev;
return 0; // avoid infinite update loops
......
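
The guard added in the slave.cc hunk above is easier to see in isolation. A standalone sketch of the rule, with hypothetical names standing in for the event-loop variables in exec_event():

/*
  Sketch of the skip-counter rule: an INTVAR event (e.g. the insert_id
  value logged before an auto-increment insert) must not consume the
  last skip slot by itself, otherwise a user who set the counter to 1
  instead of 2 would skip only the INTVAR part and still replay the
  failing insert.
*/
enum EventType { INTVAR_EVENT, QUERY_EVENT, OTHER_EVENT };

static bool should_decrement_skip_counter(int slave_skip_counter,
                                          EventType type_code)
{
  if (!slave_skip_counter)
    return false;                        // nothing is being skipped
  // Hold the counter at 1 across the INTVAR event so the query event
  // that follows it is skipped as well.
  if (type_code == INTVAR_EVENT && slave_skip_counter == 1)
    return false;
  return true;
}

With the counter set to 2 both events are skipped as before; with the common mistake of setting it to 1, this rule still lets the INTVAR event and its query be skipped together.
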
......@@ -533,10 +533,10 @@ void close_temporary_tables(THD *thd)
if (query) // we might be out of memory, but this is not fatal
{
// skip temporary tables not created directly by the user
if (table->table_name[0] != '#')
if (table->real_name[0] != '#')
{
end = strxmov(end,table->table_cache_key,".",
table->table_name,",", NullS);
table->real_name,",", NullS);
// here we assume table_cache_key always starts
// with \0 terminated db name
found_user_tables = 1;
......
......@@ -1126,7 +1126,7 @@ bool delayed_insert::handle_inserts(void)
{
int error;
uint max_rows;
bool using_ignore=0;
bool using_ignore=0, using_bin_log=mysql_bin_log.is_open();
delayed_row *row;
DBUG_ENTER("handle_inserts");
......@@ -1151,7 +1151,13 @@ bool delayed_insert::handle_inserts(void)
max_rows= ~0; // Do as much as possible
}
table->file->extra(HA_EXTRA_WRITE_CACHE);
/*
We can't use row caching when using the binary log because if
    we get a crash, then the binary log will contain rows that are not yet
written to disk, which will cause problems in replication.
*/
if (!using_bin_log)
table->file->extra(HA_EXTRA_WRITE_CACHE);
pthread_mutex_lock(&mutex);
while ((row=rows.get()))
{
......@@ -1188,7 +1194,7 @@ bool delayed_insert::handle_inserts(void)
if (row->query && row->log_query)
{
mysql_update_log.write(&thd,row->query, row->query_length);
if (mysql_bin_log.is_open())
if (using_bin_log)
{
thd.query_length = row->query_length;
Query_log_event qinfo(&thd, row->query);
......@@ -1224,7 +1230,8 @@ bool delayed_insert::handle_inserts(void)
/* This should never happen */
sql_print_error(ER(ER_DELAYED_CANT_CHANGE_LOCK),table->real_name);
}
table->file->extra(HA_EXTRA_WRITE_CACHE);
if (!using_bin_log)
table->file->extra(HA_EXTRA_WRITE_CACHE);
pthread_mutex_lock(&mutex);
thd.proc_info="insert";
}
......
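
A compact sketch of the pattern handle_inserts() uses above, with hypothetical stand-in types for the server classes: the binary-log state is read once per batch, and the handler's write cache is only enabled when the binary log is closed, so a crash cannot leave the binary log ahead of the MyISAM data file.

/*
  Stand-in types; in the server these are MYSQL_LOG and the table
  handler, and the cache is enabled via extra(HA_EXTRA_WRITE_CACHE).
*/
struct BinLog  { bool open_flag; bool is_open() const { return open_flag; } };
struct Handler { void enable_write_cache() { /* buffer rows before disk */ } };

void write_delayed_rows(Handler *file, const BinLog &bin_log)
{
  const bool using_bin_log = bin_log.is_open();   // sampled once per batch
  if (!using_bin_log)
    file->enable_write_cache();   // only cache rows when nothing is logged
  // ... write the queued rows; each row's query is written to the binary
  // log only when using_bin_log is true ...
}
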
......@@ -1950,52 +1950,55 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
** Find how much space the previous read not-const tables take in cache
*/
static void calc_used_field_length(THD *thd, JOIN_TAB *join_tab)
{
uint null_fields,blobs,fields,rec_length;
null_fields=blobs=fields=rec_length=0;
Field **f_ptr,*field;
for (f_ptr=join_tab->table->field ; (field= *f_ptr) ; f_ptr++)
{
if (field->query_id == thd->query_id)
{
uint flags=field->flags;
fields++;
rec_length+=field->pack_length();
if (flags & BLOB_FLAG)
blobs++;
if (!(flags & NOT_NULL_FLAG))
null_fields++;
}
}
if (null_fields)
rec_length+=(join_tab->table->null_fields+7)/8;
if (join_tab->table->maybe_null)
rec_length+=sizeof(my_bool);
if (blobs)
{
uint blob_length=(uint) (join_tab->table->file->mean_rec_length-
(join_tab->table->reclength- rec_length));
rec_length+=(uint) max(4,blob_length);
}
join_tab->used_fields=fields;
join_tab->used_fieldlength=rec_length;
join_tab->used_blobs=blobs;
}
static uint
cache_record_length(JOIN *join,uint idx)
{
uint length;
uint length=0;
JOIN_TAB **pos,**end;
THD *thd=join->thd;
length=0;
for (pos=join->best_ref+join->const_tables,end=join->best_ref+idx ;
pos != end ;
pos++)
{
JOIN_TAB *join_tab= *pos;
if (!join_tab->used_fieldlength)
{ /* Not calced yet */
uint null_fields,blobs,fields,rec_length;
null_fields=blobs=fields=rec_length=0;
Field **f_ptr,*field;
for (f_ptr=join_tab->table->field ; (field= *f_ptr) ; f_ptr++)
{
if (field->query_id == thd->query_id)
{
uint flags=field->flags;
fields++;
rec_length+=field->pack_length();
if (flags & BLOB_FLAG)
blobs++;
if (!(flags & NOT_NULL_FLAG))
null_fields++;
}
}
if (null_fields)
rec_length+=(join_tab->table->null_fields+7)/8;
if (join_tab->table->maybe_null)
rec_length+=sizeof(my_bool);
if (blobs)
{
uint blob_length=(uint) (join_tab->table->file->mean_rec_length-
(join_tab->table->reclength- rec_length));
rec_length+=(uint) max(4,blob_length);
}
join_tab->used_fields=fields;
join_tab->used_fieldlength=rec_length;
join_tab->used_blobs=blobs;
}
if (!join_tab->used_fieldlength) /* Not calced yet */
calc_used_field_length(thd, join_tab);
length+=join_tab->used_fieldlength;
}
return length;
......@@ -2319,6 +2322,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
used_tables|=current_map;
if (tab->type == JT_REF && tab->quick &&
tab->ref.key == tab->quick->index &&
tab->ref.key_length < tab->quick->max_used_key_length)
{
/* Range uses longer key; Use this instead of ref on key */
......@@ -5781,15 +5785,19 @@ join_init_cache(THD *thd,JOIN_TAB *tables,uint table_count)
uint length,blobs,size;
CACHE_FIELD *copy,**blob_ptr;
JOIN_CACHE *cache;
JOIN_TAB *join_tab;
DBUG_ENTER("join_init_cache");
cache= &tables[table_count].cache;
cache->fields=blobs=0;
for (i=0 ; i < table_count ; i++)
join_tab=tables;
for (i=0 ; i < table_count ; i++,join_tab++)
{
cache->fields+=tables[i].used_fields;
blobs+=tables[i].used_blobs;
if (!join_tab->used_fieldlength) /* Not calced yet */
calc_used_field_length(thd, join_tab);
cache->fields+=join_tab->used_fields;
blobs+=join_tab->used_blobs;
}
if (!(cache->field=(CACHE_FIELD*)
sql_alloc(sizeof(CACHE_FIELD)*(cache->fields+table_count*2)+(blobs+1)*
......
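
The sql_select.cc changes extract calc_used_field_length() so that join_init_cache() can compute the per-table field statistics lazily instead of assuming cache_record_length() already filled them in, which appears to be the join-with-caching fix mentioned in the change notes above. A simplified, self-contained sketch of that lazy-initialization pattern (struct and names are stand-ins; the blob-length estimate and the per-row null flag from the real code are omitted):

#include <vector>

struct FieldInfo { unsigned pack_length; bool is_blob; bool maybe_null; bool used; };

struct JoinTab
{
  std::vector<FieldInfo> fields;
  unsigned used_fields      = 0;
  unsigned used_fieldlength = 0;   // 0 means "not calculated yet"
  unsigned used_blobs       = 0;
};

// Simplified version of the extracted helper: sum the pack lengths of the
// fields the query actually touches, plus a null bitmap for nullable ones.
static void calc_used_field_length(JoinTab *tab)
{
  unsigned fields = 0, rec_length = 0, blobs = 0, null_fields = 0;
  for (const FieldInfo &f : tab->fields)
  {
    if (!f.used)
      continue;                          // only fields referenced by the query
    fields++;
    rec_length += f.pack_length;
    if (f.is_blob)    blobs++;
    if (f.maybe_null) null_fields++;
  }
  if (null_fields)
    rec_length += (null_fields + 7) / 8; // round null bits up to whole bytes
  tab->used_fields      = fields;
  tab->used_fieldlength = rec_length;
  tab->used_blobs       = blobs;
}

// Both callers share the same lazy check, which is the point of the fix:
// join_init_cache() can no longer read statistics that were never computed.
static unsigned cache_record_length(JoinTab *tabs, unsigned count)
{
  unsigned length = 0;
  for (unsigned i = 0; i < count; i++)
  {
    if (!tabs[i].used_fieldlength)       // not calculated yet
      calc_used_field_length(&tabs[i]);
    length += tabs[i].used_fieldlength;
  }
  return length;
}
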