Commit 8b865368 authored by serg@infomag.ape.relarn.ru

Merge

parents bf617db8 7d0072eb
@@ -7,7 +7,7 @@
 @c @synindex tp fn cp
 @synindex cp fn
 @iftex
-@c Well this is normal in Europe. Maybe this shold go into the include.texi?
+@c Well this is normal in Europe. Maybe this should go into the include.texi?
 @afourpaper
 @end iftex
 @c Get version and other info
@@ -47,10 +47,10 @@ This is a manual about @strong{MySQL} internals.
 @end menu
 
 @node caching
-@chapter How do MySQL handle caching
+@chapter How MySQL handles caching
 
 MySQL has the following caches:
-(Note that the some of the filename has wrong spelling of cache :)
+(Note that the some of the filename have a wrong spelling of cache. :)
 
 @itemize @bullet
 @item Key cache
@@ -76,17 +76,17 @@ cached for each user/database combination. sql/sql_acl.cc
 @item Heap table cache
 Many use of GROUP BY or DISTINCT caches all found
-rows in a HEAP table (this is a very quick, in memory table with hash index)
+rows in a HEAP table (this is a very quick in-memory table with hash index)
 
 @item Join row cache.
 For every full join in a SELECT statement (a full join here means there
-was no keys that one could use to find the next table in a list), the
+were no keys that one could use to find the next table in a list), the
 found rows are cached in a join cache. One SELECT query can use many
 join caches in the worst case.
 @end itemize
 
 @node flush tables
-@chapter How do MySQL handle flush tables
+@chapter How MySQL handles flush tables
 
 @itemize @bullet
 @item
@@ -122,23 +122,23 @@ same tables.
 @end itemize
 
 @node Filesort
-@chapter How do MySQL do sorting (filesort)
+@chapter How MySQL does sorting (filesort)
 
 - Read all rows according to key or by table-scanning.
 - Store the sort-key in a buffer (sort_buffer).
 - When the buffer gets full, run a qsort on it and store the result
   in a temporary file. Save a pointer to the sorted block.
-- Repeate the above until all rows has been read.
+- Repeat the above until all rows have been read.
 - Repeat the following until there is less than MERGEBUFF2 (15) blocks left.
 - Do a multi-merge of up to MERGEBUFF (7) regions to one block in
   another temporary file. Repeat until all blocks from the first file
-  is in the second file.
+  are in the second file.
 - On the last multi-merge, only the pointer to the row (last part of
   the sort-key) is written to a result file.
-- Now the code in sql/records.cc will be used to read through the
-  in sorted order by using the row pointersin the result file.
+- Now the code in sql/records.cc will be used to read through them
+  in sorted order by using the row pointers in the result file.
   To optimize this, we read in a big block of row pointers, sort these
   and then we read the rows in the sorted order into a row buffer
   (record_buffer) .
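The steps above describe a classic two-phase external merge sort. As a rough standalone illustration (not the actual sql/filesort.cc code), here is a minimal C sketch of the same pattern: sort fixed-size keys in a small buffer, spill each sorted run to a temporary file, then multi-merge the runs. The tiny buffer size and plain long keys are assumptions chosen for brevity.

    #include <stdio.h>
    #include <stdlib.h>

    #define SORT_BUFFER_KEYS 4          /* tiny buffer to force spilling */
    #define MAX_RUNS         16

    static int cmp_key(const void *a, const void *b)
    {
      long x= *(const long*) a, y= *(const long*) b;
      return (x > y) - (x < y);
    }

    /* Phase 1: qsort the full buffer and write the run to a temp file */
    static FILE *write_run(long *buf, size_t n)
    {
      FILE *f= tmpfile();
      if (!f)
        exit(1);
      qsort(buf, n, sizeof(long), cmp_key);
      fwrite(buf, sizeof(long), n, f);
      rewind(f);                        /* "pointer to the sorted block" */
      return f;
    }

    int main(void)
    {
      long input[]= { 42, 7, 99, 1, 23, 8, 64, 5, 77, 3 };
      size_t n_input= sizeof(input) / sizeof(input[0]);
      FILE *runs[MAX_RUNS];
      size_t n_runs= 0, i;
      long buf[SORT_BUFFER_KEYS];
      size_t used= 0;

      /* Read "rows", spill a sorted run whenever the buffer gets full */
      for (i= 0; i < n_input; i++)
      {
        buf[used++]= input[i];
        if (used == SORT_BUFFER_KEYS)
        {
          runs[n_runs++]= write_run(buf, used);
          used= 0;
        }
      }
      if (used)                         /* final partial run */
        runs[n_runs++]= write_run(buf, used);

      /* Phase 2: multi-merge all runs. The real filesort merges at most
         MERGEBUFF (7) runs at a time and repeats until fewer than
         MERGEBUFF2 (15) blocks remain; one pass is enough here. */
      {
        long head[MAX_RUNS];
        int alive[MAX_RUNS];
        for (i= 0; i < n_runs; i++)
          alive[i]= (fread(&head[i], sizeof(long), 1, runs[i]) == 1);
        for (;;)
        {
          int best= -1;
          for (i= 0; i < n_runs; i++)
            if (alive[i] && (best < 0 || head[i] < head[best]))
              best= (int) i;
          if (best < 0)                 /* all runs exhausted */
            break;
          printf("%ld\n", head[best]);
          alive[best]= (fread(&head[best], sizeof(long), 1,
                              runs[best]) == 1);
        }
      }
      for (i= 0; i < n_runs; i++)
        fclose(runs[i]);
      return 0;
    }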
@@ -152,5 +152,3 @@ same tables.
 @contents
 
 @bye
-
-Do text here do something ??
@@ -41,3 +41,6 @@ DEFS = -DUNDEF_THREADS_HACK
 
 thread_test.o: thread_test.c
 	$(COMPILE) -c @MT_INCLUDES@ $(INCLUDES) $<
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
@@ -211,6 +211,7 @@ AC_MSG_RESULT($mysql_cv_sys_os)
 case "$target_os" in
   sco3.2v5*)
     CFLAGS="$CFLAGS -DSCO"
+    CXXFLAGS="$CXXFLAGS -DSCO"
     LD='$(CC) $(CFLAGS)'
     case "$CFLAGS" in
       *-belf*)
@@ -51,3 +51,6 @@ my_global.h: global.h
 # generated by configure from the .h.in files
 dist-hook:
 	rm -f $(distdir)/mysql_version.h $(distdir)/my_config.h
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
@@ -147,3 +147,6 @@ do-lib-dist:
 		echo '	$$(AR) r $$@ $$?' >>$$dir/Makefile; \
 	gtar cvzf $$dir.tar.gz $$dir; \
 	cd $$dir; gmake
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
@@ -1600,7 +1600,7 @@ int mi_repair_by_sort(MI_CHECK *param, register MI_INFO *info,
   char llbuff[22];
   SORT_INFO *sort_info= &param->sort_info;
   ulonglong key_map=share->state.key_map;
-  DBUG_ENTER("rep_by_sort");
+  DBUG_ENTER("mi_repair_by_sort");
 
   start_records=info->state->records;
   got_error=1;
@@ -1630,9 +1630,6 @@ int mi_repair_by_sort(MI_CHECK *param, register MI_INFO *info,
   info->opt_flag|=WRITE_CACHE_USED;
   info->rec_cache.file=info->dfile;	/* for sort_delete_record */
-
-  /* Flush key cache for this file if we are calling this outside myisamchk */
-  flush_key_blocks(share->kfile, FLUSH_IGNORE_CHANGED);
 
   if (!(sort_info->record=(byte*) my_malloc((uint) share->base.pack_reclength,
 					    MYF(0))))
   {
@@ -1669,15 +1666,24 @@ int mi_repair_by_sort(MI_CHECK *param, register MI_INFO *info,
   info->update= (short) (HA_STATE_CHANGED | HA_STATE_ROW_CHANGED);
   if (!(param->testflag & T_CREATE_MISSING_KEYS))
   {
+    /*
+      Flush key cache for this file if we are calling this outside
+      myisamchk
+    */
+    flush_key_blocks(share->kfile, FLUSH_IGNORE_CHANGED);
     /* Clear the pointers to the given rows */
     for (i=0 ; i < share->base.keys ; i++)
       share->state.key_root[i]= HA_OFFSET_ERROR;
     for (i=0 ; i < share->state.header.max_block_size ; i++)
       share->state.key_del[i]= HA_OFFSET_ERROR;
     info->state->key_file_length=share->base.keystart;
   }
   else
+  {
+    if (flush_key_blocks(share->kfile, FLUSH_FORCE_WRITE))
+      goto err;
     key_map= ~key_map;			/* Create the missing keys */
+    info->state->key_file_length=share->base.keystart;
+  }
 
   sort_info->info=info;
   sort_info->param = param;
@@ -1758,6 +1764,7 @@ int mi_repair_by_sort(MI_CHECK *param, register MI_INFO *info,
   if (param->testflag & T_STATISTICS)
     update_key_parts(sort_info->keyinfo, rec_per_key_part, sort_info->unique,
 		     (ulonglong) info->state->records);
+  share->state.key_map|=(ulonglong) 1 << sort_info->key;
 
   if (sort_info->fix_datafile)
   {
@@ -2933,7 +2940,8 @@ void mi_dectivate_non_unique_index(MI_INFO *info, ha_rows rows)
   MI_KEYDEF *key=share->keyinfo;
   for (i=0 ; i < share->base.keys ; i++,key++)
   {
-    if (!(key->flag & HA_NOSAME) && ! mi_too_big_key_for_sort(key,rows))
+    if (!(key->flag & HA_NOSAME) && ! mi_too_big_key_for_sort(key,rows) &&
+	info->s->base.auto_key != i+1)
     {
       share->state.key_map&= ~ ((ulonglong) 1 << i);
       info->update|= HA_STATE_CHANGED;
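The mi_check.c hunks above all manipulate the same 64-bit key_map, one bit per index: mi_repair_by_sort re-enables the key it just rebuilt, mi_dectivate_non_unique_index switches keys off before a bulk load, and the `key_map= ~key_map` line inverts the map to mean "create the missing keys". A minimal sketch of those three bit operations, where everything except the shift idiom itself is illustrative rather than MyISAM's own code:

    #include <stdio.h>

    typedef unsigned long long ulonglong;

    int main(void)
    {
      ulonglong key_map= 0x07;          /* assume indexes 0..2 enabled */
      unsigned int i= 1;                /* index to manipulate */

      key_map&= ~((ulonglong) 1 << i);  /* disable index i (deactivate) */
      key_map|=  (ulonglong) 1 << i;    /* re-enable it after the repair */
      key_map= ~key_map;                /* invert: "the missing keys" */

      printf("key_map=0x%llx\n", key_map);
      return 0;
    }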
@@ -77,3 +77,6 @@ SUFFIXES = .sh
 	$< > $@-t
 	@CHMOD@ +x $@-t
 	@MV@ $@-t $@
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
@@ -124,3 +124,6 @@ sql_lex.o: lex_hash.h
 
 #distclean:
 #	rm -f lex_hash.h
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
@@ -169,7 +169,7 @@ static int create_table_from_dump(THD* thd, NET* net, const char* db,
   tables.name = tables.real_name = (char*)table_name;
   tables.lock_type = TL_WRITE;
   thd->proc_info = "Opening master dump table";
-  if(open_tables(thd, &tables) || !tables.table)
+  if(!open_ltable(thd, &tables, TL_WRITE))
   {
     // open tables will send the error
     sql_print_error("create_table_from_dump: could not open created table");
@@ -1154,7 +1154,6 @@ bool delayed_insert::handle_inserts(void)
 int
 select_insert::prepare(List<Item> &values)
 {
-  TABLE *form=table;
   DBUG_ENTER("select_insert::prepare");
 
   save_time_stamp=table->time_stamp;
@@ -1163,15 +1162,16 @@ select_insert::prepare(List<Item> &values)
   if (fields->elements)
   {
-    restore_record(form,2);			// Get empty record
+    restore_record(table,2);			// Get empty record
   }
   else
-    form->record[0][0]=form->record[2][0];	// Fix delete marker
-  form->next_number_field=form->found_next_number_field;
+    table->record[0][0]=table->record[2][0];	// Fix delete marker
+  table->next_number_field=table->found_next_number_field;
 
   thd->count_cuted_fields=1;			/* calc cuted fields */
   thd->cuted_fields=0;
   if (info.handle_duplicates != DUP_REPLACE)
-    form->file->extra(HA_EXTRA_WRITE_CACHE);
+    table->file->extra(HA_EXTRA_WRITE_CACHE);
+  table->file->deactivate_non_unique_index((ha_rows) 0);
   DBUG_RETURN(0);
 }
@@ -1213,14 +1213,16 @@ bool select_insert::send_data(List<Item> &values)
 void select_insert::send_error(uint errcode,const char *err)
 {
   ::send_error(&thd->net,errcode,err);
-  VOID(table->file->extra(HA_EXTRA_NO_CACHE));
+  table->file->extra(HA_EXTRA_NO_CACHE);
+  table->file->activate_all_index(thd);
 }
 
 bool select_insert::send_eof()
 {
   int error;
-  if ((error=table->file->extra(HA_EXTRA_NO_CACHE)))
+  if ((error=table->file->extra(HA_EXTRA_NO_CACHE)) ||
+      (error=table->file->activate_all_index(thd)))
   {
     table->file->print_error(error,MYF(0));
     ::send_error(&thd->net);
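Taken together, the sql_insert.cc hunks implement a common bulk-load pattern: enable a write cache and switch off non-unique indexes in prepare(), then flush the cache and rebuild the indexes at end-of-feed, propagating the first error. A minimal sketch of that control flow with hypothetical stand-in functions (none of these names are MySQL's handler API):

    #include <stdio.h>

    static int enable_write_cache(void)        { return 0; }
    static int deactivate_nonunique_keys(void) { return 0; }
    static int flush_write_cache(void)         { return 0; }
    static int rebuild_nonunique_keys(void)    { return 0; }

    static int bulk_insert_end(void)
    {
      int error;
      /* Same shape as the new send_eof(): the || chain stops at the
         first failing step and keeps its nonzero error code */
      if ((error= flush_write_cache()) ||
          (error= rebuild_nonunique_keys()))
      {
        fprintf(stderr, "bulk insert failed: %d\n", error);
        return error;
      }
      return 0;
    }

    int main(void)
    {
      if (enable_write_cache() || deactivate_nonunique_keys())
        return 1;
      /* ... insert the selected rows here ... */
      return bulk_insert_end();
    }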