Commit e3c8798e authored by Michael Widenius's avatar Michael Widenius

When scanning pages, stop when 'data_file_length' is reached.

This fixes a bug that caused an ER_FILE_TOO_SHORT error when scanning Aria tables.

storage/maria/ma_blockrec.c:
  Changed code so that share->state.state.data_file_length is updated after the page cache write is done.
  When scanning pages, stop when 'data_file_length' is reached.
  (We can't trust the bitmap, as it may contain reserved pages that are not yet written.)
  This fixes a bug that caused an ER_FILE_TOO_SHORT error when scanning Aria tables.
storage/maria/maria_def.h:
  Updated struct to support stopping the scan at end of file
parent 3bac9cf7
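
The fix rests on one piece of arithmetic: a page number taken from the bitmap is only safe to read if it lies below data_file_length / block_size, because the bitmap may reserve pages that have not been written yet. The standalone sketch below illustrates that check; the helper name, the types and the 8 KiB block size are illustrative assumptions, not Aria code, and only the comparison mirrors the patch.

#include <assert.h>
#include <stdint.h>

/*
  Illustrative helper, not Aria code: decide whether a page number taken
  from the bitmap may actually be read from the data file.  The bitmap can
  contain reserved pages that were never written, so any page at or beyond
  data_file_length / block_size must be treated as end-of-file instead of
  being read (reading it is what produced ER_FILE_TOO_SHORT).
*/
static int page_is_scannable(uint64_t page, uint64_t data_file_length,
                             uint32_t block_size)
{
  uint64_t max_page= data_file_length / block_size; /* first page past EOF */
  return page < max_page;
}

int main(void)
{
  /* assume 8 KiB blocks and a data file that is currently 3 blocks long */
  assert(page_is_scannable(2, 3 * 8192, 8192));  /* last written page: read */
  assert(!page_is_scannable(3, 3 * 8192, 8192)); /* reserved only: stop scan */
  return 0;
}
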
storage/maria/ma_blockrec.c
@@ -1993,19 +1993,6 @@ static my_bool write_tail(MARIA_HA *info,
/* Keep BLOCKUSED_USE_ORG_BITMAP */
block->used|= BLOCKUSED_USED | BLOCKUSED_TAIL;
/* Increase data file size, if extended */
position= (my_off_t) block->page * block_size;
if (share->state.state.data_file_length <= position)
{
/*
We are modifying a state member before writing the UNDO; this is a WAL
violation. But for data_file_length this is ok, as long as we change
data_file_length after writing any log record (FILE_ID/REDO/UNDO) (see
collect_tables()).
*/
_ma_set_share_data_file_length(share, position + block_size);
}
if (block_is_read)
{
/* Current page link is last element in pinned_pages */
@@ -2021,17 +2008,33 @@ static my_bool write_tail(MARIA_HA *info,
page_link->unlock= PAGECACHE_LOCK_READ_UNLOCK;
res= 0;
}
else if (!(res= pagecache_write(share->pagecache,
&info->dfile, block->page, 0,
row_pos.buff,share->page_type,
PAGECACHE_LOCK_READ,
PAGECACHE_PIN,
PAGECACHE_WRITE_DELAY, &page_link.link,
LSN_IMPOSSIBLE)))
else
{
page_link.unlock= PAGECACHE_LOCK_READ_UNLOCK;
page_link.changed= 1;
push_dynamic(&info->pinned_pages, (void*) &page_link);
if (!(res= pagecache_write(share->pagecache,
&info->dfile, block->page, 0,
row_pos.buff,share->page_type,
PAGECACHE_LOCK_READ,
PAGECACHE_PIN,
PAGECACHE_WRITE_DELAY, &page_link.link,
LSN_IMPOSSIBLE)))
{
page_link.unlock= PAGECACHE_LOCK_READ_UNLOCK;
page_link.changed= 1;
push_dynamic(&info->pinned_pages, (void*) &page_link);
}
/* Increase data file size, if extended */
position= (my_off_t) block->page * block_size;
if (share->state.state.data_file_length <= position)
{
/*
We are modifying a state member before writing the UNDO; this is a WAL
violation. But for data_file_length this is ok, as long as we change
data_file_length after writing any log record (FILE_ID/REDO/UNDO) (see
collect_tables()).
*/
_ma_set_share_data_file_length(share, position + block_size);
}
}
DBUG_RETURN(res);
}
@@ -2068,7 +2071,7 @@ static my_bool write_full_pages(MARIA_HA *info,
uint data_size= FULL_PAGE_SIZE(block_size);
uchar *buff= info->keyread_buff;
uint page_count, sub_blocks;
my_off_t position;
my_off_t position, max_position;
DBUG_ENTER("write_full_pages");
DBUG_PRINT("enter", ("length: %lu page: %lu page_count: %lu",
(ulong) length, (ulong) block->page,
@@ -2080,9 +2083,7 @@ static my_bool write_full_pages(MARIA_HA *info,
page_count= block->page_count;
sub_blocks= block->sub_blocks;
position= (my_off_t) (page + page_count) * block_size;
if (share->state.state.data_file_length < position)
_ma_set_share_data_file_length(share, position);
max_position= (my_off_t) (page + page_count) * block_size;
/* Increase data file size, if extended */
@@ -2105,8 +2106,7 @@ static my_bool write_full_pages(MARIA_HA *info,
(ulong) block->page, (ulong) block->page_count));
position= (page + page_count + 1) * block_size;
if (share->state.state.data_file_length < position)
_ma_set_share_data_file_length(share, position);
set_if_bigger(max_position, position);
}
lsn_store(buff, lsn);
buff[PAGE_TYPE_OFFSET]= (uchar) BLOB_PAGE;
@@ -2134,6 +2134,8 @@ static my_bool write_full_pages(MARIA_HA *info,
page++;
DBUG_ASSERT(block->used & BLOCKUSED_USED);
}
if (share->state.state.data_file_length < max_position)
_ma_set_share_data_file_length(share, max_position);
DBUG_RETURN(0);
}
@@ -3121,11 +3123,6 @@ static my_bool write_block_record(MARIA_HA *info,
}
#endif
/* Increase data file size, if extended */
position= (my_off_t) head_block->page * block_size;
if (share->state.state.data_file_length <= position)
_ma_set_share_data_file_length(share, position + block_size);
if (head_block_is_read)
{
MARIA_PINNED_PAGE *page_link;
@@ -3154,6 +3151,11 @@ static my_bool write_block_record(MARIA_HA *info,
page_link.unlock= PAGECACHE_LOCK_READ_UNLOCK;
page_link.changed= 1;
push_dynamic(&info->pinned_pages, (void*) &page_link);
/* Increase data file size, if extended */
position= (my_off_t) head_block->page * block_size;
if (share->state.state.data_file_length <= position)
_ma_set_share_data_file_length(share, position + block_size);
}
if (share->now_transactional && (tmp_data_used || blob_full_pages_exists))
@@ -5162,6 +5164,7 @@ my_bool _ma_scan_init_block_record(MARIA_HA *info)
info->scan.number_of_rows= 0;
info->scan.bitmap_pos= info->scan.bitmap_end;
info->scan.bitmap_page= (pgcache_page_no_t) 0 - share->bitmap.pages_covered;
info->scan.max_page= share->state.state.data_file_length / share->block_size;
/*
We need to flush what's in memory (bitmap.map) to page cache otherwise, as
we are going to read bitmaps from page cache in table scan (see
@@ -5363,6 +5366,11 @@ int _ma_scan_block_record(MARIA_HA *info, uchar *record,
page= (info->scan.bitmap_page + 1 +
(data - info->scan.bitmap_buff) / 6 * 16 + bit_pos - 1);
info->scan.row_base_page= ma_recordpos(page, 0);
if (page >= info->scan.max_page)
{
DBUG_PRINT("info", ("Found end of file"));
DBUG_RETURN((my_errno= HA_ERR_END_OF_FILE));
}
if (!(pagecache_read(share->pagecache,
&info->dfile,
page, 0, info->scan.page_buff,
storage/maria/maria_def.h
@@ -476,7 +476,7 @@ typedef struct st_maria_block_scan
{
uchar *bitmap_buff, *bitmap_pos, *bitmap_end, *page_buff;
uchar *dir, *dir_end;
pgcache_page_no_t bitmap_page;
pgcache_page_no_t bitmap_page, max_page;
ulonglong bits;
uint number_of_rows, bit_pos;
MARIA_RECORD_POS row_base_page;