Commit a7e12956 authored by unknown

After review fixes

parent a6feabdb
@@ -4212,7 +4212,6 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
m_table_info(NULL),
m_table_flags(HA_REC_NOT_IN_SEQ |
HA_NULL_IN_KEY |
HA_CAN_SCAN_UPDATED_INDEX |
HA_AUTO_PART_KEY |
HA_NO_PREFIX_CHAR_KEYS |
HA_NEED_READ_RANGE_BUFFER |
......
@@ -87,7 +87,6 @@
#define HA_NO_VARCHAR (1 << 27)
#define HA_CAN_BIT_FIELD (1 << 28) /* supports bit fields */
#define HA_NEED_READ_RANGE_BUFFER (1 << 29) /* for read_multi_range */
#define HA_CAN_SCAN_UPDATED_INDEX (1 << 31)
/* bits in index_flags(index_number) for what you can do with index */
@@ -568,20 +567,80 @@ class handler :public Sql_alloc
{ return HA_ERR_WRONG_COMMAND; }
virtual int delete_row(const byte * buf)
{ return HA_ERR_WRONG_COMMAND; }
virtual bool start_bulk_update() { return FALSE; }
virtual bool start_bulk_delete() { return FALSE; }
/*
  SYNOPSIS
    start_bulk_update()

  RETURN
    0   Bulk update used by handler
    1   Bulk update not used, normal operation used
*/
virtual bool start_bulk_update() { return 1; }
/*
  SYNOPSIS
    start_bulk_delete()

  RETURN
    0   Bulk delete used by handler
    1   Bulk delete not used, normal operation used
*/
virtual bool start_bulk_delete() { return 1; }
/*
  This method is similar to update_row, but the handler doesn't need to
  execute the update at this point in time. The handler can be certain that
  another call to bulk_update_row OR a call to exec_bulk_update will occur
  before the set of updates in this query is concluded.

  SYNOPSIS
    bulk_update_row()
      old_data       Old record
      new_data       New record
      dup_key_found  Number of duplicate keys found

  RETURN
    0   Success
    >0  Error code
*/
virtual int bulk_update_row(const byte *old_data, byte *new_data,
uint *dup_key_found)
{
DBUG_ASSERT(FALSE);
return HA_ERR_WRONG_COMMAND;
}
/*
  After this call all outstanding updates must be performed. The number of
  duplicate key errors is reported in the dup_key_found parameter. It is
  allowed to continue the batched update after this call; the handler has
  to wait until end_bulk_update before changing state.

  SYNOPSIS
    exec_bulk_update()
      dup_key_found  Number of duplicate keys found

  RETURN
    0   Success
    >0  Error code
*/
virtual int exec_bulk_update(uint *dup_key_found)
{
DBUG_ASSERT(FALSE);
return HA_ERR_WRONG_COMMAND;
}
/*
  Perform any needed clean-up; no outstanding updates remain at this point.

  SYNOPSIS
    end_bulk_update()

  RETURN
    Nothing
*/
virtual void end_bulk_update() { return; }
/*
  Execute all outstanding deletes and close down the bulk delete.

  SYNOPSIS
    end_bulk_delete()

  RETURN
    0   Success
    >0  Error code
*/
virtual int end_bulk_delete()
{
DBUG_ASSERT(FALSE);
......
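The handler.h hunk above introduces an optional batching protocol: a storage engine that returns 0 from start_bulk_update()/start_bulk_delete() promises to buffer the following per-row calls and to flush them in exec_bulk_update()/end_bulk_delete(). Below is a minimal, standalone sketch of that protocol; BulkCapableHandler, BufferingHandler and their members are illustrative stand-ins, not the real handler class touched by this patch.

// Self-contained sketch of the batching protocol added above.
// "BufferingHandler" and its members are hypothetical; only the method
// names and return conventions mirror the patched handler interface.
#include <cstdio>
#include <vector>
#include <utility>

typedef unsigned int uint;

struct BulkCapableHandler
{
  // 0 = handler batches updates, 1 = fall back to row-by-row update_row()
  virtual bool start_bulk_update() { return 1; }
  virtual int  bulk_update_row(const char *old_data, const char *new_data,
                               uint *dup_key_found) { return -1; }
  virtual int  exec_bulk_update(uint *dup_key_found) { return -1; }
  virtual void end_bulk_update() {}
  virtual ~BulkCapableHandler() {}
};

// A toy engine that buffers row pairs and "executes" them on exec_bulk_update.
struct BufferingHandler : public BulkCapableHandler
{
  std::vector<std::pair<const char*, const char*> > pending;

  bool start_bulk_update() { return 0; }          // 0: we will batch
  int bulk_update_row(const char *old_data, const char *new_data,
                      uint *dup_key_found)
  {
    *dup_key_found= 0;                            // no duplicates detected yet
    pending.push_back(std::make_pair(old_data, new_data));
    return 0;
  }
  int exec_bulk_update(uint *dup_key_found)
  {
    *dup_key_found= 0;                            // would count duplicate keys here
    printf("flushing %u buffered updates\n", (uint) pending.size());
    return 0;                                     // 0 success, >0 error code
  }
  void end_bulk_update() { pending.clear(); }     // clean-up, nothing outstanding
};

int main()
{
  BufferingHandler h;
  uint dup_key_found= 0;
  bool will_batch= !h.start_bulk_update();        // same inversion as the SQL layer
  if (will_batch)
  {
    h.bulk_update_row("old1", "new1", &dup_key_found);
    h.bulk_update_row("old2", "new2", &dup_key_found);
    h.exec_bulk_update(&dup_key_found);
    h.end_bulk_update();
  }
  return 0;
}

Note the inversion at the call site: will_batch is true exactly when start_bulk_update() returns 0, which matches the changes in sql_delete.cc and sql_update.cc below.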
@@ -172,7 +172,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
deleted=0L;
init_ftfuncs(thd, select_lex, 1);
thd->proc_info="updating";
will_batch= table->file->start_bulk_delete();
will_batch= !table->file->start_bulk_delete();
while (!(error=info.read_record(&info)) && !thd->killed &&
!thd->net.report_error)
{
@@ -184,7 +184,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
table->triggers->process_triggers(thd, TRG_EVENT_DELETE,
TRG_ACTION_BEFORE);
if (!(error=table->file->delete_row(table->record[0])))
if (!(error= table->file->delete_row(table->record[0])))
{
deleted++;
if (!--limit && using_limit)
@@ -223,7 +223,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
table->file->print_error(loc_error,MYF(0));
error=1;
}
thd->proc_info="end";
thd->proc_info= "end";
end_read_record(&info);
free_io_cache(table); // Will not do any harm
if (options & OPTION_QUICK)
@@ -623,17 +623,17 @@ int multi_delete::do_deletes(bool from_send_error)
been deleted by foreign key handling
*/
info.ignore_not_found_rows= 1;
will_batch= table->file->start_bulk_delete();
will_batch= !table->file->start_bulk_delete();
while (!(local_error=info.read_record(&info)) && !thd->killed)
{
if ((local_error=table->file->delete_row(table->record[0])))
if ((local_error= table->file->delete_row(table->record[0])))
{
table->file->print_error(local_error,MYF(0));
break;
}
deleted++;
}
if (will_batch && (error=table->file->end_bulk_delete()))
if (will_batch && (error= table->file->end_bulk_delete()))
{
if (!local_error)
{
......
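Both delete paths now negate the return value: start_bulk_delete() returning 0 means the engine batches deletes, so will_batch becomes true, and end_bulk_delete() is the point where the queued deletes must actually be executed. The sketch below reproduces only that call shape; FakeDeleteHandler, its buffered counter and the three-row loop are made-up stand-ins for table->file and the read_record loop.

// Caller-side pattern of the delete path's batching contract, using a
// hypothetical stand-in for table->file. Error handling is reduced to
// the parts relevant to batching.
#include <cstdio>

struct FakeDeleteHandler
{
  int buffered;
  FakeDeleteHandler() : buffered(0) {}
  bool start_bulk_delete() { return 0; }   // 0: deletes will be batched
  int delete_row(const char*) { buffered++; return 0; }  // queued, not executed
  int end_bulk_delete()                    // flush everything queued so far
  {
    printf("executing %d batched deletes\n", buffered);
    buffered= 0;
    return 0;                              // 0 success, >0 error code
  }
};

int main()
{
  FakeDeleteHandler file;
  char record[1]= {0};
  // Same shape as the loop in mysql_delete(): note the negated return value.
  bool will_batch= !file.start_bulk_delete();
  for (int rows_left= 3; rows_left; rows_left--)
  {
    if (file.delete_row(record))
      break;                               // print_error()/abort in the real code
  }
  int error= 0;
  if (will_batch && (error= file.end_bulk_delete()))
    printf("end_bulk_delete failed: %d\n", error);
  return error;
}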
@@ -262,8 +262,7 @@ int mysql_update(THD *thd,
else
used_key_is_modified=0;
if ((used_key_is_modified &&
!(table->file->table_flags() & HA_CAN_SCAN_UPDATED_INDEX)) || order)
if (used_key_is_modified || order)
{
/*
We can't update table directly; We must first search after all
@@ -393,7 +392,7 @@ int mysql_update(THD *thd,
(thd->variables.sql_mode &
(MODE_STRICT_TRANS_TABLES |
MODE_STRICT_ALL_TABLES)));
will_batch= table->file->start_bulk_update();
will_batch= !table->file->start_bulk_update();
while (!(error=info.read_record(&info)) && !thd->killed)
{
if (!(select && select->skip_record()))
@@ -450,17 +449,17 @@ int mysql_update(THD *thd,
call then it should be included in the count of dup_key_found
and error should be set to 0 (only if these errors are ignored).
*/
error=table->file->bulk_update_row(table->record[0],
table->record[1],
&dup_key_found);
error= table->file->bulk_update_row(table->record[0],
table->record[1],
&dup_key_found);
limit+= dup_key_found;
updated-=dup_key_found;
updated-= dup_key_found;
}
else
{
/* Non-batched update */
error=table->file->update_row((byte*) table->record[1],
(byte*) table->record[0]);
error= table->file->update_row((byte*) table->record[1],
(byte*) table->record[0]);
}
if (!error)
{
@@ -501,7 +500,7 @@ int mysql_update(THD *thd,
are ignored. This is a requirement on batching handlers.
*/
table->file->print_error(error,MYF(0));
error=1;
error= 1;
break;
}
/*
@@ -509,8 +508,8 @@ int mysql_update(THD *thd,
were duplicate keys found. In both cases we need to correct
the counters and continue the loop.
*/
limit=dup_key_found; //limit is 0 when we get here so need to +
updated-=dup_key_found;
limit= dup_key_found; //limit is 0 when we get here so need to +
updated-= dup_key_found;
}
else
{
@@ -523,7 +522,7 @@ int mysql_update(THD *thd,
table->file->unlock_row();
thd->row_count++;
}
dup_key_found=0;
dup_key_found= 0;
if (thd->killed && !error)
error= 1; // Aborted
else if (will_batch &&
@@ -539,16 +538,16 @@ int mysql_update(THD *thd,
{
thd->fatal_error();
table->file->print_error(loc_error,MYF(0));
error=1;
error= 1;
}
else
updated-=dup_key_found;
updated-= dup_key_found;
if (will_batch)
table->file->end_bulk_update();
end_read_record(&info);
free_io_cache(table); // If ORDER BY
delete select;
thd->proc_info="end";
thd->proc_info= "end";
VOID(table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY));
/*
......
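In the batched update path, bulk_update_row() and the flush after the loop report ignored duplicate-key rows through dup_key_found, and mysql_update() compensates with limit+= dup_key_found and updated-= dup_key_found so those rows neither consume the LIMIT budget nor stay in the updated count. The snippet below is a simplified numeric illustration of that bookkeeping, not the exact control flow of mysql_update(); the concrete numbers are invented.

// Worked example of the dup_key_found bookkeeping, under the assumption
// (stated in the patched comments) that ignored duplicate-key rows are
// reported through dup_key_found rather than as errors.
#include <cstdio>

typedef unsigned int uint;
typedef unsigned long ha_rows;

int main()
{
  ha_rows limit= 10;         // remaining LIMIT budget before this batch
  ha_rows updated= 0;
  uint dup_key_found= 0;

  // Suppose a flushed batch optimistically counted 4 rows as updated,
  // but the engine reports that 1 of them hit an ignored duplicate key.
  updated+= 4;
  limit-= 4;
  dup_key_found= 1;          // reported by bulk_update_row()/exec_bulk_update()

  // Compensation performed by the update loop: the duplicate row neither
  // consumed the LIMIT nor counts as an updated row.
  limit+= dup_key_found;
  updated-= dup_key_found;

  printf("updated=%lu, limit left=%lu\n", updated, limit);  // 3 and 7
  return 0;
}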