Commit b76f5cd2 authored by unknown's avatar unknown

true,false -> TRUE, FALSE

Simple fixes/optimization of things discovered during review of new pushed code


include/my_sys.h:
  Ensure that clear_alloc_root() interacts correctly with alloc_root_inited()
mysys/hash.c:
  More comments
  Simple optimization (merge identical code)
mysys/my_bitmap.c:
  Change inline -> static inline
sql/examples/ha_archive.cc:
  Fixed compiler warning
sql/ha_ndbcluster.cc:
  true,false -> TRUE, FALSE
  Change if (false) -> #ifdef NOT_USED
sql/ha_ndbcluster.h:
  true,false -> TRUE, FALSE
sql/handler.cc:
  More comments
  Remove not needed initializations.
  #ifdef not used code
sql/item_cmpfunc.h:
  true,false -> TRUE, FALSE
sql/item_strfunc.cc:
  Move local variables to function beginning
  Remove wrong comments
sql/log_event.h:
  true,false -> TRUE, FALSE
sql/sql_base.cc:
  true,false -> TRUE, FALSE
  More comments
sql/sql_help.cc:
  true,false -> TRUE, FALSE
sql/sql_lex.cc:
  Simple optimization of new code
sql/sql_parse.cc:
  true,false -> TRUE, FALSE
sql/sql_prepare.cc:
  true,false -> TRUE, FALSE
sql/sql_table.cc:
  true,false -> TRUE, FALSE
sql/sql_yacc.yy:
  true,false -> TRUE, FALSE
parent bffccdf4
......@@ -726,7 +726,7 @@ extern void my_free_lock(byte *ptr,myf flags);
#endif
#define alloc_root_inited(A) ((A)->min_malloc != 0)
#define ALLOC_ROOT_MIN_BLOCK_SIZE (MALLOC_OVERHEAD + sizeof(USED_MEM) + 8)
#define clear_alloc_root(A) do { (A)->free= (A)->used= (A)->pre_alloc= 0; } while(0)
#define clear_alloc_root(A) do { (A)->free= (A)->used= (A)->pre_alloc= 0; (A)->min_malloc=0;} while(0)
extern void init_alloc_root(MEM_ROOT *mem_root, uint block_size,
uint pre_alloc_size);
extern gptr alloc_root(MEM_ROOT *mem_root,unsigned int Size);
......
......@@ -72,19 +72,48 @@ _hash_init(HASH *hash,CHARSET_INFO *charset,
}
void hash_free(HASH *hash)
/*
Call hash->free on all elements in hash.
SYNOPSIS
hash_free_elements()
hash hash table
NOTES:
Sets records to 0
*/
static void inline hash_free_elements(HASH *hash)
{
DBUG_ENTER("hash_free");
if (hash->free)
{
uint i,records;
HASH_LINK *data=dynamic_element(&hash->array,0,HASH_LINK*);
for (i=0,records=hash->records ; i < records ; i++)
(*hash->free)(data[i].data);
hash->free=0;
HASH_LINK *end= data + hash->records;
while (data < end)
(*hash->free)((data++)->data);
}
delete_dynamic(&hash->array);
hash->records=0;
}
/*
Free memory used by hash.
SYNOPSIS
hash_free()
hash the hash to delete elements of
NOTES: Hash can't be reused without calling hash_init again.
*/
void hash_free(HASH *hash)
{
DBUG_ENTER("hash_free");
DBUG_PRINT("enter",("hash: 0x%lxd",hash));
hash_free_elements(hash);
hash->free= 0;
delete_dynamic(&hash->array);
DBUG_VOID_RETURN;
}
......@@ -100,15 +129,11 @@ void hash_free(HASH *hash)
void hash_reset(HASH *hash)
{
DBUG_ENTER("hash_reset");
if (hash->free)
{
HASH_LINK *link= dynamic_element(&hash->array, 0, HASH_LINK*);
HASH_LINK *end= link + hash->records;
for (; link < end; ++link)
(*hash->free)(link->data);
}
DBUG_PRINT("enter",("hash: 0x%lxd",hash));
hash_free_elements(hash);
reset_dynamic(&hash->array);
hash->records= 0;
/* Set row pointers so that the hash can be reused at once */
hash->blength= 1;
hash->current_record= NO_RECORD;
DBUG_VOID_RETURN;
......
......@@ -38,7 +38,7 @@
#include <m_string.h>
inline void bitmap_lock(MY_BITMAP *map)
static inline void bitmap_lock(MY_BITMAP *map)
{
#ifdef THREAD
if (map->mutex)
......@@ -47,7 +47,7 @@ inline void bitmap_lock(MY_BITMAP *map)
}
inline void bitmap_unlock(MY_BITMAP *map)
static inline void bitmap_unlock(MY_BITMAP *map)
{
#ifdef THREAD
if (map->mutex)
......
......@@ -103,14 +103,15 @@
rows - This is an unsigned long long which is the number of rows in the data
file.
check point - Reserved for future use
dirty - Status of the file, whether or not its values are the latest. This flag
is what causes a repair to occur
dirty - Status of the file, whether or not its values are the latest. This
flag is what causes a repair to occur
The data file:
check - Just an int of 254 to make sure that the file we are opening was
never corrupted.
version - The current version of the file format.
data - The data is stored in a "row +blobs" format.
*/
/* Variables for archive share methods */
pthread_mutex_t archive_mutex;
......
......@@ -65,7 +65,7 @@ typedef NdbDictionary::Table NDBTAB;
typedef NdbDictionary::Index NDBINDEX;
typedef NdbDictionary::Dictionary NDBDICT;
bool ndbcluster_inited= false;
bool ndbcluster_inited= FALSE;
static Ndb* g_ndb= NULL;
static Ndb_cluster_connection* g_ndb_cluster_connection= NULL;
......@@ -146,8 +146,10 @@ inline
int execute_no_commit(ha_ndbcluster *h, NdbConnection *trans)
{
int m_batch_execute= 0;
if (false && m_batch_execute)
#ifdef NOT_USED
if (m_batch_execute)
return 0;
#endif
return trans->execute(NoCommit,AbortOnError,1);
}
......@@ -155,8 +157,10 @@ inline
int execute_commit(ha_ndbcluster *h, NdbConnection *trans)
{
int m_batch_execute= 0;
if (false && m_batch_execute)
#ifdef NOT_USED
if (m_batch_execute)
return 0;
#endif
return trans->execute(Commit,AbortOnError,1);
}
......@@ -164,8 +168,10 @@ inline
int execute_no_commit_ie(ha_ndbcluster *h, NdbConnection *trans)
{
int m_batch_execute= 0;
if (false && m_batch_execute)
#ifdef NOT_USED
if (m_batch_execute)
return 0;
#endif
return trans->execute(NoCommit,IgnoreError,1);
}
......@@ -326,7 +332,7 @@ bool ha_ndbcluster::get_error_message(int error,
Ndb *ndb= ((Thd_ndb*)current_thd->transaction.thd_ndb)->ndb;
if (!ndb)
DBUG_RETURN(false);
DBUG_RETURN(FALSE);
const NdbError err= ndb->getNdbError(error);
bool temporary= err.status==NdbError::TemporaryError;
......@@ -367,12 +373,12 @@ static inline bool ndb_supported_type(enum_field_types type)
case MYSQL_TYPE_LONG_BLOB:
case MYSQL_TYPE_ENUM:
case MYSQL_TYPE_SET:
return true;
return TRUE;
case MYSQL_TYPE_NULL:
case MYSQL_TYPE_GEOMETRY:
break;
}
return false;
return FALSE;
}
......@@ -466,7 +472,7 @@ int ha_ndbcluster::set_ndb_value(NdbOperation *ndb_op, Field *field,
DBUG_DUMP("value", (char*)blob_ptr, min(blob_len, 26));
if (set_blob_value)
*set_blob_value= true;
*set_blob_value= TRUE;
// No callback needed to write value
DBUG_RETURN(ndb_blob->setValue(blob_ptr, blob_len) != 0);
}
......@@ -609,24 +615,24 @@ int ha_ndbcluster::get_ndb_value(NdbOperation *ndb_op, Field *field,
bool ha_ndbcluster::uses_blob_value(bool all_fields)
{
if (table->blob_fields == 0)
return false;
return FALSE;
if (all_fields)
return true;
return TRUE;
{
uint no_fields= table->fields;
int i;
THD *thd= current_thd;
THD *thd= table->in_use;
// They always put blobs at the end..
for (i= no_fields - 1; i >= 0; i--)
{
Field *field= table->field[i];
if (thd->query_id == field->query_id)
{
return true;
return TRUE;
}
}
}
return false;
return FALSE;
}
......@@ -645,7 +651,7 @@ int ha_ndbcluster::get_metadata(const char *path)
NDBDICT *dict= m_ndb->getDictionary();
const NDBTAB *tab;
int error;
bool invalidating_ndb_table= false;
bool invalidating_ndb_table= FALSE;
DBUG_ENTER("get_metadata");
DBUG_PRINT("enter", ("m_tabname: %s, path: %s", m_tabname, path));
......@@ -676,7 +682,7 @@ int ha_ndbcluster::get_metadata(const char *path)
{
DBUG_PRINT("info", ("Invalidating table"));
dict->invalidateTable(m_tabname);
invalidating_ndb_table= true;
invalidating_ndb_table= TRUE;
}
else
{
......@@ -687,12 +693,12 @@ int ha_ndbcluster::get_metadata(const char *path)
DBUG_DUMP("pack_data", (char*)pack_data, pack_length);
DBUG_DUMP("frm", (char*)tab->getFrmData(), tab->getFrmLength());
error= 3;
invalidating_ndb_table= false;
invalidating_ndb_table= FALSE;
}
}
else
{
invalidating_ndb_table= false;
invalidating_ndb_table= FALSE;
}
my_free((char*)data, MYF(0));
my_free((char*)pack_data, MYF(0));
......@@ -755,7 +761,7 @@ int ha_ndbcluster::build_index_list(TABLE *tab, enum ILBP phase)
error= create_ordered_index(index_name, key_info);
break;
default:
DBUG_ASSERT(false);
DBUG_ASSERT(FALSE);
break;
}
if (error)
......@@ -1172,7 +1178,7 @@ inline int ha_ndbcluster::next_result(byte *buf)
if (execute_no_commit(this,trans) != 0)
DBUG_RETURN(ndb_err(trans));
ops_pending= 0;
blobs_pending= false;
blobs_pending= FALSE;
}
check= cursor->nextResult(contact_ndb);
if (check == 0)
......@@ -1585,7 +1591,7 @@ int ha_ndbcluster::write_row(byte *record)
if (has_auto_increment)
{
skip_auto_increment= false;
skip_auto_increment= FALSE;
update_auto_increment();
skip_auto_increment= !auto_increment_column_changed;
}
......@@ -1595,14 +1601,14 @@ int ha_ndbcluster::write_row(byte *record)
}
// Set non-key attribute(s)
bool set_blob_value= false;
bool set_blob_value= FALSE;
for (i= 0; i < table->fields; i++)
{
Field *field= table->field[i];
if (!(field->flags & PRI_KEY_FLAG) &&
set_ndb_value(op, field, i, &set_blob_value))
{
skip_auto_increment= true;
skip_auto_increment= TRUE;
ERR_RETURN(op->getNdbError());
}
}
......@@ -1616,7 +1622,7 @@ int ha_ndbcluster::write_row(byte *record)
*/
rows_inserted++;
no_uncommitted_rows_update(1);
bulk_insert_not_flushed= true;
bulk_insert_not_flushed= TRUE;
if ((rows_to_insert == 1) ||
((rows_inserted % bulk_insert_rows) == 0) ||
set_blob_value)
......@@ -1627,12 +1633,12 @@ int ha_ndbcluster::write_row(byte *record)
"rows_inserted:%d, bulk_insert_rows: %d",
(int)rows_inserted, (int)bulk_insert_rows));
bulk_insert_not_flushed= false;
bulk_insert_not_flushed= FALSE;
if (thd->transaction.on)
{
if (execute_no_commit(this,trans) != 0)
{
skip_auto_increment= true;
skip_auto_increment= TRUE;
no_uncommitted_rows_execute_failure();
DBUG_RETURN(ndb_err(trans));
}
......@@ -1641,7 +1647,7 @@ int ha_ndbcluster::write_row(byte *record)
{
if (execute_commit(this,trans) != 0)
{
skip_auto_increment= true;
skip_auto_increment= TRUE;
no_uncommitted_rows_execute_failure();
DBUG_RETURN(ndb_err(trans));
}
......@@ -1655,11 +1661,11 @@ int ha_ndbcluster::write_row(byte *record)
DBUG_PRINT("info",
("Trying to set next auto increment value to %lu",
(ulong) next_val));
if (m_ndb->setAutoIncrementValue((const NDBTAB *) m_table, next_val, true))
if (m_ndb->setAutoIncrementValue((const NDBTAB *) m_table, next_val, TRUE))
DBUG_PRINT("info",
("Setting next auto increment value to %u", next_val));
}
skip_auto_increment= true;
skip_auto_increment= TRUE;
DBUG_RETURN(0);
}
......@@ -1763,8 +1769,8 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
if (!(op= cursor->updateTuple()))
ERR_RETURN(trans->getNdbError());
ops_pending++;
if (uses_blob_value(false))
blobs_pending= true;
if (uses_blob_value(FALSE))
blobs_pending= TRUE;
}
else
{
......@@ -1920,7 +1926,7 @@ void ha_ndbcluster::unpack_record(byte* buf)
else
{
NdbBlob* ndb_blob= (*value).blob;
bool isNull= true;
bool isNull= TRUE;
int ret= ndb_blob->getNull(isNull);
DBUG_ASSERT(ret == 0);
if (isNull)
......@@ -1988,7 +1994,7 @@ void ha_ndbcluster::print_results()
else
{
ndb_blob= value.blob;
bool isNull= true;
bool isNull= TRUE;
ndb_blob->getNull(isNull);
if (isNull) {
fprintf(DBUG_FILE, "NULL\n");
......@@ -2165,7 +2171,7 @@ int ha_ndbcluster::index_read(byte *buf,
break;
default:
case UNDEFINED_INDEX:
DBUG_ASSERT(false);
DBUG_ASSERT(FALSE);
return 1;
break;
}
......@@ -2177,7 +2183,7 @@ int ha_ndbcluster::index_read(byte *buf,
start_key.key = key;
start_key.length = key_len;
start_key.flag = find_flag;
error= ordered_index_scan(&start_key, 0, true, buf);
error= ordered_index_scan(&start_key, 0, TRUE, buf);
DBUG_RETURN(error == HA_ERR_END_OF_FILE ? HA_ERR_KEY_NOT_FOUND : error);
}
......@@ -2219,7 +2225,7 @@ int ha_ndbcluster::index_first(byte *buf)
// Start the ordered index scan and fetch the first row
// Only HA_READ_ORDER indexes get called by index_first
DBUG_RETURN(ordered_index_scan(0, 0, true, buf));
DBUG_RETURN(ordered_index_scan(0, 0, TRUE, buf));
}
......@@ -2228,9 +2234,9 @@ int ha_ndbcluster::index_last(byte *buf)
DBUG_ENTER("index_last");
statistic_increment(ha_read_last_count,&LOCK_status);
int res;
if((res= ordered_index_scan(0, 0, true, buf)) == 0){
if((res= ordered_index_scan(0, 0, TRUE, buf)) == 0){
NdbResultSet *cursor= m_active_cursor;
while((res= cursor->nextResult(true)) == 0);
while((res= cursor->nextResult(TRUE)) == 0);
if(res == 1){
unpack_record(buf);
table->status= 0;
......@@ -2584,8 +2590,8 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
case HA_EXTRA_NO_IGNORE_DUP_KEY:
DBUG_PRINT("info", ("HA_EXTRA_NO_IGNORE_DUP_KEY"));
DBUG_PRINT("info", ("Turning OFF use of write instead of insert"));
m_use_write= false;
m_ignore_dup_key_not_supported= false;
m_use_write= FALSE;
m_ignore_dup_key_not_supported= FALSE;
break;
case HA_EXTRA_RETRIEVE_ALL_COLS: /* Retrieve all columns, not just those
where field->query_id is the same as
......@@ -2671,7 +2677,7 @@ int ha_ndbcluster::end_bulk_insert()
DBUG_PRINT("info", ("Sending inserts to NDB, "\
"rows_inserted:%d, bulk_insert_rows: %d",
rows_inserted, bulk_insert_rows));
bulk_insert_not_flushed= false;
bulk_insert_not_flushed= FALSE;
if (execute_no_commit(this,trans) != 0) {
no_uncommitted_rows_execute_failure();
my_errno= error= ndb_err(trans);
......@@ -3210,7 +3216,7 @@ static int create_ndb_column(NDBCOL &col,
col.setAutoIncrementInitialValue(value);
}
else
col.setAutoIncrement(false);
col.setAutoIncrement(FALSE);
return 0;
}
......@@ -3280,7 +3286,7 @@ int ha_ndbcluster::create(const char *name,
col.setName("$PK");
col.setType(NdbDictionary::Column::Bigunsigned);
col.setLength(1);
col.setNullable(false);
col.setNullable(FALSE);
col.setPrimaryKey(TRUE);
col.setAutoIncrement(TRUE);
tab.addColumn(col);
......@@ -3315,7 +3321,7 @@ int ha_ndbcluster::create_ordered_index(const char *name,
KEY *key_info)
{
DBUG_ENTER("create_ordered_index");
DBUG_RETURN(create_index(name, key_info, false));
DBUG_RETURN(create_index(name, key_info, FALSE));
}
int ha_ndbcluster::create_unique_index(const char *name,
......@@ -3323,7 +3329,7 @@ int ha_ndbcluster::create_unique_index(const char *name,
{
DBUG_ENTER("create_unique_index");
DBUG_RETURN(create_index(name, key_info, true));
DBUG_RETURN(create_index(name, key_info, TRUE));
}
......@@ -3349,7 +3355,7 @@ int ha_ndbcluster::create_index(const char *name,
{
ndb_index.setType(NdbDictionary::Index::OrderedIndex);
// TODO Only temporary ordered indexes supported
ndb_index.setLogging(false);
ndb_index.setLogging(FALSE);
}
ndb_index.setTable(m_tabname);
......@@ -3512,15 +3518,15 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
HA_AUTO_PART_KEY |
HA_NO_PREFIX_CHAR_KEYS),
m_share(0),
m_use_write(false),
m_ignore_dup_key_not_supported(false),
m_use_write(FALSE),
m_ignore_dup_key_not_supported(FALSE),
retrieve_all_fields(FALSE),
rows_to_insert(1),
rows_inserted(0),
bulk_insert_rows(1024),
bulk_insert_not_flushed(false),
bulk_insert_not_flushed(FALSE),
ops_pending(0),
skip_auto_increment(true),
skip_auto_increment(TRUE),
blobs_pending(0),
blobs_buffer(0),
blobs_buffer_size(0),
......@@ -3931,9 +3937,9 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path,
table_list.db= (char*) db;
table_list.real_name=(char*)file_name;
(void)mysql_rm_table_part2(thd, &table_list,
/* if_exists */ true,
/* drop_temporary */ false,
/* dont_log_query*/ true);
/* if_exists */ TRUE,
/* drop_temporary */ FALSE,
/* dont_log_query*/ TRUE);
}
}
......@@ -3942,7 +3948,7 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path,
while ((file_name=it2++))
{
DBUG_PRINT("info", ("Table %s need discovery", name));
if (ha_create_table_from_engine(thd, db, file_name, true) == 0)
if (ha_create_table_from_engine(thd, db, file_name, TRUE) == 0)
files->push_back(thd->strdup(file_name));
}
......@@ -4009,7 +4015,7 @@ bool ndbcluster_init()
if (ndb_discover_tables() != 0)
DBUG_RETURN(TRUE);
#endif
DBUG_RETURN(false);
DBUG_RETURN(FALSE);
}
......@@ -4367,7 +4373,7 @@ ndb_get_table_statistics(Ndb* ndb, const char * table,
Uint64 sum_rows= 0;
Uint64 sum_commits= 0;
while((check= rs->nextResult(true)) == 0)
while((check= rs->nextResult(TRUE)) == 0)
{
sum_rows+= rows;
sum_commits+= commits;
......
......@@ -139,12 +139,12 @@ class ha_ndbcluster: public handler
bool low_byte_first() const
{
#ifdef WORDS_BIGENDIAN
return false;
return FALSE;
#else
return true;
return TRUE;
#endif
}
bool has_transactions() { return true; }
bool has_transactions() { return TRUE; }
const char* index_type(uint key_number) {
switch (get_index_type(key_number)) {
......
......@@ -1105,6 +1105,11 @@ void handler::print_error(int error, myf errflag)
break;
case HA_ERR_NO_SUCH_TABLE:
{
/*
We have to use path to find database name instead of using
table->table_cache_key because if the table didn't exist, then
table_cache_key was not set up
*/
char *db;
char buff[FN_REFLEN];
uint length=dirname_part(buff,table->path);
......@@ -1276,22 +1281,25 @@ int ha_create_table_from_engine(THD* thd,
const char *name,
bool create_if_found)
{
int error= 0;
const void* frmblob = NULL;
uint frmlen = 0;
int error;
const void *frmblob;
uint frmlen;
char path[FN_REFLEN];
HA_CREATE_INFO create_info;
TABLE table;
DBUG_ENTER("ha_create_table_from_engine");
DBUG_PRINT("enter", ("db: %s, name: %s", db, name));
DBUG_PRINT("enter", ("create_if_found: %d", create_if_found));
DBUG_PRINT("enter", ("name '%s'.'%s' create_if_found: %d",
db, name, create_if_found));
bzero((char*) &create_info,sizeof(create_info));
if ((error= ha_discover(thd, db, name, &frmblob, &frmlen)))
DBUG_RETURN(error);
/*
Table exists in handler
frmblob and frmlen are set
*/
// Table exists in handler
if (create_if_found)
{
(void)strxnmov(path,FN_REFLEN,mysql_data_home,"/",db,"/",name,NullS);
......@@ -1309,9 +1317,7 @@ int ha_create_table_from_engine(THD* thd,
!(table.file->table_flags() & HA_FILE_BASED))
{
/* Ensure that handler gets name in lower case */
strmov(path, name);
my_casedn_str(files_charset_info, path);
name= path;
}
error=table.file->create(path,&table,&create_info);
......@@ -1319,8 +1325,7 @@ int ha_create_table_from_engine(THD* thd,
}
err_end:
if (frmblob)
my_free((char*) frmblob,MYF(0));
my_free((char*) frmblob, MYF(MY_ALLOW_ZERO));
DBUG_RETURN(error);
}
......@@ -1429,10 +1434,14 @@ int ha_change_key_cache(KEY_CACHE *old_key_cache,
/*
Try to discover one table from handler(s)
RETURN
0 ok. In this case *frmblob and *frmlen are set
1 error. frmblob and frmlen may not be set
*/
int ha_discover(THD* thd, const char* db, const char* name,
const void** frmblob, uint* frmlen)
int ha_discover(THD *thd, const char *db, const char *name,
const void **frmblob, uint *frmlen)
{
int error= 1; // Table does not exist in any handler
DBUG_ENTER("ha_discover");
......@@ -1470,6 +1479,8 @@ ha_find_files(THD *thd,const char *db,const char *path,
}
#ifdef NOT_YET_USED
/*
Ask handler if the table exists in engine
......@@ -1491,6 +1502,7 @@ int ha_table_exists(THD* thd, const char* db, const char* name)
DBUG_RETURN(error);
}
#endif
/*
......
......@@ -855,7 +855,7 @@ class Item_func_like :public Item_bool_func2
char escape;
Item_func_like(Item *a,Item *b, Item *escape_arg)
:Item_bool_func2(a,b), canDoTurboBM(false), pattern(0), pattern_len(0),
:Item_bool_func2(a,b), canDoTurboBM(FALSE), pattern(0), pattern_len(0),
bmGs(0), bmBc(0), escape_item(escape_arg) {}
longlong val_int();
enum Functype functype() const { return LIKE_FUNC; }
......
......@@ -2709,41 +2709,40 @@ longlong Item_func_crc32::val_int()
String *Item_func_compress::val_str(String *str)
{
int err= Z_OK, code;
ulong new_size;
String *res;
Byte *body;
char *tmp, *last_char;
DBUG_ASSERT(fixed == 1);
String *res= args[0]->val_str(str);
if (!res)
if (!(res= args[0]->val_str(str)))
{
null_value= 1;
return 0;
}
if (res->is_empty()) return res;
int err= Z_OK;
int code;
/*
citation from zlib.h (comment for compress function):
Citation from zlib.h (comment for compress function):
Compresses the source buffer into the destination buffer. sourceLen is
the byte length of the source buffer. Upon entry, destLen is the total
size of the destination buffer, which must be at least 0.1% larger than
sourceLen plus 12 bytes.
Proportion 120/100 founded by Sinisa with help of procedure
compress(compress(compress(...)))
I.e. zlib give number 'at least'..
We assume here that the buffer can't grow more than .25 %.
*/
ulong new_size= res->length() + res->length() / 5 + 12;
new_size= res->length() + res->length() / 5 + 12;
// Will check new_size overflow: new_size <= res->length()
if (((uint32) new_size <= res->length()) ||
// Check new_size overflow: new_size <= res->length()
if (((uint32) (new_size+5) <= res->length()) ||
buffer.realloc((uint32) new_size + 4 + 1))
{
null_value= 1;
return 0;
}
Byte *body= ((Byte*)buffer.ptr()) + 4;
body= ((Byte*)buffer.ptr()) + 4;
// Since we have already checked res->is_empty(), it is safe to use ptr() here
if ((err= compress(body, &new_size,
......@@ -2755,11 +2754,11 @@ String *Item_func_compress::val_str(String *str)
return 0;
}
char *tmp= (char*)buffer.ptr(); // int4store is a macro; avoid side effects
tmp= (char*)buffer.ptr(); // int4store is a macro; avoid side effects
int4store(tmp, res->length() & 0x3FFFFFFF);
/* This is for the stupid char fields which trim ' ': */
char *last_char= ((char*)body)+new_size-1;
/* This is to ensure that things works for CHAR fields, which trim ' ': */
last_char= ((char*)body)+new_size-1;
if (*last_char == ' ')
{
*++last_char= '.';
......
......@@ -571,7 +571,7 @@ class Load_log_event: public Log_event
{
fname= afname;
fname_len= alen;
local_fname= true;
local_fname= TRUE;
}
/* fname doesn't point to memory inside Log_event::temp_buf */
int check_fname_outside_temp_buf()
......
......@@ -1368,7 +1368,7 @@ static int open_unireg_entry(THD *thd, TABLE *entry, const char *db,
*/
if (discover_retry_count++ != 0)
goto err;
if (ha_create_table_from_engine(thd, db, name, true) != 0)
if (ha_create_table_from_engine(thd, db, name, TRUE) != 0)
goto err;
thd->clear_error(); // Clear error message
......@@ -2846,8 +2846,15 @@ void flush_tables()
/*
** Mark all entries with the table as deleted to force an reopen of the table
** Returns true if the table is in use by another thread
Mark all entries with the table as deleted to force a reopen of the table.
The table will be closed (not stored in cache) by the current thread when
close_thread_tables() is called.
RETURN
0 This thread now has exclusive access to this table and no other thread
can access the table until close_thread_tables() is called.
1 Table is in use by another thread
*/
bool remove_table_from_cache(THD *thd, const char *db, const char *table_name,
......
......@@ -746,7 +746,7 @@ int mysqld_help(THD *thd, const char *mask)
select,&subcategories_list);
delete select;
String *cat= categories_list.head();
if (send_header_2(protocol, true) ||
if (send_header_2(protocol, TRUE) ||
send_variant_2_list(mem_root,protocol,&topics_list, "N",cat) ||
send_variant_2_list(mem_root,protocol,&subcategories_list,"Y",cat))
goto end;
......
......@@ -454,7 +454,6 @@ inline static uint int_token(const char *str,uint length)
int yylex(void *arg, void *yythd)
{
reg1 uchar c;
bool space_ignored;
int tokval, result_state;
uint length;
enum my_lex_states state;
......@@ -537,6 +536,7 @@ int yylex(void *arg, void *yythd)
/* Fall through */
case MY_LEX_IDENT_OR_BIN: // TODO: Add binary string handling
case MY_LEX_IDENT:
uchar *start;
#if defined(USE_MB) && defined(USE_MB_IDENT)
if (use_mb(cs))
{
......@@ -573,12 +573,16 @@ int yylex(void *arg, void *yythd)
result_state= result_state & 0x80 ? IDENT_QUOTED : IDENT;
}
length= (uint) (lex->ptr - lex->tok_start)-1;
space_ignored= FALSE;
start= lex->ptr;
if (lex->ignore_space)
{
for (; state_map[c] == MY_LEX_SKIP ; space_ignored= TRUE, c= yyGet());
/*
If we find a space then this can't be an identifier. We notice this
below by checking start != lex->ptr.
*/
for (; state_map[c] == MY_LEX_SKIP ; c= yyGet());
}
if (! space_ignored && c == '.' && ident_map[yyPeek()])
if (start == lex->ptr && c == '.' && ident_map[yyPeek()])
lex->next_state=MY_LEX_IDENT_SEP;
else
{ // '(' must follow directly if function
......
......@@ -894,7 +894,7 @@ static int check_connection(THD *thd)
x_free(thd->user);
if (!(thd->user= my_strdup(user, MYF(0))))
return (ER_OUT_OF_RESOURCES);
return check_user(thd, COM_CONNECT, passwd, passwd_len, db, true);
return check_user(thd, COM_CONNECT, passwd, passwd_len, db, TRUE);
}
......@@ -4771,7 +4771,7 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables,
acl_reload(thd);
grant_reload(thd);
if (mqh_used)
reset_mqh(thd,(LEX_USER *) NULL,true);
reset_mqh(thd,(LEX_USER *) NULL,TRUE);
}
#endif
if (options & REFRESH_LOG)
......
......@@ -1780,7 +1780,7 @@ void mysql_stmt_execute(THD *thd, char *packet, uint packet_length)
#endif
DBUG_ASSERT(thd->free_list == NULL);
thd->protocol= &thd->protocol_prep; // Switch to binary protocol
execute_stmt(thd, stmt, &expanded_query, true);
execute_stmt(thd, stmt, &expanded_query, TRUE);
thd->protocol= &thd->protocol_simple; // Use normal protocol
DBUG_VOID_RETURN;
......@@ -1832,7 +1832,7 @@ void mysql_sql_stmt_execute(THD *thd, LEX_STRING *stmt_name)
my_error(ER_WRONG_ARGUMENTS, MYF(0), "EXECUTE");
send_error(thd);
}
execute_stmt(thd, stmt, &expanded_query, false);
execute_stmt(thd, stmt, &expanded_query, FALSE);
DBUG_VOID_RETURN;
}
......
......@@ -223,7 +223,7 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
(void) unpack_filename(path,path);
}
if (drop_temporary ||
(access(path,F_OK) && ha_create_table_from_engine(thd,db,alias,true)))
(access(path,F_OK) && ha_create_table_from_engine(thd,db,alias,TRUE)))
{
if (if_exists)
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
......
......@@ -843,14 +843,14 @@ prepare_src:
THD *thd=YYTHD;
LEX *lex= thd->lex;
lex->prepared_stmt_code= $1;
lex->prepared_stmt_code_is_varref= false;
lex->prepared_stmt_code_is_varref= FALSE;
}
| '@' ident_or_text
{
THD *thd=YYTHD;
LEX *lex= thd->lex;
lex->prepared_stmt_code= $2;
lex->prepared_stmt_code_is_varref= true;
lex->prepared_stmt_code_is_varref= TRUE;
};
execute:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment