Commit af5a042c authored by unknown

Merge bk-internal.mysql.com:/data0/bk/mysql-4.1

into bk-internal.mysql.com:/data0/bk/mysql-4.1-cluster-extra


sql/ha_ndbcluster.cc:
  Auto merged
sql/sql_cache.cc:
  Auto merged
BitKeeper/etc/logging_ok:
  Logging to logging@openlogging.org accepted
parents 71424cb1 bf532e26
@@ -147,6 +147,7 @@ mwagner@work.mysql.com
mydev@mysql.com
mysql@home.(none)
mysql@mc04.(none)
mysqldev@bk-internal.mysql.com
mysqldev@build.mysql2.com
mysqldev@melody.local
mysqldev@mysql.com
......
# Setup connections to both MySQL Servers connected to the cluster
connect (server1,127.0.0.1,root,,test,$MASTER_MYPORT,);
connect (server2,127.0.0.1,root,,test,$MASTER_MYPORT1,);
# Check that server1 has NDB support
connection server1;
disable_query_log;
--disable_warnings
drop table if exists t1, t2;
--enable_warnings
flush tables;
@r/have_ndb.require show variables like "have_ndbcluster";
@r/server_id.require show variables like "server_id";
enable_query_log;
# Check that server2 has NDB support
connection server2;
disable_query_log;
--disable_warnings
drop table if exists t1, t2;
--enable_warnings
flush tables;
@r/have_ndb.require show variables like "have_ndbcluster";
@r/server_id1.require show variables like "server_id";
enable_query_log;
# Set the default connection to 'server1'
connection server1;
@@ -2,6 +2,4 @@
disable_query_log;
show variables like "have_ndbcluster";
enable_query_log;
-#connect (server1,127.0.0.1,root,,test,$MASTER_MYPORT,$MASTER_MYSOCK);
-#connect (server2,127.0.0.1,root,,test,$MASTER_MYPORT1,$MASTER_MYSOCK1);
-#connection server1;
drop table if exists t1;
set GLOBAL query_cache_type=on;
set GLOBAL query_cache_size=1355776;
reset query cache;
flush status;
-drop table if exists t1,t2;
-CREATE TABLE t1 (a int) ENGINE=ndbcluster;
-CREATE TABLE t2 (a int);
CREATE TABLE t1 ( pk int not null primary key,
a int, b int not null, c varchar(20)) ENGINE=ndbcluster;
insert into t1 value (1, 2, 3, 'First row');
select * from t1;
-a
pk a b c
1 2 3 First row
show status like "Qcache_queries_in_cache";
Variable_name Value
-Qcache_queries_in_cache 0
Qcache_queries_in_cache 1
show status like "Qcache_inserts";
Variable_name Value
-Qcache_inserts 0
Qcache_inserts 1
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 0
-select * from t2;
-a
select * from t1;
pk a b c
1 2 3 First row
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 1
update t1 set a=3 where pk=1;
select * from t1;
pk a b c
1 3 3 First row
show status like "Qcache_inserts";
Variable_name Value
Qcache_inserts 2
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 1
insert into t1 value (2, 7, 8, 'Second row');
insert into t1 value (4, 5, 6, 'Fourth row');
select * from t1;
pk a b c
2 7 8 Second row
4 5 6 Fourth row
1 3 3 First row
show status like "Qcache_inserts";
Variable_name Value
Qcache_inserts 3
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 1
select * from t1;
pk a b c
2 7 8 Second row
4 5 6 Fourth row
1 3 3 First row
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 2
select * from t1 where b=3;
pk a b c
1 3 3 First row
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 2
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 2
select * from t1 where b=3;
pk a b c
1 3 3 First row
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 3
delete from t1 where c='Fourth row';
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 0
select * from t1 where b=3;
pk a b c
1 3 3 First row
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 3
use test;
select * from t1;
pk a b c
2 7 8 Second row
1 3 3 First row
select * from t1 where b=3;
pk a b c
1 3 3 First row
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 4
update t1 set a=4 where b=3;
use test;
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 0
select * from t1;
pk a b c
2 7 8 Second row
1 4 3 First row
select * from t1;
pk a b c
2 7 8 Second row
1 4 3 First row
show status like "Qcache_inserts";
Variable_name Value
Qcache_inserts 7
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 5
select * from t1;
pk a b c
2 7 8 Second row
1 4 3 First row
select * from t1;
pk a b c
2 7 8 Second row
1 4 3 First row
show status like "Qcache_queries_in_cache"; show status like "Qcache_queries_in_cache";
Variable_name Value Variable_name Value
Qcache_queries_in_cache 1 Qcache_queries_in_cache 1
show status like "Qcache_inserts"; show status like "Qcache_inserts";
Variable_name Value Variable_name Value
Qcache_inserts 1 Qcache_inserts 7
show status like "Qcache_hits"; show status like "Qcache_hits";
Variable_name Value Variable_name Value
Qcache_hits 0 Qcache_hits 7
begin;
update t1 set a=5 where pk=1;
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 0
show status like "Qcache_inserts";
Variable_name Value
Qcache_inserts 7
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 7
select * from t1;
-a
-select * from t2;
-a
pk a b c
2 7 8 Second row
1 4 3 First row
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 1
show status like "Qcache_inserts";
Variable_name Value
-Qcache_inserts 1
Qcache_inserts 8
show status like "Qcache_hits";
Variable_name Value
-Qcache_hits 1
Qcache_hits 7
-drop table t1, t2;
commit;
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 1
show status like "Qcache_inserts";
Variable_name Value
Qcache_inserts 8
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 7
select * from t1;
pk a b c
2 7 8 Second row
1 5 3 First row
show status like "Qcache_inserts";
Variable_name Value
Qcache_inserts 9
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 7
select * from t1;
pk a b c
2 7 8 Second row
1 5 3 First row
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 1
show status like "Qcache_inserts";
Variable_name Value
Qcache_inserts 9
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 8
drop table t1;
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 0
SET GLOBAL query_cache_size=0;
drop table if exists t1, t2;
set GLOBAL query_cache_type=on;
set GLOBAL query_cache_size=1355776;
reset query cache;
flush status;
set GLOBAL query_cache_type=on;
set GLOBAL query_cache_size=1355776;
reset query cache;
flush status;
create table t1 (a int) engine=ndbcluster;
create table t2 (a int) engine=ndbcluster;
insert into t1 value (2);
insert into t2 value (3);
select * from t1;
a
2
select * from t2;
a
3
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 2
show status like "Qcache_inserts";
Variable_name Value
Qcache_inserts 2
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 0
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 0
show status like "Qcache_inserts";
Variable_name Value
Qcache_inserts 0
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 0
select * from t1;
a
2
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 1
show status like "Qcache_inserts";
Variable_name Value
Qcache_inserts 1
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 0
update t1 set a=3 where a=2;
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 2
show status like "Qcache_inserts";
Variable_name Value
Qcache_inserts 2
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 0
select * from t1;
a
3
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 2
show status like "Qcache_inserts";
Variable_name Value
Qcache_inserts 3
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 0
drop table t1, t2;
drop table if exists t1, t2, t3, t4;
flush status;
create table t1 (a int) engine=ndbcluster;
create table t2 (a int) engine=ndbcluster;
insert into t1 value (2);
insert into t2 value (3);
select * from t1;
a
2
select * from t2;
a
3
show status like 'handler_discover%';
Variable_name Value
Handler_discover 0
flush status;
select * from t1;
a
2
update t1 set a=3 where a=2;
show status like 'handler_discover%';
Variable_name Value
Handler_discover 1
create table t3 (a int not null primary key, b varchar(22),
c int, last_col text) engine=ndb;
insert into t3 values(1, 'Hi!', 89, 'Longtext column');
create table t4 (pk int primary key, b int) engine=ndb;
select * from t1;
a
3
select * from t3;
a b c last_col
1 Hi! 89 Longtext column
show status like 'handler_discover%';
Variable_name Value
Handler_discover 1
show tables like 't4';
Tables_in_test (t4)
t4
show status like 'handler_discover%';
Variable_name Value
Handler_discover 2
show tables;
Tables_in_test
t1
t2
t3
t4
drop table t1, t2, t3, t4;
Variable_name Value
server_id 1
Variable_name Value
server_id 102
-- source include/have_query_cache.inc
-- source include/have_ndb.inc
--disable_warnings
drop table if exists t1;
--enable_warnings
# Turn on and reset query cache
set GLOBAL query_cache_type=on;
set GLOBAL query_cache_size=1355776;
reset query cache;
flush status;
---disable_warnings
-drop table if exists t1,t2;
---enable_warnings
# Create test table in NDB
CREATE TABLE t1 ( pk int not null primary key,
a int, b int not null, c varchar(20)) ENGINE=ndbcluster;
insert into t1 value (1, 2, 3, 'First row');
# Perform one query which should be inserted in the query cache
select * from t1;
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
# Perform the same query and make sure the query cache is hit
select * from t1;
show status like "Qcache_hits";
# Update the table and make sure the correct data is returned
update t1 set a=3 where pk=1;
select * from t1;
show status like "Qcache_inserts";
show status like "Qcache_hits";
# Insert a new record and make sure the correct data is returned
insert into t1 value (2, 7, 8, 'Second row');
insert into t1 value (4, 5, 6, 'Fourth row');
select * from t1;
show status like "Qcache_inserts";
show status like "Qcache_hits";
select * from t1;
show status like "Qcache_hits";
# Perform a "new" query and make sure the query cache is not hit
select * from t1 where b=3;
show status like "Qcache_queries_in_cache";
show status like "Qcache_hits";
# Same query again...
select * from t1 where b=3;
show status like "Qcache_hits";
-CREATE TABLE t1 (a int) ENGINE=ndbcluster;
-CREATE TABLE t2 (a int);
# Delete from the table
delete from t1 where c='Fourth row';
show status like "Qcache_queries_in_cache";
select * from t1 where b=3;
show status like "Qcache_hits";
# Start another connection and check that the query cache is hit
connect (con1,localhost,root,,);
connection con1;
use test;
select * from t1;
select * from t1 where b=3;
show status like "Qcache_hits";
# Update the table and switch to other connection
update t1 set a=4 where b=3;
connect (con2,localhost,root,,);
connection con2;
use test;
show status like "Qcache_queries_in_cache";
select * from t1;
select * from t1;
show status like "Qcache_inserts";
show status like "Qcache_hits";
connection con1;
select * from t1;
select * from t1;
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
-select * from t2;
# Use transactions and make sure the query cache is not updated until
# the transaction is committed
begin;
update t1 set a=5 where pk=1;
# Note!! the test below shows that the table is invalidated
# before transaction is committed
# TODO Fix so that cache is not invalidated HERE!
show status like "Qcache_queries_in_cache"; show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts"; show status like "Qcache_inserts";
show status like "Qcache_hits"; show status like "Qcache_hits";
connection con2;
select * from t1; select * from t1;
select * from t2;
show status like "Qcache_queries_in_cache"; show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts"; show status like "Qcache_inserts";
show status like "Qcache_hits"; show status like "Qcache_hits";
connection con1;
commit;
# TODO Here the query is invalidated once again because the commit count in NDB has changed
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
connection con2;
select * from t1;
show status like "Qcache_inserts";
show status like "Qcache_hits";
connection con1;
select * from t1;
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
drop table t1;
-drop table t1, t2;
show status like "Qcache_queries_in_cache";
SET GLOBAL query_cache_size=0;
-- source include/have_query_cache.inc
-- source include/have_ndb.inc
-- source include/have_multi_ndb.inc
--disable_warnings
drop table if exists t1, t2;
--enable_warnings
# Turn on and reset query cache on server1
connection server1;
set GLOBAL query_cache_type=on;
set GLOBAL query_cache_size=1355776;
reset query cache;
flush status;
# Turn on and reset query cache on server2
connection server2;
set GLOBAL query_cache_type=on;
set GLOBAL query_cache_size=1355776;
reset query cache;
flush status;
# Create test tables in NDB and load them into cache
# on server1
connection server1;
create table t1 (a int) engine=ndbcluster;
create table t2 (a int) engine=ndbcluster;
insert into t1 value (2);
insert into t2 value (3);
select * from t1;
select * from t2;
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
# Connect to server2, load the table into the cache, then update the table
connection server2;
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
select * from t1;
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
update t1 set a=3 where a=2;
# Connect to server1 and check that cache is invalidated
# and correct data is returned
connection server1;
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
select * from t1;
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
drop table t1, t2;
-- source include/have_ndb.inc
-- source include/have_multi_ndb.inc
--disable_warnings
drop table if exists t1, t2, t3, t4;
--enable_warnings
flush status;
# Create test tables on server1
create table t1 (a int) engine=ndbcluster;
create table t2 (a int) engine=ndbcluster;
insert into t1 value (2);
insert into t2 value (3);
select * from t1;
select * from t2;
show status like 'handler_discover%';
# Connect to server2 and use the tables from there
connection server2;
flush status;
select * from t1;
update t1 set a=3 where a=2;
show status like 'handler_discover%';
# Create a new table on server2
create table t3 (a int not null primary key, b varchar(22),
c int, last_col text) engine=ndb;
insert into t3 values(1, 'Hi!', 89, 'Longtext column');
create table t4 (pk int primary key, b int) engine=ndb;
# Check that the tables are accessible from server1
connection server1;
select * from t1;
select * from t3;
show status like 'handler_discover%';
show tables like 't4';
show status like 'handler_discover%';
show tables;
drop table t1, t2, t3, t4;
@@ -658,8 +658,9 @@ innobase_query_caching_of_table_permitted(
char* full_name, /* in: concatenation of database name,
the null character '\0', and the table
name */
-uint full_name_len) /* in: length of the full name, i.e.
uint full_name_len, /* in: length of the full name, i.e.
len(dbname) + len(tablename) + 1 */
ulonglong *unused) /* unused for this engine */
{
ibool is_autocommit;
trx_t* trx;
......
@@ -33,6 +33,10 @@ typedef struct st_innobase_share {
} INNOBASE_SHARE;
my_bool innobase_query_caching_of_table_permitted(THD* thd, char* full_name,
uint full_name_len,
ulonglong *unused);
/* The class defining a handle to an Innodb table */
class ha_innobase: public handler
{
@@ -168,6 +172,20 @@ class ha_innobase: public handler
void init_table_handle_for_HANDLER();
longlong get_auto_increment();
uint8 table_cache_type() { return HA_CACHE_TBL_ASKTRANSACT; }
/*
ask handler about permission to cache table during query registration
*/
my_bool cached_table_registration(THD *thd, char *table_key,
uint key_length,
qc_engine_callback *call_back,
ulonglong *engine_data)
{
*call_back= innobase_query_caching_of_table_permitted;
*engine_data= 0;
return innobase_query_caching_of_table_permitted(thd, table_key,
key_length,
engine_data);
}
static char *get_mysql_bin_log_name();
static ulonglong get_mysql_bin_log_pos();
@@ -233,8 +251,6 @@ int innobase_close_connection(THD *thd);
int innobase_drop_database(char *path);
int innodb_show_status(THD* thd);
-my_bool innobase_query_caching_of_table_permitted(THD* thd, char* full_name,
-uint full_name_len);
void innobase_release_temporary_latches(void* innobase_tid);
void innobase_store_binlog_offset_and_flush_log(char *binlog_name,longlong offset);
......
@@ -3057,7 +3057,6 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
m_transaction_on= FALSE;
else
m_transaction_on= thd->variables.ndb_use_transactions;
-// m_use_local_query_cache= thd->variables.ndb_use_local_query_cache;
m_active_trans= thd->transaction.all.ndb_tid ?
(NdbConnection*)thd->transaction.all.ndb_tid:
@@ -3789,9 +3788,8 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
m_ha_not_exact_count(FALSE),
m_force_send(TRUE),
m_autoincrement_prefetch(32),
-m_transaction_on(TRUE),
-m_use_local_query_cache(FALSE)
m_transaction_on(TRUE)
{
int i;
DBUG_ENTER("ha_ndbcluster");
@@ -4506,10 +4504,129 @@ const char* ha_ndbcluster::index_type(uint key_number)
}
uint8 ha_ndbcluster::table_cache_type()
{
-if (m_use_local_query_cache)
-return HA_CACHE_TBL_TRANSACT;
-else
-return HA_CACHE_TBL_NOCACHE;
-}
DBUG_ENTER("ha_ndbcluster::table_cache_type=HA_CACHE_TBL_ASKTRANSACT");
DBUG_RETURN(HA_CACHE_TBL_ASKTRANSACT);
}
static
my_bool
ndbcluster_cache_retrieval_allowed(
/*======================================*/
/* out: TRUE if permitted, FALSE if not;
note that the value FALSE means invalidation
of query cache if *engine_data is changed */
THD* thd, /* in: thd of the user who is trying to
store a result to the query cache or
retrieve it */
char* full_name, /* in: concatenation of database name,
the null character '\0', and the table
name */
uint full_name_len, /* in: length of the full name, i.e.
len(dbname) + len(tablename) + 1 */
ulonglong *engine_data) /* in: value set in call to
ha_ndbcluster::cached_table_registration
out: if return FALSE this is used to invalidate
all cached queries with this table*/
{
DBUG_ENTER("ndbcluster_cache_retrieval_allowed");
char tabname[128];
char *dbname= full_name;
my_bool is_autocommit;
{
int dbname_len= strlen(full_name);
int tabname_len= full_name_len-dbname_len-1;
memcpy(tabname, full_name+dbname_len+1, tabname_len);
tabname[tabname_len]= '\0';
}
if (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))
is_autocommit = FALSE;
else
is_autocommit = TRUE;
DBUG_PRINT("enter",("dbname=%s, tabname=%s, autocommit=%d",
dbname,tabname,is_autocommit));
if (!is_autocommit)
{
DBUG_PRINT("info",("OPTION_NOT_AUTOCOMMIT=%d OPTION_BEGIN=%d",
thd->options & OPTION_NOT_AUTOCOMMIT,
thd->options & OPTION_BEGIN));
// ToDo enable cache inside a transaction
// no need to invalidate though so leave *engine_data
DBUG_RETURN(FALSE);
}
{
Ndb *ndb;
Uint64 commit_count;
if (!(ndb= check_ndb_in_thd(thd)))
{
*engine_data= *engine_data+1; // invalidate
DBUG_RETURN(FALSE);
}
ndb->setDatabaseName(dbname);
if (ndb_get_table_statistics(ndb, tabname, 0, &commit_count))
{
*engine_data= *engine_data+1; // invalidate
DBUG_RETURN(FALSE);
}
if (*engine_data != commit_count)
{
*engine_data= commit_count; // invalidate
DBUG_RETURN(FALSE);
}
}
DBUG_PRINT("exit",("*engine_data=%d ok, use cache",*engine_data));
DBUG_RETURN(TRUE);
}
my_bool
ha_ndbcluster::cached_table_registration(
/*======================================*/
/* out: TRUE if permitted, FALSE if not;
note that the value FALSE means invalidation
of query cache if *engine_data is changed */
THD* thd, /* in: thd of the user who is trying to
store a result to the query cache or
retrieve it */
char* full_name, /* in: concatenation of database name,
the null character '\0', and the table
name */
uint full_name_len, /* in: length of the full name, i.e.
len(dbname) + len(tablename) + 1 */
qc_engine_callback
*engine_callback, /* out: function to be called before using
cache on this table */
ulonglong *engine_data) /* out: if return FALSE this is used to
invalidate all cached queries with this table*/
{
DBUG_ENTER("ha_ndbcluster::cached_table_registration");
my_bool is_autocommit;
if (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))
is_autocommit = FALSE;
else
is_autocommit = TRUE;
DBUG_PRINT("enter",("dbname=%s, tabname=%s, is_autocommit=%d",
m_dbname,m_tabname,is_autocommit));
if (!is_autocommit)
{
DBUG_PRINT("info",("OPTION_NOT_AUTOCOMMIT=%d OPTION_BEGIN=%d",
thd->options & OPTION_NOT_AUTOCOMMIT,
thd->options & OPTION_BEGIN));
// ToDo enable cache inside a transaction
// no need to invalidate though so leave *engine_data
DBUG_RETURN(FALSE);
}
{
Uint64 commit_count;
m_ndb->setDatabaseName(m_dbname);
if (ndb_get_table_statistics(m_ndb, m_tabname, 0, &commit_count))
{
*engine_data= 0;
DBUG_RETURN(FALSE);
}
*engine_data= commit_count;
}
*engine_callback= ndbcluster_cache_retrieval_allowed;
DBUG_PRINT("exit",("*engine_data=%d", *engine_data));
DBUG_RETURN(TRUE);
}
/*
......
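The NDB-side logic above keys query-cache validity on the table's commit count: cached_table_registration() stores the current count in *engine_data when a result is cached, and ndbcluster_cache_retrieval_allowed() refuses a later hit (changing *engine_data so the cached queries get invalidated) as soon as the count reported by ndb_get_table_statistics() differs. A minimal standalone sketch of that idea, with a fake counter standing in for the real NDB statistics call (not the actual handler code):

// Simplified sketch only; fake_commit_count stands in for the commit count
// that ndb_get_table_statistics() reports from the NDB kernel.
#include <cstdint>
#include <iostream>

static uint64_t fake_commit_count= 0;

static uint64_t get_commit_count() { return fake_commit_count; }

// Registration time (cf. ha_ndbcluster::cached_table_registration):
// remember the commit count and permit caching.
static bool register_for_cache(uint64_t *engine_data)
{
  *engine_data= get_commit_count();
  return true;
}

// Hit time (cf. ndbcluster_cache_retrieval_allowed): allow the hit only if
// nothing has been committed since registration; otherwise update
// *engine_data and refuse, which makes the query cache drop all cached
// queries that use the table.
static bool retrieval_allowed(uint64_t *engine_data)
{
  uint64_t now= get_commit_count();
  if (*engine_data != now)
  {
    *engine_data= now;
    return false;
  }
  return true;
}

int main()
{
  uint64_t engine_data= 0;
  register_for_cache(&engine_data);
  std::cout << retrieval_allowed(&engine_data) << '\n';  // 1: cache still valid
  fake_commit_count++;                                    // simulate an update
  std::cout << retrieval_allowed(&engine_data) << '\n';  // 0: cache is stale
}

Because any mysqld attached to the cluster bumps the same commit count, this also covers the multi-server case exercised by ndb_cache_multi.test above.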
@@ -146,7 +146,10 @@ class ha_ndbcluster: public handler
static Thd_ndb* seize_thd_ndb();
static void release_thd_ndb(Thd_ndb* thd_ndb);
uint8 table_cache_type();
my_bool cached_table_registration(THD *thd, char *table_key,
uint key_length,
qc_engine_callback *engine_callback,
ulonglong *engine_data);
private:
int alter_table_name(const char *to);
int drop_table();
@@ -244,7 +247,6 @@ class ha_ndbcluster: public handler
bool m_force_send;
ha_rows m_autoincrement_prefetch;
bool m_transaction_on;
-bool m_use_local_query_cache;
void set_rec_per_key();
void records_update();
......
@@ -229,15 +229,6 @@ handler *get_new_handler(TABLE *table, enum db_type db_type)
}
}
-bool ha_caching_allowed(THD* thd, char* table_key,
-uint key_length, uint8 cache_type)
-{
-#ifdef HAVE_INNOBASE_DB
-if (cache_type == HA_CACHE_TBL_ASKTRANSACT)
-return innobase_query_caching_of_table_permitted(thd, table_key, key_length);
-#endif
-return 1;
-}
int ha_init()
{
......
@@ -507,10 +507,15 @@ public:
/* Type of table for caching query */
virtual uint8 table_cache_type() { return HA_CACHE_TBL_NONTRANSACT; }
-/*
-Is query with this table cachable (have sense only for ASKTRANSACT
-tables)
-*/
/* ask handler about permission to cache table during query registration */
virtual my_bool cached_table_registration(THD *thd, char *table_key,
uint key_length,
qc_engine_callback *engine_callback,
ulonglong *engine_data)
{
*engine_callback= 0;
return 1;
}
};
/* Some extern variables used with handlers */
@@ -529,8 +534,6 @@ extern TYPELIB tx_isolation_typelib;
T != DB_TYPE_BERKELEY_DB && \
T != DB_TYPE_NDBCLUSTER)
-bool ha_caching_allowed(THD* thd, char* table_key,
-uint key_length, uint8 cache_type);
enum db_type ha_resolve_by_name(const char *name, uint namelen);
const char *ha_get_storage_engine(enum db_type db_type);
handler *get_new_handler(TABLE *table, enum db_type db_type);
......
@@ -340,6 +340,9 @@ inline THD *_current_thd(void)
}
#define current_thd _current_thd()
typedef my_bool (*qc_engine_callback)(THD *thd, char *table_key,
uint key_length,
ulonglong *engine_data);
#include "sql_string.h"
#include "sql_list.h"
#include "sql_map.h"
......
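The qc_engine_callback typedef above, together with the new handler::cached_table_registration() default in handler.h, defines a two-phase contract: when a result is stored, the query cache asks the handler for permission and receives an optional callback plus an opaque engine_data value; on every later hit the stored callback is consulted again. A self-contained sketch of that shape, using stand-in types (a dummy THD, plain bool) instead of the real server classes, so it is illustrative only:

#include <iostream>

struct THD {};                          // stand-in for the real THD class

typedef bool (*qc_engine_callback)(THD *thd, char *table_key,
                                   unsigned key_length,
                                   unsigned long long *engine_data);

// Hit path: the engine decides whether the cached result may be reused.
static bool engine_cache_check(THD*, char*, unsigned,
                               unsigned long long *engine_data)
{
  return *engine_data == 0;             // e.g. "no commits since registration"
}

// Store path: the engine grants caching and hands back callback + data.
static bool cached_table_registration(THD*, char*, unsigned,
                                      qc_engine_callback *engine_callback,
                                      unsigned long long *engine_data)
{
  *engine_callback= engine_cache_check;
  *engine_data= 0;
  return true;
}

int main()
{
  THD thd;
  char key[]= "test\0t1";               // db name, '\0', table name
  qc_engine_callback cb= 0;
  unsigned long long data= 0;
  // Store path: the cache asks the handler for permission and a callback.
  if (cached_table_registration(&thd, key, sizeof(key), &cb, &data))
    // Hit path: the cache later re-checks through the stored callback.
    std::cout << "hit allowed: " << cb(&thd, key, sizeof(key), &data) << '\n';
}

Engines that never need to revalidate simply leave *engine_callback at 0, which is exactly what the handler.h default added in this commit does.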
@@ -911,12 +911,12 @@ end:
int
Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length)
{
ulonglong engine_data;
Query_cache_query *query;
Query_cache_block *first_result_block, *result_block;
Query_cache_block_table *block_table, *block_table_end;
ulong tot_length;
Query_cache_query_flags flags;
-bool check_tables;
DBUG_ENTER("Query_cache::send_result_to_client");
if (query_cache_size == 0 || thd->variables.query_cache_type == 0)
@@ -1017,7 +1017,6 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length)
goto err_unlock;
}
-check_tables= query->tables_type() & HA_CACHE_TBL_ASKTRANSACT;
// Check access;
block_table= query_block->table(0);
block_table_end= block_table+query_block->n_tables;
@@ -1078,19 +1077,30 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length)
goto err_unlock; // Parse query
}
#endif /*!NO_EMBEDDED_ACCESS_CHECKS*/
-if (check_tables && !ha_caching_allowed(thd, table->db(),
-table->key_length(),
-table->type()))
engine_data= table->engine_data();
if (table->callback() &&
!(*table->callback())(thd, table->db(),
table->key_length(),
&engine_data))
{
DBUG_PRINT("qcache", ("Handler does not allow caching for %s.%s",
table_list.db, table_list.alias));
BLOCK_UNLOCK_RD(query_block);
-thd->lex->safe_to_cache_query= 0; // Don't try to cache this
if (engine_data != table->engine_data())
{
DBUG_PRINT("qcache",
("Handler require invalidation queries of %s.%s %lld-%lld",
table_list.db, table_list.alias,
engine_data, table->engine_data()));
invalidate_table(table->db(), table->key_length());
}
else
thd->lex->safe_to_cache_query= 0; // Don't try to cache this
goto err_unlock; // Parse query
}
else
-DBUG_PRINT("qcache", ("handler allow caching (%d) %s,%s",
-check_tables, table_list.db, table_list.alias));
DBUG_PRINT("qcache", ("handler allow caching %s,%s",
table_list.db, table_list.alias));
}
move_to_query_list_end(query_block);
hits++;
@@ -2115,7 +2125,9 @@ my_bool Query_cache::register_all_tables(Query_cache_block *block,
if (!insert_table(tables_used->table->key_length,
tables_used->table->table_cache_key, block_table,
tables_used->db_length,
-tables_used->table->file->table_cache_type()))
tables_used->table->file->table_cache_type(),
tables_used->callback_func,
tables_used->engine_data))
break;
if (tables_used->table->db_type == DB_TYPE_MRG_MYISAM)
@@ -2131,9 +2143,13 @@ my_bool Query_cache::register_all_tables(Query_cache_block *block,
uint key_length= filename_2_table_key(key, table->table->filename,
&db_length);
(++block_table)->n= ++n;
/*
There is no callback function for MyISAM, and no engine data
*/
if (!insert_table(key_length, key, block_table,
db_length,
-tables_used->table->file->table_cache_type()))
tables_used->table->file->table_cache_type(),
0, 0))
goto err;
}
}
@@ -2160,7 +2176,9 @@ err:
my_bool
Query_cache::insert_table(uint key_len, char *key,
Query_cache_block_table *node,
-uint32 db_length, uint8 cache_type)
uint32 db_length, uint8 cache_type,
qc_engine_callback callback,
ulonglong engine_data)
{
DBUG_ENTER("Query_cache::insert_table");
DBUG_PRINT("qcache", ("insert table node 0x%lx, len %d",
@@ -2170,6 +2188,23 @@ Query_cache::insert_table(uint key_len, char *key,
hash_search(&tables, (byte*) key,
key_len));
if (table_block &&
table_block->table()->engine_data() != engine_data)
{
DBUG_PRINT("qcache",
("Handler require invalidation queries of %s.%s %lld-%lld",
table_block->table()->db(),
table_block->table()->table(),
engine_data,
table_block->table()->engine_data()));
/*
as far as we delete all queries with this table, table block will be
deleted, too
*/
invalidate_table(table_block);
table_block= 0;
}
if (table_block == 0)
{
DBUG_PRINT("qcache", ("new table block from 0x%lx (%u)",
@@ -2200,6 +2235,8 @@ Query_cache::insert_table(uint key_len, char *key,
header->table(db + db_length + 1);
header->key_length(key_len);
header->type(cache_type);
header->callback(callback);
header->engine_data(engine_data);
}
Query_cache_block_table *list_root = table_block->table(0);
@@ -2720,9 +2757,11 @@ my_bool Query_cache::ask_handler_allowance(THD *thd,
for (; tables_used; tables_used= tables_used->next)
{
TABLE *table= tables_used->table;
-if (!ha_caching_allowed(thd, table->table_cache_key,
-table->key_length,
-table->file->table_cache_type()))
handler *handler= table->file;
if (!handler->cached_table_registration(thd, table->table_cache_key,
table->key_length,
&tables_used->callback_func,
&tables_used->engine_data))
{
DBUG_PRINT("qcache", ("Handler does not allow caching for %s.%s",
tables_used->db, tables_used->alias));
......
@@ -145,6 +145,10 @@ struct Query_cache_table
char *tbl;
uint32 key_len;
uint8 table_type;
/* unique for every engine reference */
qc_engine_callback callback_func;
/* data needed by some engines */
ulonglong engine_data_buff;
inline char *db() { return (char *) data(); }
inline char *table() { return tbl; }
@@ -153,6 +157,10 @@ struct Query_cache_table
inline void key_length(uint32 len) { key_len= len; }
inline uint8 type() { return table_type; }
inline void type(uint8 t) { table_type= t; }
inline qc_engine_callback callback() { return callback_func; }
inline void callback(qc_engine_callback fn){ callback_func= fn; }
inline ulonglong engine_data() { return engine_data_buff; }
inline void engine_data(ulonglong data) { engine_data_buff= data; }
inline gptr data()
{
return (gptr)(((byte*)this)+
@@ -281,7 +289,9 @@ protected:
TABLE_COUNTER_TYPE tables);
my_bool insert_table(uint key_len, char *key,
Query_cache_block_table *node,
-uint32 db_length, uint8 cache_type);
uint32 db_length, uint8 cache_type,
qc_engine_callback callback,
ulonglong engine_data);
void unlink_table(Query_cache_block_table *node);
Query_cache_block *get_free_block (ulong len, my_bool not_less,
ulong min);
......
@@ -213,6 +213,10 @@ typedef struct st_table_list
TABLE *table; /* opened table */
st_table_list *table_list; /* pointer to node of list of all tables */
class st_select_lex_unit *derived; /* SELECT_LEX_UNIT of derived table */
/* data needed by some engines in the query cache */
ulonglong engine_data;
/* callback function for asking the handler about caching in the query cache */
qc_engine_callback callback_func;
GRANT_INFO grant;
thr_lock_type lock_type;
uint outer_join; /* Which join type */
......