Commit 893631a8 authored by Olivier Bertrand's avatar Olivier Bertrand

All the last changes made in the ob-10.0 branch including also changes of line endings of some test files
parent 83ca074c
......@@ -245,6 +245,11 @@ storage/mroonga/vendor/groonga/src/suggest/groonga-suggest-create-dataset
*.ko
*.obj
*.elf
*.exp
*.manifest
*.dep
*.idb
*.res
# Precompiled Headers
*.gch
......@@ -293,7 +298,6 @@ storage/mroonga/vendor/groonga/src/suggest/groonga-suggest-create-dataset
[Dd]ebugPublic/
[Rr]elease/
[Rr]eleases/
[Rr]eleaseWithDebInfo/
x64/
x86/
build/
......
# Set the default behavior, in case people don't have core.autocrlf set.
* text=auto
# Explicitly declare text files you want to always be normalized and converted
# to native line endings on checkout.
*.c text
*.cc text
*.cpp text
*.h text
*.test text
# Declare files that will always have LF line endings on checkout.
*.result text eol=lf
mysql-test/connect/std_data/*.txt text eol=lf
mysql-test/connect/std_data/*.dat text eol=lf
# Denote all files that are truly binary and should not be modified.
*.png binary
*.jpg binary
*.c diff=cpp
*.h diff=cpp
*.cc diff=cpp
*.ic diff=cpp
*.cpp diff=cpp
# Edited by Olivier Bertrand
*-t
*.a
*.ctest
*.o
*.reject
*.so
*.so.*
*.spec
*~
*.bak
*.log
*.cmake
*.tgz
*.msg
.*.swp
*.ninja
.ninja_*
.gdb_history
CMakeFiles/
connect.dir/
connect.dir-Copie/
Debug/
MinSizeRel/
Release/
RelWithDebInfo/
# C and C++
# Compiled Object files
*.slo
*.lo
*.o
*.ko
*.obj
*.elf
*.exp
*.manifest
*.dep
*.idb
*.res
# Precompiled Headers
*.gch
*.pch
# Compiled Static libraries
*.lib
*.a
*.la
*.lai
*.lo
# Compiled Dynamic libraries
*.so
*.so.*
*.dylib
*.dll
# Executables
*.exe
*.out
*.app
*.i*86
*.x86_64
*.hex
## Ignore Visual Studio temporary files, build results, and
## files generated by popular Visual Studio add-ons.
# User-specific files
*.suo
*.user
*.userosscache
*.sln.docstates
*.ncb
*.sln
*.vcproj
*.vcproj.*
*.vcproj.*.*
*.vcproj.*.*.*
# Build results
[Dd]ebug/
[Dd]ebugPublic/
[Rr]elease/
[Rr]eleases/
x64/
x86/
build/
bld/
[Bb]in/
[Oo]bj/
# Roslyn cache directories
*.ide/
# MSTest test Results
[Tt]est[Rr]esult*/
[Bb]uild[Ll]og.*
#NUNIT
*.VisualState.xml
TestResult.xml
# Build Results of an ATL Project
[Dd]ebugPS/
[Rr]eleasePS/
dlldata.c
*_i.c
*_p.c
*_i.h
*.ilk
*.meta
*.obj
*.pch
*.pdb
*.pgc
*.pgd
*.rsp
*.sbr
*.tlb
*.tli
*.tlh
*.tmp
*.tmp_proj
*.log
*.vspscc
*.vssscc
.builds
*.pidb
*.svclog
*.scc
# Chutzpah Test files
_Chutzpah*
# Visual C++ cache files
ipch/
*.aps
*.ncb
*.opensdf
*.sdf
*.cachefile
# Visual Studio profiler
*.psess
*.vsp
*.vspx
# TFS 2012 Local Workspace
$tf/
# Guidance Automation Toolkit
*.gpState
# ReSharper is a .NET coding add-in
_ReSharper*/
*.[Rr]e[Ss]harper
*.DotSettings.user
# JustCode is a .NET coding addin-in
.JustCode
# TeamCity is a build add-in
_TeamCity*
# DotCover is a Code Coverage Tool
*.dotCover
# NCrunch
_NCrunch_*
.*crunch*.local.xml
# MightyMoose
*.mm.*
AutoTest.Net/
# Web workbench (sass)
.sass-cache/
# Installshield output folder
[Ee]xpress/
# DocProject is a documentation generator add-in
DocProject/buildhelp/
DocProject/Help/*.HxT
DocProject/Help/*.HxC
DocProject/Help/*.hhc
DocProject/Help/*.hhk
DocProject/Help/*.hhp
DocProject/Help/Html2
DocProject/Help/html
# Click-Once directory
publish/
# Publish Web Output
*.[Pp]ublish.xml
*.azurePubxml
# TODO: Comment the next line if you want to checkin your web deploy settings
# but database connection strings (with potential passwords) will be unencrypted
*.pubxml
*.publishproj
# NuGet Packages
*.nupkg
# The packages folder can be ignored because of Package Restore
**/packages/*
# except build/, which is used as an MSBuild target.
!**/packages/build/
# If using the old MSBuild-Integrated Package Restore, uncomment this:
#!**/packages/repositories.config
# Windows Azure Build Output
csx/
*.build.csdef
# Windows Store app package directory
AppPackages/
# Others
# sql/
*.Cache
ClientBin/
[Ss]tyle[Cc]op.*
~$*
*~
*.dbmdl
*.dbproj.schemaview
*.pfx
*.publishsettings
node_modules/
# RIA/Silverlight projects
Generated_Code/
# Backup & report files from converting an old project file
# to a newer Visual Studio version. Backup files are not needed,
# because we have git ;-)
_UpgradeReport_Files/
Backup*/
UpgradeLog*.XML
UpgradeLog*.htm
# SQL Server files
*.mdf
*.ldf
# Business Intelligence projects
*.rdl.data
*.bim.layout
*.bim_*.settings
# Microsoft Fakes
FakesAssemblies/
......@@ -74,16 +74,28 @@ typedef struct _dbfheader {
//uchar Dbfox :4; /* FoxPro if equal to 3 */
uchar Version; /* Version information flags */
char Filedate[3]; /* date, YYMMDD, binary. YY=year-1900 */
uint Records; /* records in the file */
ushort Headlen; /* bytes in the header */
ushort Reclen; /* bytes in a record */
ushort Fields; /* Reserved but used to store fields */
private:
/* The following four members are stored in little-endian format on disk */
char m_RecordsBuf[4]; /* records in the file */
char m_HeadlenBuf[2]; /* bytes in the header */
char m_ReclenBuf[2]; /* bytes in a record */
char m_FieldsBuf[2]; /* Reserved but used to store fields */
public:
char Incompleteflag; /* 01 if incomplete, else 00 */
char Encryptflag; /* 01 if encrypted, else 00 */
char Reserved2[12]; /* for LAN use */
char Mdxflag; /* 01 if production .mdx, else 00 */
char Language; /* Codepage */
char Reserved3[2];
uint Records(void) const {return uint4korr(m_RecordsBuf);}
ushort Headlen(void) const {return uint2korr(m_HeadlenBuf);}
ushort Reclen(void) const {return uint2korr(m_ReclenBuf);}
ushort Fields(void) const {return uint2korr(m_FieldsBuf);}
void SetHeadlen(ushort num) {int2store(m_HeadlenBuf, num);}
void SetReclen(ushort num) {int2store(m_ReclenBuf, num);}
void SetFields(ushort num) {int2store(m_FieldsBuf, num);}
} DBFHEADER;
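Aside (not part of the patch): the uint2korr/uint4korr and int2store macros used by these accessors read and write the on-disk integers byte by byte in little-endian order, so the DBF header is interpreted the same way on big- and little-endian hosts. A minimal sketch of equivalent helpers, with hypothetical names, assuming plain char buffers:

#include <cstdint>

// Hypothetical stand-ins for uint2korr/int2store: access a 16-bit value
// stored in little-endian byte order, regardless of host endianness.
static inline uint16_t le16_read(const char *p) {
  const unsigned char *b = reinterpret_cast<const unsigned char *>(p);
  return static_cast<uint16_t>(b[0] | (b[1] << 8));
}

static inline void le16_write(char *p, uint16_t v) {
  unsigned char *b = reinterpret_cast<unsigned char *>(p);
  b[0] = static_cast<unsigned char>(v & 0xFF);
  b[1] = static_cast<unsigned char>(v >> 8);
}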
/****************************************************************************/
......@@ -115,7 +127,6 @@ typedef struct _descriptor {
/* Side effects: */
/* Moves file pointer to byte 32; fills buffer at buf with */
/* first 32 bytes of file. */
/* Converts numeric values to platform byte ordering (LE in file) */
/****************************************************************************/
static int dbfhead(PGLOBAL g, FILE *file, PSZ fn, DBFHEADER *buf)
{
......@@ -143,13 +154,8 @@ static int dbfhead(PGLOBAL g, FILE *file, PSZ fn, DBFHEADER *buf)
} else
strcpy(g->Message, MSG(DBASE_FILE));
// Convert numeric fields to have them in platform byte ordering
buf->Records = uint4korr(&buf->Records);
buf->Headlen = uint2korr(&buf->Headlen);
buf->Reclen = uint2korr(&buf->Reclen);
// Check last byte(s) of header
if (fseek(file, buf->Headlen - dbc, SEEK_SET) != 0) {
if (fseek(file, buf->Headlen() - dbc, SEEK_SET) != 0) {
sprintf(g->Message, MSG(BAD_HEADER), fn);
return RC_FX;
} // endif fseek
......@@ -169,7 +175,7 @@ static int dbfhead(PGLOBAL g, FILE *file, PSZ fn, DBFHEADER *buf)
} // endif endmark
// Calculate here the number of fields while we have the dbc info
buf->Fields = (buf->Headlen - dbc - 1) / 32;
buf->SetFields((buf->Headlen() - dbc - 1) / 32);
fseek(file, HEADLEN, SEEK_SET);
return rc;
} // end of dbfhead
......@@ -225,7 +231,7 @@ PQRYRES DBFColumns(PGLOBAL g, char *dp, const char *fn, bool info)
/************************************************************************/
/* Allocate the structures used to refer to the result set. */
/************************************************************************/
fields = mainhead.Fields;
fields = mainhead.Fields();
} else
fields = 0;
......@@ -242,11 +248,11 @@ PQRYRES DBFColumns(PGLOBAL g, char *dp, const char *fn, bool info)
if (trace) {
htrc("Structure of %s\n", filename);
htrc("headlen=%hd reclen=%hd degree=%d\n",
mainhead.Headlen, mainhead.Reclen, fields);
mainhead.Headlen(), mainhead.Reclen(), fields);
htrc("flags(iem)=%d,%d,%d cp=%d\n", mainhead.Incompleteflag,
mainhead.Encryptflag, mainhead.Mdxflag, mainhead.Language);
htrc("%hd records, last changed %02d/%02d/%d\n",
mainhead.Records, mainhead.Filedate[1], mainhead.Filedate[2],
mainhead.Records(), mainhead.Filedate[1], mainhead.Filedate[2],
mainhead.Filedate[0] + (mainhead.Filedate[0] <= 30) ? 2000 : 1900);
htrc("Field Type Offset Len Dec Set Mdx\n");
} // endif trace
......@@ -404,13 +410,13 @@ int DBFBASE::ScanHeader(PGLOBAL g, PSZ fname, int lrecl, char *defpath)
} else if (rc == RC_FX)
return -1;
if ((int)header.Reclen != lrecl) {
sprintf(g->Message, MSG(BAD_LRECL), lrecl, header.Reclen);
if ((int)header.Reclen() != lrecl) {
sprintf(g->Message, MSG(BAD_LRECL), lrecl, header.Reclen());
return -1;
} // endif Lrecl
Records = (int)header.Records;
return (int)header.Headlen;
Records = (int)header.Records();
return (int)header.Headlen();
} // end of ScanHeader
/* ---------------------------- Class DBFFAM ------------------------------ */
......@@ -571,8 +577,8 @@ bool DBFFAM::AllocateBuffer(PGLOBAL g)
header->Filedate[0] = datm->tm_year - 100;
header->Filedate[1] = datm->tm_mon + 1;
header->Filedate[2] = datm->tm_mday;
int2store(&header->Headlen, hlen);
int2store(&header->Reclen, reclen);
header->SetHeadlen((ushort)hlen);
header->SetReclen((ushort)reclen);
descp = (DESCRIPTOR*)header;
// Currently only standard Xbase types are supported
......@@ -633,13 +639,13 @@ bool DBFFAM::AllocateBuffer(PGLOBAL g)
DBFHEADER header;
if ((rc = dbfhead(g, Stream, Tdbp->GetFile(g), &header)) == RC_OK) {
if (Lrecl != (int)header.Reclen) {
sprintf(g->Message, MSG(BAD_LRECL), Lrecl, header.Reclen);
if (Lrecl != (int)header.Reclen()) {
sprintf(g->Message, MSG(BAD_LRECL), Lrecl, header.Reclen());
return true;
} // endif Lrecl
Records = (int)header.Records;
Headlen = (int)header.Headlen;
Records = (int)header.Records();
Headlen = (int)header.Headlen();
} else if (rc == RC_NF) {
Records = 0;
Headlen = 0;
......@@ -735,7 +741,7 @@ bool DBFFAM::CopyHeader(PGLOBAL g)
if (Headlen) {
void *hdr = PlugSubAlloc(g, NULL, Headlen);
size_t n, hlen = (size_t)Headlen;
int pos = ftell(Stream);
int pos = ftell(Stream);
if (fseek(Stream, 0, SEEK_SET))
strcpy(g->Message, "Seek error in CopyHeader");
......@@ -869,12 +875,14 @@ void DBFFAM::CloseTableFile(PGLOBAL g, bool abort)
if (n > Records) {
// Update the number of rows in the file header
char filename[_MAX_PATH], nRecords[4];
char filename[_MAX_PATH];
int4store(&nRecords, n);
PlugSetPath(filename, To_File, Tdbp->GetPath());
if ((Stream= global_fopen(g, MSGID_OPEN_MODE_STRERROR, filename, "r+b")))
{
char nRecords[4];
int4store(nRecords, n);
fseek(Stream, 4, SEEK_SET); // Get header.Records position
fwrite(nRecords, sizeof(nRecords), 1, Stream);
fclose(Stream);
......@@ -951,13 +959,13 @@ bool DBMFAM::AllocateBuffer(PGLOBAL g)
/************************************************************************/
DBFHEADER *hp = (DBFHEADER*)Memory;
if (Lrecl != (int)uint2korr(&hp->Reclen)) {
sprintf(g->Message, MSG(BAD_LRECL), Lrecl, uint2korr(&hp->Reclen));
if (Lrecl != (int)hp->Reclen()) {
sprintf(g->Message, MSG(BAD_LRECL), Lrecl, hp->Reclen());
return true;
} // endif Lrecl
Records = (int)uint4korr(&hp->Records);
Headlen = (int)uint2korr(&hp->Headlen);
Records = (int)hp->Records();
Headlen = (int)hp->Headlen();
} // endif Headlen
/**************************************************************************/
......@@ -1011,7 +1019,7 @@ int DBMFAM::ReadBuffer(PGLOBAL g)
/* Data Base delete line routine for DBF access methods. */
/* Deleted lines are just flagged in the first buffer character. */
/****************************************************************************/
int DBMFAM::DeleteRecords(PGLOBAL, int irc)
int DBMFAM::DeleteRecords(PGLOBAL g, int irc)
{
if (irc == RC_OK)
*Fpos = '*';
......
......@@ -46,6 +46,7 @@
#include "plgdbsem.h"
#include "filamfix.h"
#include "tabdos.h"
#include "tabfix.h"
#include "osutil.h"
#ifndef INVALID_SET_FILE_POINTER
......@@ -133,18 +134,35 @@ bool FIXFAM::AllocateBuffer(PGLOBAL g)
if (Tdbp->GetFtype() == RECFM_BIN) {
// The buffer must be prepared depending on column types
int n = 0;
bool b = false;
PDOSDEF defp = (PDOSDEF)Tdbp->GetDef();
PCOLDEF cdp;
// PCOLDEF cdp;
PBINCOL colp;
// Prepare the first line of the buffer
memset(To_Buf, 0, Buflen);
#if 0
for (cdp = defp->GetCols(); cdp; cdp = cdp->GetNext()) {
if (IsTypeNum(cdp->GetType()))
memset(To_Buf + cdp->GetOffset(), ' ', cdp->GetClen());
if (!IsTypeNum(cdp->GetType())) {
memset(To_Buf + cdp->GetOffset(), ' ', cdp->GetClen());
b = true;
} // endif not num
n = MY_MAX(n, cdp->GetPoff() + cdp->GetClen());
n = MY_MAX(n, cdp->GetOffset() + cdp->GetClen());
} // endfor cdp
#endif // 0
for (colp = (PBINCOL)Tdbp->GetColumns(); colp;
colp = (PBINCOL)colp->GetNext())
if (!colp->IsSpecial()) {
if (!IsTypeNum(colp->GetResultType())) {
memset(To_Buf + colp->GetDeplac(), ' ', colp->GetLength());
b = true;
} // endif not num
n = MY_MAX(n, colp->GetDeplac() + colp->GetFileSize());
} // endif !special
// We do this for binary table because the lrecl can have been
// specified with additional space to include line ending.
......@@ -156,9 +174,10 @@ bool FIXFAM::AllocateBuffer(PGLOBAL g)
} // endif n
// Now repeat this for the whole buffer
for (int len = Lrecl; len <= Buflen - Lrecl; len += Lrecl)
memcpy(To_Buf + len, To_Buf, Lrecl);
if (b)
// Now repeat this for the whole buffer
for (int len = Lrecl; len <= Buflen - Lrecl; len += Lrecl)
memcpy(To_Buf + len, To_Buf, Lrecl);
} else {
memset(To_Buf, ' ', Buflen);
......
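Aside (illustrative only): the block above builds a single template record in the first Lrecl bytes of the buffer — spaces for non-numeric columns, zero bytes elsewhere — and, only when something was actually blanked (the b flag), replicates that template into every record slot of the block buffer. A minimal sketch of the fill-then-replicate pattern, using hypothetical names:

#include <cstring>

// Hypothetical sketch: copy the template record in buf[0..reclen) into
// every remaining record slot of a block buffer of buflen bytes.
static void replicate_template(char *buf, int reclen, int buflen) {
  for (int len = reclen; len + reclen <= buflen; len += reclen)
    std::memcpy(buf + len, buf, reclen);
}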
......@@ -118,7 +118,7 @@ extern "C" {
/* Static variables */
/***********************************************************************/
#if defined(STORAGE)
char sys_stamp[4] = SYS_STAMP;
char sys_stamp[5] = SYS_STAMP;
#else
extern char sys_stamp[];
#endif
......
......@@ -153,6 +153,7 @@
#endif // LIBXML2_SUPPORT
#include "taboccur.h"
#include "tabpivot.h"
#include "tabfix.h"
#define my_strupr(p) my_caseup_str(default_charset_info, (p));
#define my_strlwr(p) my_casedn_str(default_charset_info, (p));
......@@ -656,6 +657,7 @@ static int connect_init_func(void *p)
sql_print_information("connect_init: hton=%p", p);
DTVAL::SetTimeShift(); // Initialize time zone shift once for all
BINCOL::SetEndian(); // Initialize host endian setting
DBUG_RETURN(0);
} // end of connect_init_func
......
......@@ -410,7 +410,7 @@ void Json_Array_Add_deinit(UDF_INIT* initid)
} // end of Json_Array_Add_deinit
/***********************************************************************/
/* Add values to a Json array. */
/* Delete a value from a Json array. */
/***********************************************************************/
my_bool Json_Array_Delete_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
{
......@@ -451,7 +451,7 @@ char *Json_Array_Delete(UDF_INIT *initid, UDF_ARGS *args, char *result,
} else {
n = *(int*)args->args[1];
arp = jvp->GetArray();
arp->DeleteValue(n - 1);
arp->DeleteValue(n);
arp->InitArray(g);
if (!(str = Serialize(g, arp, NULL, 0))) {
......
......@@ -99,6 +99,26 @@ extern "C" HINSTANCE s_hModule; // Saved module handle
PQRYRES OEMColumns(PGLOBAL g, PTOS topt, char *tab, char *db, bool info);
/***********************************************************************/
/* Get the plugin directory. */
/***********************************************************************/
char *GetPluginDir(void)
{
char *plugin_dir;
#if defined(_WIN64)
plugin_dir = (char *)GetProcAddress(GetModuleHandle(NULL),
"?opt_plugin_dir@@3PADEA");
#elif defined(_WIN32)
plugin_dir = (char*)GetProcAddress(GetModuleHandle(NULL),
"?opt_plugin_dir@@3PADA");
#else
plugin_dir = opt_plugin_dir;
#endif
return plugin_dir;
} // end of GetPluginDir
/***********************************************************************/
/* Get a unique enum table type ID. */
/***********************************************************************/
......@@ -328,7 +348,7 @@ PQRYRES OEMColumns(PGLOBAL g, PTOS topt, char *tab, char *db, bool info)
{
typedef PQRYRES (__stdcall *XCOLDEF) (PGLOBAL, void*, char*, char*, bool);
const char *module, *subtype;
char c, getname[40] = "Col";
char c, soname[_MAX_PATH], getname[40] = "Col";
#if defined(WIN32)
HANDLE hdll; /* Handle to the external DLL */
#else // !WIN32
......@@ -343,6 +363,17 @@ PQRYRES OEMColumns(PGLOBAL g, PTOS topt, char *tab, char *db, bool info)
if (!module || !subtype)
return NULL;
/*********************************************************************/
/* Ensure that the .dll doesn't have a path. */
/* This is done to ensure that only approved dll from the system */
/* directories are used (to make this even remotely secure). */
/*********************************************************************/
if (check_valid_path(module, strlen(module))) {
strcpy(g->Message, "Module cannot contain a path");
return NULL;
} else
PlugSetPath(soname, module, GetPluginDir());
// The exported name is always in uppercase
for (int i = 0; ; i++) {
c = subtype[i];
......@@ -352,11 +383,11 @@ PQRYRES OEMColumns(PGLOBAL g, PTOS topt, char *tab, char *db, bool info)
#if defined(WIN32)
// Load the Dll implementing the table
if (!(hdll = LoadLibrary(module))) {
if (!(hdll = LoadLibrary(soname))) {
char buf[256];
DWORD rc = GetLastError();
sprintf(g->Message, MSG(DLL_LOAD_ERROR), rc, module);
sprintf(g->Message, MSG(DLL_LOAD_ERROR), rc, soname);
FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM |
FORMAT_MESSAGE_IGNORE_INSERTS, NULL, rc, 0,
(LPTSTR)buf, sizeof(buf), NULL);
......@@ -374,9 +405,9 @@ PQRYRES OEMColumns(PGLOBAL g, PTOS topt, char *tab, char *db, bool info)
const char *error = NULL;
// Load the desired shared library
if (!(hdll = dlopen(module, RTLD_LAZY))) {
if (!(hdll = dlopen(soname, RTLD_LAZY))) {
error = dlerror();
sprintf(g->Message, MSG(SHARED_LIB_ERR), module, SVP(error));
sprintf(g->Message, MSG(SHARED_LIB_ERR), soname, SVP(error));
return NULL;
} // endif Hdll
......
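Aside (illustrative only): the intent of the change above is that an OEM module name may no longer carry a path; the name is resolved against the server's plugin directory instead, so only libraries installed under plugin_dir can be loaded. A rough sketch of that idea with hypothetical helper names:

#include <cstdio>
#include <cstring>

// Hypothetical sketch: reject module names containing a path component,
// then build the shared-object name under the plugin directory.
static bool resolve_module(const char *module, const char *plugin_dir,
                           char *soname, size_t len) {
  if (std::strchr(module, '/') || std::strchr(module, '\\'))
    return false;                      // module must be a bare file name
  std::snprintf(soname, len, "%s/%s", plugin_dir, module);
  return true;
}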
# Use default setting for mysqld processes
!include include/default_mysqld.cnf
!include include/default_client.cnf
[mysqld.1]
#log-bin= master-bin
[mysqld.2]
[ENV]
MASTER_MYPORT= @mysqld.1.port
MASTER_MYSOCK= @mysqld.1.socket
SLAVE_MYPORT= @mysqld.2.port
SLAVE_MYSOCK= @mysqld.2.socket
PGCLIENTENCODING= UTF8
# Use default setting for mysqld processes
!include include/default_mysqld.cnf
!include include/default_client.cnf
[mysqld.1]
#log-bin= master-bin
[mysqld.2]
[ENV]
MASTER_MYPORT= @mysqld.1.port
MASTER_MYSOCK= @mysqld.1.socket
SLAVE_MYPORT= @mysqld.2.port
SLAVE_MYSOCK= @mysqld.2.socket
PGCLIENTENCODING= UTF8
......@@ -14,11 +14,11 @@ SET time_zone='+00:00';
CREATE TABLE t1
(
fig INT(4) NOT NULL FIELD_FORMAT='C',
name CHAR(10) not null,
birth DATE NOT NULL,
id CHAR(5) NOT NULL FIELD_FORMAT='S',
name CHAR(10) NOT NULL,
birth DATE NOT NULL FIELD_FORMAT='L',
id CHAR(5) NOT NULL FIELD_FORMAT='L2',
salary DOUBLE(9,2) NOT NULL DEFAULT 0.00 FIELD_FORMAT='F',
dept INT(4) NOT NULL FIELD_FORMAT='S'
dept INT(4) NOT NULL FIELD_FORMAT='L2'
) ENGINE=CONNECT TABLE_TYPE=BIN BLOCK_SIZE=5 FILE_NAME='Testbal.dat';
SELECT * FROM t1;
fig name birth id salary dept
......@@ -41,11 +41,11 @@ DROP TABLE t1;
CREATE TABLE t1
(
fig INT(4) NOT NULL FIELD_FORMAT='C',
name CHAR(10) not null,
birth DATE NOT NULL,
id CHAR(5) NOT NULL FIELD_FORMAT='S',
name CHAR(10) NOT NULL,
birth DATE NOT NULL FIELD_FORMAT='L',
id CHAR(5) NOT NULL FIELD_FORMAT='L2',
salary DOUBLE(9,2) NOT NULL DEFAULT 0.00 FIELD_FORMAT='F',
dept INT(4) NOT NULL FIELD_FORMAT='S'
dept INT(4) NOT NULL FIELD_FORMAT='L2'
) ENGINE=CONNECT TABLE_TYPE=BIN READONLY=Yes FILE_NAME='Testbal.dat';
INSERT INTO t1 VALUES (7777,'BILL','1973-06-30',4444,5555.555,777);
ERROR HY000: Table 't1' is read only
......@@ -55,10 +55,10 @@ Table Create Table
t1 CREATE TABLE `t1` (
`fig` int(4) NOT NULL `FIELD_FORMAT`='C',
`name` char(10) NOT NULL,
`birth` date NOT NULL,
`id` char(5) NOT NULL `FIELD_FORMAT`='S',
`birth` date NOT NULL `FIELD_FORMAT`='L',
`id` char(5) NOT NULL `FIELD_FORMAT`='L2',
`salary` double(9,2) NOT NULL DEFAULT '0.00' `FIELD_FORMAT`='F',
`dept` int(4) NOT NULL `FIELD_FORMAT`='S'
`dept` int(4) NOT NULL `FIELD_FORMAT`='L2'
) ENGINE=CONNECT DEFAULT CHARSET=latin1 `TABLE_TYPE`=BIN `FILE_NAME`='Testbal.dat' `READONLY`=NO
INSERT INTO t1 VALUES (7777,'BILL','1973-06-30',4444,5555.555,777);
SELECT * FROM t1;
......@@ -74,10 +74,10 @@ Table Create Table
t1 CREATE TABLE `t1` (
`fig` int(4) NOT NULL `FIELD_FORMAT`='C',
`name` char(10) NOT NULL,
`birth` date NOT NULL,
`id` char(5) NOT NULL `FIELD_FORMAT`='S',
`birth` date NOT NULL `FIELD_FORMAT`='L',
`id` char(5) NOT NULL `FIELD_FORMAT`='L2',
`salary` double(9,2) NOT NULL DEFAULT '0.00' `FIELD_FORMAT`='F',
`dept` int(4) NOT NULL `FIELD_FORMAT`='S'
`dept` int(4) NOT NULL `FIELD_FORMAT`='L2'
) ENGINE=CONNECT DEFAULT CHARSET=latin1 `TABLE_TYPE`=BIN `FILE_NAME`='Testbal.dat' `READONLY`=YES
INSERT INTO t1 VALUES (7777,'BILL','1973-06-30',4444,5555.555,777);
ERROR HY000: Table 't1' is read only
......
......@@ -89,23 +89,23 @@ ISBN Language Subject AuthorFN AuthorLN Title Translation Translator Publisher L
UPDATE t1 SET AuthorFN = 'Philippe' WHERE AuthorLN = 'Knab';
SELECT * FROM t1 WHERE ISBN = '9782212090819';
ISBN Language Subject AuthorFN AuthorLN Title Translation Translator Publisher Location Year
9782212090819 fr applications Philippe Bernadac Construire une application XML NULL NULL Eyrolles Paris 1999
9782212090819 fr applications François Knab Construire une application XML NULL NULL Eyrolles Paris 1999
9782212090819 fr applications Jean-Christophe Bernadac Construire une application XML NULL NULL Eyrolles Paris 1999
9782212090819 fr applications Philippe Knab Construire une application XML NULL NULL Eyrolles Paris 1999
#
# To add an author a new table must be created
#
CREATE TABLE t2 (
FIRSTNAME CHAR(32),
LASTNAME CHAR(32))
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json' OPTION_LIST='Object=[2]:AUTHOR';
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json' OPTION_LIST='Object=[1]:AUTHOR';
SELECT * FROM t2;
FIRSTNAME LASTNAME
William J. Pardi
INSERT INTO t2 VALUES('Charles','Dickens');
SELECT * FROM t1;
ISBN Language Subject AuthorFN AuthorLN Title Translation Translator Publisher Location Year
9782212090819 fr applications Philippe Bernadac Construire une application XML NULL NULL Eyrolles Paris 1999
9782212090819 fr applications François Knab Construire une application XML NULL NULL Eyrolles Paris 1999
9782212090819 fr applications Jean-Christophe Bernadac Construire une application XML NULL NULL Eyrolles Paris 1999
9782212090819 fr applications Philippe Knab Construire une application XML NULL NULL Eyrolles Paris 1999
9782840825685 fr applications William J. Pardi XML en Action adapté de l'anglais par James Guerin Microsoft Press Paris 1999
9782840825685 fr applications Charles Dickens XML en Action adapté de l'anglais par James Guerin Microsoft Press Paris 1999
DROP TABLE t1;
......@@ -127,11 +127,11 @@ line
"SUBJECT": "applications",
"AUTHOR": [
{
"FIRSTNAME": "Philippe",
"FIRSTNAME": "Jean-Christophe",
"LASTNAME": "Bernadac"
},
{
"FIRSTNAME": "Franois",
"FIRSTNAME": "Philippe",
"LASTNAME": "Knab"
}
],
......@@ -252,9 +252,9 @@ DROP TABLE t1;
#
CREATE TABLE t2 (
WHO CHAR(12),
WEEK INT(2) FIELD_FORMAT='WEEK:[1]:NUMBER',
WHAT CHAR(32) FIELD_FORMAT='WEEK:[1]:EXPENSE:[X]:WHAT',
AMOUNT DOUBLE(8,2) FIELD_FORMAT='WEEK:[1]:EXPENSE:[X]:AMOUNT')
WEEK INT(2) FIELD_FORMAT='WEEK:[0]:NUMBER',
WHAT CHAR(32) FIELD_FORMAT='WEEK:[0]:EXPENSE:[X]:WHAT',
AMOUNT DOUBLE(8,2) FIELD_FORMAT='WEEK:[0]:EXPENSE:[X]:AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
SELECT * FROM t2;
WHO WEEK WHAT AMOUNT
......@@ -268,9 +268,9 @@ Janet 3 Food 18.00
Janet 3 Beer 18.00
CREATE TABLE t3 (
WHO CHAR(12),
WEEK INT(2) FIELD_FORMAT='WEEK:[2]:NUMBER',
WHAT CHAR(32) FIELD_FORMAT='WEEK:[2]:EXPENSE:[X]:WHAT',
AMOUNT DOUBLE(8,2) FIELD_FORMAT='WEEK:[2]:EXPENSE:[X]:AMOUNT')
WEEK INT(2) FIELD_FORMAT='WEEK:[1]:NUMBER',
WHAT CHAR(32) FIELD_FORMAT='WEEK:[1]:EXPENSE:[X]:WHAT',
AMOUNT DOUBLE(8,2) FIELD_FORMAT='WEEK:[1]:EXPENSE:[X]:AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
SELECT * FROM t3;
WHO WEEK WHAT AMOUNT
......@@ -284,9 +284,9 @@ Beth 4 Beer 15.00
Janet 4 Car 17.00
CREATE TABLE t4 (
WHO CHAR(12),
WEEK INT(2) FIELD_FORMAT='WEEK:[3]:NUMBER',
WHAT CHAR(32) FIELD_FORMAT='WEEK:[3]:EXPENSE:[X]:WHAT',
AMOUNT DOUBLE(8,2) FIELD_FORMAT='WEEK:[3]:EXPENSE:[X]:AMOUNT')
WEEK INT(2) FIELD_FORMAT='WEEK:[2]:NUMBER',
WHAT CHAR(32) FIELD_FORMAT='WEEK:[2]:EXPENSE:[X]:WHAT',
AMOUNT DOUBLE(8,2) FIELD_FORMAT='WEEK:[2]:EXPENSE:[X]:AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
SELECT * FROM t4;
WHO WEEK WHAT AMOUNT
......
Warnings:
Warning 1105 No file name. Table will use t1.xml
SET NAMES utf8;
#
# Testing HTML like XML file
#
CREATE TABLE beers (
`Name` CHAR(16) FIELD_FORMAT='brandName',
`Origin` CHAR(16) FIELD_FORMAT='origin',
`Description` CHAR(32) FIELD_FORMAT='details')
ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='beers.xml'
TABNAME='table' OPTION_LIST='xmlsup=libxml2,rownode=tr,colnode=td';
SELECT * FROM beers;
Name Origin Description
Huntsman Bath, UK Wonderful hop, light alcohol
Tuborg Danmark In small bottles
DROP TABLE beers;
#
# Testing HTML file
#
CREATE TABLE coffee (
`Name` CHAR(16),
`Cups` INT(8),
`Type` CHAR(16),
`Sugar` CHAR(4))
ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='coffee.htm'
TABNAME='TABLE' HEADER=1 OPTION_LIST='xmlsup=libxml2,Coltype=HTML';
SELECT * FROM coffee;
Name Cups Type Sugar
T. Sexton 10 Espresso No
J. Dinnen 5 Decaf Yes
DROP TABLE coffee;
<?xml version="1.0"?>
<Beers>
<table>
<th><td>Name</td><td>Origin</td><td>Description</td></th>
<tr>
<td><brandName>Huntsman</brandName></td>
<td><origin>Bath, UK</origin></td>
<td><details>Wonderful hop, light alcohol</details></td>
</tr>
<tr>
<td><brandName>Tuborg</brandName></td>
<td><origin>Danmark</origin></td>
<td><details>In small bottles</details></td>
</tr>
</table>
</Beers>
<?xml version="1.0" encoding="UTF-8"?>
<bookstore>
<book category="COOKING">
<title lang="en">Everyday Italian</title>
<author>Giada De Laurentiis</author>
<year>2005</year>
<price>30.00</price>
</book>
<book category="CHILDREN">
<title lang="en">Harry Potter</title>
<author>J K. Rowling</author>
<year>2005</year>
<price>29.99</price>
</book>
<book category="WEB">
<title lang="en">XQuery Kick Start</title>
<author>James McGovern</author>
<author>Per Bothner</author>
<author>Kurt Cagle</author>
<author>James Linn</author>
<author>Vaidyanathan Nagarajan</author>
<year>2003</year>
<price>49.99</price>
</book>
<book category="WEB">
<title lang="en">Learning XML</title>
<author>Erik T. Ray</author>
<year>2003</year>
<price>39.95</price>
</book>
</bookstore>
<?xml version="1.0" encoding="UTF-8"?>
<bookstore>
<book category="COOKING">
<title lang="en">Everyday Italian</title>
<author>Giada De Laurentiis</author>
<year>2005</year>
<price>30.00</price>
</book>
<book category="CHILDREN">
<title lang="en">Harry Potter</title>
<author>J K. Rowling</author>
<year>2005</year>
<price>29.99</price>
</book>
<book category="WEB">
<title lang="en">XQuery Kick Start</title>
<author>James McGovern</author>
<author>Per Bothner</author>
<author>Kurt Cagle</author>
<author>James Linn</author>
<author>Vaidyanathan Nagarajan</author>
<year>2003</year>
<price>49.99</price>
</book>
<book category="WEB">
<title lang="en">Learning XML</title>
<author>Erik T. Ray</author>
<year>2003</year>
<price>39.95</price>
</book>
</bookstore>
John Boston 25/01/1986 02/06/2010
Henry Boston 07/06/1987 01/04/2008
George San Jose 10/08/1981 02/06/2010
Sam Chicago 22/11/1979 10/10/2007
James Dallas 13/05/1992 14/12/2009
Bill Boston 11/09/1986 10/02/2008
John Boston 25/01/1986 02/06/2010
Henry Boston 07/06/1987 01/04/2008
George San Jose 10/08/1981 02/06/2010
Sam Chicago 22/11/1979 10/10/2007
James Dallas 13/05/1992 14/12/2009
Bill Boston 11/09/1986 10/02/2008
<TABLE summary="This table charts the number of cups of coffe
consumed by each senator, the type of coffee (decaf
or regular), and whether taken with sugar.">
<CAPTION>Cups of coffee consumed by each senator</CAPTION>
<TR>
<TH>Name</TH>
<TH>Cups</TH>
<TH>Type of Coffee</TH>
<TH>Sugar?</TH>
</TR>
<TR>
<TD>T. Sexton</TD>
<TD>10</TD>
<TD>Espresso</TD>
<TD>No</TD>
</TR>
<TR>
<TD>J. Dinnen</TD>
<TD>5</TD>
<TD>Decaf</TD>
<TD>Yes</TD>
</TR>
</TABLE>
Joe 3Beer 18.00
Beth 4Food 17.00
Janet 5Beer 14.00
Joe 3Food 12.00
Joe 4Beer 19.00
Janet 5Car 12.00
Joe 3Food 19.00
Beth 4Beer 15.00
Janet 5Beer 19.00
Joe 3Car 20.00
Joe 4Beer 16.00
Beth 5Food 12.00
Beth 3Beer 16.00
Joe 4Food 17.00
Joe 5Beer 14.00
Janet 3Car 19.00
Joe 4Food 17.00
Beth 5Beer 20.00
Janet 3Food 18.00
Joe 4Beer 14.00
Joe 5Food 12.00
Janet 3Beer 18.00
Janet 4Car 17.00
Janet 5Food 12.00
Joe 3Beer 18.00
Beth 4Food 17.00
Janet 5Beer 14.00
Joe 3Food 12.00
Joe 4Beer 19.00
Janet 5Car 12.00
Joe 3Food 19.00
Beth 4Beer 15.00
Janet 5Beer 19.00
Joe 3Car 20.00
Joe 4Beer 16.00
Beth 5Food 12.00
Beth 3Beer 16.00
Joe 4Food 17.00
Joe 5Beer 14.00
Janet 3Car 19.00
Joe 4Food 17.00
Beth 5Beer 20.00
Janet 3Food 18.00
Joe 4Beer 14.00
Joe 5Food 12.00
Janet 3Beer 18.00
Janet 4Car 17.00
Janet 5Food 12.00
0;Inconnu
1;Masculin
2;Feminin
0;Inconnu
1;Masculin
2;Feminin
.;Inconnu
C;Celibataire
D;Divorce
L;Union libre
M;Marie
S;Separe
V;Veuf
.;Inconnu
C;Celibataire
D;Divorce
L;Union libre
M;Marie
S;Separe
V;Veuf
let $MYSQLD_DATADIR= `select @@datadir`;
--echo #
--echo # Testing indexing with ALTER on inward table (in-place)
--echo #
CREATE TABLE t1 (c INT NOT NULL, d CHAR(10) NOT NULL) ENGINE=CONNECT;
INSERT INTO t1 VALUES (1,'One'), (2,'Two'), (3,'Three');
SELECT * FROM t1;
CREATE INDEX xc ON t1(c);
DESCRIBE SELECT * FROM t1 WHERE c = 2;
DROP INDEX xc ON t1;
CREATE INDEX xd ON t1(d);
DROP INDEX xd ON t1;
ALTER TABLE t1 ADD INDEX xc (c), ADD INDEX xd (d);
SHOW INDEX FROM t1;
ALTER TABLE t1 DROP INDEX xc, DROP INDEX xd;
SHOW INDEX FROM t1;
--echo #
--echo # Testing modifying columns inward table (not in-place)
--echo #
ALTER TABLE t1 MODIFY COLUMN c CHAR(5) NOT NULL;
SHOW CREATE TABLE t1;
SELECT * FROM t1;
ALTER TABLE t1 MODIFY COLUMN c INT NOT NULL;
--echo #
--echo # Fails because indexing must be in-place
--echo #
--error ER_ALTER_OPERATION_NOT_SUPPORTED
ALTER TABLE t1 MODIFY COLUMN c CHAR(10) NOT NULL, ADD INDEX xd (d);
--echo #
--echo # Testing changing table type (not in-place)
--echo #
ALTER TABLE t1 TABLE_TYPE=CSV HEADER=1 QUOTED=1;
SELECT * FROM t1;
SHOW CREATE TABLE t1;
--echo # create an outward table used to see the t1 file
CREATE TABLE t2 (line VARCHAR(100) NOT NULL) ENGINE=CONNECT FILE_NAME='t1.csv';
SELECT * FROM t2;
--echo #
--echo # Testing changing engine
--echo #
DROP TABLE t1;
CREATE TABLE t1 (c INT NOT NULL, d CHAR(10) NOT NULL) ENGINE=CONNECT;
INSERT INTO t1 VALUES (1,'One'), (2,'Two'), (3,'Three');
ALTER TABLE t1 ADD INDEX xc (c), ADD INDEX xd (d);
ALTER TABLE t1 ENGINE = MYISAM;
SHOW CREATE TABLE t1;
SHOW INDEX FROM t1;
SELECT * FROM t1;
ALTER TABLE t1 ENGINE = CONNECT TABLE_TYPE=DBF;
SHOW CREATE TABLE t1;
SHOW INDEX FROM t1;
SELECT * FROM t1;
DROP TABLE t1, t2;
--echo #
--echo # Testing ALTER on outward tables
--echo #
CREATE TABLE t1 (c INT NOT NULL, d CHAR(10) NOT NULL) ENGINE=CONNECT TABLE_TYPE=fix FILE_NAME='tf1.txt' ENDING=1;
INSERT INTO t1 VALUES (1,'One'), (2,'Two'), (3,'Three');
SELECT * FROM t1;
CREATE TABLE t2 (line VARCHAR(100) NOT NULL) ENGINE=CONNECT FILE_NAME='tf1.txt';
SELECT * FROM t2;
--echo #
--echo # Indexing works the same
--echo #
ALTER TABLE t1 ADD INDEX xc (c), ADD INDEX xd (d);
SHOW INDEX FROM t1;
SELECT d FROM t1 WHERE c = 2;
ALTER TABLE t1 DROP INDEX xc, DROP INDEX xd;
SHOW INDEX FROM t1;
--echo #
--echo # Other alterations do not modify the file
--echo #
ALTER TABLE t1 MODIFY COLUMN c CHAR(5) NOT NULL;
SELECT * FROM t2;
SHOW CREATE TABLE t1;
#Wrong result
--error ER_GET_ERRMSG
SELECT * FROM t1;
ALTER TABLE t1 MODIFY COLUMN c INT NOT NULL;
--echo #
--echo # Changing column order
--echo #
ALTER TABLE t1 MODIFY COLUMN c INT NOT NULL AFTER d;
SELECT * FROM t2;
SHOW CREATE TABLE t1;
--echo # Wrong result
SELECT * FROM t1;
ALTER TABLE t1 MODIFY COLUMN c INT NOT NULL FIRST;
--echo # What should have been done
ALTER TABLE t1 MODIFY c INT NOT NULL FLAG=0 AFTER d, MODIFY d CHAR(10) NOT NULL FLAG=11;
SHOW CREATE TABLE t1;
SELECT * FROM t1;
--echo #
--echo # Changing to another engine is Ok
--echo # However, the data file is not deleted.
--echo #
ALTER TABLE t1 ENGINE=ARIA;
SHOW CREATE TABLE t1;
set @old_sql_mode=@@sql_mode;
set sql_mode=ignore_bad_table_options;
SHOW CREATE TABLE t1;
set sql_mode=@old_sql_mode;
SELECT * from t1;
SELECT * from t2;
--echo #
--echo # Changing back to CONNECT fails
--echo # Sure enough, the data file was not deleted.
--echo #
--error ER_UNKNOWN_ERROR
ALTER TABLE t1 ENGINE=CONNECT;
--echo #
--echo # But changing back to CONNECT succeed
--echo # if the data file does not exist.
--echo #
--remove_file $MYSQLD_DATADIR/test/tf1.txt
ALTER TABLE t1 ENGINE=CONNECT;
SHOW CREATE TABLE t1;
SELECT * from t1;
SELECT * from t2;
DROP TABLE t1, t2;
#
# Clean up
#
--remove_file $MYSQLD_DATADIR/test/tf1.txt
let $MYSQLD_DATADIR= `select @@datadir`;
--echo #
--echo # Testing indexing with ALTER on inward table (in-place)
--echo #
CREATE TABLE t1 (c INT NOT NULL, d CHAR(10) NOT NULL) ENGINE=CONNECT;
INSERT INTO t1 VALUES (1,'One'), (2,'Two'), (3,'Three');
SELECT * FROM t1;
CREATE INDEX xc ON t1(c);
DESCRIBE SELECT * FROM t1 WHERE c = 2;
DROP INDEX xc ON t1;
CREATE INDEX xd ON t1(d);
DROP INDEX xd ON t1;
ALTER TABLE t1 ADD INDEX xc (c), ADD INDEX xd (d);
SHOW INDEX FROM t1;
ALTER TABLE t1 DROP INDEX xc, DROP INDEX xd;
SHOW INDEX FROM t1;
--echo #
--echo # Testing modifying columns inward table (not in-place)
--echo #
ALTER TABLE t1 MODIFY COLUMN c CHAR(5) NOT NULL;
SHOW CREATE TABLE t1;
SELECT * FROM t1;
ALTER TABLE t1 MODIFY COLUMN c INT NOT NULL;
--echo #
--echo # Fails because indexing must be in-place
--echo #
--error ER_ALTER_OPERATION_NOT_SUPPORTED
ALTER TABLE t1 MODIFY COLUMN c CHAR(10) NOT NULL, ADD INDEX xd (d);
--echo #
--echo # Testing changing table type (not in-place)
--echo #
ALTER TABLE t1 TABLE_TYPE=CSV HEADER=1 QUOTED=1;
SELECT * FROM t1;
SHOW CREATE TABLE t1;
--echo # create an outward table used to see the t1 file
CREATE TABLE t2 (line VARCHAR(100) NOT NULL) ENGINE=CONNECT FILE_NAME='t1.csv';
SELECT * FROM t2;
--echo #
--echo # Testing changing engine
--echo #
DROP TABLE t1;
CREATE TABLE t1 (c INT NOT NULL, d CHAR(10) NOT NULL) ENGINE=CONNECT;
INSERT INTO t1 VALUES (1,'One'), (2,'Two'), (3,'Three');
ALTER TABLE t1 ADD INDEX xc (c), ADD INDEX xd (d);
ALTER TABLE t1 ENGINE = MYISAM;
SHOW CREATE TABLE t1;
SHOW INDEX FROM t1;
SELECT * FROM t1;
ALTER TABLE t1 ENGINE = CONNECT TABLE_TYPE=DBF;
SHOW CREATE TABLE t1;
SHOW INDEX FROM t1;
SELECT * FROM t1;
DROP TABLE t1, t2;
--echo #
--echo # Testing ALTER on outward tables
--echo #
CREATE TABLE t1 (c INT NOT NULL, d CHAR(10) NOT NULL) ENGINE=CONNECT TABLE_TYPE=fix FILE_NAME='tf1.txt' ENDING=1;
INSERT INTO t1 VALUES (1,'One'), (2,'Two'), (3,'Three');
SELECT * FROM t1;
CREATE TABLE t2 (line VARCHAR(100) NOT NULL) ENGINE=CONNECT FILE_NAME='tf1.txt';
SELECT * FROM t2;
--echo #
--echo # Indexing works the same
--echo #
ALTER TABLE t1 ADD INDEX xc (c), ADD INDEX xd (d);
SHOW INDEX FROM t1;
SELECT d FROM t1 WHERE c = 2;
ALTER TABLE t1 DROP INDEX xc, DROP INDEX xd;
SHOW INDEX FROM t1;
--echo #
--echo # Other alterations do not modify the file
--echo #
ALTER TABLE t1 MODIFY COLUMN c CHAR(5) NOT NULL;
SELECT * FROM t2;
SHOW CREATE TABLE t1;
#Wrong result
--error ER_GET_ERRMSG
SELECT * FROM t1;
ALTER TABLE t1 MODIFY COLUMN c INT NOT NULL;
--echo #
--echo # Changing column order
--echo #
ALTER TABLE t1 MODIFY COLUMN c INT NOT NULL AFTER d;
SELECT * FROM t2;
SHOW CREATE TABLE t1;
--echo # Wrong result
SELECT * FROM t1;
ALTER TABLE t1 MODIFY COLUMN c INT NOT NULL FIRST;
--echo # What should have been done
ALTER TABLE t1 MODIFY c INT NOT NULL FLAG=0 AFTER d, MODIFY d CHAR(10) NOT NULL FLAG=11;
SHOW CREATE TABLE t1;
SELECT * FROM t1;
--echo #
--echo # Changing to another engine is Ok
--echo # However, the data file is not deleted.
--echo #
ALTER TABLE t1 ENGINE=ARIA;
SHOW CREATE TABLE t1;
set @old_sql_mode=@@sql_mode;
set sql_mode=ignore_bad_table_options;
SHOW CREATE TABLE t1;
set sql_mode=@old_sql_mode;
SELECT * from t1;
SELECT * from t2;
--echo #
--echo # Changing back to CONNECT fails
--echo # Sure enough, the data file was not deleted.
--echo #
--error ER_UNKNOWN_ERROR
ALTER TABLE t1 ENGINE=CONNECT;
--echo #
--echo # But changing back to CONNECT succeed
--echo # if the data file does not exist.
--echo #
--remove_file $MYSQLD_DATADIR/test/tf1.txt
ALTER TABLE t1 ENGINE=CONNECT;
SHOW CREATE TABLE t1;
SELECT * from t1;
SELECT * from t2;
DROP TABLE t1, t2;
#
# Clean up
#
--remove_file $MYSQLD_DATADIR/test/tf1.txt
--source have_libxml2.inc
--echo #
--echo # Testing changing table type (not in-place)
--echo #
CREATE TABLE t1 (c INT NOT NULL, d CHAR(10) NOT NULL) ENGINE=CONNECT TABLE_TYPE=CSV HEADER=1 QUOTED=1;
INSERT INTO t1 VALUES (1,'One'), (2,'Two'), (3,'Three');
SELECT * FROM t1;
--echo # This would fail if the top node name is not specified.
--echo # This is because the XML top node name defaults to the table name.
--echo # Sure enough the temporary table name begins with '#' and is rejected by XML.
--echo # Therefore the top node name must be specified (along with the row nodes name).
ALTER TABLE t1 TABLE_TYPE=XML TABNAME=t1 OPTION_LIST='rownode=row';
SELECT * FROM t1;
SHOW CREATE TABLE t1;
--echo # Let us see the XML file
CREATE TABLE t2 (line VARCHAR(100) NOT NULL) ENGINE=CONNECT FILE_NAME='t1.xml';
SELECT * FROM t2;
--echo # NOTE: The first (ignored) row is due to the remaining HEADER=1 option.
--echo # Testing field option modification
ALTER TABLE t1 MODIFY d CHAR(10) NOT NULL FIELD_FORMAT='@', HEADER=0;
SELECT * FROM t1;
SHOW CREATE TABLE t1;
SELECT * FROM t2;
DROP TABLE t1, t2;
--source have_libxml2.inc
--echo #
--echo # Testing changing table type (not in-place)
--echo #
CREATE TABLE t1 (c INT NOT NULL, d CHAR(10) NOT NULL) ENGINE=CONNECT TABLE_TYPE=CSV HEADER=1 QUOTED=1;
INSERT INTO t1 VALUES (1,'One'), (2,'Two'), (3,'Three');
SELECT * FROM t1;
--echo # This would fail if the top node name is not specified.
--echo # This is because the XML top node name defaults to the table name.
--echo # Sure enough the temporary table name begins with '#' and is rejected by XML.
--echo # Therefore the top node name must be specified (along with the row nodes name).
ALTER TABLE t1 TABLE_TYPE=XML TABNAME=t1 OPTION_LIST='rownode=row';
SELECT * FROM t1;
SHOW CREATE TABLE t1;
--echo # Let us see the XML file
CREATE TABLE t2 (line VARCHAR(100) NOT NULL) ENGINE=CONNECT FILE_NAME='t1.xml';
SELECT * FROM t2;
--echo # NOTE: The first (ignored) row is due to the remaining HEADER=1 option.
--echo # Testing field option modification
ALTER TABLE t1 MODIFY d CHAR(10) NOT NULL FIELD_FORMAT='@', HEADER=0;
SELECT * FROM t1;
SHOW CREATE TABLE t1;
SELECT * FROM t2;
DROP TABLE t1, t2;
let $MYSQLD_DATADIR= `select @@datadir`;
--copy_file $MTR_SUITE_DIR/std_data/Testbal.dat $MYSQLD_DATADIR/test/Testbal.dat
--echo #
--echo # Testing errors
--echo #
CREATE TABLE t1
(
ID INT NOT NULL
) Engine=CONNECT TABLE_TYPE=BIN FILE_NAME='nonexistent.txt';
--replace_regex /on .*test.nonexistent.txt/on DATADIR\/test\/nonexistent.txt/
# TODO: check why this is needed for Windows
--replace_result Open(rt) Open(rb)
SELECT * FROM t1;
DROP TABLE t1;
SET time_zone='+00:00';
CREATE TABLE t1
(
fig INT(4) NOT NULL FIELD_FORMAT='C',
name CHAR(10) not null,
birth DATE NOT NULL,
id CHAR(5) NOT NULL FIELD_FORMAT='S',
salary DOUBLE(9,2) NOT NULL DEFAULT 0.00 FIELD_FORMAT='F',
dept INT(4) NOT NULL FIELD_FORMAT='S'
) ENGINE=CONNECT TABLE_TYPE=BIN BLOCK_SIZE=5 FILE_NAME='Testbal.dat';
SELECT * FROM t1;
--error ER_GET_ERRMSG
INSERT INTO t1 VALUES (55555,'RONALD','1980-02-26','3333',4444.44,555);
INSERT INTO t1 VALUES (5555,'RONALD','1980-02-26','3333',4444.44,555);
SELECT * FROM t1;
DROP TABLE t1;
--echo #
--echo # Testing READONLY tables
--echo #
CREATE TABLE t1
(
fig INT(4) NOT NULL FIELD_FORMAT='C',
name CHAR(10) not null,
birth DATE NOT NULL,
id CHAR(5) NOT NULL FIELD_FORMAT='S',
salary DOUBLE(9,2) NOT NULL DEFAULT 0.00 FIELD_FORMAT='F',
dept INT(4) NOT NULL FIELD_FORMAT='S'
) ENGINE=CONNECT TABLE_TYPE=BIN READONLY=Yes FILE_NAME='Testbal.dat';
--error ER_OPEN_AS_READONLY
INSERT INTO t1 VALUES (7777,'BILL','1973-06-30',4444,5555.555,777);
ALTER TABLE t1 READONLY=NO;
SHOW CREATE TABLE t1;
INSERT INTO t1 VALUES (7777,'BILL','1973-06-30',4444,5555.555,777);
SELECT * FROM t1;
ALTER TABLE t1 READONLY=YES;
SHOW CREATE TABLE t1;
--error ER_OPEN_AS_READONLY
INSERT INTO t1 VALUES (7777,'BILL','1973-06-30',4444,5555.555,777);
DROP TABLE t1;
--echo #
--echo # Testing that the underlying file is created
--echo #
CREATE TABLE t1
(
c CHAR(4) NOT NULL FIELD_FORMAT='C'
) ENGINE=CONNECT TABLE_TYPE=BIN FILE_NAME='bin2.dat';
INSERT INTO t1 VALUES (10),(20),(300),(4000);
SELECT * FROM t1;
DROP TABLE t1;
#
# Clean up
#
--remove_file $MYSQLD_DATADIR/test/Testbal.dat
--remove_file $MYSQLD_DATADIR/test/bin2.dat
let $MYSQLD_DATADIR= `select @@datadir`;
--copy_file $MTR_SUITE_DIR/std_data/Testbal.dat $MYSQLD_DATADIR/test/Testbal.dat
--echo #
--echo # Testing errors
--echo #
CREATE TABLE t1
(
ID INT NOT NULL
) Engine=CONNECT TABLE_TYPE=BIN FILE_NAME='nonexistent.txt';
--replace_regex /on .*test.nonexistent.txt/on DATADIR\/test\/nonexistent.txt/
# TODO: check why this is needed for Windows
--replace_result Open(rt) Open(rb)
SELECT * FROM t1;
DROP TABLE t1;
SET time_zone='+00:00';
CREATE TABLE t1
(
fig INT(4) NOT NULL FIELD_FORMAT='C',
name CHAR(10) NOT NULL,
birth DATE NOT NULL FIELD_FORMAT='L',
id CHAR(5) NOT NULL FIELD_FORMAT='L2',
salary DOUBLE(9,2) NOT NULL DEFAULT 0.00 FIELD_FORMAT='F',
dept INT(4) NOT NULL FIELD_FORMAT='L2'
) ENGINE=CONNECT TABLE_TYPE=BIN BLOCK_SIZE=5 FILE_NAME='Testbal.dat';
SELECT * FROM t1;
--error ER_GET_ERRMSG
INSERT INTO t1 VALUES (55555,'RONALD','1980-02-26','3333',4444.44,555);
INSERT INTO t1 VALUES (5555,'RONALD','1980-02-26','3333',4444.44,555);
SELECT * FROM t1;
DROP TABLE t1;
--echo #
--echo # Testing READONLY tables
--echo #
CREATE TABLE t1
(
fig INT(4) NOT NULL FIELD_FORMAT='C',
name CHAR(10) NOT NULL,
birth DATE NOT NULL FIELD_FORMAT='L',
id CHAR(5) NOT NULL FIELD_FORMAT='L2',
salary DOUBLE(9,2) NOT NULL DEFAULT 0.00 FIELD_FORMAT='F',
dept INT(4) NOT NULL FIELD_FORMAT='L2'
) ENGINE=CONNECT TABLE_TYPE=BIN READONLY=Yes FILE_NAME='Testbal.dat';
--error ER_OPEN_AS_READONLY
INSERT INTO t1 VALUES (7777,'BILL','1973-06-30',4444,5555.555,777);
ALTER TABLE t1 READONLY=NO;
SHOW CREATE TABLE t1;
INSERT INTO t1 VALUES (7777,'BILL','1973-06-30',4444,5555.555,777);
SELECT * FROM t1;
ALTER TABLE t1 READONLY=YES;
SHOW CREATE TABLE t1;
--error ER_OPEN_AS_READONLY
INSERT INTO t1 VALUES (7777,'BILL','1973-06-30',4444,5555.555,777);
DROP TABLE t1;
--echo #
--echo # Testing that the underlying file is created
--echo #
CREATE TABLE t1
(
c CHAR(4) NOT NULL FIELD_FORMAT='C'
) ENGINE=CONNECT TABLE_TYPE=BIN FILE_NAME='bin2.dat';
INSERT INTO t1 VALUES (10),(20),(300),(4000);
SELECT * FROM t1;
DROP TABLE t1;
#
# Clean up
#
--remove_file $MYSQLD_DATADIR/test/Testbal.dat
--remove_file $MYSQLD_DATADIR/test/bin2.dat
--echo #
--echo # Testing out of range dates as (var)char
--echo #
CREATE TABLE t1 (
id INT NOT NULL,
dat CHAR(10) NOT NULL,
tim CHAR(8) DEFAULT '09:35:08',
datim CHAR(19) DEFAULT '1789-08-10 14:20:30')
ENGINE=CONNECT TABLE_TYPE=FIX;
INSERT INTO t1(id,dat) VALUES(1,'1515-04-01'),(2,'2014-07-26'),(3,'2118-11-02');
SELECT * FROM t1;
SELECT id, DATE(datim) FROM t1 LIMIT 1;
SELECT id, DAYNAME(dat) FROM t1;
SELECT id, DAYNAME(datim) FROM t1 LIMIT 1;
SELECT id, TIME(tim) FROM t1 LIMIT 1;
DROP TABLE t1;
--echo #
--echo # Testing out of range dates as (var)char
--echo #
CREATE TABLE t1 (
id INT NOT NULL,
dat CHAR(10) NOT NULL,
tim CHAR(8) DEFAULT '09:35:08',
datim CHAR(19) DEFAULT '1789-08-10 14:20:30')
ENGINE=CONNECT TABLE_TYPE=FIX;
INSERT INTO t1(id,dat) VALUES(1,'1515-04-01'),(2,'2014-07-26'),(3,'2118-11-02');
SELECT * FROM t1;
SELECT id, DATE(datim) FROM t1 LIMIT 1;
SELECT id, DAYNAME(dat) FROM t1;
SELECT id, DAYNAME(datim) FROM t1 LIMIT 1;
SELECT id, TIME(tim) FROM t1 LIMIT 1;
DROP TABLE t1;
let $MYSQLD_DATADIR= `select @@datadir`;
--copy_file $MTR_SUITE_DIR/std_data/funny.txt $MYSQLD_DATADIR/test/funny.txt
--copy_file $MTR_SUITE_DIR/std_data/funny2.txt $MYSQLD_DATADIR/test/funny2.txt
--echo #
--echo # Testing errors
--echo #
CREATE TABLE t1
(
ID INT NOT NULL field_format=' %n%d%n'
) Engine=CONNECT table_type=FMT file_name='nonexistent.txt';
--replace_regex /on .*test.nonexistent.txt/on DATADIR\/test\/nonexistent.txt/
# TODO: check why this is needed for Windows
--replace_result Open(rt) Open(rb)
SELECT * FROM t1;
DROP TABLE t1;
--echo #
--echo # Testing update on FMT tables
--echo #
CREATE TABLE t1
(
id INT NOT NULL field_format=' %n%d%n'
) ENGINE=CONNECT TABLE_TYPE=FMT FILE_NAME='t1.txt';
--error ER_GET_ERRMSG
INSERT INTO t1 VALUES (10),(20);
# TODO:
#--error ER_GET_ERRMSG
#UPDATE t1 SET id=20;
#TRUNCATE TABLE t1;
#DELETE FROM t1 WHERE id=10;
#SELECT * FROM t1;
DROP TABLE t1;
#--remove_file $MYSQLD_DATADIR/test/t1.txt
--echo #
--echo # Testing manual examples
--echo #
CREATE TABLE t1
(
ID Integer(5) not null field_format=' %n%d%n',
NAME Char(16) not null field_format=" , '%n%[^']%n'",
DEPNO Integer(4) not null field_format=' , #%n%d%n',
SALARY Double(12,2) not null field_format=' ; %n%f%n'
) Engine=CONNECT table_type=FMT file_name='funny.txt';
SELECT * FROM t1;
DROP TABLE t1;
#
# TODO: shouldn't a warning instead of an error be returned on bad format?
#
CREATE TABLE t1
(
ID Integer(5) not null field_format=' %n%d%n',
NAME Char(16) not null field_format=" , '%n%[^']%n'",
DEPNO Integer(4) not null field_format=' , #%n%d%n',
SALARY Double(12,2) not null field_format=' ; %n%f%n'
) Engine=CONNECT table_type=FMT file_name='funny2.txt';
--error ER_GET_ERRMSG
SELECT * FROM t1;
DROP TABLE t1;
CREATE TABLE t1
(
ID Integer(5) not null field_format=' %n%d%n',
NAME Char(16) not null field_format=' , ''%n%[^'']%m',
DEPNO Integer(4) not null field_format=''' , #%n%d%m',
SALARY Double(12,2) not null field_format=' ; %n%f%n'
) Engine=CONNECT table_type=FMT file_name='funny2.txt';
SELECT * FROM t1;
--error ER_GET_ERRMSG
UPDATE t1 SET SALARY=1234;
# TODO: this query crashes
# UPDATE t1 SET SALARY=1234 WHERE ID=56;
DELETE FROM t1 WHERE ID=56;
SELECT * FROM t1;
DROP TABLE t1;
#
# Clean up
#
--remove_file $MYSQLD_DATADIR/test/funny.txt
--remove_file $MYSQLD_DATADIR/test/funny2.txt
let $MYSQLD_DATADIR= `select @@datadir`;
--copy_file $MTR_SUITE_DIR/std_data/funny.txt $MYSQLD_DATADIR/test/funny.txt
--copy_file $MTR_SUITE_DIR/std_data/funny2.txt $MYSQLD_DATADIR/test/funny2.txt
--echo #
--echo # Testing errors
--echo #
CREATE TABLE t1
(
ID INT NOT NULL field_format=' %n%d%n'
) Engine=CONNECT table_type=FMT file_name='nonexistent.txt';
--replace_regex /on .*test.nonexistent.txt/on DATADIR\/test\/nonexistent.txt/
# TODO: check why this is needed for Windows
--replace_result Open(rt) Open(rb)
SELECT * FROM t1;
DROP TABLE t1;
--echo #
--echo # Testing update on FMT tables
--echo #
CREATE TABLE t1
(
id INT NOT NULL field_format=' %n%d%n'
) ENGINE=CONNECT TABLE_TYPE=FMT FILE_NAME='t1.txt';
--error ER_GET_ERRMSG
INSERT INTO t1 VALUES (10),(20);
# TODO:
#--error ER_GET_ERRMSG
#UPDATE t1 SET id=20;
#TRUNCATE TABLE t1;
#DELETE FROM t1 WHERE id=10;
#SELECT * FROM t1;
DROP TABLE t1;
#--remove_file $MYSQLD_DATADIR/test/t1.txt
--echo #
--echo # Testing manual examples
--echo #
CREATE TABLE t1
(
ID Integer(5) not null field_format=' %n%d%n',
NAME Char(16) not null field_format=" , '%n%[^']%n'",
DEPNO Integer(4) not null field_format=' , #%n%d%n',
SALARY Double(12,2) not null field_format=' ; %n%f%n'
) Engine=CONNECT table_type=FMT file_name='funny.txt';
SELECT * FROM t1;
DROP TABLE t1;
#
# TODO: shouldn't a warning instead of an error be returned on bad format?
#
CREATE TABLE t1
(
ID Integer(5) not null field_format=' %n%d%n',
NAME Char(16) not null field_format=" , '%n%[^']%n'",
DEPNO Integer(4) not null field_format=' , #%n%d%n',
SALARY Double(12,2) not null field_format=' ; %n%f%n'
) Engine=CONNECT table_type=FMT file_name='funny2.txt';
--error ER_GET_ERRMSG
SELECT * FROM t1;
DROP TABLE t1;
CREATE TABLE t1
(
ID Integer(5) not null field_format=' %n%d%n',
NAME Char(16) not null field_format=' , ''%n%[^'']%m',
DEPNO Integer(4) not null field_format=''' , #%n%d%m',
SALARY Double(12,2) not null field_format=' ; %n%f%n'
) Engine=CONNECT table_type=FMT file_name='funny2.txt';
SELECT * FROM t1;
--error ER_GET_ERRMSG
UPDATE t1 SET SALARY=1234;
# TODO: this query crashes
# UPDATE t1 SET SALARY=1234 WHERE ID=56;
DELETE FROM t1 WHERE ID=56;
SELECT * FROM t1;
DROP TABLE t1;
#
# Clean up
#
--remove_file $MYSQLD_DATADIR/test/funny.txt
--remove_file $MYSQLD_DATADIR/test/funny2.txt
--echo #
--echo # Testing features not specific to any TABLE_TYPE
--echo #
--error ER_UNKNOWN_ERROR
CREATE TABLE t1 (a INT NOT NULL) ENGINE=CONNECT TABLE_TYPE=NON_EXISTING;
#SHOW CREATE TABLE t1;
#DROP TABLE t1;
CREATE TABLE t1 (a INT NOT NULL) ENGINE=CONNECT TABLE_TYPE=FIX;
INSERT INTO t1 VALUES (10);
SELECT * FROM t1;
#--error ER_GET_ERRMSG
--error ER_UNKNOWN_ERROR
ALTER TABLE t1 TABLE_TYPE=NON_EXISTING;
SELECT * FROM t1;
DROP TABLE t1;
--echo #
--echo # Testing features not specific to any TABLE_TYPE
--echo #
--error ER_UNKNOWN_ERROR
CREATE TABLE t1 (a INT NOT NULL) ENGINE=CONNECT TABLE_TYPE=NON_EXISTING;
#SHOW CREATE TABLE t1;
#DROP TABLE t1;
CREATE TABLE t1 (a INT NOT NULL) ENGINE=CONNECT TABLE_TYPE=FIX;
INSERT INTO t1 VALUES (10);
SELECT * FROM t1;
#--error ER_GET_ERRMSG
--error ER_UNKNOWN_ERROR
ALTER TABLE t1 TABLE_TYPE=NON_EXISTING;
SELECT * FROM t1;
DROP TABLE t1;
......@@ -97,7 +97,7 @@ SELECT * FROM t1 WHERE ISBN = '9782212090819';
CREATE TABLE t2 (
FIRSTNAME CHAR(32),
LASTNAME CHAR(32))
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json' OPTION_LIST='Object=[2]:AUTHOR';
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json' OPTION_LIST='Object=[1]:AUTHOR';
SELECT * FROM t2;
INSERT INTO t2 VALUES('Charles','Dickens');
SELECT * FROM t1;
......@@ -162,25 +162,25 @@ DROP TABLE t1;
--echo #
CREATE TABLE t2 (
WHO CHAR(12),
WEEK INT(2) FIELD_FORMAT='WEEK:[1]:NUMBER',
WHAT CHAR(32) FIELD_FORMAT='WEEK:[1]:EXPENSE:[X]:WHAT',
AMOUNT DOUBLE(8,2) FIELD_FORMAT='WEEK:[1]:EXPENSE:[X]:AMOUNT')
WEEK INT(2) FIELD_FORMAT='WEEK:[0]:NUMBER',
WHAT CHAR(32) FIELD_FORMAT='WEEK:[0]:EXPENSE:[X]:WHAT',
AMOUNT DOUBLE(8,2) FIELD_FORMAT='WEEK:[0]:EXPENSE:[X]:AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
SELECT * FROM t2;
CREATE TABLE t3 (
WHO CHAR(12),
WEEK INT(2) FIELD_FORMAT='WEEK:[2]:NUMBER',
WHAT CHAR(32) FIELD_FORMAT='WEEK:[2]:EXPENSE:[X]:WHAT',
AMOUNT DOUBLE(8,2) FIELD_FORMAT='WEEK:[2]:EXPENSE:[X]:AMOUNT')
WEEK INT(2) FIELD_FORMAT='WEEK:[1]:NUMBER',
WHAT CHAR(32) FIELD_FORMAT='WEEK:[1]:EXPENSE:[X]:WHAT',
AMOUNT DOUBLE(8,2) FIELD_FORMAT='WEEK:[1]:EXPENSE:[X]:AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
SELECT * FROM t3;
CREATE TABLE t4 (
WHO CHAR(12),
WEEK INT(2) FIELD_FORMAT='WEEK:[3]:NUMBER',
WHAT CHAR(32) FIELD_FORMAT='WEEK:[3]:EXPENSE:[X]:WHAT',
AMOUNT DOUBLE(8,2) FIELD_FORMAT='WEEK:[3]:EXPENSE:[X]:AMOUNT')
WEEK INT(2) FIELD_FORMAT='WEEK:[2]:NUMBER',
WHAT CHAR(32) FIELD_FORMAT='WEEK:[2]:EXPENSE:[X]:WHAT',
AMOUNT DOUBLE(8,2) FIELD_FORMAT='WEEK:[2]:EXPENSE:[X]:AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
SELECT * FROM t4;
......
--disable_query_log
#
# Check if server has support for loading plugins
#
if (`SELECT @@have_dynamic_loading != 'YES'`) {
--skip UDF requires dynamic loading
}
let $is_win = `select convert(@@version_compile_os using latin1) IN ("Win32","Win64","Windows")`;
if ($is_win)
{
--eval CREATE FUNCTION Json_Array RETURNS STRING SONAME 'ha_connect.dll';
--eval CREATE FUNCTION Json_Array_Add RETURNS STRING SONAME 'ha_connect.dll';
--eval CREATE FUNCTION Json_Object RETURNS STRING SONAME 'ha_connect.dll';
--eval CREATE FUNCTION Json_Object_Nonull RETURNS STRING SONAME 'ha_connect.dll';
--eval CREATE FUNCTION Json_Value returns STRING SONAME 'ha_connect.dll';
--eval CREATE AGGREGATE FUNCTION Json_Array_Grp RETURNS STRING SONAME 'ha_connect.dll';
--eval CREATE AGGREGATE FUNCTION Json_Object_Grp RETURNS STRING SONAME 'ha_connect.dll';
}
if (!$is_win)
{
--eval CREATE FUNCTION Json_Array RETURNS STRING SONAME 'ha_connect.so';
--eval CREATE FUNCTION Json_Array_Add RETURNS STRING SONAME 'ha_connect.so';
--eval CREATE FUNCTION Json_Object RETURNS STRING SONAME 'ha_connect.so';
--eval CREATE FUNCTION Json_Object_Nonull RETURNS STRING SONAME 'ha_connect.so';
--eval CREATE FUNCTION Json_Value returns STRING SONAME 'ha_connect.so';
--eval CREATE AGGREGATE FUNCTION Json_Array_Grp RETURNS STRING SONAME 'ha_connect.so';
--eval CREATE AGGREGATE FUNCTION Json_Object_Grp RETURNS STRING SONAME 'ha_connect.so';
}
--enable_query_log
--source json_udf.inc
let $MYSQLD_DATADIR= `select @@datadir`;
--copy_file $MTR_SUITE_DIR/std_data/biblio.json $MYSQLD_DATADIR/test/biblio.json
--copy_file $MTR_SUITE_DIR/std_data/employee.dat $MYSQLD_DATADIR/test/employee.dat
--echo #
--echo # Test UDF's with constant arguments
--echo #
SELECT Json_Array();
SELECT Json_Object(56,3.1416,'foo',NULL);
SELECT Json_Object(56 qty,3.1416 price,'foo' truc, NULL garanty);
SELECT Json_Array(56,3.1416,'My name is "Foo"',NULL);
--error ER_CANT_INITIALIZE_UDF
SELECT Json_Array_Add(Json_Array(56,3.1416,'foo',NULL)) Array;
SELECT Json_Array_Add(Json_Array(56,3.1416,'foo',NULL),'One more') Array;
SELECT Json_Array_Add(Json_Value('one value'),'One more');
--error ER_CANT_INITIALIZE_UDF
SELECT Json_Array_Add('one value','One more');
SELECT Json_Array_Add('one value' json_,'One more');
--error ER_CANT_INITIALIZE_UDF
SELECT Json_Value(56,3.1416,'foo',NULL);
SELECT Json_Value(3.1416);
SELECT Json_Value('foo');
SELECT Json_Value(NULL);
SELECT Json_Value();
SELECT Json_Object();
SELECT Json_Object(Json_Array(56,3.1416,'foo'),NULL);
SELECT Json_Array(Json_Array(56,3.1416,'foo'),NULL);
SELECT Json_Array(Json_Object(56 "qty",3.1416 "price",'foo'),NULL);
--echo #
--echo # Test UDF's with column arguments
--echo #
CREATE TABLE t1
(
ISBN CHAR(15),
LANG CHAR(2),
SUBJECT CHAR(32),
AUTHOR CHAR(64),
TITLE CHAR(32),
TRANSLATION CHAR(32),
TRANSLATOR CHAR(80),
PUBLISHER CHAR(32),
DATEPUB int(4)
) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json';
SELECT Json_Array(AUTHOR, TITLE, DATEPUB) FROM t1;
SELECT Json_Object(AUTHOR, TITLE, DATEPUB) FROM t1;
--error ER_CANT_INITIALIZE_UDF
SELECT Json_Array_Grp(TITLE, DATEPUB) FROM t1;
SELECT Json_Array_Grp(TITLE) FROM t1;
DROP TABLE t1;
CREATE TABLE t1 (
SERIALNO CHAR(5) NOT NULL,
NAME VARCHAR(12) NOT NULL FLAG=6,
SEX SMALLINT(1) NOT NULL,
TITLE VARCHAR(15) NOT NULL FLAG=20,
MANAGER CHAR(5) DEFAULT NULL,
DEPARTMENT CHAr(4) NOT NULL FLAG=41,
SECRETARY CHAR(5) DEFAULT NULL FLAG=46,
SALARY DOUBLE(8,2) NOT NULL FLAG=52
) ENGINE=CONNECT TABLE_TYPE=FIX BLOCK_SIZE=8 FILE_NAME='employee.dat' ENDING=1;
SELECT Json_Object(SERIALNO, NAME, TITLE, SALARY) FROM t1 WHERE NAME = 'MERCHANT';
SELECT DEPARTMENT, Json_Array_Grp(NAME) FROM t1 GROUP BY DEPARTMENT;
set connect_json_grp_size=30;
SELECT Json_Array(DEPARTMENT, Json_Array_Grp(NAME)) FROM t1 GROUP BY DEPARTMENT;
SELECT Json_Object(DEPARTMENT, Json_Array_Grp(NAME) json_NAMES) FROM t1 GROUP BY DEPARTMENT;
SELECT Json_Object(DEPARTMENT, Json_Array_Grp(Json_Object(SERIALNO, NAME, TITLE, SALARY)) json_EMPLOYES) FROM t1 GROUP BY DEPARTMENT;
SELECT Json_Object(DEPARTMENT, TITLE, Json_Array_Grp(Json_Object(SERIALNO, NAME, SALARY)) json_EMPLOYES) FROM t1 GROUP BY DEPARTMENT, TITLE;
--error ER_CANT_INITIALIZE_UDF
SELECT Json_Object_Grp(SALARY) FROM t1;
SELECT Json_Object_Grp(SALARY, NAME) FROM t1;
SELECT Json_Object(DEPARTMENT, Json_Object_Grp(SALARY, NAME) "Json_SALARIES") FROM t1 GROUP BY DEPARTMENT;
SELECT Json_Array_Grp(NAME) from t1;
DROP TABLE t1;
DROP FUNCTION Json_Array;
DROP FUNCTION Json_Array_Add;
DROP FUNCTION Json_Object;
DROP FUNCTION Json_Object_Nonull;
DROP FUNCTION Json_Value;
DROP FUNCTION Json_Array_Grp;
DROP FUNCTION Json_Object_Grp;
#
# Clean up
#
--remove_file $MYSQLD_DATADIR/test/biblio.json
--remove_file $MYSQLD_DATADIR/test/employee.dat
let $MYSQLD_DATADIR= `select @@datadir`;
--copy_file $MTR_SUITE_DIR/std_data/emp.txt $MYSQLD_DATADIR/test/emp.txt
--echo #
--echo # Show MRR setting. The way it is done is because the t3 table cannot be directly based on
--echo # the information_schema.session_variables table. Not being a CONNECT table, it would be
--echo # read using an intermediate MYSQL table using the MySQL API and could not reflect the
--echo # current session variable change (the call would create another session) This would be
--echo # correct only for querying GLOBAL variables but is not what we want to do here.
--echo #
CREATE TABLE t2 (
name VARCHAR(64) NOT NULL,
value VARCHAR(1024) NOT NULL
) ENGINE=CONNECT TABLE_TYPE=DOS;
INSERT INTO t2 SELECT * FROM information_schema.session_variables WHERE variable_name = 'OPTIMIZER_SWITCH';
# Check that MRR is OFF by default
create table t3 (
name CHAR(32) NOT NULL,
value CHAR(64) NOT NULL
) ENGINE=CONNECT TABLE_TYPE=XCOL TABNAME=t2 OPTION_LIST='Colname=value';
SELECT value FROM t3 WHERE value LIKE 'mrr%';
--echo #
--echo # Testing indexing with MRR OFF
--echo #
CREATE TABLE t1
(
matricule INT(4) KEY NOT NULL field_format='Z',
nom VARCHAR(16) NOT NULL,
prenom VARCHAR(20) NOT NULL,
sexe SMALLINT(1) NOT NULL COMMENT 'sexe 1:M 2:F',
aanais INT(4) NOT NULL,
mmnais INT(2) NOT NULL,
ddentree DATE NOT NULL date_format='YYYYMM',
ddnom DATE NOT NULL date_format='YYYYMM',
brut INT(5) NOT NULL,
net DOUBLE(8,2) NOT NULL,
service INT(2) NOT NULL,
sitmat CHAR(1) NOT NULL,
formation CHAR(5) NOT NULL,
INDEX NP(nom,prenom)
) ENGINE=CONNECT TABLE_TYPE=FIX FILE_NAME='emp.txt' ENDING=2;
SELECT * FROM t1 LIMIT 10;
--echo # Without MRR, the rows are retrieved sorted by name
SELECT matricule, nom, prenom, sitmat, net FROM t1 WHERE nom IN ('ETANG','FOCH','CERF','ITALIE','ROI');
--echo #
--echo # Testing indexing with MRR ON
--echo #
SET @@LOCAL.OPTIMIZER_SWITCH='mrr=on';
--echo # Refresh the t2 table to reflect the change
UPDATE t2, information_schema.session_variables SET value = variable_value WHERE variable_name = 'OPTIMIZER_SWITCH';
--echo # Check that MRR is ON for the session
SELECT value FROM t3 WHERE value LIKE 'mrr%';
--echo # With MRR, the rows are retrieved sorted by their position in the table
SELECT matricule, nom, prenom, sitmat, net FROM t1 WHERE nom IN ('ETANG','FOCH','CERF','ITALIE','ROI');
DROP TABLE t1;
DROP TABLE t2;
DROP TABLE t3;
#
# Clean up
#
SET @@LOCAL.OPTIMIZER_SWITCH='mrr=off';
--remove_file $MYSQLD_DATADIR/test/emp.txt
let $MYSQLD_DATADIR= `select @@datadir`;
--copy_file $MTR_SUITE_DIR/std_data/emp.txt $MYSQLD_DATADIR/test/emp.txt
--echo #
--echo # Show MRR setting. The way it is done is because the t3 table cannot be directly based on
--echo # the information_schema.session_variables table. Not being a CONNECT table, it would be
--echo # read using an intermediate MYSQL table using the MySQL API and could not reflect the
--echo # current session variable change (the call would create another session) This would be
--echo # correct only for querying GLOBAL variables but is not what we want to do here.
--echo #
CREATE TABLE t2 (
name VARCHAR(64) NOT NULL,
value VARCHAR(1024) NOT NULL
) ENGINE=CONNECT TABLE_TYPE=DOS;
INSERT INTO t2 SELECT * FROM information_schema.session_variables WHERE variable_name = 'OPTIMIZER_SWITCH';
# Check that MRR is OFF by default
create table t3 (
name CHAR(32) NOT NULL,
value CHAR(64) NOT NULL
) ENGINE=CONNECT TABLE_TYPE=XCOL TABNAME=t2 OPTION_LIST='Colname=value';
SELECT value FROM t3 WHERE value LIKE 'mrr%';
--echo #
--echo # Testing indexing with MRR OFF
--echo #
CREATE TABLE t1
(
matricule INT(4) KEY NOT NULL field_format='Z',
nom VARCHAR(16) NOT NULL,
prenom VARCHAR(20) NOT NULL,
sexe SMALLINT(1) NOT NULL COMMENT 'sexe 1:M 2:F',
aanais INT(4) NOT NULL,
mmnais INT(2) NOT NULL,
ddentree DATE NOT NULL date_format='YYYYMM',
ddnom DATE NOT NULL date_format='YYYYMM',
brut INT(5) NOT NULL,
net DOUBLE(8,2) NOT NULL,
service INT(2) NOT NULL,
sitmat CHAR(1) NOT NULL,
formation CHAR(5) NOT NULL,
INDEX NP(nom,prenom)
) ENGINE=CONNECT TABLE_TYPE=FIX FILE_NAME='emp.txt' ENDING=2;
SELECT * FROM t1 LIMIT 10;
--echo # Without MRR, the rows are retrieved sorted by name
SELECT matricule, nom, prenom, sitmat, net FROM t1 WHERE nom IN ('ETANG','FOCH','CERF','ITALIE','ROI');
--echo #
--echo # Testing indexing with MRR ON
--echo #
SET @@LOCAL.OPTIMIZER_SWITCH='mrr=on';
--echo # Refresh the t2 table to reflect the change
UPDATE t2, information_schema.session_variables SET value = variable_value WHERE variable_name = 'OPTIMIZER_SWITCH';
--echo # Check that MRR is ON for the session
SELECT value FROM t3 WHERE value LIKE 'mrr%';
--echo # With MRR, the rows are retrieved sorted by their position in the table
SELECT matricule, nom, prenom, sitmat, net FROM t1 WHERE nom IN ('ETANG','FOCH','CERF','ITALIE','ROI');
DROP TABLE t1;
DROP TABLE t2;
DROP TABLE t3;
#
# Clean up
#
SET @@LOCAL.OPTIMIZER_SWITCH='mrr=off';
--remove_file $MYSQLD_DATADIR/test/emp.txt
--source include/not_embedded.inc
let $PORT= `select @@port`;
--disable_query_log
--replace_result $PORT PORT
--error 0,ER_UNKNOWN_ERROR
eval CREATE TABLE t1 (a INT) ENGINE=CONNECT TABLE_TYPE=MYSQL
CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/test/tx1';
if (!`SELECT count(*) FROM INFORMATION_SCHEMA.TABLES
WHERE TABLE_SCHEMA='test' AND TABLE_NAME='t1'
AND ENGINE='CONNECT'
AND CREATE_OPTIONS LIKE '%`table_type`=MySQL%'`)
{
Skip Need MySQL support;
}
DROP TABLE t1;
--enable_query_log
connect (master,127.0.0.1,root,,test,$MASTER_MYPORT,);
connect (slave,127.0.0.1,root,,test,$SLAVE_MYPORT,);
connection master;
CREATE DATABASE connect;
connection slave;
CREATE DATABASE connect;
--source include/not_embedded.inc
let $PORT= `select @@port`;
--disable_query_log
--replace_result $PORT PORT
--error 0,ER_UNKNOWN_ERROR
eval CREATE TABLE t1 (a INT) ENGINE=CONNECT TABLE_TYPE=MYSQL
CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/test/tx1';
if (!`SELECT count(*) FROM INFORMATION_SCHEMA.TABLES
WHERE TABLE_SCHEMA='test' AND TABLE_NAME='t1'
AND ENGINE='CONNECT'
AND CREATE_OPTIONS LIKE '%`table_type`=MySQL%'`)
{
Skip Need MySQL support;
}
DROP TABLE t1;
--enable_query_log
connect (master,127.0.0.1,root,,test,$MASTER_MYPORT,);
connect (slave,127.0.0.1,root,,test,$SLAVE_MYPORT,);
connection master;
CREATE DATABASE connect;
connection slave;
CREATE DATABASE connect;
connection master;
--disable_warnings
DROP TABLE IF EXISTS connect.t1;
DROP DATABASE IF EXISTS connect;
connection slave;
DROP TABLE IF EXISTS connect.t1;
DROP DATABASE IF EXISTS connect;
--enable_warnings
connection master;
--disable_warnings
DROP TABLE IF EXISTS connect.t1;
DROP DATABASE IF EXISTS connect;
connection slave;
DROP TABLE IF EXISTS connect.t1;
DROP DATABASE IF EXISTS connect;
--enable_warnings