Commit a82171c9 authored by Olivier Bertrand

In BIN tables, date_format now implies field_format='C' by default (see the sketch after the file list below).

  modified:   storage/connect/tabfix.cpp
  modified:   storage/connect/reldef.cpp
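A minimal sketch of the new default, assuming a hypothetical fixed-length binary file data.bin whose DATE1 field is stored as an 8-character string; field_format='C' no longer needs to be spelled out when date_format is given:

CREATE TABLE tbin (
ID INT(4) NOT NULL,
DATE1 DATE NOT NULL DATE_FORMAT='YYYYMMDD', # field_format='C' is now implied by date_format
NAME CHAR(10) NOT NULL)
ENGINE=CONNECT TABLE_TYPE=BIN FILE_NAME='data.bin';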

The JSON array index (position) is now 0-based by default, matching what JSON applications and functions do. Also fixes the ROWNUM calculation. A usage sketch follows the file list below.
  modified:   storage/connect/jsonudf.cpp
  modified:   storage/connect/mysql-test/connect/r/json.result
  modified:   storage/connect/mysql-test/connect/t/json.test
  modified:   storage/connect/tabjson.cpp
  modified:   storage/connect/tabjson.h
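A sketch of the new indexing (table names are illustrative): the first array element is now addressed as [0], and the old 1-based behavior can presumably be restored through the new Base option read in JSONDEF::DefineAM:

CREATE TABLE jexp (
WHO CHAR(12),
WEEK INT(2) FIELD_FORMAT='WEEK:[0]:NUMBER') # [0] now selects the first WEEK entry
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
# Assumed: passing Base=1 in OPTION_LIST keeps legacy 1-based positions working.
CREATE TABLE jexp1 (
WHO CHAR(12),
WEEK INT(2) FIELD_FORMAT='WEEK:[1]:NUMBER')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json' OPTION_LIST='Base=1';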
parent b9c91090
......@@ -410,7 +410,7 @@ void Json_Array_Add_deinit(UDF_INIT* initid)
} // end of Json_Array_Add_deinit
/***********************************************************************/
/* Add values to a Json array. */
/* Delete a value from a Json array. */
/***********************************************************************/
my_bool Json_Array_Delete_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
{
......@@ -451,7 +451,7 @@ char *Json_Array_Delete(UDF_INIT *initid, UDF_ARGS *args, char *result,
} else {
n = *(int*)args->args[1];
arp = jvp->GetArray();
arp->DeleteValue(n - 1);
arp->DeleteValue(n);
arp->InitArray(g);
if (!(str = Serialize(g, arp, NULL, 0))) {
......
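Under the 0-based default, the Json_Array_Delete UDF changes accordingly: the position argument is now used as-is instead of being decremented. A hedged usage sketch (values illustrative):

# Deletes the first element under the new 0-based indexing, returning
# roughly '["two","three"]' (assuming the Json_Array UDF is installed).
SELECT Json_Array_Delete(Json_Array('one','two','three'), 0);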
......@@ -97,7 +97,7 @@ ISBN Language Subject AuthorFN AuthorLN Title Translation Translator Publisher L
CREATE TABLE t2 (
FIRSTNAME CHAR(32),
LASTNAME CHAR(32))
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json' OPTION_LIST='Object=[2]:AUTHOR';
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json' OPTION_LIST='Object=[1]:AUTHOR';
SELECT * FROM t2;
FIRSTNAME LASTNAME
William J. Pardi
......@@ -252,9 +252,9 @@ DROP TABLE t1;
#
CREATE TABLE t2 (
WHO CHAR(12),
WEEK INT(2) FIELD_FORMAT='WEEK:[1]:NUMBER',
WHAT CHAR(32) FIELD_FORMAT='WEEK:[1]:EXPENSE:[X]:WHAT',
AMOUNT DOUBLE(8,2) FIELD_FORMAT='WEEK:[1]:EXPENSE:[X]:AMOUNT')
WEEK INT(2) FIELD_FORMAT='WEEK:[0]:NUMBER',
WHAT CHAR(32) FIELD_FORMAT='WEEK:[0]:EXPENSE:[X]:WHAT',
AMOUNT DOUBLE(8,2) FIELD_FORMAT='WEEK:[0]:EXPENSE:[X]:AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
SELECT * FROM t2;
WHO WEEK WHAT AMOUNT
......@@ -268,9 +268,9 @@ Janet 3 Food 18.00
Janet 3 Beer 18.00
CREATE TABLE t3 (
WHO CHAR(12),
WEEK INT(2) FIELD_FORMAT='WEEK:[2]:NUMBER',
WHAT CHAR(32) FIELD_FORMAT='WEEK:[2]:EXPENSE:[X]:WHAT',
AMOUNT DOUBLE(8,2) FIELD_FORMAT='WEEK:[2]:EXPENSE:[X]:AMOUNT')
WEEK INT(2) FIELD_FORMAT='WEEK:[1]:NUMBER',
WHAT CHAR(32) FIELD_FORMAT='WEEK:[1]:EXPENSE:[X]:WHAT',
AMOUNT DOUBLE(8,2) FIELD_FORMAT='WEEK:[1]:EXPENSE:[X]:AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
SELECT * FROM t3;
WHO WEEK WHAT AMOUNT
......@@ -284,9 +284,9 @@ Beth 4 Beer 15.00
Janet 4 Car 17.00
CREATE TABLE t4 (
WHO CHAR(12),
WEEK INT(2) FIELD_FORMAT='WEEK:[3]:NUMBER',
WHAT CHAR(32) FIELD_FORMAT='WEEK:[3]:EXPENSE:[X]:WHAT',
AMOUNT DOUBLE(8,2) FIELD_FORMAT='WEEK:[3]:EXPENSE:[X]:AMOUNT')
WEEK INT(2) FIELD_FORMAT='WEEK:[2]:NUMBER',
WHAT CHAR(32) FIELD_FORMAT='WEEK:[2]:EXPENSE:[X]:WHAT',
AMOUNT DOUBLE(8,2) FIELD_FORMAT='WEEK:[2]:EXPENSE:[X]:AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
SELECT * FROM t4;
WHO WEEK WHAT AMOUNT
......
--source include/not_embedded.inc
--source include/have_partition.inc
let $MYSQLD_DATADIR= `select @@datadir`;
--copy_file $MTR_SUITE_DIR/std_data/biblio.json $MYSQLD_DATADIR/test/biblio.json
--copy_file $MTR_SUITE_DIR/std_data/expense.json $MYSQLD_DATADIR/test/expense.json
--copy_file $MTR_SUITE_DIR/std_data/mulexp3.json $MYSQLD_DATADIR/test/mulexp3.json
--copy_file $MTR_SUITE_DIR/std_data/mulexp4.json $MYSQLD_DATADIR/test/mulexp4.json
--copy_file $MTR_SUITE_DIR/std_data/mulexp5.json $MYSQLD_DATADIR/test/mulexp5.json
--echo #
--echo # Testing doc samples
--echo #
CREATE TABLE t1
(
ISBN CHAR(15),
LANG CHAR(2),
SUBJECT CHAR(32),
AUTHOR CHAR(64),
TITLE CHAR(32),
TRANSLATION CHAR(32),
TRANSLATOR CHAR(80),
PUBLISHER CHAR(32),
DATEPUB int(4)
) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json';
SELECT * FROM t1;
DROP TABLE t1;
--echo #
--echo # Testing Jpath. Get the number of authors
--echo #
CREATE TABLE t1
(
ISBN CHAR(15),
Language CHAR(2) FIELD_FORMAT='LANG',
Subject CHAR(32) FIELD_FORMAT='SUBJECT',
Authors INT(2) FIELD_FORMAT='AUTHOR:[#]',
Title CHAR(32) FIELD_FORMAT='TITLE',
Translation CHAR(32) FIELD_FORMAT='TRANSLATION',
Translator CHAR(80) FIELD_FORMAT='TRANSLATOR',
Publisher CHAR(20) FIELD_FORMAT='PUBLISHER:NAME',
Location CHAR(16) FIELD_FORMAT='PUBLISHER:PLACE',
Year int(4) FIELD_FORMAT='DATEPUB'
)
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json';
SELECT * FROM t1;
DROP TABLE t1;
--echo #
--echo # Concatenates the authors
--echo #
CREATE TABLE t1
(
ISBN CHAR(15),
Language CHAR(2) FIELD_FORMAT='LANG',
Subject CHAR(32) FIELD_FORMAT='SUBJECT',
AuthorFN CHAR(128) FIELD_FORMAT='AUTHOR:[" and "]:FIRSTNAME',
AuthorLN CHAR(128) FIELD_FORMAT='AUTHOR:[" and "]:LASTNAME',
Title CHAR(32) FIELD_FORMAT='TITLE',
Translation CHAR(32) FIELD_FORMAT='TRANSLATION',
Translator CHAR(80) FIELD_FORMAT='TRANSLATOR',
Publisher CHAR(20) FIELD_FORMAT='PUBLISHER:NAME',
Location CHAR(16) FIELD_FORMAT='PUBLISHER:PLACE',
Year int(4) FIELD_FORMAT='DATEPUB'
)
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json';
SELECT * FROM t1;
DROP TABLE t1;
--echo #
--echo # Testing expanding authors
--echo #
CREATE TABLE t1
(
ISBN CHAR(15),
Language CHAR(2) FIELD_FORMAT='LANG',
Subject CHAR(32) FIELD_FORMAT='SUBJECT',
AuthorFN CHAR(128) FIELD_FORMAT='AUTHOR:[X]:FIRSTNAME',
AuthorLN CHAR(128) FIELD_FORMAT='AUTHOR:[X]:LASTNAME',
Title CHAR(32) FIELD_FORMAT='TITLE',
Translation CHAR(32) FIELD_FORMAT='TRANSLATION',
Translator CHAR(80) FIELD_FORMAT='TRANSLATOR',
Publisher CHAR(20) FIELD_FORMAT='PUBLISHER:NAME',
Location CHAR(16) FIELD_FORMAT='PUBLISHER:PLACE',
Year int(4) FIELD_FORMAT='DATEPUB'
)
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json';
SELECT * FROM t1;
UPDATE t1 SET AuthorFN = 'Philippe' WHERE AuthorLN = 'Knab';
SELECT * FROM t1 WHERE ISBN = '9782212090819';
--echo #
--echo # To add an author a new table must be created
--echo #
CREATE TABLE t2 (
FIRSTNAME CHAR(32),
LASTNAME CHAR(32))
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json' OPTION_LIST='Object=[2]:AUTHOR';
SELECT * FROM t2;
INSERT INTO t2 VALUES('Charles','Dickens');
SELECT * FROM t1;
DROP TABLE t1;
DROP TABLE t2;
--echo #
--echo # Check the biblio file has the good format
--echo #
CREATE TABLE t1
(
line char(255)
)
ENGINE=CONNECT TABLE_TYPE=DOS FILE_NAME='biblio.json';
SELECT * FROM t1;
DROP TABLE t1;
--echo #
--echo # A file with 2 arrays
--echo #
CREATE TABLE t1 (
WHO CHAR(12),
WEEK INT(2) FIELD_FORMAT='WEEK:[X]:NUMBER',
WHAT CHAR(32) FIELD_FORMAT='WEEK::EXPENSE:["+"]:WHAT',
AMOUNT DOUBLE(8,2) FIELD_FORMAT='WEEK::EXPENSE:[+]:AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
SELECT * FROM t1;
DROP TABLE t1;
--echo #
--echo # Now it can be fully expanded
--echo #
CREATE TABLE t1 (
WHO CHAR(12),
WEEK INT(2) FIELD_FORMAT='WEEK:[X]:NUMBER',
WHAT CHAR(32) FIELD_FORMAT='WEEK:[X]:EXPENSE:[X]:WHAT',
AMOUNT DOUBLE(8,2) FIELD_FORMAT='WEEK:[X]:EXPENSE:[X]:AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
#--error ER_GET_ERRMSG
SELECT * FROM t1;
DROP TABLE t1;
--echo #
--echo # A table showing many calculated results
--echo #
CREATE TABLE t1 (
WHO CHAR(12) NOT NULL,
WEEKS CHAR(12) NOT NULL FIELD_FORMAT='WEEK:[", "]:NUMBER',
SUMS CHAR(64) NOT NULL FIELD_FORMAT='WEEK:["+"]:EXPENSE:[+]:AMOUNT',
SUM DOUBLE(8,2) NOT NULL FIELD_FORMAT='WEEK:[+]:EXPENSE:[+]:AMOUNT',
AVGS CHAR(64) NOT NULL FIELD_FORMAT='WEEK:["+"]:EXPENSE:[!]:AMOUNT',
SUMAVG DOUBLE(8,2) NOT NULL FIELD_FORMAT='WEEK:[+]:EXPENSE:[!]:AMOUNT',
AVGSUM DOUBLE(8,2) NOT NULL FIELD_FORMAT='WEEK:[!]:EXPENSE:[+]:AMOUNT',
AVGAVG DOUBLE(8,2) NOT NULL FIELD_FORMAT='WEEK:[!]:EXPENSE:[!]:AMOUNT',
AVERAGE DOUBLE(8,2) NOT NULL FIELD_FORMAT='WEEK:[!]:EXPENSE:[X]:AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
SELECT * FROM t1;
DROP TABLE t1;
--echo #
--echo # Expand expense in 3 one week tables
--echo #
CREATE TABLE t2 (
WHO CHAR(12),
WEEK INT(2) FIELD_FORMAT='WEEK:[1]:NUMBER',
WHAT CHAR(32) FIELD_FORMAT='WEEK:[1]:EXPENSE:[X]:WHAT',
AMOUNT DOUBLE(8,2) FIELD_FORMAT='WEEK:[1]:EXPENSE:[X]:AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
SELECT * FROM t2;
CREATE TABLE t3 (
WHO CHAR(12),
WEEK INT(2) FIELD_FORMAT='WEEK:[2]:NUMBER',
WHAT CHAR(32) FIELD_FORMAT='WEEK:[2]:EXPENSE:[X]:WHAT',
AMOUNT DOUBLE(8,2) FIELD_FORMAT='WEEK:[2]:EXPENSE:[X]:AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
SELECT * FROM t3;
CREATE TABLE t4 (
WHO CHAR(12),
WEEK INT(2) FIELD_FORMAT='WEEK:[3]:NUMBER',
WHAT CHAR(32) FIELD_FORMAT='WEEK:[3]:EXPENSE:[X]:WHAT',
AMOUNT DOUBLE(8,2) FIELD_FORMAT='WEEK:[3]:EXPENSE:[X]:AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
SELECT * FROM t4;
--echo #
--echo # The expanded table is made as a TBL table
--echo #
CREATE TABLE t1 (
WHO CHAR(12),
WEEK INT(2),
WHAT CHAR(32),
AMOUNT DOUBLE(8,2))
ENGINE=CONNECT TABLE_TYPE=TBL TABLE_LIST='t2,t3,t4';
SELECT * FROM t1;
DROP TABLE t1, t2, t3, t4;
--echo #
--echo # Three partial JSON tables
--echo #
CREATE TABLE t2 (
WHO CHAR(12),
WEEK INT(2),
WHAT CHAR(32) FIELD_FORMAT='EXPENSE:[X]:WHAT',
AMOUNT DOUBLE(8,2) FIELD_FORMAT='EXPENSE:[X]:AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp3.json';
SELECT * FROM t2;
CREATE TABLE t3 (
WHO CHAR(12),
WEEK INT(2),
WHAT CHAR(32) FIELD_FORMAT='EXPENSE:[X]:WHAT',
AMOUNT DOUBLE(8,2) FIELD_FORMAT='EXPENSE:[X]:AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp4.json';
SELECT * FROM t3;
CREATE TABLE t4 (
WHO CHAR(12),
WEEK INT(2),
WHAT CHAR(32) FIELD_FORMAT='EXPENSE:[X]:WHAT',
AMOUNT DOUBLE(8,2) FIELD_FORMAT='EXPENSE:[X]:AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp5.json';
SELECT * FROM t4;
--echo #
--echo # The complete table can be a multiple JSON table
--echo #
CREATE TABLE t1 (
WHO CHAR(12),
WEEK INT(2),
WHAT CHAR(32) FIELD_FORMAT='EXPENSE:[X]:WHAT',
AMOUNT DOUBLE(8,2) FIELD_FORMAT='EXPENSE:[X]:AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp*.json' MULTIPLE=1;
SELECT * FROM t1 ORDER BY WHO, WEEK, WHAT, AMOUNT;
DROP TABLE t1;
--echo #
--echo # Or also a partition JSON table
--echo #
CREATE TABLE t1 (
WHO CHAR(12),
WEEK INT(2),
WHAT CHAR(32) FIELD_FORMAT='EXPENSE:[X]:WHAT',
AMOUNT DOUBLE(8,2) FIELD_FORMAT='EXPENSE:[X]:AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp%s.json';
ALTER TABLE t1
PARTITION BY LIST COLUMNS(WEEK) (
PARTITION `3` VALUES IN(3),
PARTITION `4` VALUES IN(4),
PARTITION `5` VALUES IN(5));
SHOW WARNINGS;
SELECT * FROM t1;
SELECT * FROM t1 WHERE WEEK = 4;
DROP TABLE t1, t2, t3, t4;
#
# Clean up
#
--remove_file $MYSQLD_DATADIR/test/biblio.json
--remove_file $MYSQLD_DATADIR/test/expense.json
--remove_file $MYSQLD_DATADIR/test/mulexp3.json
--remove_file $MYSQLD_DATADIR/test/mulexp4.json
--remove_file $MYSQLD_DATADIR/test/mulexp5.json
--source include/not_embedded.inc
--source include/have_partition.inc
let $MYSQLD_DATADIR= `select @@datadir`;
--copy_file $MTR_SUITE_DIR/std_data/biblio.json $MYSQLD_DATADIR/test/biblio.json
--copy_file $MTR_SUITE_DIR/std_data/expense.json $MYSQLD_DATADIR/test/expense.json
--copy_file $MTR_SUITE_DIR/std_data/mulexp3.json $MYSQLD_DATADIR/test/mulexp3.json
--copy_file $MTR_SUITE_DIR/std_data/mulexp4.json $MYSQLD_DATADIR/test/mulexp4.json
--copy_file $MTR_SUITE_DIR/std_data/mulexp5.json $MYSQLD_DATADIR/test/mulexp5.json
--echo #
--echo # Testing doc samples
--echo #
CREATE TABLE t1
(
ISBN CHAR(15),
LANG CHAR(2),
SUBJECT CHAR(32),
AUTHOR CHAR(64),
TITLE CHAR(32),
TRANSLATION CHAR(32),
TRANSLATOR CHAR(80),
PUBLISHER CHAR(32),
DATEPUB int(4)
) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json';
SELECT * FROM t1;
DROP TABLE t1;
--echo #
--echo # Testing Jpath. Get the number of authors
--echo #
CREATE TABLE t1
(
ISBN CHAR(15),
Language CHAR(2) FIELD_FORMAT='LANG',
Subject CHAR(32) FIELD_FORMAT='SUBJECT',
Authors INT(2) FIELD_FORMAT='AUTHOR:[#]',
Title CHAR(32) FIELD_FORMAT='TITLE',
Translation CHAR(32) FIELD_FORMAT='TRANSLATION',
Translator CHAR(80) FIELD_FORMAT='TRANSLATOR',
Publisher CHAR(20) FIELD_FORMAT='PUBLISHER:NAME',
Location CHAR(16) FIELD_FORMAT='PUBLISHER:PLACE',
Year int(4) FIELD_FORMAT='DATEPUB'
)
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json';
SELECT * FROM t1;
DROP TABLE t1;
--echo #
--echo # Concatenates the authors
--echo #
CREATE TABLE t1
(
ISBN CHAR(15),
Language CHAR(2) FIELD_FORMAT='LANG',
Subject CHAR(32) FIELD_FORMAT='SUBJECT',
AuthorFN CHAR(128) FIELD_FORMAT='AUTHOR:[" and "]:FIRSTNAME',
AuthorLN CHAR(128) FIELD_FORMAT='AUTHOR:[" and "]:LASTNAME',
Title CHAR(32) FIELD_FORMAT='TITLE',
Translation CHAR(32) FIELD_FORMAT='TRANSLATION',
Translator CHAR(80) FIELD_FORMAT='TRANSLATOR',
Publisher CHAR(20) FIELD_FORMAT='PUBLISHER:NAME',
Location CHAR(16) FIELD_FORMAT='PUBLISHER:PLACE',
Year int(4) FIELD_FORMAT='DATEPUB'
)
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json';
SELECT * FROM t1;
DROP TABLE t1;
--echo #
--echo # Testing expanding authors
--echo #
CREATE TABLE t1
(
ISBN CHAR(15),
Language CHAR(2) FIELD_FORMAT='LANG',
Subject CHAR(32) FIELD_FORMAT='SUBJECT',
AuthorFN CHAR(128) FIELD_FORMAT='AUTHOR:[X]:FIRSTNAME',
AuthorLN CHAR(128) FIELD_FORMAT='AUTHOR:[X]:LASTNAME',
Title CHAR(32) FIELD_FORMAT='TITLE',
Translation CHAR(32) FIELD_FORMAT='TRANSLATION',
Translator CHAR(80) FIELD_FORMAT='TRANSLATOR',
Publisher CHAR(20) FIELD_FORMAT='PUBLISHER:NAME',
Location CHAR(16) FIELD_FORMAT='PUBLISHER:PLACE',
Year int(4) FIELD_FORMAT='DATEPUB'
)
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json';
SELECT * FROM t1;
UPDATE t1 SET AuthorFN = 'Philippe' WHERE AuthorLN = 'Knab';
SELECT * FROM t1 WHERE ISBN = '9782212090819';
--echo #
--echo # To add an author a new table must be created
--echo #
CREATE TABLE t2 (
FIRSTNAME CHAR(32),
LASTNAME CHAR(32))
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json' OPTION_LIST='Object=[1]:AUTHOR';
SELECT * FROM t2;
INSERT INTO t2 VALUES('Charles','Dickens');
SELECT * FROM t1;
DROP TABLE t1;
DROP TABLE t2;
--echo #
--echo # Check the biblio file has the good format
--echo #
CREATE TABLE t1
(
line char(255)
)
ENGINE=CONNECT TABLE_TYPE=DOS FILE_NAME='biblio.json';
SELECT * FROM t1;
DROP TABLE t1;
--echo #
--echo # A file with 2 arrays
--echo #
CREATE TABLE t1 (
WHO CHAR(12),
WEEK INT(2) FIELD_FORMAT='WEEK:[X]:NUMBER',
WHAT CHAR(32) FIELD_FORMAT='WEEK::EXPENSE:["+"]:WHAT',
AMOUNT DOUBLE(8,2) FIELD_FORMAT='WEEK::EXPENSE:[+]:AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
SELECT * FROM t1;
DROP TABLE t1;
--echo #
--echo # Now it can be fully expanded
--echo #
CREATE TABLE t1 (
WHO CHAR(12),
WEEK INT(2) FIELD_FORMAT='WEEK:[X]:NUMBER',
WHAT CHAR(32) FIELD_FORMAT='WEEK:[X]:EXPENSE:[X]:WHAT',
AMOUNT DOUBLE(8,2) FIELD_FORMAT='WEEK:[X]:EXPENSE:[X]:AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
#--error ER_GET_ERRMSG
SELECT * FROM t1;
DROP TABLE t1;
--echo #
--echo # A table showing many calculated results
--echo #
CREATE TABLE t1 (
WHO CHAR(12) NOT NULL,
WEEKS CHAR(12) NOT NULL FIELD_FORMAT='WEEK:[", "]:NUMBER',
SUMS CHAR(64) NOT NULL FIELD_FORMAT='WEEK:["+"]:EXPENSE:[+]:AMOUNT',
SUM DOUBLE(8,2) NOT NULL FIELD_FORMAT='WEEK:[+]:EXPENSE:[+]:AMOUNT',
AVGS CHAR(64) NOT NULL FIELD_FORMAT='WEEK:["+"]:EXPENSE:[!]:AMOUNT',
SUMAVG DOUBLE(8,2) NOT NULL FIELD_FORMAT='WEEK:[+]:EXPENSE:[!]:AMOUNT',
AVGSUM DOUBLE(8,2) NOT NULL FIELD_FORMAT='WEEK:[!]:EXPENSE:[+]:AMOUNT',
AVGAVG DOUBLE(8,2) NOT NULL FIELD_FORMAT='WEEK:[!]:EXPENSE:[!]:AMOUNT',
AVERAGE DOUBLE(8,2) NOT NULL FIELD_FORMAT='WEEK:[!]:EXPENSE:[X]:AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
SELECT * FROM t1;
DROP TABLE t1;
--echo #
--echo # Expand expense in 3 one week tables
--echo #
CREATE TABLE t2 (
WHO CHAR(12),
WEEK INT(2) FIELD_FORMAT='WEEK:[0]:NUMBER',
WHAT CHAR(32) FIELD_FORMAT='WEEK:[0]:EXPENSE:[X]:WHAT',
AMOUNT DOUBLE(8,2) FIELD_FORMAT='WEEK:[0]:EXPENSE:[X]:AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
SELECT * FROM t2;
CREATE TABLE t3 (
WHO CHAR(12),
WEEK INT(2) FIELD_FORMAT='WEEK:[1]:NUMBER',
WHAT CHAR(32) FIELD_FORMAT='WEEK:[1]:EXPENSE:[X]:WHAT',
AMOUNT DOUBLE(8,2) FIELD_FORMAT='WEEK:[1]:EXPENSE:[X]:AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
SELECT * FROM t3;
CREATE TABLE t4 (
WHO CHAR(12),
WEEK INT(2) FIELD_FORMAT='WEEK:[2]:NUMBER',
WHAT CHAR(32) FIELD_FORMAT='WEEK:[2]:EXPENSE:[X]:WHAT',
AMOUNT DOUBLE(8,2) FIELD_FORMAT='WEEK:[2]:EXPENSE:[X]:AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
SELECT * FROM t4;
--echo #
--echo # The expanded table is made as a TBL table
--echo #
CREATE TABLE t1 (
WHO CHAR(12),
WEEK INT(2),
WHAT CHAR(32),
AMOUNT DOUBLE(8,2))
ENGINE=CONNECT TABLE_TYPE=TBL TABLE_LIST='t2,t3,t4';
SELECT * FROM t1;
DROP TABLE t1, t2, t3, t4;
--echo #
--echo # Three partial JSON tables
--echo #
CREATE TABLE t2 (
WHO CHAR(12),
WEEK INT(2),
WHAT CHAR(32) FIELD_FORMAT='EXPENSE:[X]:WHAT',
AMOUNT DOUBLE(8,2) FIELD_FORMAT='EXPENSE:[X]:AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp3.json';
SELECT * FROM t2;
CREATE TABLE t3 (
WHO CHAR(12),
WEEK INT(2),
WHAT CHAR(32) FIELD_FORMAT='EXPENSE:[X]:WHAT',
AMOUNT DOUBLE(8,2) FIELD_FORMAT='EXPENSE:[X]:AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp4.json';
SELECT * FROM t3;
CREATE TABLE t4 (
WHO CHAR(12),
WEEK INT(2),
WHAT CHAR(32) FIELD_FORMAT='EXPENSE:[X]:WHAT',
AMOUNT DOUBLE(8,2) FIELD_FORMAT='EXPENSE:[X]:AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp5.json';
SELECT * FROM t4;
--echo #
--echo # The complete table can be a multiple JSON table
--echo #
CREATE TABLE t1 (
WHO CHAR(12),
WEEK INT(2),
WHAT CHAR(32) FIELD_FORMAT='EXPENSE:[X]:WHAT',
AMOUNT DOUBLE(8,2) FIELD_FORMAT='EXPENSE:[X]:AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp*.json' MULTIPLE=1;
SELECT * FROM t1 ORDER BY WHO, WEEK, WHAT, AMOUNT;
DROP TABLE t1;
--echo #
--echo # Or also a partition JSON table
--echo #
CREATE TABLE t1 (
WHO CHAR(12),
WEEK INT(2),
WHAT CHAR(32) FIELD_FORMAT='EXPENSE:[X]:WHAT',
AMOUNT DOUBLE(8,2) FIELD_FORMAT='EXPENSE:[X]:AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp%s.json';
ALTER TABLE t1
PARTITION BY LIST COLUMNS(WEEK) (
PARTITION `3` VALUES IN(3),
PARTITION `4` VALUES IN(4),
PARTITION `5` VALUES IN(5));
SHOW WARNINGS;
SELECT * FROM t1;
SELECT * FROM t1 WHERE WEEK = 4;
DROP TABLE t1, t2, t3, t4;
#
# Clean up
#
--remove_file $MYSQLD_DATADIR/test/biblio.json
--remove_file $MYSQLD_DATADIR/test/expense.json
--remove_file $MYSQLD_DATADIR/test/mulexp3.json
--remove_file $MYSQLD_DATADIR/test/mulexp4.json
--remove_file $MYSQLD_DATADIR/test/mulexp5.json
......@@ -333,7 +333,7 @@ int TABDEF::GetColCatInfo(PGLOBAL g)
if (nof)
// Field width is the internal representation width
// that can also depend on the column format
switch (cdp->Fmt ? *cdp->Fmt : 'X') {
switch (cdp->Fmt ? *cdp->Fmt : cdp->Decode ? 'C' : 'X') {
case 'X': nof= cdp->Clen;
case 'C': break;
case 'R':
......@@ -346,7 +346,7 @@ int TABDEF::GetColCatInfo(PGLOBAL g)
default: /* New format */
for (nof= 0, i= 0; cdp->Fmt[i]; i++)
if (isdigit(cdp->Fmt[i]))
nof= (nof * 10 + (cdp->Fmt[i] - 48));
nof= (nof * 10 + (cdp->Fmt[i] - '0'));
if (!nof)
nof= cdp->Clen;
......@@ -377,20 +377,16 @@ int TABDEF::GetColCatInfo(PGLOBAL g)
// not specified (for instance if quoted is specified)
// if ((ending= Hc->GetIntegerOption("Ending")) < 0) {
if ((ending= Hc->GetIntegerOption("Ending")) <= 0) {
#if defined(WIN32)
ending= 2;
#else
ending= 1;
#endif
ending= (tc == TAB_BIN || tc == TAB_VEC) ? 0 : CRLF;
Hc->SetIntegerOption("Ending", ending);
} // endif ending
// Calculate the default record size
switch (tc) {
case TAB_FIX:
case TAB_BIN:
recln= nlg + ending; // + length of line ending
break;
case TAB_BIN:
case TAB_VEC:
recln= nlg;
......
......@@ -385,7 +385,7 @@ BINCOL::BINCOL(PGLOBAL g, PCOLDEF cdp, PTDB tp, PCOL cp, int i, PSZ am)
for (N = 0, i = 0; fmt[i]; i++)
if (isdigit(fmt[i]))
N = (N * 10 + (fmt[i] - 48));
N = (N * 10 + (fmt[i] - '0'));
else
Fmt = toupper(fmt[i]);
......@@ -408,7 +408,7 @@ BINCOL::BINCOL(PGLOBAL g, PCOLDEF cdp, PTDB tp, PCOL cp, int i, PSZ am)
} else {
N = 0;
Fmt = 'X';
Fmt = GetDomain() ? 'C' : 'X';
} // endif fmt
} // end of BINCOL constructor
......
......@@ -372,6 +372,7 @@ bool JSONDEF::DefineAM(PGLOBAL g, LPCSTR, int poff)
Pretty = GetIntCatInfo("Pretty", 2);
Level = GetIntCatInfo("Level", 0);
Limit = GetIntCatInfo("Limit", 10);
Base = GetIntCatInfo("Base", 0);
return DOSDEF::DefineAM(g, "DOS", poff);
} // end of DefineAM
......@@ -440,6 +441,7 @@ TDBJSN::TDBJSN(PJDEF tdp, PTXF txfp) : TDBDOS(tdp, txfp)
Xcol = tdp->Xcol;
Limit = tdp->Limit;
Pretty = tdp->Pretty;
B = tdp->Base ? 1 : 0;
Strict = tdp->Strict;
} else {
Jmode = MODE_OBJECT;
......@@ -447,11 +449,12 @@ TDBJSN::TDBJSN(PJDEF tdp, PTXF txfp) : TDBDOS(tdp, txfp)
Xcol = NULL;
Limit = 1;
Pretty = 0;
B = 1;
Strict = false;
} // endif tdp
Fpos = -1;
N = 0;
N = M = 0;
NextSame = 0;
SameRow = 0;
Xval = -1;
......@@ -469,6 +472,7 @@ TDBJSN::TDBJSN(TDBJSN *tdbp) : TDBDOS(NULL, tdbp)
Xcol = tdbp->Xcol;
Fpos = tdbp->Fpos;
N = tdbp->N;
M = tdbp->M;
Limit = tdbp->Limit;
NextSame = tdbp->NextSame;
SameRow = tdbp->SameRow;
......@@ -563,7 +567,7 @@ PJSON TDBJSN::FindRow(PGLOBAL g)
jsp->GetObject()->GetValue(objpath) : NULL;
} else if (objpath[strlen(objpath)-1] == ']') {
val = (jsp->GetType() == TYPE_JAR) ?
jsp->GetArray()->GetValue(atoi(objpath+1) - 1) : NULL;
jsp->GetArray()->GetValue(atoi(objpath+1) - B) : NULL;
} else
val = NULL;
......@@ -649,6 +653,7 @@ int TDBJSN::ReadDB(PGLOBAL g)
if (NextSame) {
SameRow = NextSame;
NextSame = 0;
M++;
return RC_OK;
} else if ((rc = TDBDOS::ReadDB(g)) == RC_OK)
if (!IsRead() && ((rc = ReadBuffer(g)) != RC_OK)) {
......@@ -660,6 +665,7 @@ int TDBJSN::ReadDB(PGLOBAL g)
Row = FindRow(g);
SameRow = 0;
Fpos++;
M = 1;
rc = RC_OK;
} // endif's
......@@ -708,7 +714,7 @@ int TDBJSN::MakeTopTree(PGLOBAL g, PJSON jsp)
val->SetValue(arp);
val = new(g) JVALUE;
i = atoi(objpath+1) - 1;
i = atoi(objpath+1) - B;
arp->SetValue(g, val, i);
arp->InitArray(g);
} else {
......@@ -856,8 +862,8 @@ bool JSONCOL::SetArrayOptions(PGLOBAL g, char *p, int i, PSZ nm)
return true;
else if (jnp->Op != OP_EXP) {
if (b) {
// Return 1st value
jnp->Rank = 1;
// Return 1st value (B is the index base)
jnp->Rank = 1 - Tjp->B;
jnp->Op = OP_EQ;
} else if (!Value->IsTypeNum()) {
jnp->CncVal = AllocateValue(g, (void*)", ", TYPE_STRING);
......@@ -868,13 +874,9 @@ bool JSONCOL::SetArrayOptions(PGLOBAL g, char *p, int i, PSZ nm)
} // endif OP
} else if (dg) {
if (atoi(p) > 0) {
// Return nth value
jnp->Rank = atoi(p);
jnp->Op = OP_EQ;
} else // Ignore array
jnp->Op = OP_NULL;
// Return nth value
jnp->Rank = atoi(p) - Tjp->B;
jnp->Op = OP_EQ;
} else if (n == 1) {
// Set the Op value;
switch (*p) {
......@@ -1118,16 +1120,12 @@ PVAL JSONCOL::GetColumnValue(PGLOBAL g, PJSON row, int i)
arp = (PJAR)row;
if (!Nodes[i].Key) {
if (Nodes[i].Op != OP_NULL) {
if (Nodes[i].Rank) {
val = arp->GetValue(Nodes[i].Rank - 1);
} else if (Nodes[i].Op == OP_EXP) {
return ExpandArray(g, arp, i);
} else
return CalculateArray(g, arp, i);
} else
val = NULL;
if (Nodes[i].Op == OP_EQ)
val = arp->GetValue(Nodes[i].Rank);
else if (Nodes[i].Op == OP_EXP)
return ExpandArray(g, arp, i);
else
return CalculateArray(g, arp, i);
} else if (i < Nod-1) {
strcpy(g->Message, "Unexpected array");
......@@ -1289,16 +1287,12 @@ PJSON JSONCOL::GetRow(PGLOBAL g)
break;
case TYPE_JAR:
if (!Nodes[i].Key) {
if (Nodes[i].Op != OP_NULL) {
arp = (PJAR)row;
if (Nodes[i].Rank)
val = arp->GetValue(Nodes[i].Rank - 1);
else
val = arp->GetValue(Nodes[i].Rx);
arp = (PJAR)row;
} else
val = NULL;
if (Nodes[i].Op == OP_EQ)
val = arp->GetValue(Nodes[i].Rank);
else
val = arp->GetValue(Nodes[i].Rx);
} else {
strcpy(g->Message, "Unexpected array");
......@@ -1390,8 +1384,8 @@ void JSONCOL::WriteColumn(PGLOBAL g)
} // endif jsp
if (arp) {
if (Nod > 1 && Nodes[Nod-2].Rank)
arp->SetValue(g, new(g) JVALUE(jsp), Nodes[Nod-2].Rank-1);
if (Nod > 1 && Nodes[Nod-2].Op == OP_EQ)
arp->SetValue(g, new(g) JVALUE(jsp), Nodes[Nod-2].Rank);
else
arp->AddValue(g, new(g) JVALUE(jsp));
......@@ -1411,8 +1405,8 @@ void JSONCOL::WriteColumn(PGLOBAL g)
case TYPE_INT:
case TYPE_DOUBLE:
if (arp) {
if (Nodes[Nod-1].Rank)
arp->SetValue(g, new(g) JVALUE(g, Value), Nodes[Nod-1].Rank-1);
if (Nodes[Nod-1].Op == OP_EQ)
arp->SetValue(g, new(g) JVALUE(g, Value), Nodes[Nod-1].Rank);
else
arp->AddValue(g, new(g) JVALUE(g, Value));
......@@ -1562,7 +1556,7 @@ int TDBJSON::MakeDocument(PGLOBAL g)
arp = jsp->GetArray();
objp = NULL;
i = atoi(objpath+1) - 1;
i = atoi(objpath+1) - B;
val = arp->GetValue(i);
if (!val) {
......@@ -1750,6 +1744,7 @@ int TDBJSON::ReadDB(PGLOBAL)
if (NextSame) {
SameRow = NextSame;
NextSame = false;
M++;
rc = RC_OK;
} else if (++Fpos < (signed)Doc->size()) {
Row = Doc->GetValue(Fpos);
......@@ -1758,6 +1753,7 @@ int TDBJSON::ReadDB(PGLOBAL)
Row = ((PJVAL)Row)->GetJson();
SameRow = 0;
M = 1;
rc = RC_OK;
} else
rc = RC_EF;
......
......@@ -57,6 +57,7 @@ class JSONDEF : public DOSDEF { /* Table description */
int Limit; /* Limit of multiple values */
int Pretty; /* Depends on file structure */
int Level; /* Used for catalog table */
int Base; /* The array index base */
bool Strict; /* Strict syntax checking */
}; // end of JSONDEF
......@@ -84,7 +85,7 @@ class TDBJSN : public TDBDOS {
virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n);
virtual PCOL InsertSpecialColumn(PCOL colp);
virtual int RowNumber(PGLOBAL g, bool b = FALSE)
{return (b) ? N : Fpos + 1;}
{return (b) ? M : N;}
// Database routines
virtual int Cardinality(PGLOBAL g);
......@@ -106,13 +107,14 @@ class TDBJSN : public TDBDOS {
char *Objname; // The table object name
char *Xcol; // Name of expandable column
int Fpos; // The current row index
//int Spos; // DELETE start index
int N; // The current Rownum
int M; // Index of multiple value
int Limit; // Limit of multiple values
int Pretty; // Depends on file structure
int NextSame; // Same next row
int SameRow; // Same row nb
int Xval; // Index of expandable array
int B; // Array index base
bool Strict; // Strict syntax checking
bool Comma; // Row has final comma
}; // end of class TDBJSN
......