Commit 1d90d687 authored by Marko Mäkelä

Merge 10.4 into 10.5

parents 5fc172fd 36d173e5
# Copyright (c) 2010, 2014, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA
# This file includes OSX specific options and quirks, related to system checks
@@ -34,9 +34,10 @@ typedef struct {
pthread_t id;
uint num;
pthread_mutex_t data_mutex;
pthread_cond_t avail_cond;
pthread_cond_t data_cond;
pthread_cond_t done_cond;
my_bool data_avail;
pthread_t data_avail;
my_bool cancelled;
const char *from;
size_t from_len;
@@ -195,9 +196,13 @@ compress_write(ds_file_t *file, const uchar *buf, size_t len)
threads = comp_ctxt->threads;
nthreads = comp_ctxt->nthreads;
const pthread_t self = pthread_self();
ptr = (const char *) buf;
while (len > 0) {
uint max_thread;
bool wait = nthreads == 1;
retry:
bool submitted = false;
/* Send data to worker threads for compression */
for (i = 0; i < nthreads; i++) {
@@ -206,16 +211,33 @@ compress_write(ds_file_t *file, const uchar *buf, size_t len)
thd = threads + i;
pthread_mutex_lock(&thd->data_mutex);
if (thd->data_avail == pthread_t(~0UL)) {
} else if (!wait) {
skip:
pthread_mutex_unlock(&thd->data_mutex);
continue;
} else {
for (;;) {
pthread_cond_wait(&thd->avail_cond,
&thd->data_mutex);
if (thd->data_avail
== pthread_t(~0UL)) {
break;
}
goto skip;
}
}
chunk_len = (len > COMPRESS_CHUNK_SIZE) ?
COMPRESS_CHUNK_SIZE : len;
thd->from = ptr;
thd->from_len = chunk_len;
thd->data_avail = TRUE;
thd->data_avail = self;
pthread_cond_signal(&thd->data_cond);
pthread_mutex_unlock(&thd->data_mutex);
submitted = true;
len -= chunk_len;
if (len == 0) {
break;
@@ -223,13 +245,20 @@ compress_write(ds_file_t *file, const uchar *buf, size_t len)
ptr += chunk_len;
}
max_thread = (i < nthreads) ? i : nthreads - 1;
if (!submitted) {
wait = true;
goto retry;
}
/* Reap and stream the compressed data */
for (i = 0; i <= max_thread; i++) {
for (i = 0; i < nthreads; i++) {
thd = threads + i;
pthread_mutex_lock(&thd->data_mutex);
if (thd->data_avail != self) {
pthread_mutex_unlock(&thd->data_mutex);
continue;
}
while (!thd->to_len) {
pthread_cond_wait(&thd->done_cond,
&thd->data_mutex);
@@ -247,6 +276,8 @@ compress_write(ds_file_t *file, const uchar *buf, size_t len)
}
thd->to_len = 0;
thd->data_avail = pthread_t(~0UL);
pthread_cond_signal(&thd->avail_cond);
pthread_mutex_unlock(&thd->data_mutex);
if (fail) {
@@ -334,6 +365,7 @@ destroy_worker_thread(comp_thread_ctxt_t *thd)
pthread_join(thd->id, NULL);
pthread_cond_destroy(&thd->avail_cond);
pthread_cond_destroy(&thd->data_cond);
pthread_cond_destroy(&thd->done_cond);
pthread_mutex_destroy(&thd->data_mutex);
@@ -364,11 +396,14 @@ create_worker_threads(uint n)
/* Initialize the data mutex and condition variables */
if (pthread_mutex_init(&thd->data_mutex, NULL) ||
pthread_cond_init(&thd->avail_cond, NULL) ||
pthread_cond_init(&thd->data_cond, NULL) ||
pthread_cond_init(&thd->done_cond, NULL)) {
goto err;
}
thd->data_avail = pthread_t(~0UL);
if (pthread_create(&thd->id, NULL, compress_worker_thread_func,
thd)) {
msg("compress: pthread_create() failed: "
@@ -410,13 +445,13 @@ compress_worker_thread_func(void *arg)
pthread_mutex_lock(&thd->data_mutex);
while (1) {
while (!thd->data_avail && !thd->cancelled) {
while (!thd->cancelled
&& (thd->to_len || thd->data_avail == pthread_t(~0UL))) {
pthread_cond_wait(&thd->data_cond, &thd->data_mutex);
}
if (thd->cancelled)
break;
thd->data_avail = FALSE;
thd->to_len = qlz_compress(thd->from, thd->to, thd->from_len,
&thd->state);
......
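The ds_compress hunks above replace the single my_bool data_avail flag with a pthread_t owner id (~0UL meaning idle) and add an avail_cond condition variable, so several writer threads can share one pool of compression workers: a writer claims an idle slot by storing its own thread id, hands over a chunk via data_cond, waits on done_cond for the result, then marks the slot idle again and signals avail_cond. In the real code the reap loop also compares data_avail against the calling thread's id so concurrent writers skip each other's slots. Below is a minimal standalone sketch of that claim/submit/reap handshake with a single writer; worker_slot, submit_and_reap and the placeholder "compression" step are invented for the illustration and are not the mariabackup code.

#include <pthread.h>
#include <cstdio>

/* One worker slot, loosely modelled on comp_thread_ctxt_t above. */
struct worker_slot {
  pthread_mutex_t mtx;
  pthread_cond_t  avail_cond;   /* the slot became idle again */
  pthread_cond_t  data_cond;    /* a chunk was submitted */
  pthread_cond_t  done_cond;    /* the "compressed" result is ready */
  pthread_t owner;              /* writer that currently owns the slot
                                   (the real code keeps this in data_avail) */
  bool idle;                    /* no writer owns the slot */
  bool stop;
  const char *in;  size_t in_len;
  size_t out_len;               /* stands in for the compressed length */
};

static void *worker(void *arg) {
  worker_slot *s = static_cast<worker_slot *>(arg);
  pthread_mutex_lock(&s->mtx);
  for (;;) {
    /* Sleep until a chunk is pending and the previous result was reaped. */
    while (!s->stop && (s->idle || s->out_len != 0))
      pthread_cond_wait(&s->data_cond, &s->mtx);
    if (s->stop) break;
    s->out_len = s->in_len;     /* placeholder for qlz_compress() */
    pthread_cond_signal(&s->done_cond);
  }
  pthread_mutex_unlock(&s->mtx);
  return nullptr;
}

/* Writer side: claim the slot, submit one chunk, wait for the result and
   release the slot so another writer thread may reuse it. */
static size_t submit_and_reap(worker_slot *s, const char *buf, size_t len) {
  pthread_mutex_lock(&s->mtx);
  while (!s->idle)                       /* claim an idle slot */
    pthread_cond_wait(&s->avail_cond, &s->mtx);
  s->idle = false;
  s->owner = pthread_self();             /* like data_avail = self */
  s->in = buf;  s->in_len = len;
  pthread_cond_signal(&s->data_cond);
  while (s->out_len == 0)                /* reap the result */
    pthread_cond_wait(&s->done_cond, &s->mtx);
  size_t produced = s->out_len;
  s->out_len = 0;
  s->idle = true;                        /* like data_avail = ~0UL */
  pthread_cond_signal(&s->avail_cond);
  pthread_mutex_unlock(&s->mtx);
  return produced;
}

int main() {
  worker_slot slot = worker_slot();      /* zero-initialize the aggregate */
  pthread_mutex_init(&slot.mtx, nullptr);
  pthread_cond_init(&slot.avail_cond, nullptr);
  pthread_cond_init(&slot.data_cond, nullptr);
  pthread_cond_init(&slot.done_cond, nullptr);
  slot.idle = true;
  pthread_t id;
  pthread_create(&id, nullptr, worker, &slot);
  const char chunk[] = "example chunk";
  std::printf("worker produced %zu bytes\n",
              submit_and_reap(&slot, chunk, sizeof chunk));
  pthread_mutex_lock(&slot.mtx);
  slot.stop = true;
  pthread_cond_signal(&slot.data_cond);
  pthread_mutex_unlock(&slot.mtx);
  pthread_join(id, nullptr);
  return 0;
}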
@@ -1016,6 +1016,33 @@ j
{"ID": "4", "Name": "Betty", "Age": 19}
drop table t1;
#
# MDEV-27151: JSON_VALUE() does not parse NULL properties properly
#
#
# It is correct for JSON_EXTRACT() to return null rather than SQL NULL because
# it returns the JSON literal that is stored inside the JSON document.
# Hence it should return the string 'null' and not SQL NULL.
# JSON_VALUE() returns the "VALUE", so it is correct for it to return SQL NULL
#
SELECT NULL;
NULL
NULL
SELECT JSON_VALUE('{"nulltest": null}', '$.nulltest');
JSON_VALUE('{"nulltest": null}', '$.nulltest')
NULL
SELECT 1 + NULL;
1 + NULL
NULL
SELECT 1 + JSON_VALUE('{"nulltest": null}', '$.nulltest');
1 + JSON_VALUE('{"nulltest": null}', '$.nulltest')
NULL
SELECT NULL;
NULL
NULL
SELECT JSON_EXTRACT('{"a":null, "b":10, "c":"null"}', '$.a');
JSON_EXTRACT('{"a":null, "b":10, "c":"null"}', '$.a')
null
#
# End of 10.3 tests
#
#
......
@@ -627,6 +627,25 @@ SELECT * FROM t1 WHERE JSON_EXTRACT(j, '$.Age')=19;
drop table t1;
--echo #
--echo # MDEV-27151: JSON_VALUE() does not parse NULL properties properly
--echo #
--echo #
--echo # It is correct for JSON_EXTRACT() to return null rather than SQL NULL because
--echo # it returns the JSON literal that is stored inside the JSON document.
--echo # Hence it should return the string 'null' and not SQL NULL.
--echo # JSON_VALUE() returns the "VALUE", so it is correct for it to return SQL NULL
--echo #
SELECT NULL;
SELECT JSON_VALUE('{"nulltest": null}', '$.nulltest');
SELECT 1 + NULL;
SELECT 1 + JSON_VALUE('{"nulltest": null}', '$.nulltest');
SELECT NULL;
SELECT JSON_EXTRACT('{"a":null, "b":10, "c":"null"}', '$.a');
--echo #
--echo # End of 10.3 tests
--echo #
......
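The comments in the two test files above draw the distinction that the MDEV-27151 fix relies on: JSON_EXTRACT() hands back whatever literal is stored in the document, so a JSON null comes out as the string 'null', while JSON_VALUE() returns the value itself and therefore maps a JSON null to SQL NULL (the JSON_VALUE_NULL check added to item_jsonfunc.cc further down in this commit). A tiny standalone illustration of the two behaviours, with invented helper names and no real JSON parsing:

#include <optional>
#include <string>
#include <iostream>

/* Pretend the path lookup already found the member and it is the token
   "null"; the helpers below only illustrate what is done with that token. */
static std::string located_token() { return "null"; }

/* JSON_EXTRACT(): return the literal exactly as stored in the document. */
static std::string extract_literal() { return located_token(); }

/* JSON_VALUE(): a JSON null becomes SQL NULL (modelled as an empty optional). */
static std::optional<std::string> value_or_null() {
  std::string tok = located_token();
  if (tok == "null")
    return std::nullopt;
  return tok;
}

int main() {
  std::cout << "JSON_EXTRACT -> " << extract_literal() << '\n';   /* prints null */
  std::optional<std::string> v = value_or_null();
  std::cout << "JSON_VALUE   -> " << (v ? *v : std::string("SQL NULL")) << '\n';
  return 0;
}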
--- check_ibd_filesize.result
+++ check_ibd_filesize.result,32k
--- mysql-test/suite/innodb/r/check_ibd_filesize.result 2022-08-16 17:28:06.462350465 +0530
+++ mysql-test/suite/innodb/r/check_ibd_filesize.reject 2022-08-16 17:29:25.129637040 +0530
@@ -3,18 +3,12 @@
# SPACE IN 5.7 THAN IN 5.6
#
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
-# bytes: 98304
+# bytes: 196608
-# bytes: 65536
+# bytes: 131072
INSERT INTO t1 SELECT * FROM seq_1_to_25000;
-# bytes: 9437184
+# bytes: 786432
DROP TABLE t1;
CREATE TABLE t1 (a INT PRIMARY KEY, b BLOB) ENGINE=InnoDB;
-# bytes: 98304
+# bytes: 196608
-# bytes: 65536
+# bytes: 131072
INSERT INTO t1 SELECT seq,REPEAT('a',30000) FROM seq_1_to_20;
-# bytes: 4194304
-DROP TABLE t1;
......
--- check_ibd_filesize.result
+++ check_ibd_filesize.result,4k
--- mysql-test/suite/innodb/r/check_ibd_filesize.result 2022-08-16 17:28:06.462350465 +0530
+++ mysql-test/suite/innodb/r/check_ibd_filesize.reject 2022-08-16 17:31:39.288769153 +0530
@@ -3,18 +3,18 @@
# SPACE IN 5.7 THAN IN 5.6
#
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
-# bytes: 98304
+# bytes: 24576
-# bytes: 65536
+# bytes: 16384
INSERT INTO t1 SELECT * FROM seq_1_to_25000;
# bytes: 9437184
DROP TABLE t1;
CREATE TABLE t1 (a INT PRIMARY KEY, b BLOB) ENGINE=InnoDB;
-# bytes: 98304
+# bytes: 24576
-# bytes: 65536
+# bytes: 16384
INSERT INTO t1 SELECT seq,REPEAT('a',30000) FROM seq_1_to_20;
# bytes: 4194304
DROP TABLE t1;
......
--- check_ibd_filesize.result
+++ check_ibd_filesize.result,64k
--- mysql-test/suite/innodb/r/check_ibd_filesize.result 2022-08-16 17:28:06.462350465 +0530
+++ mysql-test/suite/innodb/r/check_ibd_filesize.reject 2022-08-16 17:30:28.957174270 +0530
@@ -3,18 +3,12 @@
# SPACE IN 5.7 THAN IN 5.6
#
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
-# bytes: 98304
+# bytes: 393216
-# bytes: 65536
+# bytes: 262144
INSERT INTO t1 SELECT * FROM seq_1_to_25000;
-# bytes: 9437184
+# bytes: 983040
DROP TABLE t1;
CREATE TABLE t1 (a INT PRIMARY KEY, b BLOB) ENGINE=InnoDB;
-# bytes: 98304
+# bytes: 393216
-# bytes: 65536
+# bytes: 262144
INSERT INTO t1 SELECT seq,REPEAT('a',30000) FROM seq_1_to_20;
-# bytes: 4194304
-DROP TABLE t1;
......
--- check_ibd_filesize.result
+++ check_ibd_filesize.result,8k
--- mysql-test/suite/innodb/r/check_ibd_filesize.result 2022-08-16 17:28:06.462350465 +0530
+++ mysql-test/suite/innodb/r/check_ibd_filesize.reject 2022-08-16 17:31:03.516962339 +0530
@@ -3,18 +3,18 @@
# SPACE IN 5.7 THAN IN 5.6
#
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
-# bytes: 98304
+# bytes: 49152
-# bytes: 65536
+# bytes: 32768
INSERT INTO t1 SELECT * FROM seq_1_to_25000;
# bytes: 9437184
DROP TABLE t1;
CREATE TABLE t1 (a INT PRIMARY KEY, b BLOB) ENGINE=InnoDB;
-# bytes: 98304
+# bytes: 49152
-# bytes: 65536
+# bytes: 32768
INSERT INTO t1 SELECT seq,REPEAT('a',30000) FROM seq_1_to_20;
# bytes: 4194304
DROP TABLE t1;
......
@@ -3,12 +3,12 @@
# SPACE IN 5.7 THAN IN 5.6
#
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
# bytes: 98304
# bytes: 65536
INSERT INTO t1 SELECT * FROM seq_1_to_25000;
# bytes: 9437184
DROP TABLE t1;
CREATE TABLE t1 (a INT PRIMARY KEY, b BLOB) ENGINE=InnoDB;
# bytes: 98304
# bytes: 65536
INSERT INTO t1 SELECT seq,REPEAT('a',30000) FROM seq_1_to_20;
# bytes: 4194304
DROP TABLE t1;
......
@@ -5,6 +5,7 @@
#
--source include/have_innodb.inc
--source include/innodb_page_size.inc
# Embedded server does not support restart of the server
--source include/not_embedded.inc
--source include/no_valgrind_without_big.inc
......
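In the per-page-size .rdiff files above only the byte counts change, and the figures for the freshly created, still-empty table are consistent with a fixed number of pages multiplied by the page size: 24576/16384 for 4k, 49152/32768 for 8k, 98304/65536 for 16k, 196608/131072 for 32k and 393216/262144 for 64k, i.e. 6 and then 4 pages in every case. A quick sanity check of that pattern (assuming the sizes really are page count times innodb_page_size, which the results themselves do not state):

#include <cstdio>

int main() {
  /* Assumption: the two "bytes:" figures printed for the empty table are
     6 pages and then 4 pages times the configured innodb_page_size. */
  const unsigned page_sizes[] = {4096, 8192, 16384, 32768, 65536};
  for (unsigned ps : page_sizes)
    std::printf("page size %5u: %7u and %7u bytes\n", ps, 6 * ps, 4 * ps);
  return 0;
}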
@@ -1024,7 +1024,7 @@ AND compress_ops BETWEEN @inl_val AND 1000
AND table_name='tab5' AND database_name='test'
AND index_name like 'idx%' ;
compress_stat 1
The size of the tab5.ibd file: 122880
The size of the tab5.ibd file: 106496
# for determintic resons simple data should be inserted.
# insert some 100 records
# Load the data
@@ -1331,7 +1331,7 @@ AND compress_ops BETWEEN @inl_val AND 1000
AND table_name='tab5' AND database_name='test'
AND index_name like 'idx%' ;
compress_stat 1
The size of the tab5.ibd file: 245760
The size of the tab5.ibd file: 212992
# for determintic resons simple data should be inserted.
# insert some 100 records
# Load the data
@@ -2637,7 +2637,7 @@ AND compress_ops BETWEEN @inl_val AND 1000
AND table_name='tab5' AND database_name='test'
AND index_name like 'idx%' ;
compress_stat 1
The size of the tab5.ibd file: 122880
The size of the tab5.ibd file: 106496
# for determintic resons simple data should be inserted.
# insert some 100 records
# Load the data
@@ -2946,7 +2946,7 @@ AND compress_ops BETWEEN @inl_val AND 1000
AND table_name='tab5' AND database_name='test'
AND index_name like 'idx%' ;
compress_stat 1
The size of the tab5.ibd file: 245760
The size of the tab5.ibd file: 212992
# for determintic resons simple data should be inserted.
# insert some 100 records
# Load the data
@@ -4151,7 +4151,7 @@ AND compress_ops BETWEEN @inl_val AND 1000
AND table_name='tab5' AND database_name='test'
AND index_name like 'idx%' ;
compress_stat 1
The size of the tab5.ibd file: 122880
The size of the tab5.ibd file: 106496
# for determintic resons simple data should be inserted.
# insert some 100 records
# Load the data
@@ -4439,7 +4439,7 @@ AND compress_ops BETWEEN @inl_val AND 1000
AND table_name='tab5' AND database_name='test'
AND index_name like 'idx%' ;
compress_stat 1
The size of the tab5.ibd file: 245760
The size of the tab5.ibd file: 212992
# for determintic resons simple data should be inserted.
# insert some 100 records
# Load the data
@@ -5697,7 +5697,7 @@ AND compress_ops BETWEEN @inl_val AND 1000
AND table_name='tab5' AND database_name='test'
AND index_name like 'idx%' ;
compress_stat 1
The size of the tab5.ibd file: 122880
The size of the tab5.ibd file: 106496
# for determintic resons simple data should be inserted.
# insert some 100 records
# Load the data
@@ -6004,7 +6004,7 @@ AND compress_ops BETWEEN @inl_val AND 1000
AND table_name='tab5' AND database_name='test'
AND index_name like 'idx%' ;
compress_stat 1
The size of the tab5.ibd file: 245760
The size of the tab5.ibd file: 212992
# for determintic resons simple data should be inserted.
# insert some 100 records
# Load the data
@@ -7288,7 +7288,7 @@ AND compress_ops BETWEEN @inl_val AND 1000
AND table_name='tab5' AND database_name='test'
AND index_name like 'idx%' ;
compress_stat 1
The size of the tab5.ibd file: 122880
The size of the tab5.ibd file: 106496
# for determintic resons simple data should be inserted.
# insert some 100 records
# Load the data
@@ -7595,7 +7595,7 @@ AND compress_ops BETWEEN @inl_val AND 1000
AND table_name='tab5' AND database_name='test'
AND index_name like 'idx%' ;
compress_stat 1
The size of the tab5.ibd file: 245760
The size of the tab5.ibd file: 212992
# for determintic resons simple data should be inserted.
# insert some 100 records
# Load the data
......
@@ -31,13 +31,13 @@ FOUND 1 /Database page corruption detected.*/ in backup.log
FOUND 1 /completed OK!/ in backup.log
--- "innodb_corrupted_pages" file content: ---
test/t1_corrupted
6 8 9
4 6 7
test/t2_corrupted
7 8 10
5 6 8
test/t4_corrupted_new
1
test/t5_corrupted_to_rename_renamed
6
4
test/t7_corrupted_to_alter
3
------
@@ -48,19 +48,19 @@ INSERT INTO t3_inc VALUES (3), (4), (5), (6), (7), (8), (9);
# Backup must not fail, but "innodb_corrupted_pages" file must be created due to --log-innodb-page-corruption option
--- "innodb_corrupted_pages" file content: ---
test/t1_corrupted
6 8 9
4 6 7
test/t1_inc_corrupted
6 8 9
4 6 7
test/t2_corrupted
7 8 10
5 6 8
test/t2_inc_corrupted
7 8 10
5 6 8
test/t4_inc_corrupted_new
1
test/t5_corrupted_to_rename_renamed
6
4
test/t5_inc_corrupted_to_rename_renamed
6
4
test/t7_inc_corrupted_to_alter
3
------
@@ -85,16 +85,16 @@ DROP TABLE t7_inc_corrupted_to_alter;
# Full backup with --log-innodb-page-corruption
--- "innodb_corrupted_pages" file content: ---
test/t3
6 8
4 6
------
# Extend some tablespace and corrupt extended pages for incremental backup
# restart
# Incremental backup --log-innodb-page-corruption
--- "innodb_corrupted_pages" file content: ---
test/t3
6 8
4 6
test/t3_inc
6 8
4 6
------
# Full backup prepare
# "innodb_corrupted_pages" file must not exist after successful prepare
......
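The innodb_corrupted_pages listings above use a simple two-line-per-table layout: the tablespace name on one line and the corrupted page numbers on the next (the "------" lines are only the test's delimiter when it prints the file). A small parser for that layout, written against the format as it appears in this output rather than any formal specification, could look like:

#include <iostream>
#include <map>
#include <sstream>
#include <string>
#include <vector>

/* Parses text in the layout shown above: "<space name>\n<page numbers>\n"
   repeated per table. The layout is inferred from the test output only. */
static std::map<std::string, std::vector<unsigned>>
parse_corrupted_pages(std::istream &in) {
  std::map<std::string, std::vector<unsigned>> result;
  std::string name, pages;
  while (std::getline(in, name) && std::getline(in, pages)) {
    std::istringstream nums(pages);
    unsigned page;
    while (nums >> page)
      result[name].push_back(page);
  }
  return result;
}

int main() {
  std::istringstream sample("test/t1_corrupted\n4 6 7\n"
                            "test/t5_corrupted_to_rename_renamed\n4\n");
  for (const auto &entry : parse_corrupted_pages(sample)) {
    std::cout << entry.first << ":";
    for (unsigned p : entry.second)
      std::cout << ' ' << p;
    std::cout << '\n';
  }
  return 0;
}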
/* Copyright (c) 2016, 2021, MariaDB Corporation.
/* Copyright (c) 2016, 2022, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -579,10 +579,6 @@ bool Item_func_json_query::fix_length_and_dec()
}
/*
Returns NULL, not an error if the found value
is not a scalar.
*/
bool Json_path_extractor::extract(String *str, Item *item_js, Item *item_jp,
CHARSET_INFO *cs)
{
@@ -615,6 +611,9 @@ bool Json_path_extractor::extract(String *str, Item *item_js, Item *item_jp,
if (json_read_value(&je))
return true;
if (je.value_type == JSON_VALUE_NULL)
return true;
if (unlikely(check_and_get_value(&je, str, &error)))
{
if (error)
@@ -1099,7 +1098,6 @@ my_decimal *Item_func_json_extract::val_decimal(my_decimal *to)
case JSON_VALUE_ARRAY:
case JSON_VALUE_FALSE:
case JSON_VALUE_UNINITALIZED:
// TODO: fix: NULL should be NULL
case JSON_VALUE_NULL:
int2my_decimal(E_DEC_FATAL_ERROR, 0, false/*unsigned_flag*/, to);
return to;
......
/*****************************************************************************
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2017, 2021, MariaDB Corporation.
Copyright (c) 2017, 2022, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -1676,6 +1676,7 @@ fseg_create(fil_space_t *space, ulint byte_offset, mtr_t *mtr,
fseg_inode_t* inode;
ib_id_t seg_id;
uint32_t n_reserved;
bool reserved_extent = false;
DBUG_ENTER("fseg_create");
@@ -1700,18 +1701,35 @@ fseg_create(fil_space_t *space, ulint byte_offset, mtr_t *mtr,
}
}
if (!has_done_reservation
&& !fsp_reserve_free_extents(&n_reserved, space, 2,
FSP_NORMAL, mtr)) {
DBUG_RETURN(NULL);
}
buf_block_t* header = fsp_get_header(space, mtr);
buf_block_t* iblock;
inode_alloc:
inode = fsp_alloc_seg_inode(space, header, &iblock, mtr);
if (inode == NULL) {
reserve_extent:
if (!has_done_reservation && !reserved_extent) {
if (!fsp_reserve_free_extents(
&n_reserved, space, 2,
FSP_NORMAL, mtr)) {
DBUG_RETURN(NULL);
}
/* Extents reserved successfully. So
try allocating the page or inode */
reserved_extent = true;
if (inode) {
goto page_alloc;
}
goto inode_alloc;
}
if (inode) {
fsp_free_seg_inode(space, inode, iblock, mtr);
}
goto funct_exit;
}
@@ -1737,6 +1755,7 @@ fseg_create(fil_space_t *space, ulint byte_offset, mtr_t *mtr,
FSEG_FRAG_SLOT_SIZE * FSEG_FRAG_ARR_N_SLOTS, 0xff);
if (!block) {
page_alloc:
block = fseg_alloc_free_page_low(space,
inode, iblock, 0, FSP_UP,
#ifdef UNIV_DEBUG
@@ -1744,13 +1763,9 @@ fseg_create(fil_space_t *space, ulint byte_offset, mtr_t *mtr,
#endif /* UNIV_DEBUG */
mtr, mtr);
/* The allocation cannot fail if we have already reserved a
space for the page. */
ut_ad(!has_done_reservation || block != NULL);
if (block == NULL) {
fsp_free_seg_inode(space, inode, iblock, mtr);
goto funct_exit;
if (!block) {
ut_ad(!has_done_reservation);
goto reserve_extent;
}
ut_d(const auto x = rw_lock_get_x_lock_count(&block->lock));
@@ -1772,7 +1787,7 @@ fseg_create(fil_space_t *space, ulint byte_offset, mtr_t *mtr,
+ block->frame, space->id);
funct_exit:
if (!has_done_reservation) {
if (!has_done_reservation && reserved_extent) {
space->release_free_extents(n_reserved);
}
......
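The fseg_create() hunks above turn the unconditional up-front call to fsp_reserve_free_extents() into a fallback: the inode and the first page are allocated first, and only if that fails are two extents reserved and the allocation retried, with reserved_extent recording whether the release at funct_exit is owed. A schematic of that allocate-first, reserve-on-failure-then-retry shape, using invented stand-ins (try_allocate, reserve_extents, release_extents) rather than the real InnoDB routines:

#include <cstdio>

static int free_units = 0;              /* pretend free space in the file */

static bool try_allocate() {            /* fails when nothing is free */
  if (free_units == 0) return false;
  --free_units;
  return true;
}
/* Reserving may extend the file, which is roughly what
   fsp_reserve_free_extents() can do, so it creates free space here. */
static bool reserve_extents(unsigned n) { free_units += (int) n; return true; }
static void release_extents(unsigned n) { (void) n; /* hand back the quota */ }

static bool create_segment(bool has_done_reservation) {
  bool reserved_extent = false;
  bool ok = false;
  for (;;) {
    ok = try_allocate();                /* inode/page allocation attempt */
    if (ok) break;
    if (has_done_reservation || reserved_extent)
      break;                            /* nothing more we can do */
    if (!reserve_extents(2))            /* reserve only after the cheap path failed */
      break;
    reserved_extent = true;             /* and retry the allocation once */
  }
  if (!has_done_reservation && reserved_extent)
    release_extents(2);                 /* mirrors the funct_exit change above */
  return ok;
}

int main() {
  std::printf("empty file, reservation needed: %s\n",
              create_segment(false) ? "created" : "failed");
  std::printf("free space already available:   %s\n",
              create_segment(false) ? "created" : "failed");
  return 0;
}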
#
# MDEV-29008 Server crash or assertion `field' failed in spider_db_open_item_ident / group by handler
#
for master_1
for child2
child2_1
child2_2
child2_3
for child3
connection child2_1;
CREATE DATABASE auto_test_remote;
USE auto_test_remote;
CREATE TABLE tbl_a (
a INT,
b INT
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
INSERT INTO tbl_a VALUES (1,2),(3,4);
connection master_1;
CREATE DATABASE auto_test_local;
USE auto_test_local;
CREATE TABLE tbl_a (
a INT,
b INT
) ENGINE=Spider DEFAULT CHARSET=utf8 COMMENT='table "tbl_a", srv "s_2_1"';
SELECT MIN(t2.a) AS f1, t1.b AS f2 FROM tbl_a AS t1 JOIN tbl_a AS t2 GROUP BY f2 ORDER BY f1, f2;
f1 f2
1 2
1 4
connection master_1;
DROP DATABASE IF EXISTS auto_test_local;
connection child2_1;
DROP DATABASE IF EXISTS auto_test_remote;
for master_1
for child2
child2_1
child2_2
child2_3
for child3
!include include/default_mysqld.cnf
!include ../my_1_1.cnf
!include ../my_2_1.cnf
--echo #
--echo # MDEV-29008 Server crash or assertion `field' failed in spider_db_open_item_ident / group by handler
--echo #
--disable_query_log
--disable_result_log
--source ../../t/test_init.inc
--enable_result_log
--enable_query_log
--connection child2_1
CREATE DATABASE auto_test_remote;
USE auto_test_remote;
eval CREATE TABLE tbl_a (
a INT,
b INT
) $CHILD2_1_ENGINE $CHILD2_1_CHARSET;
INSERT INTO tbl_a VALUES (1,2),(3,4);
--connection master_1
CREATE DATABASE auto_test_local;
USE auto_test_local;
eval CREATE TABLE tbl_a (
a INT,
b INT
) $MASTER_1_ENGINE $MASTER_1_CHARSET COMMENT='table "tbl_a", srv "s_2_1"';
SELECT MIN(t2.a) AS f1, t1.b AS f2 FROM tbl_a AS t1 JOIN tbl_a AS t2 GROUP BY f2 ORDER BY f1, f2;
--connection master_1
DROP DATABASE IF EXISTS auto_test_local;
--connection child2_1
DROP DATABASE IF EXISTS auto_test_remote;
--disable_query_log
--disable_result_log
--source ../t/test_deinit.inc
--enable_query_log
--enable_result_log
/* Copyright (C) 2008-2019 Kentoku Shiba
Copyright (C) 2019, 2020, MariaDB Corporation.
Copyright (C) 2019, 2022, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -9877,8 +9877,6 @@ int spider_db_open_item_ident(
} else {
if (!use_fields)
{
if (!(field = spider->field_exchange(field)))
DBUG_RETURN(ER_SPIDER_COND_SKIP_NUM);
if (str)
{
if ((error_num = share->dbton_share[dbton_id]->
@@ -9887,6 +9885,8 @@ int spider_db_open_item_ident(
DBUG_RETURN(error_num);
}
} else {
if (!(field = spider->field_exchange(field)))
DBUG_RETURN(ER_SPIDER_COND_SKIP_NUM);
if (str)
{
SPIDER_FIELD_CHAIN *field_chain = fields->get_next_field_chain();
......