Commit 56948ee5 authored by Daniel Black's avatar Daniel Black Committed by Andrew Hutchings

clang15 warnings - unused vars and old prototypes

clang15 finally errors on old-style (K&R) prototype definitions.

It's also a lot fussier about variables that aren't used,
as is the case a number of times with loop counters that
are incremented but never examined.

RocksDB was complaining that its get_range function was
declared without the array length in ha_rocksdb.h. A literal
constant is used there rather than trying to import the
header that defines Rdb_key_def::INDEX_NUMBER_SIZE (doing so
was causing a lot of errors in the definitions of other
headers). If the constant ever changes, we can be assured
that the same compile warnings will tell us of the error.

The ha_rocksdb::index_read_map_impl DBUG_EXECUTE_IF was made similar
to the existing endless-loop constructs used in replication tests.
It's a rather moot point, as the rocksdb.force_shutdown test that
uses myrocks_busy_loop_on_row_read is currently disabled.
parent d7f44791
...@@ -249,8 +249,7 @@ int azdopen(azio_stream *s, File fd, int Flags) ...@@ -249,8 +249,7 @@ int azdopen(azio_stream *s, File fd, int Flags)
for end of file. for end of file.
IN assertion: the stream s has been sucessfully opened for reading. IN assertion: the stream s has been sucessfully opened for reading.
*/ */
int get_byte(s) int get_byte(azio_stream *s)
azio_stream *s;
{ {
if (s->z_eof) return EOF; if (s->z_eof) return EOF;
if (s->stream.avail_in == 0) if (s->stream.avail_in == 0)
...@@ -427,8 +426,7 @@ void read_header(azio_stream *s, unsigned char *buffer) ...@@ -427,8 +426,7 @@ void read_header(azio_stream *s, unsigned char *buffer)
* Cleanup then free the given azio_stream. Return a zlib error code. * Cleanup then free the given azio_stream. Return a zlib error code.
Try freeing in the reverse order of allocations. Try freeing in the reverse order of allocations.
*/ */
int destroy (s) int destroy (azio_stream *s)
azio_stream *s;
{ {
int err = Z_OK; int err = Z_OK;
...@@ -679,9 +677,7 @@ int do_flush (azio_stream *s, int flush) ...@@ -679,9 +677,7 @@ int do_flush (azio_stream *s, int flush)
return s->z_err == Z_STREAM_END ? Z_OK : s->z_err; return s->z_err == Z_STREAM_END ? Z_OK : s->z_err;
} }
int ZEXPORT azflush (s, flush) int ZEXPORT azflush (azio_stream *s, int flush)
azio_stream *s;
int flush;
{ {
int err; int err;
...@@ -708,8 +704,7 @@ int ZEXPORT azflush (s, flush) ...@@ -708,8 +704,7 @@ int ZEXPORT azflush (s, flush)
/* =========================================================================== /* ===========================================================================
Rewinds input file. Rewinds input file.
*/ */
int azrewind (s) int azrewind (azio_stream *s)
azio_stream *s;
{ {
if (s == NULL || s->mode != 'r') return -1; if (s == NULL || s->mode != 'r') return -1;
...@@ -733,10 +728,7 @@ int azrewind (s) ...@@ -733,10 +728,7 @@ int azrewind (s)
SEEK_END is not implemented, returns error. SEEK_END is not implemented, returns error.
In this version of the library, azseek can be extremely slow. In this version of the library, azseek can be extremely slow.
*/ */
my_off_t azseek (s, offset, whence) my_off_t azseek (azio_stream *s, my_off_t offset, int whence)
azio_stream *s;
my_off_t offset;
int whence;
{ {
if (s == NULL || whence == SEEK_END || if (s == NULL || whence == SEEK_END ||
...@@ -812,8 +804,7 @@ my_off_t azseek (s, offset, whence) ...@@ -812,8 +804,7 @@ my_off_t azseek (s, offset, whence)
given compressed file. This position represents a number of bytes in the given compressed file. This position represents a number of bytes in the
uncompressed data stream. uncompressed data stream.
*/ */
my_off_t ZEXPORT aztell (file) my_off_t ZEXPORT aztell (azio_stream *file)
azio_stream *file;
{ {
return azseek(file, 0L, SEEK_CUR); return azseek(file, 0L, SEEK_CUR);
} }
......
...@@ -237,7 +237,7 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2, ...@@ -237,7 +237,7 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2,
bool del, PHC) bool del, PHC)
{ {
char *p; char *p;
int i, n; int n;
bool rcop= true; bool rcop= true;
PCOL colp; PCOL colp;
//PCOLUMN cp; //PCOLUMN cp;
...@@ -276,7 +276,7 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2, ...@@ -276,7 +276,7 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2,
n = strlen(p) + 1; n = strlen(p) + 1;
} // endfor p } // endfor p
for (i = 0, colp = tdbp->GetColumns(); colp; i++, colp = colp->GetNext()) { for (colp = tdbp->GetColumns(); colp; colp = colp->GetNext()) {
if (colp->InitValue(g)) if (colp->InitValue(g))
throw 2; throw 2;
...@@ -310,7 +310,7 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2, ...@@ -310,7 +310,7 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2,
n = strlen(p) + 1; n = strlen(p) + 1;
} // endfor p } // endfor p
for (i = 0, colp = utp->GetColumns(); colp; i++, colp = colp->GetNext()) { for (colp = utp->GetColumns(); colp; colp = colp->GetNext()) {
if (colp->InitValue(g)) if (colp->InitValue(g))
throw 5; throw 5;
......
...@@ -221,8 +221,7 @@ static int ZCALLBACK ferror_file_func (voidpf opaque __attribute__((unused)), vo ...@@ -221,8 +221,7 @@ static int ZCALLBACK ferror_file_func (voidpf opaque __attribute__((unused)), vo
return ret; return ret;
} }
void fill_fopen_filefunc (pzlib_filefunc_def) void fill_fopen_filefunc (zlib_filefunc_def* pzlib_filefunc_def)
zlib_filefunc_def* pzlib_filefunc_def;
{ {
pzlib_filefunc_def->zopen_file = fopen_file_func; pzlib_filefunc_def->zopen_file = fopen_file_func;
pzlib_filefunc_def->zread_file = fread_file_func; pzlib_filefunc_def->zread_file = fread_file_func;
......
...@@ -230,7 +230,6 @@ PCOL TDBTBL::InsertSpecialColumn(PCOL scp) ...@@ -230,7 +230,6 @@ PCOL TDBTBL::InsertSpecialColumn(PCOL scp)
/***********************************************************************/ /***********************************************************************/
bool TDBTBL::InitTableList(PGLOBAL g) bool TDBTBL::InitTableList(PGLOBAL g)
{ {
int n;
uint sln; uint sln;
const char *scs; const char *scs;
PTABLE tp, tabp; PTABLE tp, tabp;
...@@ -243,7 +242,7 @@ bool TDBTBL::InitTableList(PGLOBAL g) ...@@ -243,7 +242,7 @@ bool TDBTBL::InitTableList(PGLOBAL g)
sln = hc->get_table()->s->connect_string.length; sln = hc->get_table()->s->connect_string.length;
// PlugSetPath(filename, Tdbp->GetFile(g), Tdbp->GetPath()); // PlugSetPath(filename, Tdbp->GetFile(g), Tdbp->GetPath());
for (n = 0, tp = tdp->Tablep; tp; tp = tp->GetNext()) { for (tp = tdp->Tablep; tp; tp = tp->GetNext()) {
if (TestFil(g, To_CondFil, tp)) { if (TestFil(g, To_CondFil, tp)) {
tabp = new(g) XTAB(tp); tabp = new(g) XTAB(tp);
...@@ -276,7 +275,6 @@ bool TDBTBL::InitTableList(PGLOBAL g) ...@@ -276,7 +275,6 @@ bool TDBTBL::InitTableList(PGLOBAL g)
else else
Tablist = tabp; Tablist = tabp;
n++;
} // endif filp } // endif filp
} // endfor tp } // endfor tp
......
...@@ -1471,11 +1471,6 @@ extern int ZEXPORT zipWriteInFileInZip (zipFile file,const void* buf,unsigned in ...@@ -1471,11 +1471,6 @@ extern int ZEXPORT zipWriteInFileInZip (zipFile file,const void* buf,unsigned in
{ {
uLong uTotalOutBefore = zi->ci.stream.total_out; uLong uTotalOutBefore = zi->ci.stream.total_out;
err=deflate(&zi->ci.stream, Z_NO_FLUSH); err=deflate(&zi->ci.stream, Z_NO_FLUSH);
if(uTotalOutBefore > zi->ci.stream.total_out)
{
int bBreak = 0;
bBreak++;
}
zi->ci.pos_in_buffered_data += (uInt)(zi->ci.stream.total_out - uTotalOutBefore) ; zi->ci.pos_in_buffered_data += (uInt)(zi->ci.stream.total_out - uTotalOutBefore) ;
} }
......
...@@ -8468,8 +8468,7 @@ int ha_rocksdb::index_read_map_impl(uchar *const buf, const uchar *const key, ...@@ -8468,8 +8468,7 @@ int ha_rocksdb::index_read_map_impl(uchar *const buf, const uchar *const key,
const key_range *end_key) { const key_range *end_key) {
DBUG_ENTER_FUNC(); DBUG_ENTER_FUNC();
DBUG_EXECUTE_IF("myrocks_busy_loop_on_row_read", int debug_i = 0; DBUG_EXECUTE_IF("myrocks_busy_loop_on_row_read", my_sleep(50000););
while (1) { debug_i++; });
int rc = 0; int rc = 0;
...@@ -12124,7 +12123,6 @@ static int calculate_stats( ...@@ -12124,7 +12123,6 @@ static int calculate_stats(
} }
} }
int num_sst = 0;
for (const auto &it : props) { for (const auto &it : props) {
std::vector<Rdb_index_stats> sst_stats; std::vector<Rdb_index_stats> sst_stats;
Rdb_tbl_prop_coll::read_stats_from_tbl_props(it.second, &sst_stats); Rdb_tbl_prop_coll::read_stats_from_tbl_props(it.second, &sst_stats);
...@@ -12153,7 +12151,6 @@ static int calculate_stats( ...@@ -12153,7 +12151,6 @@ static int calculate_stats(
stats[it1.m_gl_index_id].merge( stats[it1.m_gl_index_id].merge(
it1, true, it_index->second->max_storage_fmt_length()); it1, true, it_index->second->max_storage_fmt_length());
} }
num_sst++;
} }
if (include_memtables) { if (include_memtables) {
......
...@@ -401,7 +401,7 @@ class ha_rocksdb : public my_core::handler { ...@@ -401,7 +401,7 @@ class ha_rocksdb : public my_core::handler {
void free_key_buffers(); void free_key_buffers();
// the buffer size should be at least 2*Rdb_key_def::INDEX_NUMBER_SIZE // the buffer size should be at least 2*Rdb_key_def::INDEX_NUMBER_SIZE
rocksdb::Range get_range(const int i, uchar buf[]) const; rocksdb::Range get_range(const int i, uchar buf[2 * 4]) const;
/* /*
Perf timers for data reads Perf timers for data reads
......
...@@ -2423,7 +2423,6 @@ static int toku_loader_write_ft_from_q (FTLOADER bl, ...@@ -2423,7 +2423,6 @@ static int toku_loader_write_ft_from_q (FTLOADER bl,
// The pivots file will contain all the pivot strings (in the form <size(32bits)> <data>) // The pivots file will contain all the pivot strings (in the form <size(32bits)> <data>)
// The pivots_fname is the name of the pivots file. // The pivots_fname is the name of the pivots file.
// Note that the pivots file will have one extra pivot in it (the last key in the dictionary) which will not appear in the tree. // Note that the pivots file will have one extra pivot in it (the last key in the dictionary) which will not appear in the tree.
int64_t n_pivots=0; // number of pivots in pivots_file
FIDX pivots_file; // the file FIDX pivots_file; // the file
r = ft_loader_open_temp_file (bl, &pivots_file); r = ft_loader_open_temp_file (bl, &pivots_file);
...@@ -2539,8 +2538,6 @@ static int toku_loader_write_ft_from_q (FTLOADER bl, ...@@ -2539,8 +2538,6 @@ static int toku_loader_write_ft_from_q (FTLOADER bl,
allocate_node(&sts, lblock); allocate_node(&sts, lblock);
n_pivots++;
invariant(maxkey.data != NULL); invariant(maxkey.data != NULL);
if ((r = bl_write_dbt(&maxkey, pivots_stream, NULL, nullptr, bl))) { if ((r = bl_write_dbt(&maxkey, pivots_stream, NULL, nullptr, bl))) {
ft_loader_set_panic(bl, r, true, which_db, nullptr, nullptr); ft_loader_set_panic(bl, r, true, which_db, nullptr, nullptr);
...@@ -2616,8 +2613,6 @@ static int toku_loader_write_ft_from_q (FTLOADER bl, ...@@ -2616,8 +2613,6 @@ static int toku_loader_write_ft_from_q (FTLOADER bl,
// We haven't paniced, so the sum should add up. // We haven't paniced, so the sum should add up.
invariant(used_estimate == total_disksize_estimate); invariant(used_estimate == total_disksize_estimate);
n_pivots++;
{ {
DBT key = make_dbt(0,0); // must write an extra DBT into the pivots file. DBT key = make_dbt(0,0); // must write an extra DBT into the pivots file.
r = bl_write_dbt(&key, pivots_stream, NULL, nullptr, bl); r = bl_write_dbt(&key, pivots_stream, NULL, nullptr, bl);
...@@ -3302,7 +3297,7 @@ static int write_nonleaves (FTLOADER bl, FIDX pivots_fidx, struct dbout *out, st ...@@ -3302,7 +3297,7 @@ static int write_nonleaves (FTLOADER bl, FIDX pivots_fidx, struct dbout *out, st
int height = 1; int height = 1;
// Watch out for the case where we saved the last pivot but didn't write any more nodes out. // Watch out for the case where we saved the last pivot but didn't write any more nodes out.
// The trick is not to look at n_pivots, but to look at blocks.n_blocks // The trick is to look at blocks.n_blocks
while (sts->n_subtrees > 1) { while (sts->n_subtrees > 1) {
// If there is more than one block in blocks, then we must build another level of the tree. // If there is more than one block in blocks, then we must build another level of the tree.
......
...@@ -208,12 +208,10 @@ verify_snapshot_system(TXN_MANAGER txn_manager UU()) { ...@@ -208,12 +208,10 @@ verify_snapshot_system(TXN_MANAGER txn_manager UU()) {
{ {
//verify neither pair->begin_id nor end_id is in snapshot_xids //verify neither pair->begin_id nor end_id is in snapshot_xids
TOKUTXN curr_txn = txn_manager->snapshot_head; TOKUTXN curr_txn = txn_manager->snapshot_head;
uint32_t curr_index = 0;
while (curr_txn != NULL) { while (curr_txn != NULL) {
invariant(tuple->begin_id != curr_txn->txnid.parent_id64); invariant(tuple->begin_id != curr_txn->txnid.parent_id64);
invariant(tuple->end_id != curr_txn->txnid.parent_id64); invariant(tuple->end_id != curr_txn->txnid.parent_id64);
curr_txn = curr_txn->snapshot_next; curr_txn = curr_txn->snapshot_next;
curr_index++;
} }
} }
{ {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment