Commit 56948ee5 authored by Daniel Black, committed by Andrew Hutchings

clang15 warnings - unused vars and old prototypes

clang15 finally errors on old-style (K&R) prototype definitions.

It's also a lot fussier about variables that aren't used, as is
the case a number of times here with loop counters that are
incremented but never examined.
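
As an illustration, a minimal sketch (hypothetical code, not from this
tree) of the two constructs and their fixes:

  /* Old-style (K&R) definition: clang15 diagnoses this under
   * -Wdeprecated-non-prototype, which -Werror turns into an error. */
  int count_chars(s)
    const char *s;
  {
    int n = 0;
    while (*s++)
      n++;
    return n;
  }

  /* The ANSI-style definition clang15 accepts. */
  int count_chars_fixed(const char *s)
  {
    int n = 0;
    while (*s++)
      n++;
    return n;
  }

  struct node { struct node *next; };

  /* A set-but-unused loop counter: i is incremented but never examined,
   * so clang's -Wunused-but-set-variable fires; the fix is to drop it. */
  void visit_all(struct node *head)
  {
    /* before: int i; for (i = 0, p = head; p; i++, p = p->next) */
    for (struct node *p = head; p; p = p->next)
      ;  /* visit p */
  }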

RocksDB was complaining that its get_range function was
declared without the array length in ha_rocksdb.h. A literal
constant is used there rather than trying to include the header
that defines Rdb_key_def::INDEX_NUMBER_SIZE (doing so was causing
a lot of errors in other definitions). If the constant does
change, we can be assured that the same compile warnings will
tell us of the error.
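
For context, a sketch (hypothetical, simplified names) of why the literal
stays honest: the definition spells the bound with the named constant, so
if the two ever disagree, the array-parameter mismatch warning in recent
GCC/clang (-Warray-parameter) reports it:

  enum { INDEX_NUMBER_SIZE = 4 };  /* stand-in for Rdb_key_def::INDEX_NUMBER_SIZE */

  /* Header-style declaration spelling the bound as a literal,
   * as ha_rocksdb.h now does with buf[2 * 4]. */
  void get_range_sketch(int i, unsigned char buf[2 * 4]);

  /* Definition using the named constant. If the constant ever changes,
   * the bounds no longer match and -Warray-parameter flags it. */
  void get_range_sketch(int i, unsigned char buf[2 * INDEX_NUMBER_SIZE])
  {
    for (int n = 0; n < 2 * INDEX_NUMBER_SIZE; n++)
      buf[n] = (unsigned char)i;
  }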

The ha_rocksdb::index_read_map_impl DBUG_EXECUTE_IF busy loop is
replaced with a sleep, matching the existing injections used in
replication tests. It's rather a moot point, as the
rocksdb.force_shutdown test that uses myrocks_busy_loop_on_row_read
is currently disabled.
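
A sketch of the new injection (assumes the MariaDB tree for
DBUG_EXECUTE_IF and my_sleep; the surrounding function is hypothetical):

  #include <my_global.h>
  #include <my_sys.h>   /* my_sleep() */
  #include <my_dbug.h>  /* DBUG_EXECUTE_IF() */

  static int read_row_sketch(void)
  {
    /* Before: an endless busy loop whose counter was set but never
     * read (another clang15 unused-variable complaint):
     *   DBUG_EXECUTE_IF("myrocks_busy_loop_on_row_read", int debug_i = 0;
     *                   while (1) { debug_i++; });
     * After: block for 50ms per call instead. */
    DBUG_EXECUTE_IF("myrocks_busy_loop_on_row_read", my_sleep(50000););
    return 0;
  }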
parent d7f44791
@@ -249,8 +249,7 @@ int azdopen(azio_stream *s, File fd, int Flags)
   for end of file.
   IN assertion: the stream s has been sucessfully opened for reading.
 */
-int get_byte(s)
-  azio_stream *s;
+int get_byte(azio_stream *s)
 {
   if (s->z_eof) return EOF;
   if (s->stream.avail_in == 0)
@@ -427,8 +426,7 @@ void read_header(azio_stream *s, unsigned char *buffer)
 * Cleanup then free the given azio_stream. Return a zlib error code.
   Try freeing in the reverse order of allocations.
 */
-int destroy (s)
-  azio_stream *s;
+int destroy (azio_stream *s)
 {
   int err = Z_OK;
@@ -679,9 +677,7 @@ int do_flush (azio_stream *s, int flush)
   return s->z_err == Z_STREAM_END ? Z_OK : s->z_err;
 }
 
-int ZEXPORT azflush (s, flush)
-  azio_stream *s;
-  int flush;
+int ZEXPORT azflush (azio_stream *s, int flush)
 {
   int err;
@@ -708,8 +704,7 @@ int ZEXPORT azflush (s, flush)
 /* ===========================================================================
   Rewinds input file.
 */
-int azrewind (s)
-  azio_stream *s;
+int azrewind (azio_stream *s)
 {
   if (s == NULL || s->mode != 'r') return -1;
@@ -733,10 +728,7 @@ int azrewind (s)
   SEEK_END is not implemented, returns error.
   In this version of the library, azseek can be extremely slow.
 */
-my_off_t azseek (s, offset, whence)
-  azio_stream *s;
-  my_off_t offset;
-  int whence;
+my_off_t azseek (azio_stream *s, my_off_t offset, int whence)
 {
   if (s == NULL || whence == SEEK_END ||
@@ -812,8 +804,7 @@ my_off_t azseek (s, offset, whence)
   given compressed file. This position represents a number of bytes in the
   uncompressed data stream.
 */
-my_off_t ZEXPORT aztell (file)
-  azio_stream *file;
+my_off_t ZEXPORT aztell (azio_stream *file)
 {
   return azseek(file, 0L, SEEK_CUR);
 }
......
@@ -237,7 +237,7 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2,
                   bool del, PHC)
 {
   char *p;
-  int i, n;
+  int n;
   bool rcop= true;
   PCOL colp;
 //PCOLUMN cp;
@@ -276,7 +276,7 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2,
       n = strlen(p) + 1;
       } // endfor p
 
-  for (i = 0, colp = tdbp->GetColumns(); colp; i++, colp = colp->GetNext()) {
+  for (colp = tdbp->GetColumns(); colp; colp = colp->GetNext()) {
     if (colp->InitValue(g))
       throw 2;
@@ -310,7 +310,7 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2,
       n = strlen(p) + 1;
       } // endfor p
 
-  for (i = 0, colp = utp->GetColumns(); colp; i++, colp = colp->GetNext()) {
+  for (colp = utp->GetColumns(); colp; colp = colp->GetNext()) {
     if (colp->InitValue(g))
       throw 5;
......
@@ -221,8 +221,7 @@ static int ZCALLBACK ferror_file_func (voidpf opaque __attribute__((unused)), vo
   return ret;
 }
 
-void fill_fopen_filefunc (pzlib_filefunc_def)
-  zlib_filefunc_def* pzlib_filefunc_def;
+void fill_fopen_filefunc (zlib_filefunc_def* pzlib_filefunc_def)
 {
   pzlib_filefunc_def->zopen_file = fopen_file_func;
   pzlib_filefunc_def->zread_file = fread_file_func;
......
@@ -230,7 +230,6 @@ PCOL TDBTBL::InsertSpecialColumn(PCOL scp)
 /***********************************************************************/
 bool TDBTBL::InitTableList(PGLOBAL g)
 {
-  int n;
   uint sln;
   const char *scs;
   PTABLE tp, tabp;
@@ -243,7 +242,7 @@ bool TDBTBL::InitTableList(PGLOBAL g)
   sln = hc->get_table()->s->connect_string.length;
 // PlugSetPath(filename, Tdbp->GetFile(g), Tdbp->GetPath());
 
-  for (n = 0, tp = tdp->Tablep; tp; tp = tp->GetNext()) {
+  for (tp = tdp->Tablep; tp; tp = tp->GetNext()) {
     if (TestFil(g, To_CondFil, tp)) {
       tabp = new(g) XTAB(tp);
@@ -276,7 +275,6 @@ bool TDBTBL::InitTableList(PGLOBAL g)
     else
       Tablist = tabp;
 
-    n++;
     } // endif filp
 
   } // endfor tp
......
@@ -1471,11 +1471,6 @@ extern int ZEXPORT zipWriteInFileInZip (zipFile file,const void* buf,unsigned in
     {
       uLong uTotalOutBefore = zi->ci.stream.total_out;
       err=deflate(&zi->ci.stream, Z_NO_FLUSH);
-      if(uTotalOutBefore > zi->ci.stream.total_out)
-      {
-        int bBreak = 0;
-        bBreak++;
-      }
       zi->ci.pos_in_buffered_data += (uInt)(zi->ci.stream.total_out - uTotalOutBefore) ;
     }
......
@@ -8468,8 +8468,7 @@ int ha_rocksdb::index_read_map_impl(uchar *const buf, const uchar *const key,
                                     const key_range *end_key) {
   DBUG_ENTER_FUNC();
 
-  DBUG_EXECUTE_IF("myrocks_busy_loop_on_row_read", int debug_i = 0;
-                  while (1) { debug_i++; });
+  DBUG_EXECUTE_IF("myrocks_busy_loop_on_row_read", my_sleep(50000););
 
   int rc = 0;
@@ -12124,7 +12123,6 @@ static int calculate_stats(
     }
   }
 
-  int num_sst = 0;
   for (const auto &it : props) {
     std::vector<Rdb_index_stats> sst_stats;
     Rdb_tbl_prop_coll::read_stats_from_tbl_props(it.second, &sst_stats);
@@ -12153,7 +12151,6 @@
       stats[it1.m_gl_index_id].merge(
           it1, true, it_index->second->max_storage_fmt_length());
     }
-    num_sst++;
   }
 
   if (include_memtables) {
......
@@ -401,7 +401,7 @@ class ha_rocksdb : public my_core::handler {
   void free_key_buffers();
 
   // the buffer size should be at least 2*Rdb_key_def::INDEX_NUMBER_SIZE
-  rocksdb::Range get_range(const int i, uchar buf[]) const;
+  rocksdb::Range get_range(const int i, uchar buf[2 * 4]) const;
 
   /*
     Perf timers for data reads
......
@@ -2423,7 +2423,6 @@ static int toku_loader_write_ft_from_q (FTLOADER bl,
     // The pivots file will contain all the pivot strings (in the form <size(32bits)> <data>)
     // The pivots_fname is the name of the pivots file.
     // Note that the pivots file will have one extra pivot in it (the last key in the dictionary) which will not appear in the tree.
-    int64_t n_pivots=0; // number of pivots in pivots_file
     FIDX pivots_file; // the file
 
     r = ft_loader_open_temp_file (bl, &pivots_file);
@@ -2539,8 +2538,6 @@
             allocate_node(&sts, lblock);
 
-            n_pivots++;
-
             invariant(maxkey.data != NULL);
             if ((r = bl_write_dbt(&maxkey, pivots_stream, NULL, nullptr, bl))) {
                 ft_loader_set_panic(bl, r, true, which_db, nullptr, nullptr);
@@ -2616,8 +2613,6 @@
     // We haven't paniced, so the sum should add up.
     invariant(used_estimate == total_disksize_estimate);
 
-    n_pivots++;
-
     {
         DBT key = make_dbt(0,0); // must write an extra DBT into the pivots file.
         r = bl_write_dbt(&key, pivots_stream, NULL, nullptr, bl);
@@ -3302,7 +3297,7 @@ static int write_nonleaves (FTLOADER bl, FIDX pivots_fidx, struct dbout *out, st
     int height = 1;
 
     // Watch out for the case where we saved the last pivot but didn't write any more nodes out.
-    // The trick is not to look at n_pivots, but to look at blocks.n_blocks
+    // The trick is to look at blocks.n_blocks
     while (sts->n_subtrees > 1) {
         // If there is more than one block in blocks, then we must build another level of the tree.
......
@@ -208,12 +208,10 @@ verify_snapshot_system(TXN_MANAGER txn_manager UU()) {
         {
             //verify neither pair->begin_id nor end_id is in snapshot_xids
             TOKUTXN curr_txn = txn_manager->snapshot_head;
-            uint32_t curr_index = 0;
             while (curr_txn != NULL) {
                 invariant(tuple->begin_id != curr_txn->txnid.parent_id64);
                 invariant(tuple->end_id != curr_txn->txnid.parent_id64);
                 curr_txn = curr_txn->snapshot_next;
-                curr_index++;
             }
         }
         {
......