Commit ba1fb955 authored by marko

branches/zip: Try to adhere to ISO 9899:1989 where possible. (The recently introduced ib_longlong and %ll length specifier are not C89.)
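
As background for the C89 point, here is a minimal sketch (not part of this commit) of printing a 64-bit value without the C99-only "ll" length modifier; the uint64_parts type and print_uint64_hex() are invented for the illustration, since C89 itself guarantees no integer type wider than long:

    #include <stdio.h>

    /* A 64-bit value faked as two 32-bit halves, because C89 guarantees
    no integer type wider than long and no "%ll" length modifier. */
    typedef struct uint64_parts_struct {
    	unsigned long	high;	/* upper 32 bits */
    	unsigned long	low;	/* lower 32 bits */
    } uint64_parts;

    static void
    print_uint64_hex(uint64_parts n)
    {
    	/* "%lx" is C89; print the halves separately. */
    	printf("%08lx%08lx\n",
    	       n.high & 0xFFFFFFFFUL, n.low & 0xFFFFFFFFUL);
    }

    int
    main(void)
    {
    	uint64_parts	x;

    	x.high = 0x1UL;
    	x.low = 0x23456789UL;
    	print_uint64_hex(x);	/* prints 0000000123456789 */
    	return(0);
    }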

row_ext_t: Change char* buf to byte* buf to avoid signedness warnings.
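
For illustration only: InnoDB's byte is a typedef for unsigned char, so a char* backing buffer forces signedness casts (or warnings) wherever the data is handled as unsigned bytes, while byte* keeps the common case cast-free. The copy_prefix() helper below is invented for the example and is not code from this patch:

    typedef unsigned char	byte;	/* as in InnoDB */

    /* Invented helper: copies a column prefix as raw unsigned bytes. */
    static void
    copy_prefix(byte* dst, const byte* src, unsigned long len)
    {
    	while (len--) {
    		*dst++ = *src++;
    	}
    }

    int
    main(void)
    {
    	char			old_buf[16];	/* old declaration: char* backing store */
    	byte			new_buf[16];	/* new declaration: byte* backing store */
    	static const byte	src[4] = {0xde, 0xad, 0xbe, 0xef};

    	/* copy_prefix(old_buf, src, 4);	pointer signedness warning */
    	copy_prefix((byte*) old_buf, src, 4);	/* cast needed with char* */
    	copy_prefix(new_buf, src, 4);		/* no cast with byte* */
    	return(0);
    }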

Change the type of all bit fields to unsigned.
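
A hedged aside on why unsigned is the safer spelling: ISO C only guarantees bit fields of type int, signed int, or unsigned int, so a ulint (unsigned long) bit field leans on a compiler extension, and a plain int bit field has implementation-defined signedness. The toy struct below is not InnoDB code:

    #include <stdio.h>

    struct flags_struct {
    	int		maybe_signed:1;		/* signedness is implementation-defined */
    	unsigned	surely_unsigned:1;	/* always holds 0 or 1 */
    };

    int
    main(void)
    {
    	struct flags_struct	f;

    	f.maybe_signed = 1;
    	f.surely_unsigned = 1;

    	/* With a signed 1-bit field the first value is often -1. */
    	printf("%d %u\n", (int) f.maybe_signed, (unsigned) f.surely_unsigned);
    	return(0);
    }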

rec_get_next_ptr(): Add (byte*) casts to avoid arithmetic on void*.
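
For context, a small example (names invented, not InnoDB code) of why the casts are needed: ISO C defines no arithmetic on void*, so adding an offset to a void* pointer is only a GCC extension; casting to byte* first is portable:

    #include <stdio.h>

    typedef unsigned char	byte;

    /* Invented helper: return the address "offset" bytes past "ptr". */
    static byte*
    ptr_add(void* ptr, unsigned long offset)
    {
    	/* "return(ptr + offset);" is a GCC extension: ISO C defines no
    	arithmetic on void*, because sizeof(void) does not exist.
    	Casting to byte* first is well defined. */
    	return((byte*) ptr + offset);
    }

    int
    main(void)
    {
    	byte	buf[16];

    	printf("%d\n", (int) (ptr_add(buf, 5) - buf));	/* prints 5 */
    	return(0);
    }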

page_zip_decompress_clust(): Do not mix declarations and code.
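
To make the last point concrete: C89 requires every declaration in a block to appear before the first statement, which is why the ulint i declaration moves to the top of the loop body in the hunk further below. A standalone sketch, not InnoDB code:

    #include <stdio.h>

    int
    main(void)
    {
    	int	total = 0;
    	int	i;	/* C89: declared before the first statement */

    	for (i = 0; i < 4; i++) {
    		total += i;
    		/* "int j = i * 2;" here would be valid C99 but a
    		constraint violation in C89, because the statement
    		above already started the block's statement list. */
    	}

    	printf("%d\n", total);	/* prints 6 */
    	return(0);
    }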
parent 0f852331
@@ -893,25 +893,25 @@ struct buf_page_struct{
 					machine word. Some of them are additionally protected by
 					buf_pool->mutex. */
-	ulint		space:32;	/* tablespace id */
-	ulint		offset:32;	/* page number */
-	ulint		state:3;	/* state of the control block
+	unsigned	space:32;	/* tablespace id */
+	unsigned	offset:32;	/* page number */
+	unsigned	state:3;	/* state of the control block
 					(@see enum buf_page_state); also
 					protected by buf_pool->mutex */
-	ulint		flush_type:2;	/* if this block is currently being
+	unsigned	flush_type:2;	/* if this block is currently being
 					flushed to disk, this tells the
 					flush_type (@see enum buf_flush) */
-	ulint		accessed:1;	/* TRUE if the page has been accessed
+	unsigned	accessed:1;	/* TRUE if the page has been accessed
 					while in the buffer pool: read-ahead
 					may read in pages which have not been
 					accessed yet; a thread is allowed to
 					read this for heuristic purposes
 					without holding any mutex or latch */
-	ulint		io_fix:2;	/* type of pending I/O operation
+	unsigned	io_fix:2;	/* type of pending I/O operation
 					(@see enum buf_io_fix); also
 					protected by buf_pool->mutex */
-	ulint		buf_fix_count:24;/* count of how manyfold this block
+	unsigned	buf_fix_count:24;/* count of how manyfold this block
 					is currently bufferfixed */
 	page_zip_des_t	zip;		/* compressed page */
@@ -955,9 +955,9 @@ struct buf_page_struct{
 	ibool		in_LRU_list;	/* TRUE of the page is in the LRU list;
 					used in debugging */
 #endif /* UNIV_DEBUG */
-	ulint		old:1;		/* TRUE if the block is in the old
+	unsigned	old:1;		/* TRUE if the block is in the old
 					blocks in the LRU list */
-	ulint		LRU_position:31;/* value which monotonically decreases
+	unsigned	LRU_position:31;/* value which monotonically decreases
 					(or may stay constant if old==TRUE)
 					toward the end of the LRU list, if
 					buf_pool->ulint_clock has not wrapped
@@ -965,7 +965,7 @@ struct buf_page_struct{
 					be used in heuristic algorithms,
 					because of the possibility of a
 					wrap-around! */
-	ulint		freed_page_clock:32;/* the value of
+	unsigned	freed_page_clock:32;/* the value of
 					buf_pool->freed_page_clock when this
 					block was the last time put to the
 					head of the LRU list; a thread is
@@ -1001,9 +1001,9 @@ struct buf_block_struct{
 					contention on the buffer pool mutex */
 	rw_lock_t	lock;		/* read-write lock of the buffer
 					frame */
-	ulint		lock_hash_val:32;/* hashed value of the page address
+	unsigned	lock_hash_val:32;/* hashed value of the page address
 					in the record lock hash table */
-	ulint		check_index_page_at_flush:1;
+	unsigned	check_index_page_at_flush:1;
 					/* TRUE if we know that this is
 					an index page, and want the database
 					to check its consistency before flush;
@@ -1053,16 +1053,16 @@ struct buf_block_struct{
 					pointers in the adaptive hash index
 					pointing to this frame */
 #endif /* UNIV_DEBUG */
-	ulint		is_hashed:1;	/* TRUE if hash index has already been
+	unsigned	is_hashed:1;	/* TRUE if hash index has already been
 					built on this page; note that it does
 					not guarantee that the index is
 					complete, though: there may have been
 					hash collisions, record deletions,
 					etc. */
-	ulint		curr_n_fields:10;/* prefix length for hash indexing:
+	unsigned	curr_n_fields:10;/* prefix length for hash indexing:
 					number of full fields */
-	ulint		curr_n_bytes:15;/* number of bytes in hash indexing */
-	ibool		curr_left_side:1;/* TRUE or FALSE in hash indexing */
+	unsigned	curr_n_bytes:15;/* number of bytes in hash indexing */
+	unsigned	curr_left_side:1;/* TRUE or FALSE in hash indexing */
 	dict_index_t*	index;		/* Index for which the adaptive
 					hash index has been created. */
 	/* 4. Debug fields */
......
@@ -33,15 +33,15 @@ struct page_zip_des_struct
 	page_zip_t*	data;		/* compressed page data */
 #ifdef UNIV_DEBUG
-	ulint		m_start:16;	/* start offset of modification log */
+	unsigned	m_start:16;	/* start offset of modification log */
 #endif /* UNIV_DEBUG */
-	ulint		m_end:16;	/* end offset of modification log */
-	ulint		m_nonempty:1;	/* TRUE if the modification log
+	unsigned	m_end:16;	/* end offset of modification log */
+	unsigned	m_nonempty:1;	/* TRUE if the modification log
 					is not empty */
-	ulint		n_blobs:12;	/* number of externally stored
+	unsigned	n_blobs:12;	/* number of externally stored
 					columns on the page; the maximum
 					is 744 on a 16 KiB page */
-	ulint		ssize:3;	/* 0 or compressed page size;
+	unsigned	ssize:3;	/* 0 or compressed page size;
 					the size in bytes is 512<<ssize. */
 };
......
@@ -275,12 +275,13 @@ rec_get_next_ptr(
 		      && field_value < 32768)
 		     || field_value < (uint16) -REC_N_NEW_EXTRA_BYTES);
-		return(ut_align_down(rec, UNIV_PAGE_SIZE)
+		return((byte*) ut_align_down(rec, UNIV_PAGE_SIZE)
 		       + ut_align_offset(rec + field_value, UNIV_PAGE_SIZE));
 	} else {
 		ut_ad(field_value < UNIV_PAGE_SIZE);
-		return(ut_align_down(rec, UNIV_PAGE_SIZE) + field_value);
+		return((byte*) ut_align_down(rec, UNIV_PAGE_SIZE)
+		       + field_value);
 	}
 }
......
@@ -45,7 +45,7 @@ struct row_ext_struct{
 	ulint		n_ext;	/* number of externally stored columns */
 	const ulint*	ext;	/* col_no's of externally stored columns */
 	ulint		zip_size;/* compressed page size, or 0 */
-	char*		buf;	/* backing store of the column prefix cache */
+	byte*		buf;	/* backing store of the column prefix cache */
 	ulint		len[1];	/* prefix lengths; 0 if not cached */
 };
......
@@ -1959,6 +1959,7 @@ page_zip_decompress_clust(
 	/* Decompress the records in heap_no order. */
 	for (slot = 0; slot < n_dense; slot++) {
 		rec_t*	rec	= recs[slot];
+		ulint	i;
 		d_stream->avail_out = rec - REC_N_NEW_EXTRA_BYTES
 			- d_stream->next_out;
@@ -1993,7 +1994,6 @@ page_zip_decompress_clust(
 					  ULINT_UNDEFINED, &heap);
 		/* This is a leaf page in a clustered index. */
-		ulint	i;
 		/* Check if there are any externally stored columns.
 		For each externally stored column, restore the
......
@@ -83,7 +83,7 @@ row_sel_sec_rec_is_for_blob(
 					      zip_size,
 					      clust_field, clust_len);
 	len = dtype_get_at_most_n_mbchars(prtype, mbminlen, mbmaxlen,
-					  sec_len, len, buf);
+					  sec_len, len, (const char*) buf);
 	return(!cmp_data_data(mtype, prtype, buf, len, sec_field, sec_len));
 }
......