Commit cdecb914 authored Jun 28, 2011 by Marko Mäkelä

Merge mysql-5.1 to mysql-5.5.

Parents: e42a24e5, 0c54d44f
Showing 1 changed file with 59 additions and 31 deletions.
storage/innobase/page/page0zip.c
@@ -150,6 +150,20 @@ page_zip_empty_size(
 }
 #endif /* !UNIV_HOTBACKUP */
 
+/*************************************************************//**
+Gets the number of elements in the dense page directory,
+including deleted records (the free list).
+@return number of elements in the dense page directory */
+UNIV_INLINE
+ulint
+page_zip_dir_elems(
+/*===============*/
+	const page_zip_des_t*	page_zip)	/*!< in: compressed page */
+{
+	/* Exclude the page infimum and supremum from the record count. */
+	return(page_dir_get_n_heap(page_zip->data) - PAGE_HEAP_NO_USER_LOW);
+}
+
 /*************************************************************//**
 Gets the size of the compressed page trailer (the dense page directory),
 including deleted records (the free list).
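The new helper makes the infimum/supremum adjustment explicit: page_dir_get_n_heap() counts every heap record, including the two system records, while the dense directory only describes the rest. A minimal standalone sketch of that counting rule (the toy_* names and TOY_HEAP_NO_USER_LOW are illustrative stand-ins, not the InnoDB definitions):

#include <assert.h>

/* Stand-in for PAGE_HEAP_NO_USER_LOW: the heap numbers below it are
assumed to belong to the page infimum and supremum system records. */
enum { TOY_HEAP_NO_USER_LOW = 2 };

/* Dense-directory element count for a page whose heap holds n_heap
records in total, system records included. */
static unsigned long
toy_dir_elems(unsigned long n_heap)
{
	return(n_heap - TOY_HEAP_NO_USER_LOW);
}

int
main(void)
{
	/* Infimum + supremum + 5 user/free-list records => 5 dense entries. */
	assert(toy_dir_elems(7) == 5);
	return(0);
}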
@@ -160,13 +174,41 @@ page_zip_dir_size(
 /*==============*/
 	const page_zip_des_t*	page_zip)	/*!< in: compressed page */
 {
-	/* Exclude the page infimum and supremum from the record count. */
-	ulint	size = PAGE_ZIP_DIR_SLOT_SIZE
-		* (page_dir_get_n_heap(page_zip->data)
-		   - PAGE_HEAP_NO_USER_LOW);
-	return(size);
+	return(PAGE_ZIP_DIR_SLOT_SIZE * page_zip_dir_elems(page_zip));
 }
 
+/*************************************************************//**
+Gets an offset to the compressed page trailer (the dense page directory),
+including deleted records (the free list).
+@return offset of the dense page directory */
+UNIV_INLINE
+ulint
+page_zip_dir_start_offs(
+/*====================*/
+	const page_zip_des_t*	page_zip,	/*!< in: compressed page */
+	ulint			n_dense)	/*!< in: directory size */
+{
+	ut_ad(n_dense * PAGE_ZIP_DIR_SLOT_SIZE < page_zip_get_size(page_zip));
+
+	return(page_zip_get_size(page_zip) - n_dense * PAGE_ZIP_DIR_SLOT_SIZE);
+}
+
+/*************************************************************//**
+Gets a pointer to the compressed page trailer (the dense page directory),
+including deleted records (the free list).
+@param[in] page_zip	compressed page
+@param[in] n_dense	number of entries in the directory
+@return pointer to the dense page directory */
+#define page_zip_dir_start_low(page_zip, n_dense)			\
+	((page_zip)->data + page_zip_dir_start_offs(page_zip, n_dense))
+/*************************************************************//**
+Gets a pointer to the compressed page trailer (the dense page directory),
+including deleted records (the free list).
+@param[in] page_zip	compressed page
+@return pointer to the dense page directory */
+#define page_zip_dir_start(page_zip)					\
+	page_zip_dir_start_low(page_zip, page_zip_dir_elems(page_zip))
+
 /*************************************************************//**
 Gets the size of the compressed page trailer (the dense page directory),
 only including user records (excluding the free list).
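Taken together, the helpers encode a single layout fact: the dense directory occupies the last n_dense slots of the compressed block, so its start is the block size minus n_dense * PAGE_ZIP_DIR_SLOT_SIZE. A self-contained sketch of that arithmetic under toy definitions (toy_page_zip_t, TOY_SLOT_SIZE and the toy_* functions are illustrative, not the real InnoDB types):

#include <assert.h>
#include <stddef.h>

enum { TOY_SLOT_SIZE = 2 };	/* stand-in for PAGE_ZIP_DIR_SLOT_SIZE */

typedef struct {
	unsigned char*	data;	/* compressed page frame */
	size_t		size;	/* compressed page size, in bytes */
} toy_page_zip_t;

/* Offset of the dense directory: it fills the last n_dense slots
of the block (cf. page_zip_dir_start_offs() above). */
static size_t
toy_dir_start_offs(const toy_page_zip_t* pz, size_t n_dense)
{
	assert(n_dense * TOY_SLOT_SIZE < pz->size);
	return(pz->size - n_dense * TOY_SLOT_SIZE);
}

/* Pointer form, mirroring the page_zip_dir_start_low() macro. */
static unsigned char*
toy_dir_start_low(const toy_page_zip_t* pz, size_t n_dense)
{
	return(pz->data + toy_dir_start_offs(pz, n_dense));
}

int
main(void)
{
	unsigned char	frame[8192];
	toy_page_zip_t	pz = { frame, sizeof frame };

	/* 100 directory entries of 2 bytes each: the trailer starts
	200 bytes before the end of the 8 KiB block. */
	assert(toy_dir_start_low(&pz, 100) == frame + 8192 - 200);
	return(0);
}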
@@ -2242,8 +2284,7 @@ zlib_done:
 	}
 
 	/* Restore the uncompressed columns in heap_no order. */
-	storage = page_zip->data + page_zip_get_size(page_zip)
-		- n_dense * PAGE_ZIP_DIR_SLOT_SIZE;
+	storage	= page_zip_dir_start_low(page_zip, n_dense);
 
 	for (slot = 0; slot < n_dense; slot++) {
 		rec_t*	rec	= recs[slot];
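This hunk and the ones that follow are a mechanical substitution: with the definitions above, the new call expands to exactly the address the removed lines computed (a sketch of the expansion, not compiled against the real headers):

	page_zip_dir_start_low(page_zip, n_dense)
	    == (page_zip)->data + page_zip_dir_start_offs(page_zip, n_dense)
	    == page_zip->data + page_zip_get_size(page_zip)
	                      - n_dense * PAGE_ZIP_DIR_SLOT_SIZE

The only behavioral addition is the ut_ad() bounds check inside page_zip_dir_start_offs(), which is compiled in only for debug builds.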
@@ -2728,8 +2769,7 @@ zlib_done:
 		return(FALSE);
 	}
 
-	storage = page_zip->data + page_zip_get_size(page_zip)
-		- n_dense * PAGE_ZIP_DIR_SLOT_SIZE;
+	storage	= page_zip_dir_start_low(page_zip, n_dense);
 
 	externs = storage - n_dense
 		* (DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN);
@@ -3457,9 +3497,7 @@ page_zip_write_rec(
 	}
 
 	/* Write the data bytes. Store the uncompressed bytes separately. */
-	storage = page_zip->data + page_zip_get_size(page_zip)
-		- (page_dir_get_n_heap(page) - PAGE_HEAP_NO_USER_LOW)
-		* PAGE_ZIP_DIR_SLOT_SIZE;
+	storage	= page_zip_dir_start(page_zip);
 
 	if (page_is_leaf(page)) {
 		ulint		len;
@@ -3755,9 +3793,7 @@ corrupt:
 	field = page + offset;
 	storage = page_zip->data + z_offset;
 
-	storage_end = page_zip->data + page_zip_get_size(page_zip)
-		- (page_dir_get_n_heap(page) - PAGE_HEAP_NO_USER_LOW)
-		* PAGE_ZIP_DIR_SLOT_SIZE;
+	storage_end = page_zip_dir_start(page_zip);
 
 	heap_no = 1 + (storage_end - storage) / REC_NODE_PTR_SIZE;
@@ -3793,7 +3829,9 @@ page_zip_write_node_ptr(
 {
 	byte*	field;
 	byte*	storage;
+#ifdef UNIV_DEBUG
 	page_t*	page	= page_align(rec);
+#endif /* UNIV_DEBUG */
 
 	ut_ad(PAGE_ZIP_MATCH(rec, page_zip));
 	ut_ad(page_simple_validate_new(page));
@@ -3810,9 +3848,7 @@ page_zip_write_node_ptr(
 	UNIV_MEM_ASSERT_RW(page_zip->data, page_zip_get_size(page_zip));
 	UNIV_MEM_ASSERT_RW(rec, size);
 
-	storage = page_zip->data + page_zip_get_size(page_zip)
-		- (page_dir_get_n_heap(page) - PAGE_HEAP_NO_USER_LOW)
-		* PAGE_ZIP_DIR_SLOT_SIZE
+	storage	= page_zip_dir_start(page_zip)
 		- (rec_get_heap_no_new(rec) - 1) * REC_NODE_PTR_SIZE;
 	field	= rec + size - REC_NODE_PTR_SIZE;
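On node-pointer pages the trailer also stores the uncompressed node pointers: the entry for heap number h sits (h - 1) * REC_NODE_PTR_SIZE bytes below the start of the dense directory, and the heap_no computation in the earlier corrupt: hunk recovers h by inverting that formula. A small sketch of the two directions (TOY_NODE_PTR_SIZE and the toy_* functions are illustrative stand-ins, not the InnoDB definitions):

#include <assert.h>
#include <stddef.h>

enum { TOY_NODE_PTR_SIZE = 4 };	/* stand-in for REC_NODE_PTR_SIZE */

/* Write side: offset of the node-pointer slot of heap number h,
given the offset dir_start of the dense directory. */
static size_t
toy_node_ptr_offs(size_t dir_start, unsigned long h)
{
	return(dir_start - (h - 1) * TOY_NODE_PTR_SIZE);
}

/* Parse side: recover the heap number from a stored slot offset,
mirroring heap_no = 1 + (storage_end - storage) / REC_NODE_PTR_SIZE. */
static unsigned long
toy_heap_no(size_t dir_start, size_t slot_offs)
{
	return(1 + (dir_start - slot_offs) / TOY_NODE_PTR_SIZE);
}

int
main(void)
{
	size_t		dir_start = 16000;	/* arbitrary trailer offset */
	unsigned long	h;

	for (h = 2; h < 10; h++) {
		assert(toy_heap_no(dir_start, toy_node_ptr_offs(dir_start, h))
		       == h);
	}
	return(0);
}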
@@ -3861,7 +3897,9 @@ page_zip_write_trx_id_and_roll_ptr(
 {
 	byte*	field;
 	byte*	storage;
+#ifdef UNIV_DEBUG
 	page_t*	page	= page_align(rec);
+#endif /* UNIV_DEBUG */
 	ulint	len;
 
 	ut_ad(PAGE_ZIP_MATCH(rec, page_zip));
@@ -3879,9 +3917,7 @@ page_zip_write_trx_id_and_roll_ptr(
 	UNIV_MEM_ASSERT_RW(page_zip->data, page_zip_get_size(page_zip));
 
-	storage = page_zip->data + page_zip_get_size(page_zip)
-		- (page_dir_get_n_heap(page) - PAGE_HEAP_NO_USER_LOW)
-		* PAGE_ZIP_DIR_SLOT_SIZE
+	storage	= page_zip_dir_start(page_zip)
 		- (rec_get_heap_no_new(rec) - 1)
 		* (DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN);
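On clustered-index leaf pages the area below the directory slots is analogous but wider: one DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN entry per record, with the BLOB pointer ("externs") area below that, as the externs hunk earlier in this diff shows. A toy sketch of those addresses (the TOY_* constants and toy_* functions are illustrative stand-ins for the InnoDB lengths):

#include <assert.h>
#include <stddef.h>

enum {
	TOY_SLOT_SIZE		= 2,	/* stand-in for PAGE_ZIP_DIR_SLOT_SIZE */
	TOY_TRX_RPTR_SIZE	= 13	/* stand-in for DATA_TRX_ID_LEN
					+ DATA_ROLL_PTR_LEN */
};

/* Start of the dense directory (cf. page_zip_dir_start_offs()). */
static size_t
toy_dir_start(size_t page_size, size_t n_dense)
{
	return(page_size - n_dense * TOY_SLOT_SIZE);
}

/* Slot holding the uncompressed DB_TRX_ID and DB_ROLL_PTR columns
of the record with heap number h. */
static size_t
toy_trx_rptr_slot(size_t page_size, size_t n_dense, unsigned long h)
{
	return(toy_dir_start(page_size, n_dense) - (h - 1) * TOY_TRX_RPTR_SIZE);
}

/* Upper bound of the BLOB pointer ("externs") area, directly below
the per-record column slots. */
static size_t
toy_externs(size_t page_size, size_t n_dense)
{
	return(toy_dir_start(page_size, n_dense)
	       - n_dense * TOY_TRX_RPTR_SIZE);
}

int
main(void)
{
	size_t	page_size = 8192;
	size_t	n_dense = 10;

	/* The externs area ends where the column slot of the highest
	user heap number (n_dense + 1) begins. */
	assert(toy_externs(page_size, n_dense)
	       == toy_trx_rptr_slot(page_size, n_dense, n_dense + 1));
	return(0);
}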
@@ -3948,11 +3984,7 @@ page_zip_clear_rec(
 		/* Clear node_ptr. On the compressed page,
 		there is an array of node_ptr immediately before the
 		dense page directory, at the very end of the page. */
-		storage	= page_zip->data
-			+ page_zip_get_size(page_zip)
-			- (page_dir_get_n_heap(page)
-			   - PAGE_HEAP_NO_USER_LOW)
-			* PAGE_ZIP_DIR_SLOT_SIZE;
+		storage	= page_zip_dir_start(page_zip);
 		ut_ad(dict_index_get_n_unique_in_tree(index) ==
 		      rec_offs_n_fields(offsets) - 1);
 		field = rec_get_nth_field(rec, offsets,
@@ -3972,11 +4004,7 @@ page_zip_clear_rec(
 			= dict_col_get_clust_pos(
 				dict_table_get_sys_col(
 					index->table, DATA_TRX_ID), index);
-		storage	= page_zip->data
-			+ page_zip_get_size(page_zip)
-			- (page_dir_get_n_heap(page)
-			   - PAGE_HEAP_NO_USER_LOW)
-			* PAGE_ZIP_DIR_SLOT_SIZE;
+		storage	= page_zip_dir_start(page_zip);
 		field	= rec_get_nth_field(rec, offsets, trx_id_pos, &len);
 		ut_ad(len == DATA_TRX_ID_LEN);