Commit 972a14b5 authored by Jan Lindström

Code cleanup after review.

parent d6afa800
@@ -113,7 +113,6 @@ typedef struct wrk_itm
 	ulint		n_flushed;	/*!< Flushed pages count */
 	os_thread_id_t	id_usr;		/*!< Thread-id currently working */
 	wrk_status_t	wi_status;	/*!< Work item status */
-	struct wrk_itm	*next;		/*!< Next work item */
 	mem_heap_t	*wheap;		/*!< Heap were to allocate memory
 					for queue nodes */
 	mem_heap_t	*rheap;
@@ -262,6 +261,9 @@ mtflush_service_io(
 		work_item->wi_status = WRK_ITEM_SET;
 	}

+#ifdef UNIV_MTFLUSH_DEBUG
+	ut_a(work_item->id_usr == 0);
+#endif
 	work_item->id_usr = os_thread_get_curr_id();

 	/* This works as a producer/consumer model, where in tasks are
@@ -365,7 +367,6 @@ buf_mtflu_io_thread_exit(void)
 	/* Allocate work items for shutdown message */
 	work_item = (wrk_t*)mem_heap_alloc(mtflush_io->wheap, sizeof(wrk_t)*srv_mtflush_threads);
-	memset(work_item, 0, sizeof(wrk_t)*srv_mtflush_threads);

 	/* Confirm if the io-thread KILL is in progress, bailout */
 	if (mtflush_io->gwt_status == WTHR_KILL_IT) {
@@ -383,6 +384,7 @@ buf_mtflu_io_thread_exit(void)
 		work_item[i].wi_status = WRK_ITEM_EXIT;
 		work_item[i].wheap = mtflush_io->wheap;
 		work_item[i].rheap = mtflush_io->rheap;
+		work_item[i].id_usr = 0;

 		ib_wqueue_add(mtflush_io->wq,
 			(void *)&(work_item[i]),
@@ -518,7 +520,6 @@ buf_mtflu_flush_work_items(
 				node items areallocated */
 	work_heap = mem_heap_create(0);
 	reply_heap = mem_heap_create(0);
-	memset(work_item, 0, sizeof(wrk_t)*MTFLUSH_MAX_WORKER);

 	for(i=0;i<buf_pool_inst; i++) {
@@ -530,6 +531,8 @@ buf_mtflu_flush_work_items(
 		work_item[i].wi_status = WRK_ITEM_UNSET;
 		work_item[i].wheap = work_heap;
 		work_item[i].rheap = reply_heap;
+		work_item[i].n_flushed = 0;
+		work_item[i].id_usr = 0;

 		ib_wqueue_add(mtflush_ctx->wq,
 			(void *)(work_item + i),
@@ -544,7 +547,7 @@ buf_mtflu_flush_work_items(
 		if (done_wi != NULL) {
 			per_pool_pages_flushed[i] = done_wi->n_flushed;

-#if UNIV_DEBUG
+#ifdef UNIV_MTFLUSH_DEBUG
 			if((int)done_wi->id_usr == 0 &&
 			   (done_wi->wi_status == WRK_ITEM_SET ||
 			    done_wi->wi_status == WRK_ITEM_UNSET)) {
@@ -113,7 +113,6 @@ typedef struct wrk_itm
 	ulint		n_flushed;	/*!< Flushed pages count */
 	os_thread_id_t	id_usr;		/*!< Thread-id currently working */
 	wrk_status_t	wi_status;	/*!< Work item status */
-	struct wrk_itm	*next;		/*!< Next work item */
 	mem_heap_t	*wheap;		/*!< Heap were to allocate memory
 					for queue nodes */
 	mem_heap_t	*rheap;
@@ -269,6 +268,9 @@ mtflush_service_io(
 		work_item->wi_status = WRK_ITEM_SET;
 	}

+#ifdef UNIV_MTFLUSH_DEBUG
+	ut_a(work_item->id_usr == 0);
+#endif
 	work_item->id_usr = os_thread_get_curr_id();

 	/* This works as a producer/consumer model, where in tasks are
@@ -372,7 +374,6 @@ buf_mtflu_io_thread_exit(void)
 	/* Allocate work items for shutdown message */
 	work_item = (wrk_t*)mem_heap_alloc(mtflush_io->wheap, sizeof(wrk_t)*srv_mtflush_threads);
-	memset(work_item, 0, sizeof(wrk_t)*srv_mtflush_threads);

 	/* Confirm if the io-thread KILL is in progress, bailout */
 	if (mtflush_io->gwt_status == WTHR_KILL_IT) {
@@ -390,6 +391,7 @@ buf_mtflu_io_thread_exit(void)
 		work_item[i].wi_status = WRK_ITEM_EXIT;
 		work_item[i].wheap = mtflush_io->wheap;
 		work_item[i].rheap = mtflush_io->rheap;
+		work_item[i].id_usr = 0;

 		ib_wqueue_add(mtflush_io->wq,
 			(void *)&(work_item[i]),
@@ -525,7 +527,6 @@ buf_mtflu_flush_work_items(
 				node items areallocated */
 	work_heap = mem_heap_create(0);
 	reply_heap = mem_heap_create(0);
-	memset(work_item, 0, sizeof(wrk_t)*MTFLUSH_MAX_WORKER);

 	for(i=0;i<buf_pool_inst; i++) {
@@ -537,6 +538,8 @@ buf_mtflu_flush_work_items(
 		work_item[i].wi_status = WRK_ITEM_UNSET;
 		work_item[i].wheap = work_heap;
 		work_item[i].rheap = reply_heap;
+		work_item[i].n_flushed = 0;
+		work_item[i].id_usr = 0;

 		ib_wqueue_add(mtflush_ctx->wq,
 			(void *)(work_item + i),
@@ -551,7 +554,7 @@ buf_mtflu_flush_work_items(
 		if (done_wi != NULL) {
 			per_pool_pages_flushed[i] = done_wi->n_flushed;

-#if UNIV_DEBUG
+#ifdef UNIV_MTFLUSH_DEBUG
 			if((int)done_wi->id_usr == 0 &&
 			   (done_wi->wi_status == WRK_ITEM_SET ||
 			    done_wi->wi_status == WRK_ITEM_UNSET)) {
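
A minimal standalone sketch of the initialization pattern the diff moves to: each work item's fields are set explicitly before the item is queued, instead of zeroing the whole array with memset(), and a worker asserts the item is unclaimed before taking it. The types and queue below are simplified stand-ins for illustration only, not the real InnoDB wrk_t / ib_wqueue_t API.

#include <cassert>
#include <cstdint>
#include <queue>

/* Simplified stand-ins for the InnoDB types used in the diff. */
enum wrk_status_t { WRK_ITEM_UNSET, WRK_ITEM_SET, WRK_ITEM_DONE, WRK_ITEM_EXIT };

struct wrk_t {
	unsigned long	n_flushed;	/* flushed pages count */
	uint64_t	id_usr;		/* thread id of the worker, 0 = unclaimed */
	wrk_status_t	wi_status;	/* work item status */
};

int main()
{
	const int		n_items = 4;
	wrk_t			work_item[n_items];
	std::queue<wrk_t*>	wq;	/* stand-in for ib_wqueue_t */

	for (int i = 0; i < n_items; i++) {
		/* Explicit per-field initialization replaces the removed memset(). */
		work_item[i].n_flushed = 0;
		work_item[i].id_usr = 0;
		work_item[i].wi_status = WRK_ITEM_UNSET;
		wq.push(&work_item[i]);
	}

	/* A worker claiming an item first checks it is unclaimed,
	   mirroring the new ut_a(work_item->id_usr == 0) assertion. */
	wrk_t* wi = wq.front();
	wq.pop();
	assert(wi->id_usr == 0);
	wi->id_usr = 42;	/* would be os_thread_get_curr_id() in the real code */
	wi->wi_status = WRK_ITEM_SET;

	return 0;
}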