From f1a288d8e1c48706572bf22c4b08561dff263050 Mon Sep 17 00:00:00 2001
From: Brandon Rodriguez <brodriguez8774@gmail.com>
Date: Thu, 18 Feb 2021 17:35:39 -0500
Subject: [PATCH] Clean up load_balance_general file

---
 src/load_balance_general.c | 21 ++-------------------
 1 file changed, 2 insertions(+), 19 deletions(-)

diff --git a/src/load_balance_general.c b/src/load_balance_general.c
index 6523c0c..e0f9a3b 100644
--- a/src/load_balance_general.c
+++ b/src/load_balance_general.c
@@ -114,9 +114,6 @@ void main_terminate_workers(thread_struct* thread_args_ptr) {
 
     // Send second termination message, because for some reason some workers seem to get stuck otherwise.
     for (index = 1; index < thread_args_ptr->total_processors; index++) {
-        // char* log_msg = calloc(256, sizeof(char));
-        // sprintf(log_msg, "Sending termination message to process %i.", index);
-        // log(log_msg); free(log_msg);
         MPI_Isend(&index, 1, MPI_INT, index, tag_main_termination, MPI_COMM_WORLD, &request_var);
     }
 
@@ -246,9 +243,6 @@ void worker_handle_request(thread_struct* thread_args_ptr) {
 
         // One or more messages present. Loop through and check for message from all processors.
         for (index = 1; index < thread_args_ptr->total_processors; index++) {
-            // char* msg = calloc(256, sizeof(char));
-            // sprintf(msg, " Checking for request from process %i", index);
-            // log(msg); free(msg);
             msg_status_flag = 0;
             MPI_Iprobe(index, tag_work_request, MPI_COMM_WORLD, &msg_status_flag, MPI_STATUS_IGNORE);
             if (msg_status_flag == 1) {
@@ -258,29 +252,18 @@ void worker_handle_request(thread_struct* thread_args_ptr) {
                 MPI_Recv(&temp, 1, MPI_INT, index, tag_work_request, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
 
                 // Check if donating worker has work to split and send.
-                if (thread_args_ptr->remaining_loads > 4) {
+                if (thread_args_ptr->remaining_loads > 9) {
                     // Worker has at least one load to split and send. Do so.
-                    // char* msg = calloc(256, sizeof(char));
-                    // sprintf(msg, " orig load: %i", thread_args_ptr->remaining_loads);
-                    // log(msg); free(msg); msg = calloc(256, sizeof(char));
                     work_send_value = thread_args_ptr->remaining_loads / 2;
-                    // sprintf(msg, " sending value: %i", work_send_value);
-                    // log(msg); free(msg); msg = calloc(256, sizeof(char));
                     thread_args_ptr->remaining_loads = work_send_value + (thread_args_ptr->remaining_loads % 2);
-                    // sprintf(msg, " new personal load value: %i", thread_args_ptr->remaining_loads);
-                    // log(msg); free(msg);
                     MPI_Isend(&work_send_value, 1, MPI_INT, index, tag_work_response, MPI_COMM_WORLD, &request_var);
 
                     // Load has been split. Immediately attempt to update main processor of change.
                     worker_send_status(thread_args_ptr->thread_num, thread_args_ptr->remaining_loads);
                     worker_send_status(index, work_send_value);
                 } else {
-                    // Worker has exactly 0 or 1 loads. Not enough to send. Reject request instead.
+                    // Not enough loads to split work. Reject request instead.
                     work_send_value = -1;
-                    // char* msg = calloc(256, sizeof(char));
-                    // sprintf(msg, " rejecting request with current load of %i", thread_args_ptr->remaining_loads);
-                    // log(msg);
-                    // free(msg);
                     MPI_Isend(&work_send_value, 1, MPI_INT, index, tag_work_response, MPI_COMM_WORLD, &request_var);
                 }
 
--
GitLab