Skip to content
15 changes: 5 additions & 10 deletions cpp/libmps_parser/src/mps_parser.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -544,35 +544,30 @@ std::vector<char> mps_parser_t<i_t, f_t>::file_to_string(const std::string& file
#endif // MPS_PARSER_WITH_ZLIB

// Faster than using C++ I/O
FILE* fp = fopen(file.c_str(), "r");
std::unique_ptr<FILE, FcloseDeleter> fp{fopen(file.c_str(), "r")};
mps_parser_expects(fp != nullptr,
error_type_t::ValidationError,
"Error opening MPS file! Given path: %s",
mps_file.c_str());

mps_parser_expects(fseek(fp, 0L, SEEK_END) == 0,
mps_parser_expects(fseek(fp.get(), 0L, SEEK_END) == 0,
error_type_t::ValidationError,
"File browsing MPS file! Given path: %s",
mps_file.c_str());
const long bufsize = ftell(fp);
const long bufsize = ftell(fp.get());
mps_parser_expects(bufsize != -1L,
error_type_t::ValidationError,
"File browsing MPS file! Given path: %s",
mps_file.c_str());
std::vector<char> buf(bufsize + 1);
rewind(fp);
rewind(fp.get());

mps_parser_expects(fread(buf.data(), sizeof(char), bufsize, fp) == bufsize,
mps_parser_expects(fread(buf.data(), sizeof(char), bufsize, fp.get()) == bufsize,
error_type_t::ValidationError,
"Error reading MPS file! Given path: %s",
mps_file.c_str());
buf[bufsize] = '\0';

mps_parser_expects(fclose(fp) == 0,
error_type_t::ValidationError,
"Error closing MPS file! Given path: %s",
mps_file.c_str());

return buf;
}

Expand Down
52 changes: 0 additions & 52 deletions cpp/src/dual_simplex/presolve.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -50,58 +50,6 @@ struct lp_problem_t {
f_t obj_scale; // 1.0 for min, -1.0 for max
bool objective_is_integral{false};

void write_problem(const std::string& path) const
{
FILE* fid = fopen(path.c_str(), "w");
if (fid) {
fwrite(&num_rows, sizeof(i_t), 1, fid);
fwrite(&num_cols, sizeof(i_t), 1, fid);
fwrite(&obj_constant, sizeof(f_t), 1, fid);
fwrite(&obj_scale, sizeof(f_t), 1, fid);
i_t is_integral = objective_is_integral ? 1 : 0;
fwrite(&is_integral, sizeof(i_t), 1, fid);
fwrite(objective.data(), sizeof(f_t), num_cols, fid);
fwrite(rhs.data(), sizeof(f_t), num_rows, fid);
fwrite(lower.data(), sizeof(f_t), num_cols, fid);
fwrite(upper.data(), sizeof(f_t), num_cols, fid);
fwrite(A.col_start.data(), sizeof(i_t), A.col_start.size(), fid);
fwrite(A.i.data(), sizeof(i_t), A.i.size(), fid);
fwrite(A.x.data(), sizeof(f_t), A.x.size(), fid);
fclose(fid);
}
}

// Deserializes an LP problem previously written by write_problem.
// Best-effort debug helper: silently returns if the file cannot be opened.
// Every fread result is now checked; on a truncated or corrupt file the
// load stops at the first failed read instead of silently filling the
// vectors with uninitialized garbage. Note the object may still be left
// partially populated on failure, matching the original best-effort
// contract.
void read_problem(const std::string& path)
{
  FILE* fid = fopen(path.c_str(), "r");
  if (fid == nullptr) { return; }
  // Reads `count` items of `size` bytes; true iff all items were read.
  auto read_all = [fid](void* dst, size_t size, size_t count) {
    return fread(dst, size, count, fid) == count;
  };
  do {
    // Header: dimensions, objective constant/scale, integrality flag.
    if (!read_all(&num_rows, sizeof(i_t), 1)) break;
    if (!read_all(&num_cols, sizeof(i_t), 1)) break;
    if (!read_all(&obj_constant, sizeof(f_t), 1)) break;
    if (!read_all(&obj_scale, sizeof(f_t), 1)) break;
    i_t is_integral;
    if (!read_all(&is_integral, sizeof(i_t), 1)) break;
    // Guard against nonsense dimensions before resizing.
    if (num_rows < 0 || num_cols < 0) break;
    objective_is_integral = is_integral == 1;
    objective.resize(num_cols);
    if (!read_all(objective.data(), sizeof(f_t), num_cols)) break;
    rhs.resize(num_rows);
    if (!read_all(rhs.data(), sizeof(f_t), num_rows)) break;
    lower.resize(num_cols);
    if (!read_all(lower.data(), sizeof(f_t), num_cols)) break;
    upper.resize(num_cols);
    if (!read_all(upper.data(), sizeof(f_t), num_cols)) break;
    A.n = num_cols;
    A.m = num_rows;
    A.col_start.resize(num_cols + 1);
    if (!read_all(A.col_start.data(), sizeof(i_t), num_cols + 1)) break;
    // The last col_start entry is the nonzero count; validate before use.
    if (A.col_start[num_cols] < 0) break;
    A.i.resize(A.col_start[num_cols]);
    if (!read_all(A.i.data(), sizeof(i_t), A.i.size())) break;
    A.x.resize(A.i.size());
    if (!read_all(A.x.data(), sizeof(f_t), A.x.size())) break;
  } while (false);
  fclose(fid);
}

void write_mps(const std::string& path) const
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Remove this too

Copy link
Copy Markdown
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Do you mean the write_mps function?

Copy link
Copy Markdown
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

│ write_mps (lines 74-127) │ Used behind #ifdef debug flags in branch_and_bound.cpp — not dead, but debug-only │

Copy link
Copy Markdown
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

But there is │ write_problem (lines 53-72) │ Dead — defined but never called anywhere in the codebase │

Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes, this is dead code that we can delete

Copy link
Copy Markdown
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hmm, @chris-maes mentioned this is used for debugging. So I am leaving it as it is, but removing write_problem.

Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@rg20 yes, lp_problem_t::write_mps is useful for debugging inside of branch and bound, since it is not easy to convert an lp_problem_t to an optimization_problem_t and use the other MPS writer.

{
std::ofstream mps_file(path);
Expand Down
12 changes: 6 additions & 6 deletions cpp/src/grpc/server/grpc_server_main.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -189,16 +189,16 @@ int main(int argc, char** argv)

ensure_log_dir_exists();

shm_unlink(SHM_JOB_QUEUE);
shm_unlink(SHM_RESULT_QUEUE);
shm_unlink(SHM_CONTROL);
shm_unlink(SHM_JOB_QUEUE.c_str());
shm_unlink(SHM_RESULT_QUEUE.c_str());
shm_unlink(SHM_CONTROL.c_str());

job_queue = static_cast<JobQueueEntry*>(
create_shared_memory(SHM_JOB_QUEUE, sizeof(JobQueueEntry) * MAX_JOBS));
create_shared_memory(SHM_JOB_QUEUE.c_str(), sizeof(JobQueueEntry) * MAX_JOBS));
result_queue = static_cast<ResultQueueEntry*>(
create_shared_memory(SHM_RESULT_QUEUE, sizeof(ResultQueueEntry) * MAX_RESULTS));
create_shared_memory(SHM_RESULT_QUEUE.c_str(), sizeof(ResultQueueEntry) * MAX_RESULTS));
shm_ctrl = static_cast<SharedMemoryControl*>(
create_shared_memory(SHM_CONTROL, sizeof(SharedMemoryControl)));
create_shared_memory(SHM_CONTROL.c_str(), sizeof(SharedMemoryControl)));
new (shm_ctrl) SharedMemoryControl{};

for (size_t i = 0; i < MAX_JOBS; ++i) {
Expand Down
13 changes: 10 additions & 3 deletions cpp/src/grpc/server/grpc_server_types.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -255,9 +255,16 @@ inline std::map<std::string, ChunkedUploadState> chunked_uploads;
inline std::mutex chunked_downloads_mutex;
inline std::map<std::string, ChunkedDownloadState> chunked_downloads;

// Shared memory names include PID to prevent local users from accessing
// segments belonging to other server instances on the same host.
// Returns `base` with "_<pid>" appended, e.g. "/cuopt_job_queue_12345".
inline std::string make_shm_name(const char* base)
{
  return std::string(base) + "_" + std::to_string(getpid());
}

// POSIX shared-memory segment names used by the server and its workers.
// Evaluated once at load time, so all uses within a process agree.
inline std::string SHM_JOB_QUEUE    = make_shm_name("/cuopt_job_queue");
inline std::string SHM_RESULT_QUEUE = make_shm_name("/cuopt_result_queue");
inline std::string SHM_CONTROL      = make_shm_name("/cuopt_control");

inline const std::string LOG_DIR = "/tmp/cuopt_logs";

Expand Down
6 changes: 3 additions & 3 deletions cpp/src/grpc/server/grpc_worker_infra.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -12,15 +12,15 @@ void cleanup_shared_memory()
{
if (job_queue) {
munmap(job_queue, sizeof(JobQueueEntry) * MAX_JOBS);
shm_unlink(SHM_JOB_QUEUE);
shm_unlink(SHM_JOB_QUEUE.c_str());
}
if (result_queue) {
munmap(result_queue, sizeof(ResultQueueEntry) * MAX_RESULTS);
shm_unlink(SHM_RESULT_QUEUE);
shm_unlink(SHM_RESULT_QUEUE.c_str());
}
if (shm_ctrl) {
munmap(shm_ctrl, sizeof(SharedMemoryControl));
shm_unlink(SHM_CONTROL);
shm_unlink(SHM_CONTROL.c_str());
}
}

Expand Down
123 changes: 65 additions & 58 deletions cpp/src/mip_heuristics/feasibility_jump/feasibility_jump.cu
Original file line number Diff line number Diff line change
Expand Up @@ -706,7 +706,9 @@ void fj_t<i_t, f_t>::run_step_device(const rmm::cuda_stream_view& climber_stream
data.cub_storage_bytes.resize(compaction_temp_storage_bytes, climber_stream);
}

if (use_graph) { cudaStreamBeginCapture(climber_stream, cudaStreamCaptureModeThreadLocal); }
if (use_graph) {
RAFT_CUDA_TRY(cudaStreamBeginCapture(climber_stream, cudaStreamCaptureModeThreadLocal));
}
for (i_t i = 0; i < (use_graph ? iterations_per_graph : 1); ++i) {
{
// related variable array has to be dynamically computed each iteration
Expand All @@ -719,52 +721,52 @@ void fj_t<i_t, f_t>::run_step_device(const rmm::cuda_stream_view& climber_stream
load_balancing_score_update(climber_stream, climber_idx);
} else {
if (is_binary_pb) {
cudaLaunchCooperativeKernel(
RAFT_CUDA_TRY(cudaLaunchCooperativeKernel(
(void*)compute_mtm_moves_kernel<i_t, f_t, MTMMoveType::FJ_MTM_VIOLATED, true>,
grid_resetmoves_bin,
blocks_resetmoves_bin,
reset_moves_args,
0,
climber_stream);
climber_stream));
} else {
cudaLaunchCooperativeKernel(
RAFT_CUDA_TRY(cudaLaunchCooperativeKernel(
(void*)compute_mtm_moves_kernel<i_t, f_t, MTMMoveType::FJ_MTM_VIOLATED, false>,
grid_resetmoves,
blocks_resetmoves,
reset_moves_args,
0,
climber_stream);
climber_stream));
}
}
#if FJ_DEBUG_LOAD_BALANCING
if (use_load_balancing) {
cudaLaunchCooperativeKernel((void*)compute_mtm_moves_kernel<i_t, f_t>,
grid_resetmoves_bin,
blocks_resetmoves_bin,
reset_moves_args,
0,
climber_stream);
cudaLaunchCooperativeKernel((void*)load_balancing_sanity_checks<i_t, f_t>,
512,
128,
kernel_args,
0,
climber_stream);
RAFT_CUDA_TRY(cudaLaunchCooperativeKernel((void*)compute_mtm_moves_kernel<i_t, f_t>,
grid_resetmoves_bin,
blocks_resetmoves_bin,
reset_moves_args,
0,
climber_stream));
RAFT_CUDA_TRY(cudaLaunchCooperativeKernel((void*)load_balancing_sanity_checks<i_t, f_t>,
512,
128,
kernel_args,
0,
climber_stream));
}
#endif

cudaLaunchKernel((void*)update_lift_moves_kernel<i_t, f_t>,
grid_lift_move,
blocks_lift_move,
kernel_args,
0,
climber_stream);
cudaLaunchKernel((void*)update_breakthrough_moves_kernel<i_t, f_t>,
grid_lift_move,
blocks_lift_move,
kernel_args,
0,
climber_stream);
RAFT_CUDA_TRY(cudaLaunchKernel((void*)update_lift_moves_kernel<i_t, f_t>,
grid_lift_move,
blocks_lift_move,
kernel_args,
0,
climber_stream));
RAFT_CUDA_TRY(cudaLaunchKernel((void*)update_breakthrough_moves_kernel<i_t, f_t>,
grid_lift_move,
blocks_lift_move,
kernel_args,
0,
climber_stream));
}

// compaction kernel
Expand All @@ -777,44 +779,49 @@ void fj_t<i_t, f_t>::run_step_device(const rmm::cuda_stream_view& climber_stream
pb_ptr->n_variables,
climber_stream);

cudaLaunchKernel((void*)select_variable_kernel<i_t, f_t>,
dim3(1),
dim3(256),
kernel_args,
0,
climber_stream);

cudaLaunchCooperativeKernel((void*)handle_local_minimum_kernel<i_t, f_t>,
grid_update_weights,
blocks_update_weights,
kernel_args,
0,
climber_stream);
RAFT_CUDA_TRY(cudaLaunchKernel((void*)select_variable_kernel<i_t, f_t>,
dim3(1),
dim3(256),
kernel_args,
0,
climber_stream));

RAFT_CUDA_TRY(cudaLaunchCooperativeKernel((void*)handle_local_minimum_kernel<i_t, f_t>,
grid_update_weights,
blocks_update_weights,
kernel_args,
0,
climber_stream));
raft::copy(data.break_condition.data(), data.temp_break_condition.data(), 1, climber_stream);
cudaLaunchKernel((void*)update_assignment_kernel<i_t, f_t>,
grid_setval,
blocks_setval,
update_assignment_args,
0,
climber_stream);
cudaLaunchKernel((void*)update_changed_constraints_kernel<i_t, f_t>,
1,
blocks_update_changed_constraints,
kernel_args,
0,
climber_stream);
RAFT_CUDA_TRY(cudaLaunchKernel((void*)update_assignment_kernel<i_t, f_t>,
grid_setval,
blocks_setval,
update_assignment_args,
0,
climber_stream));
RAFT_CUDA_TRY(cudaLaunchKernel((void*)update_changed_constraints_kernel<i_t, f_t>,
1,
blocks_update_changed_constraints,
kernel_args,
0,
climber_stream));
}

if (use_graph) {
cudaStreamEndCapture(climber_stream, &graph);
cudaGraphInstantiate(&graph_instance, graph);
RAFT_CUDA_TRY(cudaStreamEndCapture(climber_stream, &graph));
try {
RAFT_CUDA_TRY(cudaGraphInstantiate(&graph_instance, graph));
} catch (...) {
RAFT_CUDA_TRY(cudaGraphDestroy(graph));
throw;
}
RAFT_CHECK_CUDA(climber_stream);
cudaGraphDestroy(graph);
RAFT_CUDA_TRY(cudaGraphDestroy(graph));
graph_created = true;
Comment thread
coderabbitai[bot] marked this conversation as resolved.
}
}

if (use_graph) cudaGraphLaunch(graph_instance, climber_stream);
if (use_graph) RAFT_CUDA_TRY(cudaGraphLaunch(graph_instance, climber_stream));
}

template <typename i_t, typename f_t>
Expand Down
Loading
Loading