
refactor codebase to leverage C++17, enhance cmakelists, and improve code readability #2091


Closed
wants to merge 9 commits
3 changes: 2 additions & 1 deletion engine/CMakeLists.txt
@@ -174,6 +174,7 @@ file(APPEND "${CMAKE_CURRENT_BINARY_DIR}/cortex_openapi.h"

add_executable(${TARGET_NAME} main.cc
${CMAKE_CURRENT_SOURCE_DIR}/utils/cpuid/cpu_info.cc
${CMAKE_CURRENT_SOURCE_DIR}/utils/hardware/gguf/ggml.cc
${CMAKE_CURRENT_SOURCE_DIR}/utils/file_logger.cc

${CMAKE_CURRENT_SOURCE_DIR}/extensions/template_renderer.cc
@@ -204,7 +205,7 @@ if(CMAKE_CXX_STANDARD LESS 17)
find_package(Boost 1.61.0 REQUIRED)
target_include_directories(${TARGET_NAME} PRIVATE ${Boost_INCLUDE_DIRS})
else()
message(STATUS "use c++17")
message(STATUS "use c++${CMAKE_CXX_STANDARD}")
Contributor
This change is not needed.

endif()

aux_source_directory(controllers CTL_SRC)
1 change: 1 addition & 0 deletions engine/cli/CMakeLists.txt
@@ -61,6 +61,7 @@ find_package(lfreist-hwinfo CONFIG REQUIRED)

add_executable(${TARGET_NAME} main.cc
${CMAKE_CURRENT_SOURCE_DIR}/../utils/cpuid/cpu_info.cc
${CMAKE_CURRENT_SOURCE_DIR}/../utils/hardware/gguf/ggml.cc
${CMAKE_CURRENT_SOURCE_DIR}/../utils/normalize_engine.cc
${CMAKE_CURRENT_SOURCE_DIR}/../utils/file_logger.cc
${CMAKE_CURRENT_SOURCE_DIR}/../utils/dylib_path_manager.cc
8 changes: 6 additions & 2 deletions engine/cli/command_line_parser.cc
@@ -53,7 +53,7 @@ CommandLineParser::CommandLineParser()
engine_service_{std::make_shared<EngineService>(
download_service_, dylib_path_manager_, db_service_)} {}

bool CommandLineParser::SetupCommand(int argc, char** argv) {
bool CommandLineParser::SetupCommand() {
app_.usage("Usage:\n" + commands::GetCortexBinary() +
" [options] [subcommand]");
cml_data_.config = file_manager_utils::GetCortexConfig();
@@ -90,6 +90,10 @@ bool CommandLineParser::SetupCommand(int argc, char** argv) {
};
app_.add_flag_function("-v,--version", cb, "Get Cortex version");

return true;
}

bool CommandLineParser::runCommand(int argc, char** argv) {
CLI11_PARSE(app_, argc, argv);
if (argc == 1) {
CLI_LOG(app_.help());
@@ -138,7 +142,7 @@ bool CommandLineParser::SetupCommand(int argc, char** argv) {
void CommandLineParser::SetupCommonCommands() {
auto model_pull_cmd = app_.add_subcommand(
"pull",
"Download models by HuggingFace Repo/ModelID"
"Download models by HuggingFace Repo/ModelID\n"
"See built-in models: https://huggingface.co/cortexso");
model_pull_cmd->group(kCommonCommandsGroup);
model_pull_cmd->usage("Usage:\n" + commands::GetCortexBinary() +
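For context on the split above: `SetupCommand()` now only registers flags and subcommands, while the new `runCommand(argc, argv)` performs the actual parse. A minimal sketch of how a caller could wire the two together (this `main()` is illustrative; the PR's real main.cc is not shown in this diff, and the include path is an assumption):

```cpp
// Illustrative caller for the split API; include path and error handling are
// assumptions. Only SetupCommand()/runCommand() come from the header in this PR.
#include "command_line_parser.h"

int main(int argc, char** argv) {
  CommandLineParser parser;
  if (!parser.SetupCommand()) {  // register flags and subcommands once
    return 1;
  }
  return parser.runCommand(argc, argv) ? 0 : 1;  // parse argv and dispatch
}
```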
5 changes: 3 additions & 2 deletions engine/cli/command_line_parser.h
@@ -10,7 +10,8 @@
class CommandLineParser {
public:
CommandLineParser();
bool SetupCommand(int argc, char** argv);
bool SetupCommand();
bool runCommand(int argc, char** argv);

private:
void SetupCommonCommands();
@@ -63,6 +64,6 @@ class CommandLineParser {
CmlData cml_data_;
std::unordered_map<std::string, std::string> config_update_opts_;
bool executed_ = false;
commands::HarwareOptions hw_opts_;
commands::HardwareQueryFlags hw_opts_;
std::unordered_map<std::string, std::string> run_settings_;
};
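The `HarwareOptions` to `HardwareQueryFlags` rename suggests a plain flag struct. Its definition is not part of this diff, but the fields read in hardware_list_cmd.cc imply a shape like the following sketch (field names taken from that file; defaults are assumed):

```cpp
// Hypothetical reconstruction of the renamed struct, based only on the
// show_* members used in hardware_list_cmd.cc; the real definition may differ.
namespace commands {
struct HardwareQueryFlags {
  bool show_cpu = false;
  bool show_os = false;
  bool show_ram = false;
  bool show_gpu = false;
  bool show_storage = false;
  bool show_power = false;
};
}  // namespace commands
```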
254 changes: 126 additions & 128 deletions engine/cli/commands/hardware_list_cmd.cc
@@ -9,15 +9,16 @@
#include "utils/logging_utils.h"
// clang-format off
#include <tabulate/table.hpp>
#include <numeric>
// clang-format on

namespace commands {
using namespace tabulate;
using Row_t =
std::vector<variant<std::string, const char*, string_view, Table>>;
using Row_t = std::vector<
variant<std::string, const char*, string_view, tabulate::Table>>;

bool HardwareListCmd::Exec(const std::string& host, int port,
const std::optional<HarwareOptions>& ho) {
bool HardwareListCmd::Exec(
const std::string& host, int port,
const std::optional<HardwareQueryFlags>& query_flags) {
// Start server if server is not started yet
if (!commands::IsServerAlive(host, port)) {
CLI_LOG("Starting server ...");
@@ -33,149 +34,146 @@ bool HardwareListCmd::Exec(const std::string& host, int port,
/* .pathParams = */ {"v1", "hardware"},
/* .queries = */ {},
};
auto result = curl_utils::SimpleGetJson(url.ToFullPath());
if (result.has_error()) {
CTL_ERR(result.error());

auto hardware_json_response = curl_utils::SimpleGetJson(url.ToFullPath());
Contributor
Just a variable name change?

if (hardware_json_response.has_error()) {
CTL_ERR(hardware_json_response.error());
return false;
}

if (!ho.has_value() || ho.value().show_cpu) {
// CPU Section
if (!query_flags.has_value() || query_flags.value().show_cpu) {
std::cout << "CPU Information:" << std::endl;
Table table;
std::vector<std::string> column_headers{"#", "Arch", "Cores",
"Model", "Usage", "Instructions"};

Row_t header{column_headers.begin(), column_headers.end()};
table.add_row(header);
table.format().font_color(Color::green);
std::vector<std::string> row = {"1"};
cortex::hw::CPU cpu = cortex::hw::cpu::FromJson(result.value()["cpu"]);
row.emplace_back(cpu.arch);
row.emplace_back(std::to_string(cpu.cores));
row.emplace_back(cpu.model);
row.emplace_back(std::to_string(cpu.usage));
std::string insts;
for (auto const& i : cpu.instructions) {
insts += i + " ";
};
row.emplace_back(insts);
table.add_row({row.begin(), row.end()});
std::cout << table << std::endl;
std::cout << std::endl;
tabulate::Table cpu_table;
cpu_table.add_row(Row_t(CPU_INFO_HEADERS.begin(), CPU_INFO_HEADERS.end()));
cpu_table.format()
.font_style({tabulate::FontStyle::bold})
.font_align(tabulate::FontAlign::center)
.padding_left(1)
.padding_right(1);

cortex::hw::CPU cpu =
cortex::hw::cpu::FromJson(hardware_json_response.value()["cpu"]);
std::vector<std::string> cpu_row = {
"1",
cpu.arch,
std::to_string(cpu.cores),
cpu.model,
std::to_string(cpu.usage),
std::accumulate(cpu.instructions.begin(), cpu.instructions.end(),
std::string{},
[](const std::string& a, const std::string& b) {
return a + (a.empty() ? "" : " ") + b;
})};
cpu_table.add_row(Row_t(cpu_row.begin(), cpu_row.end()));
std::cout << cpu_table << std::endl << std::endl;
}

if (!ho.has_value() || ho.value().show_os) {
// OS Section
if (!query_flags.has_value() || query_flags.value().show_os) {
std::cout << "OS Information:" << std::endl;
Table table;
std::vector<std::string> column_headers{"#", "Version", "Name"};

Row_t header{column_headers.begin(), column_headers.end()};
table.add_row(header);
table.format().font_color(Color::green);
std::vector<std::string> row = {"1"};
cortex::hw::OS os = cortex::hw::os::FromJson(result.value()["os"]);
row.emplace_back(os.version);
row.emplace_back(os.name);
table.add_row({row.begin(), row.end()});
std::cout << table << std::endl;
std::cout << std::endl;
tabulate::Table os_table;
os_table.add_row(Row_t(OS_INFO_HEADERS.begin(), OS_INFO_HEADERS.end()));
os_table.format()
.font_style({tabulate::FontStyle::bold})
.font_align(tabulate::FontAlign::center)
.padding_left(1)
.padding_right(1);

cortex::hw::OS os =
cortex::hw::os::FromJson(hardware_json_response.value()["os"]);
std::vector<std::string> os_row = {"1", os.version, os.name};
os_table.add_row(Row_t(os_row.begin(), os_row.end()));
std::cout << os_table << std::endl << std::endl;
}

if (!ho.has_value() || ho.value().show_ram) {
// RAM Section
if (!query_flags.has_value() || query_flags.value().show_ram) {
std::cout << "RAM Information:" << std::endl;
Table table;
std::vector<std::string> column_headers{"#", "Total (MiB)",
"Available (MiB)"};

Row_t header{column_headers.begin(), column_headers.end()};
table.add_row(header);
table.format().font_color(Color::green);
std::vector<std::string> row = {"1"};
cortex::hw::Memory m = cortex::hw::memory::FromJson(result.value()["ram"]);
row.emplace_back(std::to_string(m.total_MiB));
row.emplace_back(std::to_string(m.available_MiB));
table.add_row({row.begin(), row.end()});
std::cout << table << std::endl;
std::cout << std::endl;
tabulate::Table ram_table;
ram_table.add_row(Row_t(RAM_INFO_HEADERS.begin(), RAM_INFO_HEADERS.end()));
ram_table.format()
.font_style({tabulate::FontStyle::bold})
.font_align(tabulate::FontAlign::center)
.padding_left(1)
.padding_right(1);

cortex::hw::Memory ram =
cortex::hw::memory::FromJson(hardware_json_response.value()["ram"]);
std::vector<std::string> ram_row = {"1", std::to_string(ram.total_MiB),
std::to_string(ram.available_MiB)};
ram_table.add_row(Row_t(ram_row.begin(), ram_row.end()));
std::cout << ram_table << std::endl << std::endl;
}

if (!ho.has_value() || ho.value().show_gpu) {
// GPU Section
if (!query_flags.has_value() || query_flags.value().show_gpu) {
std::cout << "GPU Information:" << std::endl;
Table table;
std::vector<std::string> column_headers{"#",
"GPU ID",
"Name",
"Version",
"Total (MiB)",
"Available (MiB)",
"Driver Version",
"Compute Capability",
"Activated"};

Row_t header{column_headers.begin(), column_headers.end()};
table.add_row(header);
table.format().font_color(Color::green);
int count = 1;
tabulate::Table gpu_table;
gpu_table.add_row(Row_t(GPU_INFO_HEADERS.begin(), GPU_INFO_HEADERS.end()));
gpu_table.format()
.font_style({tabulate::FontStyle::bold})
.font_align(tabulate::FontAlign::center)
.padding_left(1)
.padding_right(1);

std::vector<cortex::hw::GPU> gpus =
cortex::hw::gpu::FromJson(result.value()["gpus"]);
for (auto const& gpu : gpus) {
std::vector<std::string> row = {std::to_string(count)};
row.emplace_back(gpu.id);
row.emplace_back(gpu.name);
row.emplace_back(gpu.version);
row.emplace_back(std::to_string(gpu.total_vram));
row.emplace_back(std::to_string(gpu.free_vram));
row.emplace_back(
std::get<cortex::hw::NvidiaAddInfo>(gpu.add_info).driver_version);
row.emplace_back(
std::get<cortex::hw::NvidiaAddInfo>(gpu.add_info).compute_cap);
row.emplace_back(gpu.is_activated ? "Yes" : "No");
table.add_row({row.begin(), row.end()});
count++;
cortex::hw::gpu::FromJson(hardware_json_response.value()["gpus"]);
int gpu_index = 1;
for (const auto& gpu : gpus) {
std::vector<std::string> gpu_row = {
std::to_string(gpu_index),
gpu.id,
gpu.name,
gpu.version,
std::to_string(gpu.total_vram),
std::to_string(gpu.free_vram),
std::get<cortex::hw::NvidiaAddInfo>(gpu.add_info).driver_version,
std::get<cortex::hw::NvidiaAddInfo>(gpu.add_info).compute_cap,
gpu.is_activated ? "Yes" : "No"};
gpu_table.add_row(Row_t(gpu_row.begin(), gpu_row.end()));
gpu_index++;
}

std::cout << table << std::endl;
std::cout << std::endl;
std::cout << gpu_table << std::endl << std::endl;
}

if (!ho.has_value() || ho.value().show_storage) {

// Storage Section
if (!query_flags.has_value() || query_flags.value().show_storage) {
std::cout << "Storage Information:" << std::endl;
Table table;
std::vector<std::string> column_headers{"#", "Total (GiB)",
"Available (GiB)"};

Row_t header{column_headers.begin(), column_headers.end()};
table.add_row(header);
table.format().font_color(Color::green);
std::vector<std::string> row = {"1"};
cortex::hw::StorageInfo si =
cortex::hw::storage::FromJson(result.value()["storage"]);
row.emplace_back(std::to_string(si.total));
row.emplace_back(std::to_string(si.available));
table.add_row({row.begin(), row.end()});
std::cout << table << std::endl;
std::cout << std::endl;
tabulate::Table storage_table;
storage_table.add_row(Row_t(STORAGE_INFO_HEADERS.begin(), STORAGE_INFO_HEADERS.end()));
storage_table.format()
.font_style({tabulate::FontStyle::bold})
.font_align(tabulate::FontAlign::center)
.padding_left(1)
.padding_right(1);

cortex::hw::StorageInfo storage = cortex::hw::storage::FromJson(
hardware_json_response.value()["storage"]);
std::vector<std::string> storage_row = {"1", std::to_string(storage.total),
std::to_string(storage.available)};
storage_table.add_row(Row_t(storage_row.begin(), storage_row.end()));
std::cout << storage_table << std::endl << std::endl;
}

if (!ho.has_value() || ho.value().show_power) {

// Power Section
if (!query_flags.has_value() || query_flags.value().show_power) {
std::cout << "Power Information:" << std::endl;
Table table;
std::vector<std::string> column_headers{"#", "Battery Life",
"Charging Status", "Power Saving"};

Row_t header{column_headers.begin(), column_headers.end()};
table.add_row(header);
table.format().font_color(Color::green);
std::vector<std::string> row = {"1"};
cortex::hw::PowerInfo pi =
cortex::hw::power::FromJson(result.value()["power"]);
row.emplace_back(std::to_string(pi.battery_life));
row.emplace_back(pi.charging_status);
row.emplace_back(pi.is_power_saving ? "Yes" : "No");
table.add_row({row.begin(), row.end()});
std::cout << table << std::endl;
std::cout << std::endl;
tabulate::Table power_table;
power_table.add_row(Row_t(POWER_INFO_HEADERS.begin(), POWER_INFO_HEADERS.end()));
power_table.format()
.font_style({tabulate::FontStyle::bold})
.font_align(tabulate::FontAlign::center)
.padding_left(1)
.padding_right(1);

cortex::hw::PowerInfo power =
cortex::hw::power::FromJson(hardware_json_response.value()["power"]);
std::vector<std::string> power_row = {
"1", std::to_string(power.battery_life), power.charging_status,
power.is_power_saving ? "Yes" : "No"};
power_table.add_row(Row_t(power_row.begin(), power_row.end()));
std::cout << power_table << std::endl << std::endl;
}

return true;
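One readability change worth calling out: the manual loop that concatenated CPU instruction-set names is replaced by a `std::accumulate` join (hence the new `<numeric>` include). A self-contained sketch of that join technique, with made-up sample values:

```cpp
// Standalone demo of the accumulate-based join used for the "Instructions"
// column above; the instruction names here are sample data, not real output.
#include <iostream>
#include <numeric>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> instructions{"AVX", "AVX2", "FMA", "SSE4.2"};
  std::string joined = std::accumulate(
      instructions.begin(), instructions.end(), std::string{},
      [](const std::string& acc, const std::string& next) {
        return acc + (acc.empty() ? "" : " ") + next;  // space-separated, no trailing space
      });
  std::cout << joined << std::endl;  // prints: AVX AVX2 FMA SSE4.2
}
```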