-
Notifications
You must be signed in to change notification settings - Fork 251
Expand file tree
/
Copy pathgraph_cli_parser.cpp
More file actions
178 lines (163 loc) · 7.91 KB
/
graph_cli_parser.cpp
File metadata and controls
178 lines (163 loc) · 7.91 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
//*****************************************************************************
// Copyright 2025 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "graph_cli_parser.hpp"
#include <algorithm>
#include <iostream>
#include <optional>
#include <stdexcept>
#include <string>
#include <utility>
#include <vector>
#include "../capi_frontend/server_settings.hpp"
#include "../ovms_exit_codes.hpp"
#include "../status.hpp"
#include "graph_queue_cli_options.hpp"
namespace ovms {
// Returns the process-wide default text-generation graph settings.
// Function-local static: constructed once on first use (thread-safe since C++11),
// so every caller observes the same shared defaults object.
TextGenGraphSettingsImpl& GraphCLIParser::defaultGraphSettings() {
    static TextGenGraphSettingsImpl defaults;
    return defaults;
}
// Builds the cxxopts option definitions for the "--pull --task text_generation"
// CLI surface. Called lazily from parse()/printHelp(); defines options only,
// it does not parse anything itself.
void GraphCLIParser::createOptions() {
this->options = std::make_unique<cxxopts::Options>("ovms --pull [PULL OPTIONS ... ]", "--pull --task text_generation graph options");
// Unrecognized flags are kept aside and returned by parse() (via unmatched())
// instead of being treated as hard errors, so other parsers can consume them.
options->allow_unrecognised_options();
// clang-format off
// NOTE(review): boolean-like options below (enable_prefix_caching, dynamic_split_fuse,
// enable_tool_guided_generation) are declared as std::string "true"/"false" rather
// than bool — presumably to pass through to graph config verbatim; confirm downstream.
options->add_options("text generation")
("max_num_seqs",
"The maximum number of sequences that can be processed together. Default 256.",
cxxopts::value<uint32_t>()->default_value("256"),
"MAX_NUM_SEQS")
("pipeline_type",
"Type of the pipeline to be used: Choices LM, LM_CB, VLM, VLM_CB, AUTO. AUTO is used by default.",
cxxopts::value<std::string>(),
"PIPELINE_TYPE")
("enable_prefix_caching",
"This algorithm is used to cache the prompt tokens.",
cxxopts::value<std::string>()->default_value("true"),
"ENABLE_PREFIX_CACHING")
("max_num_batched_tokens",
"empty or integer. The maximum number of tokens that can be batched together.",
cxxopts::value<uint32_t>(),
"MAX_NUM_BATCHED_TOKENS")
("cache_size",
"KV cache size in GB, default is 0 which mean dynamic allocation.",
cxxopts::value<uint32_t>()->default_value("0"),
"CACHE_SIZE")
("draft_source_model",
"HF model name or path to the local folder with PyTorch or OpenVINO draft model.",
cxxopts::value<std::string>(),
"DRAFT_SOURCE_MODEL")
("dynamic_split_fuse",
"Dynamic split fuse algorithm enabled. Default true.",
cxxopts::value<std::string>()->default_value("true"),
"DYNAMIC_SPLIT_FUSE")
("reasoning_parser",
"Reasoning parser",
cxxopts::value<std::string>(),
"REASONING_PARSER")
("tool_parser",
"Tool parser",
cxxopts::value<std::string>(),
"TOOL_PARSER")
("enable_tool_guided_generation",
"Enables enforcing tool schema during generation. Requires setting tool parser. Default: false.",
cxxopts::value<std::string>()->default_value("false"),
"ENABLE_TOOL_GUIDED_GENERATION");
// Shared queue-related options are registered under the same "plugin config" help group.
addGraphQueueOptions(*options, "plugin config");
// Device/plugin-level tuning knobs, consumed in prepare() into exportSettings.pluginConfig.
options->add_options("plugin config")
("max_prompt_len",
"Sets NPU specific property for maximum number of tokens in the prompt.",
cxxopts::value<uint32_t>(),
"MAX_PROMPT_LEN")
("kv_cache_precision",
"u8 or empty (model default). Reduced kv cache precision to u8 lowers the cache size consumption.",
cxxopts::value<std::string>()->default_value(""),
"KV_CACHE_PRECISION")
("model_distribution_policy",
"TENSOR_PARALLEL, PIPELINE_PARALLEL or empty (model default). Sets model distribution policy for inference with multiple sockets/devices.",
cxxopts::value<std::string>(),
"MODEL_DISTRIBUTION_POLICY");
}
void GraphCLIParser::printHelp() {
if (!this->options) {
this->createOptions();
}
std::cout << options->help({"text generation", "plugin config"}) << std::endl;
}
// Parses the given CLI tokens against the graph option definitions and stores
// the cxxopts result in this->result. Returns the tokens cxxopts did not
// recognize, so a later parsing stage can handle them.
std::vector<std::string> GraphCLIParser::parse(const std::vector<std::string>& unmatchedOptions) {
    if (nullptr == this->options) {
        this->createOptions();
    }
    // cxxopts expects an argv-style array; element 0 plays the role of the program name.
    std::vector<const char*> argv;
    argv.reserve(1 + unmatchedOptions.size());
    argv.push_back("ovms graph");
    for (const std::string& option : unmatchedOptions) {
        argv.push_back(option.c_str());
    }
    result = std::make_unique<cxxopts::ParseResult>(options->parse(argv.size(), argv.data()));
    return result->unmatched();
}
// Fills hfSettings with the text-generation graph configuration: starts from
// the shared defaults and overlays any values the user supplied on the CLI
// (available via this->result after parse()).
//
// @param serverMode  current server mode; only pull modes may run without a
//                    prior parse() call (pure-defaults path).
// @param hfSettings  in/out settings aggregate; modelName, pluginConfig and
//                    graphSettings are written here.
// @param modelName   explicit model name override; when empty, the pull
//                    source model name is used instead.
// @throws std::logic_error when called without a parse result outside of the
//                    pull modes.
// NOTE(review): removed a no-op self-assignment of
// hfSettings.exportSettings.targetDevice that was present here — it had no
// effect; confirm no different source value was intended.
void GraphCLIParser::prepare(OvmsServerMode serverMode, HFSettingsImpl& hfSettings, const std::string& modelName) {
    TextGenGraphSettingsImpl graphSettings = GraphCLIParser::defaultGraphSettings();
    // Deduce model name: an explicit name wins, otherwise fall back to the pull source.
    if (!modelName.empty()) {
        hfSettings.exportSettings.modelName = modelName;
    } else {
        hfSettings.exportSettings.modelName = hfSettings.sourceModel;
    }
    if (nullptr == result) {
        // Pull with default arguments - no arguments from user
        if (serverMode != HF_PULL_MODE && serverMode != HF_PULL_AND_START_MODE) {
            throw std::logic_error("Tried to prepare server and model settings without graph parse result");
        }
    } else {
        const cxxopts::ParseResult& args = *result;
        // Options with defaults are read unconditionally; optional ones are
        // guarded by count() so absent flags keep the defaults above.
        graphSettings.maxNumSeqs = args["max_num_seqs"].as<uint32_t>();
        graphSettings.enablePrefixCaching = args["enable_prefix_caching"].as<std::string>();
        // NPU expresses prefix caching through a dedicated plugin flag.
        if (graphSettings.enablePrefixCaching == "true" && hfSettings.exportSettings.targetDevice == "NPU") {
            hfSettings.exportSettings.pluginConfig.useNpuPrefixCaching = true;
        }
        graphSettings.cacheSize = args["cache_size"].as<uint32_t>();
        graphSettings.dynamicSplitFuse = args["dynamic_split_fuse"].as<std::string>();
        if (args.count("draft_source_model")) {
            graphSettings.draftModelDirName = args["draft_source_model"].as<std::string>();
        }
        if (args.count("pipeline_type")) {
            graphSettings.pipelineType = args["pipeline_type"].as<std::string>();
        }
        if (args.count("max_num_batched_tokens")) {
            graphSettings.maxNumBatchedTokens = args["max_num_batched_tokens"].as<uint32_t>();
        }
        if (args.count("reasoning_parser")) {
            graphSettings.reasoningParser = args["reasoning_parser"].as<std::string>();
        }
        if (args.count("tool_parser")) {
            graphSettings.toolParser = args["tool_parser"].as<std::string>();
        }
        graphSettings.enableToolGuidedGeneration = args["enable_tool_guided_generation"].as<std::string>();
        // Plugin configuration
        if (args.count("max_prompt_len")) {
            hfSettings.exportSettings.pluginConfig.maxPromptLength = args["max_prompt_len"].as<uint32_t>();
        }
        if (args.count("model_distribution_policy")) {
            hfSettings.exportSettings.pluginConfig.modelDistributionPolicy = args["model_distribution_policy"].as<std::string>();
        }
        if (args.count("kv_cache_precision")) {
            hfSettings.exportSettings.pluginConfig.kvCachePrecision = args["kv_cache_precision"].as<std::string>();
        }
        extractGraphQueueOptions(args, hfSettings);
    }
    hfSettings.graphSettings = std::move(graphSettings);
}
} // namespace ovms