forked from modular/modular
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path__init__.py
More file actions
128 lines (124 loc) · 3.48 KB
/
__init__.py
File metadata and controls
128 lines (124 loc) · 3.48 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2025, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Types to interface with ML pipelines such as text/token generation."""
from .architectures import register_all_models
from .core import (
AudioGenerationResponse,
EmbeddingsGenerator,
EmbeddingsResponse,
InputContext,
LogProbabilities,
PipelinesFactory,
PipelineTask,
TextAndVisionContext,
TextContext,
TextGenerationResponse,
TextGenerationStatus,
TextResponse,
TokenGenerator,
TokenGeneratorContext,
TokenGeneratorRequest,
TokenGeneratorRequestFunction,
TokenGeneratorRequestMessage,
TokenGeneratorRequestTool,
TokenGeneratorResponseFormat,
)
from .lib.config import AudioGenerationConfig, PipelineConfig
from .lib.config_enums import (
PipelineEngine,
PipelineRole,
RepoType,
RopeType,
SupportedEncoding,
)
from .lib.embeddings_pipeline import EmbeddingsPipeline
from .lib.hf_utils import (
HuggingFaceFile,
download_weight_files,
repo_exists_with_retry,
)
from .lib.max_config import (
KVCacheConfig,
ProfilingConfig,
SamplingConfig,
)
from .lib.memory_estimation import MEMORY_ESTIMATOR
from .lib.model_config import MAXModelConfig
from .lib.pipeline import (
ModelInputs,
ModelOutputs,
PipelineModel,
TextGenerationPipeline,
upper_bounded_default,
)
from .lib.registry import PIPELINE_REGISTRY, SupportedArchitecture
from .lib.speculative_decoding import SpeculativeDecodingTextGenerationPipeline
from .lib.tokenizer import (
IdentityPipelineTokenizer,
PipelineTokenizer,
PreTrainedPipelineTokenizer,
TextAndVisionTokenizer,
TextTokenizer,
)
# Hydrate the registry: register every bundled model architecture as an
# import-time side effect, so registry lookups work as soon as this
# package is imported.
register_all_models()
# Public API of the package. Kept in case-insensitive alphabetical order so
# additions are easy to place and duplicates easy to spot. Note that
# `register_all_models` is intentionally NOT exported: it is an internal
# import-time hook, not part of the public surface.
__all__ = [
    "AudioGenerationConfig",
    "AudioGenerationResponse",
    "download_weight_files",
    "EmbeddingsGenerator",
    "EmbeddingsPipeline",
    "EmbeddingsResponse",
    "HuggingFaceFile",
    "IdentityPipelineTokenizer",
    "InputContext",
    "KVCacheConfig",
    "LogProbabilities",
    "MAXModelConfig",
    "MEMORY_ESTIMATOR",
    "ModelInputs",
    "ModelOutputs",
    "PIPELINE_REGISTRY",
    "PipelineConfig",
    "PipelineEngine",
    "PipelineModel",
    "PipelineRole",
    "PipelinesFactory",
    "PipelineTask",
    "PipelineTokenizer",
    "PreTrainedPipelineTokenizer",
    "ProfilingConfig",
    "repo_exists_with_retry",
    "RepoType",
    "RopeType",
    "SamplingConfig",
    "SpeculativeDecodingTextGenerationPipeline",
    "SupportedArchitecture",
    "SupportedEncoding",
    "TextAndVisionContext",
    "TextAndVisionTokenizer",
    "TextContext",
    "TextGenerationPipeline",
    "TextGenerationResponse",
    "TextGenerationStatus",
    "TextResponse",
    "TextTokenizer",
    "TokenGenerator",
    "TokenGeneratorContext",
    "TokenGeneratorRequest",
    "TokenGeneratorRequestFunction",
    "TokenGeneratorRequestMessage",
    "TokenGeneratorRequestTool",
    "TokenGeneratorResponseFormat",
    "upper_bounded_default",
]