-
-
Notifications
You must be signed in to change notification settings - Fork 190
Expand file tree
/
Copy pathgetLlamaGpuTypes.ts
More file actions
42 lines (34 loc) · 1.57 KB
/
getLlamaGpuTypes.ts
File metadata and controls
42 lines (34 loc) · 1.57 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
import process from "process";
import {LlamaGpuType} from "../types.js";
import {getGpuTypesToUseForOption} from "./getGpuTypesToUseForOption.js";
import {getPlatform} from "./getPlatform.js";
/**
 * Get the list of GPU types that can be used with `getLlama` on the current machine.
 *
 * When passing `"supported"`, only the GPU types that have the
 * necessary libraries and drivers installed on the current machine will be returned.
 * All of these GPU types have prebuilt binaries for the current platform and architecture.
 *
 * When passing `"allValid"`, all GPU types that are compatible with the current OS and architecture will be returned.
 * Some of these GPU types may not have prebuilt binaries for the current platform and architecture,
 * as some of them are inadvisable for the current machine (like CUDA on an x64 Mac machine).
 */
export async function getLlamaGpuTypes(include: "supported" | "allValid"): Promise<LlamaGpuType[]> {
    const platform = getPlatform();
    const arch = process.arch;

    if (include === "supported") {
        const supportedGpuTypes = new Set(await getGpuTypesToUseForOption("auto"));

        // no Vulkan prebuilt binary yet due to incomplete support for arm64
        if (platform === "win" && arch !== "x64")
            supportedGpuTypes.delete("vulkan");

        return [...supportedGpuTypes];
    }

    // Metal is not properly supported by llama.cpp on x64 Mac machines,
    // so only Apple Silicon Macs get Metal; everything else gets CUDA
    const isAppleSilicon = platform === "mac" && arch === "arm64";

    return [
        isAppleSilicon ? "metal" : "cuda",
        "vulkan",
        false
    ];
}