Skip to content

Commit 0a70cc1

Browse files
committed
Merge branch 'master' into concedo_experimental
# Conflicts:
#	.devops/nix/package.nix
#	.github/workflows/docker.yml
#	CMakeLists.txt
2 parents af6f495 + c9b316c commit 0a70cc1

11 files changed

Lines changed: 336 additions & 262 deletions

File tree

.devops/main-intel.Dockerfile

Lines changed: 26 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -0,0 +1,26 @@
1+
ARG ONEAPI_VERSION=2024.0.1-devel-ubuntu22.04
2+
ARG UBUNTU_VERSION=22.04
3+
4+
FROM intel/hpckit:$ONEAPI_VERSION as build
5+
6+
RUN apt-get update && \
7+
apt-get install -y git
8+
9+
WORKDIR /app
10+
11+
COPY . .
12+
13+
# for some reasons, "-DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=Intel10_64lp -DLLAMA_NATIVE=ON" give worse performance
14+
RUN mkdir build && \
15+
cd build && \
16+
cmake .. -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx && \
17+
cmake --build . --config Release --target main server
18+
19+
FROM ubuntu:$UBUNTU_VERSION as runtime
20+
21+
COPY --from=build /app/build/bin/main /main
22+
COPY --from=build /app/build/bin/server /server
23+
24+
ENV LC_ALL=C.utf8
25+
26+
ENTRYPOINT [ "/main" ]

common/common.cpp

Lines changed: 4 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -217,12 +217,10 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) {
217217
}
218218
// store the external file name in params
219219
params.prompt_file = argv[i];
220-
file.seekg(0, std::ios::end);
221-
size_t size = file.tellg();
222-
file.seekg(0, std::ios::beg);
223-
params.prompt.resize(size);
224-
file.read((char *)params.prompt.data(), size);
225-
fprintf(stderr, "Read %zu bytes from binary file %s\n", size, argv[i]);
220+
std::ostringstream ss;
221+
ss << file.rdbuf();
222+
params.prompt = ss.str();
223+
fprintf(stderr, "Read %zu bytes from binary file %s\n", params.prompt.size(), argv[i]);
226224
} else if (arg == "-f" || arg == "--file") {
227225
if (++i >= argc) {
228226
invalid_param = true;

examples/llama.vim

Lines changed: 4 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -6,7 +6,7 @@
66
" Similarly, you could add an insert mode keybind with
77
" inoremap <C-B> <Cmd>call llama#doLlamaGen()<CR>
88
"
9-
" g:llama_api_url and g:llama_overrides can be configured in your .vimrc
9+
" g:llama_api_url, g:llama_api_key and g:llama_overrides can be configured in your .vimrc
1010
" let g:llama_api_url = "192.168.1.10:8080"
1111
" llama_overrides can also be set through buffer/window scopes. For instance
1212
" autocmd filetype python let b:llama_overrides = {"temp": 0.2}
@@ -82,6 +82,9 @@ func llama#doLlamaGen()
8282
endif
8383
let l:querydata.prompt = join(l:buflines, "\n")
8484
let l:curlcommand = copy(s:curlcommand)
85+
if exists("g:llama_api_key")
86+
call extend(l:curlcommand, ['--header', 'Authorization: Bearer ' .. g:llama_api_key])
87+
endif
8588
let l:curlcommand[2] = json_encode(l:querydata)
8689
let b:job = job_start(l:curlcommand, {"callback": function("s:callbackHandler", [l:cbuffer])})
8790
endfunction

examples/llava/clip.cpp

Lines changed: 19 additions & 28 deletions
Original file line number | Diff line number | Diff line change
@@ -2,18 +2,6 @@
22
// so there might be still unnecessary artifacts hanging around
33
// I'll gradually clean and extend it
44

5-
#include <cassert>
6-
#include <cmath>
7-
#include <cstdlib>
8-
#include <cstring>
9-
#include <fstream>
10-
#include <iostream>
11-
#include <map>
12-
#include <regex>
13-
#include <stdexcept>
14-
#include <vector>
15-
#include <sstream>
16-
175
#include "clip.h"
186
#include "ggml.h"
197
#include "ggml-alloc.h"
@@ -30,6 +18,19 @@
3018
#define STB_IMAGE_IMPLEMENTATION
3119
#include "stb_image.h"
3220

21+
#include <cassert>
22+
#include <cmath>
23+
#include <cstdlib>
24+
#include <cstring>
25+
#include <fstream>
26+
#include <iostream>
27+
#include <map>
28+
#include <regex>
29+
#include <stdexcept>
30+
#include <vector>
31+
#include <sstream>
32+
#include <cinttypes>
33+
3334
static std::string format(const char * fmt, ...) {
3435
va_list ap;
3536
va_list ap2;
@@ -217,9 +218,9 @@ static std::string gguf_kv_to_str(const struct gguf_context * ctx_gguf, int i) {
217218

218219
static void print_tensor_info(const ggml_tensor* tensor, const char* prefix = "") {
219220
size_t tensor_size = ggml_nbytes(tensor);
220-
printf("%s: n_dims = %d, name = %s, tensor_size=%zu, shape:[%d, %d, %d, %d], type: %d\n",
221+
printf("%s: n_dims = %d, name = %s, tensor_size=%zu, shape:[%" PRId64 ", %" PRId64 ", %" PRId64 ", %" PRId64 "], type = %s\n",
221222
prefix, ggml_n_dims(tensor), tensor->name, tensor_size,
222-
tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3], tensor->type);
223+
tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3], ggml_type_name(tensor->type));
223224
}
224225

225226
static projector_type clip_projector_type_from_string(const std::string & name) {
@@ -592,7 +593,7 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
592593
mlp_3 = ggml_cont(ctx0, ggml_permute(ctx0, mlp_3, 1, 0, 2, 3));
593594
mlp_3 = ggml_reshape_4d(ctx0, mlp_3, n_patch, n_patch, mlp_3->ne[1], mlp_3->ne[2]);
594595
// stride = 1, padding = 1, bias is nullptr
595-
block_1 = ggml_conv_depthwise_2d(ctx0, model.mm_model_block_1_block_0_0_w, mlp_3, nullptr, 1, 1, 1, 1, 1, 1);
596+
block_1 = ggml_conv_depthwise_2d(ctx0, model.mm_model_block_1_block_0_0_w, mlp_3, 1, 1, 1, 1, 1, 1);
596597

597598
// layer norm
598599
// // block_1 shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1]
@@ -640,7 +641,7 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
640641
// block_2
641642
{
642643
// stride = 2
643-
block_1 = ggml_conv_depthwise_2d(ctx0, model.mm_model_block_2_block_0_0_w, block_1, nullptr, 2, 2, 1, 1, 1, 1);
644+
block_1 = ggml_conv_depthwise_2d(ctx0, model.mm_model_block_2_block_0_0_w, block_1, 2, 2, 1, 1, 1, 1);
644645

645646
// block_1 shape = [1, 2048, 12, 12], ne = [12, 12, 2048, 1]
646647
// layer norm
@@ -741,18 +742,10 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
741742
{
742743
std::map<enum ggml_type, uint32_t> n_type;
743744

744-
uint32_t n_type_max = 0;
745-
enum ggml_type type_max = GGML_TYPE_F32;
746-
747745
for (int i = 0; i < n_tensors; i++) {
748746
enum ggml_type type = gguf_get_tensor_type(ctx, i);
749747

750748
n_type[type]++;
751-
752-
if (n_type_max < n_type[type]) {
753-
n_type_max = n_type[type];
754-
type_max = type;
755-
}
756749
}
757750

758751
printf("%s: Dumping metadata keys/values. Note: KV overrides do not apply in this output.\n", __func__);
@@ -795,14 +788,12 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
795788
size_t tensor_size = ggml_nbytes(cur);
796789
buffer_size += tensor_size;
797790
if (verbosity >= 3) {
798-
printf("%s: tensor[%d]: n_dims = %d, name = %s, tensor_size=%zu, offset=%zu, shape:[%d, %d, %d, %d], type: %d\n", __func__, i,
799-
ggml_n_dims(cur), cur->name, tensor_size, offset, cur->ne[0], cur->ne[1], cur->ne[2], cur->ne[3], type);
791+
printf("%s: tensor[%d]: n_dims = %d, name = %s, tensor_size=%zu, offset=%zu, shape:[%" PRIu64 ", %" PRIu64 ", %" PRIu64 ", %" PRIu64 "], type = %s\n",
792+
__func__, i, ggml_n_dims(cur), cur->name, tensor_size, offset, cur->ne[0], cur->ne[1], cur->ne[2], cur->ne[3], ggml_type_name(type));
800793
}
801794
}
802795
}
803796

804-
805-
806797
buffer_size += n_tensors * 128 /* CLIP PADDING */;
807798

808799
clip_ctx * new_clip = new clip_ctx;

examples/perplexity/perplexity.cpp

Lines changed: 63 additions & 12 deletions
Original file line number | Diff line number | Diff line change
@@ -223,13 +223,18 @@ struct kl_divergence_result {
223223
double sum_kld2 = 0;
224224
double sum_nll_diff = 0;
225225
double sum_nll_diff2 = 0;
226+
size_t n_same_top = 0;
226227
size_t count = 0;
227228
};
228229

229-
static void log_softmax(int n_vocab, const float * logits, const uint16_t * base_log_prob, int tok, kl_divergence_result & kld) {
230+
static double log_softmax(int n_vocab, const float * logits, const uint16_t * base_log_prob, int tok, kl_divergence_result & kld) {
230231
float max_logit = logits[0];
232+
int imax = 0;
231233
for (int i = 1; i < n_vocab; ++i) {
232-
max_logit = std::max(max_logit, logits[i]);
234+
if (logits[i] > max_logit) {
235+
max_logit = logits[i];
236+
imax = i;
237+
}
233238
}
234239
double sum_exp = 0.0;
235240
for (int i = 0; i < n_vocab; ++i) {
@@ -248,8 +253,14 @@ static void log_softmax(int n_vocab, const float * logits, const uint16_t * base
248253
kld.sum_nll_diff2 += nll*nll;
249254
max_logit += log_sum_exp;
250255
double sum = 0;
256+
int imax_base = -1;
257+
float p_log_base_max = 0;
251258
for (int i = 0; i < n_vocab; ++i) {
252259
const float p_log_base = scale*base_log_prob[i] + min_log_prob;
260+
if (i == 0 || p_log_base > p_log_base_max) {
261+
p_log_base_max = p_log_base;
262+
imax_base = i;
263+
}
253264
if (p_log_base > -16.f) {
254265
const float p_base = expf(p_log_base);
255266
sum += p_base * (p_log_base - logits[i] + max_logit);
@@ -258,14 +269,17 @@ static void log_softmax(int n_vocab, const float * logits, const uint16_t * base
258269
kld.sum_kld += sum;
259270
kld.sum_kld2 += sum*sum;
260271
++kld.count;
272+
if (imax == imax_base) ++kld.n_same_top;
273+
return sum;
261274
}
262275

263276
static void process_logits(int n_vocab, const float * logits, const int * tokens, int n_token,
264-
std::vector<std::thread> & workers, const std::vector<uint16_t> & base_log_probs, kl_divergence_result & kld) {
277+
std::vector<std::thread> & workers, const std::vector<uint16_t> & base_log_probs, kl_divergence_result & kld,
278+
float * kld_values) {
265279
std::mutex mutex;
266280
const int nv = 2*((n_vocab + 1)/2) + 4;
267281
int counter = 0;
268-
auto compute = [&mutex, &counter, &base_log_probs, &kld, n_vocab, logits, tokens, n_token, nv] () {
282+
auto compute = [&mutex, &counter, &base_log_probs, &kld, n_vocab, logits, tokens, n_token, nv, kld_values] () {
269283
kl_divergence_result local_kld;
270284
while (true) {
271285
std::unique_lock<std::mutex> lock(mutex);
@@ -277,11 +291,13 @@ static void process_logits(int n_vocab, const float * logits, const int * tokens
277291
kld.sum_kld2 += local_kld.sum_kld2;
278292
kld.sum_nll_diff += local_kld.sum_nll_diff;
279293
kld.sum_nll_diff2 += local_kld.sum_nll_diff2;
294+
kld.n_same_top += local_kld.n_same_top;
280295
kld.count += local_kld.count;
281296
break;
282297
}
283298
lock.unlock();
284-
log_softmax(n_vocab, logits + i*n_vocab, base_log_probs.data() + i*nv, tokens[i+1], local_kld);
299+
double v = log_softmax(n_vocab, logits + i*n_vocab, base_log_probs.data() + i*nv, tokens[i+1], local_kld);
300+
kld_values[i] = (float)v;
285301
}
286302
};
287303
for (auto & w : workers) {
@@ -1203,11 +1219,11 @@ static void winogrande_score(llama_context * ctx, const gpt_params & params) {
12031219
printf("Final Winogrande score(%d tasks): %.4lf +/- %.4lf\n", n_done, 100*p, sigma);
12041220
}
12051221

1206-
static bool deserialize_string(std::istream& in, std::string& str) {
1222+
static bool deserialize_string(std::istream & in, std::string & str) {
12071223
uint32_t size;
12081224
if (!in.read((char *)&size, sizeof(size)).fail()) {
12091225
str.resize(size);
1210-
if (!in.read((char *)str.data(), size).fail()) return true;
1226+
if (!in.read((char *)&str[0], size).fail()) return true;
12111227
}
12121228
return false;
12131229
}
@@ -1616,7 +1632,7 @@ static void kl_divergence(llama_context * ctx, const gpt_params & params) {
16161632
in.read((char *)&n_vocab, sizeof(n_vocab));
16171633
in.read((char *)&n_chunk, sizeof(n_chunk));
16181634
if (in.fail()) {
1619-
fprintf(stderr, "%s: failed rwading n_vocab, n_chunk from %s\n", __func__, params.logits_file.c_str());
1635+
fprintf(stderr, "%s: failed reading n_vocab, n_chunk from %s\n", __func__, params.logits_file.c_str());
16201636
return;
16211637
}
16221638
if (n_vocab != llama_n_vocab(llama_get_model(ctx))) {
@@ -1635,6 +1651,7 @@ static void kl_divergence(llama_context * ctx, const gpt_params & params) {
16351651
const bool add_bos = llama_should_add_bos_token(llama_get_model(ctx));
16361652

16371653
std::vector<uint16_t> log_probs_uint16(size_t(n_ctx - 1 - n_ctx/2) * nv);
1654+
std::vector<float> kld_values(size_t(n_ctx - 1 - n_ctx/2)*n_chunk);
16381655
std::vector<float> logits;
16391656
if (num_batches > 1) {
16401657
logits.reserve(n_ctx * n_vocab);
@@ -1653,6 +1670,7 @@ static void kl_divergence(llama_context * ctx, const gpt_params & params) {
16531670
};
16541671

16551672
kl_divergence_result kld;
1673+
auto kld_ptr = kld_values.data();
16561674

16571675
for (int i = 0; i < n_chunk; ++i) {
16581676
const int start = i * n_ctx;
@@ -1706,27 +1724,60 @@ static void kl_divergence(llama_context * ctx, const gpt_params & params) {
17061724
}
17071725
fprintf(stderr, "%.2f minutes\n", total_seconds / 60.0);
17081726

1709-
printf("\nchunk PPL ln(PPL(Q)/PPL(base)) KL-Divergence\n");
1727+
printf("\nchunk PPL ln(PPL(Q)/PPL(base)) KL-Divergence Same top\n");
17101728
}
17111729

17121730
const int first = n_ctx/2;
17131731
const float * all_logits = num_batches > 1 ? logits.data() : llama_get_logits(ctx);
17141732
process_logits(n_vocab, all_logits + first*n_vocab, tokens.data() + start + first, n_ctx - 1 - first,
1715-
workers, log_probs_uint16, kld);
1733+
workers, log_probs_uint16, kld, kld_ptr);
1734+
kld_ptr += n_ctx - 1 - first;
17161735

17171736
auto ppl = mean_and_uncertainty(kld.sum_nll, kld.sum_nll2, kld.count);
17181737
auto log_ppl_ratio = mean_and_uncertainty(kld.sum_nll_diff, kld.sum_nll_diff2, kld.count);
17191738
auto kl_div = mean_and_uncertainty(kld.sum_kld, kld.sum_kld2, kld.count);
1739+
auto p_top = 1.*kld.n_same_top/kld.count;
1740+
auto d_p_top = sqrt(p_top*(1 - p_top)/(kld.count - 1));
17201741

1721-
printf("%4d %10.4lf %10.5lf ± %10.5f %10.5f ± %10.5lf\n", i+1, exp(ppl.first),
1722-
log_ppl_ratio.first, log_ppl_ratio.second, kl_div.first, kl_div.second);
1742+
printf("%4d %10.4lf %10.5lf ± %10.5f %10.5f ± %10.5lf %.5f ± %.5f\n", i+1, exp(ppl.first),
1743+
log_ppl_ratio.first, log_ppl_ratio.second, kl_div.first, kl_div.second,
1744+
p_top, d_p_top);
17231745

17241746
fflush(stdout);
17251747

17261748
logits.clear();
17271749
}
17281750
printf("\n");
17291751

1752+
if (kld.count < 100) return; // we do not wish to do statistics on so few values
1753+
1754+
std::sort(kld_values.begin(), kld_values.end());
1755+
1756+
printf("===== KL-divergence statistics\n");
1757+
auto kl_div = mean_and_uncertainty(kld.sum_kld, kld.sum_kld2, kld.count);
1758+
printf("Average: %10.6f ±%10.6lf\n", kl_div.first, kl_div.second);
1759+
auto kld_median = kld_values.size()%2 == 0 ? 0.5f*(kld_values[kld_values.size()/2] + kld_values[kld_values.size()/2-1])
1760+
: kld_values[kld_values.size()/2];
1761+
printf("Median : %10.6f\n", kld_median);
1762+
1763+
auto percentile = [&kld_values] (float fraction) {
1764+
if (fraction <= 0) return kld_values.front();
1765+
if (fraction >= 1) return kld_values.back();
1766+
float p = fraction*(kld_values.size() - 1);
1767+
size_t ip = size_t(p); p -= ip;
1768+
return (1 - p)*kld_values[ip] + p*kld_values[std::min(ip+1, kld_values.size()-1)];
1769+
};
1770+
1771+
printf("Maximum: %10.6f\n", kld_values.back());
1772+
printf("KLD_99 : %10.6f\n", percentile(0.99f));
1773+
printf("KLD_95 : %10.6f\n", percentile(0.95f));
1774+
printf("KLD_90 : %10.6f\n", percentile(0.90f));
1775+
1776+
printf("Minimum: %10.6f\n", kld_values.front());
1777+
printf("KLD_01 : %10.6f\n", percentile(0.01f));
1778+
printf("KLD_05 : %10.6f\n", percentile(0.05f));
1779+
printf("KLD_10 : %10.6f\n", percentile(0.10f));
1780+
17301781
}
17311782

17321783
int main(int argc, char ** argv) {

ggml-alloc.c

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -109,8 +109,8 @@ void ggml_tallocr_alloc(ggml_tallocr_t alloc, struct ggml_tensor * tensor) {
109109
if (block->size >= size) {
110110
best_fit_block = alloc->n_free_blocks - 1;
111111
} else {
112-
fprintf(stderr, "%s: not enough space in the buffer (needed %zu, largest block available %zu)\n",
113-
__func__, size, max_avail);
112+
fprintf(stderr, "%s: not enough space in the buffer to allocate %s (needed %zu, largest block available %zu)\n",
113+
__func__, tensor->name, size, max_avail);
114114
GGML_ASSERT(!"not enough space in the buffer");
115115
return;
116116
}

0 commit comments

Comments (0)