Skip to content

Commit

Permalink
llama : wrap llama_new_context_with_model in try/catch
Browse files Browse the repository at this point in the history
This fixes a crash where ggml_vk_allocate fails in llama_kv_cache_init,
but the exception is never caught.
  • Loading branch information
cebtenzzre committed Sep 26, 2024
1 parent 0d00f77 commit 817aadb
Showing 1 changed file with 13 additions and 1 deletion.
14 changes: 13 additions & 1 deletion src/llama.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -18783,7 +18783,7 @@ void llama_free_model(struct llama_model * model) {
delete model;
}

struct llama_context * llama_new_context_with_model(
static struct llama_context * llama_new_context_with_model_internal(
struct llama_model * model,
struct llama_context_params params) {

Expand Down Expand Up @@ -19179,6 +19179,18 @@ struct llama_context * llama_new_context_with_model(
return ctx;
}

// Public entry point for context creation.
//
// Thin exception barrier around llama_new_context_with_model_internal():
// this is a C-style API, so no C++ exception may escape to the caller.
// Any std::exception raised during initialization (e.g. a failed backend
// allocation inside llama_kv_cache_init) is logged and converted into a
// nullptr return value.
//
// Returns the new context on success, or nullptr on failure.
struct llama_context * llama_new_context_with_model(
        struct llama_model * model,
        struct llama_context_params params) {
    llama_context * ctx = nullptr;
    try {
        ctx = llama_new_context_with_model_internal(model, params);
    } catch (const std::exception & e) {
        // surface the failure in the log instead of crashing the process
        LLAMA_LOG_ERROR("%s: failed to init context: %s\n", __func__, e.what());
    }
    return ctx;
}

// Destroy a context created by llama_new_context_with_model.
// Accepts nullptr as a no-op, mirroring the semantics of `delete`.
void llama_free(struct llama_context * ctx) {
    if (ctx == nullptr) {
        return;
    }
    delete ctx;
}
Expand Down

0 comments on commit 817aadb

Please sign in to comment.