Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2025-04-16 11:36:08 +00:00)
llama-tts : avoid crashes related to bad model file paths (#12482)
commit ea1518e839
parent 1aa87ee53d
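For context, a hedged reproduction sketch (the flags below follow the llama-tts example README and are an assumption; they may differ by version): pointing the example at model files that do not exist, e.g.

    ./build/bin/llama-tts -m ./missing-outetts.gguf -mv ./missing-wavtokenizer.gguf -p "Hello"

previously let execution continue with null model/context pointers after loading failed, leading to a crash; with this patch the program returns ENOENT instead.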
@@ -571,6 +571,10 @@ int main(int argc, char ** argv) {
     model_ttc = llama_init_ttc.model.get();
     ctx_ttc   = llama_init_ttc.context.get();
 
+    if (model_ttc == nullptr || ctx_ttc == nullptr) {
+        return ENOENT;
+    }
+
     const llama_vocab * vocab = llama_model_get_vocab(model_ttc);
 
     // TODO: refactor in a common struct
@@ -586,6 +590,10 @@ int main(int argc, char ** argv) {
     model_cts = llama_init_cts.model.get();
     ctx_cts   = llama_init_cts.context.get();
 
+    if (model_cts == nullptr || ctx_cts == nullptr) {
+        return ENOENT;
+    }
+
     std::vector<common_sampler *> smpl(n_parallel);
     for (int i = 0; i < n_parallel; ++i) {
         params.sampling.no_perf = (i != 0);
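The guard follows the usual pattern of checking the pointers returned by the loading helpers before using them. Below is a minimal, self-contained sketch of that pattern against the public llama.h C API; it is not code from this commit, and the helper names (llama_model_load_from_file, llama_init_from_model) and their nullptr-on-failure behaviour are assumptions based on a recent llama.cpp.

// Minimal sketch (not part of the commit): bail out with ENOENT instead of
// dereferencing a null model/context when the model path is bad.
#include <cerrno>
#include <cstdio>

#include "llama.h"

int main(int argc, char ** argv) {
    if (argc < 2) {
        fprintf(stderr, "usage: %s <model.gguf>\n", argv[0]);
        return 1;
    }

    llama_backend_init();

    // assumed API: returns nullptr when the file cannot be loaded
    llama_model_params mparams = llama_model_default_params();
    llama_model * model = llama_model_load_from_file(argv[1], mparams);
    if (model == nullptr) {
        fprintf(stderr, "error: failed to load model from '%s'\n", argv[1]);
        llama_backend_free();
        return ENOENT; // same error code the patch returns for a bad model path
    }

    llama_context_params cparams = llama_context_default_params();
    llama_context * ctx = llama_init_from_model(model, cparams);
    if (ctx == nullptr) {
        fprintf(stderr, "error: failed to create context\n");
        llama_model_free(model);
        llama_backend_free();
        return ENOENT;
    }

    // ... a real program would tokenize, decode, etc. here ...

    llama_free(ctx);
    llama_model_free(model);
    llama_backend_free();
    return 0;
}

Returning ENOENT (the POSIX "no such file or directory" errno value) gives the shell a non-zero exit status that hints at the cause, rather than crashing on a null dereference.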