talk-llama : reject runs without required arguments (#2153)
* Extended talk-llama example to reject runs without required arguments.
Print warning and exit if models are not specified on the command line.
* Update examples/talk-llama/talk-llama.cpp
* Update examples/talk-llama/talk-llama.cpp
---------
Co-authored-by: Georgi Gerganov <[email protected]>
examples/talk-llama/talk-llama.cpp
CHANGED
@@ -288,6 +288,10 @@ int main(int argc, char ** argv) {
     cparams.use_gpu = params.use_gpu;

     struct whisper_context * ctx_wsp = whisper_init_from_file_with_params(params.model_wsp.c_str(), cparams);
+    if (!ctx_wsp) {
+        fprintf(stderr, "No whisper.cpp model specified. Please provide using -mw <modelfile>\n");
+        return 1;
+    }

     // llama init

@@ -301,6 +305,10 @@ int main(int argc, char ** argv) {
     }

     struct llama_model * model_llama = llama_load_model_from_file(params.model_llama.c_str(), lmparams);
+    if (!model_llama) {
+        fprintf(stderr, "No llama.cpp model specified. Please provide using -ml <modelfile>\n");
+        return 1;
+    }

     llama_context_params lcparams = llama_context_default_params();
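The fix follows a simple guard pattern: check the handle returned by each model loader and exit with a non-zero status before any further initialization runs. Below is a minimal, self-contained sketch of the same pattern; load_model here is a hypothetical stand-in for whisper_init_from_file_with_params and llama_load_model_from_file, not part of either library.

    #include <cstdio>

    // Hypothetical loader standing in for the whisper/llama loaders:
    // returns a null pointer when the model file cannot be opened.
    static void * load_model(const char * path) {
        FILE * f = std::fopen(path, "rb");
        if (!f) {
            return nullptr; // missing or unreadable file -> null handle
        }
        std::fclose(f);
        static int dummy;
        return &dummy; // dummy non-null handle, for illustration only
    }

    int main(int argc, char ** argv) {
        const char * model_path = argc > 1 ? argv[1] : "";

        void * ctx = load_model(model_path);
        if (!ctx) {
            // same early-exit pattern as the patch: report and return non-zero
            std::fprintf(stderr, "No model specified. Please provide using -mw <modelfile>\n");
            return 1;
        }

        std::fprintf(stdout, "model loaded: %s\n", model_path);
        return 0;
    }

Exiting immediately after a failed load keeps the later initialization code free of null checks and gives the shell a meaningful exit code instead of a crash further down.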