@@ -106,12 +106,12 @@ bool llama_model_load(const std::string & fname, llama_model & model, gpt_vocab
     {
         uint32_t magic;
         fin.read((char *) &magic, sizeof(magic));
-        if (magic == 0x67676d6c) {
+        if (magic == FILE_MAGIC_UNVERSIONED) {
             fprintf(stderr, "%s: invalid model file '%s' (too old, regenerate your model files!)\n",
                     __func__, fname.c_str());
             return false;
         }
-        if (magic != 0x67676d66) {
+        if (magic != FILE_MAGIC) {
             fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname.c_str());
             return false;
         }
@@ -119,9 +119,9 @@ bool llama_model_load(const std::string & fname, llama_model & model, gpt_vocab
         uint32_t format_version;
         fin.read((char *) &format_version, sizeof(format_version));
 
-        if (format_version != 1) {
-            fprintf(stderr, "%s: invalid model file '%s' (unsupported format version %" PRIu32 ")\n",
-                    __func__, fname.c_str(), format_version);
+        if (format_version != FILE_VERSION) {
+            fprintf(stderr, "%s: invalid model file '%s' (unsupported format version %" PRIu32 ", expected %d)\n",
+                    __func__, fname.c_str(), format_version, FILE_VERSION);
             return false;
         }
     }
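Note: these hunks only replace the hard-coded magic/version literals with named constants; the definitions of FILE_MAGIC_UNVERSIONED, FILE_MAGIC, and FILE_VERSION are not shown here. Assuming they simply mirror the literals being removed, they would look roughly like the sketch below (the actual names, values, and header placement are whatever the rest of the change defines).

// Sketch only: assumed definitions mirroring the literals removed in the hunks above.
// The hex values encode the ASCII bytes of "ggml" and "ggmf" respectively.
#define FILE_MAGIC_UNVERSIONED 0x67676d6c // magic of pre-versioned model files
#define FILE_MAGIC             0x67676d66 // magic of versioned model files
#define FILE_VERSION           1          // format version the loader accepts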