fixups for GPT4All v3.5.0-rc2 (nomic-ai#3239)
Signed-off-by: Jared Van Bortel <[email protected]>
cebtenzzre authored Dec 9, 2024
1 parent 7628106 commit 70cca3f
Showing 4 changed files with 58 additions and 11 deletions.

gpt4all-chat/CHANGELOG.md: 3 additions & 0 deletions

@@ -9,6 +9,9 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).
 ### Changed
 - Update Italian translation ([#3236](https://github.com/nomic-ai/gpt4all/pull/3236))
 
+### Fixed
+- Fix a few more problems with the Jinja changes ([#3239](https://github.com/nomic-ai/gpt4all/pull/3239))
+
 ## [3.5.0-rc2] - 2024-12-06
 
 ### Changed

gpt4all-chat/qml/ChatItemView.qml: 8 additions & 1 deletion

@@ -599,7 +599,14 @@ GridLayout {
             Layout.fillWidth: false
             name: editingDisabledReason ?? qsTr("Redo")
             source: "qrc:/gpt4all/icons/regenerate.svg"
-            onClicked: redoResponseDialog.open()
+            onClicked: {
+                if (index == chatModel.count - 1) {
+                    // regenerate last message without confirmation
+                    currentChat.regenerateResponse(index);
+                    return;
+                }
+                redoResponseDialog.open();
+            }
         }
 
         ChatMessageButton {

gpt4all-chat/src/chatllm.cpp: 10 additions & 5 deletions

@@ -13,6 +13,7 @@
 #include <jinja2cpp/error_info.h>
 #include <jinja2cpp/template.h>
 #include <jinja2cpp/template_env.h>
+#include <jinja2cpp/user_callable.h>
 #include <jinja2cpp/value.h>
 
 #include <QDataStream>
@@ -70,19 +71,23 @@ static jinja2::TemplateEnv *jinjaEnv()
     settings.lstripBlocks = true;
     env.AddGlobal("raise_exception", jinja2::UserCallable(
         /*callable*/ [](auto &params) -> jinja2::Value {
-            auto &message = params.args.at("message").asString();
-            throw std::runtime_error(fmt::format("Jinja template error: {}", message));
+            auto messageArg = params.args.find("message");
+            if (messageArg == params.args.end() || !messageArg->second.isString())
+                throw std::runtime_error("'message' argument to raise_exception() must be a string");
+            throw std::runtime_error(fmt::format("Jinja template error: {}", messageArg->second.asString()));
         },
         /*argsInfo*/ { jinja2::ArgInfo("message", /*isMandatory*/ true) }
     ));
     env.AddGlobal("strftime_now", jinja2::UserCallable(
         /*callable*/ [](auto &params) -> jinja2::Value {
             using Clock = std::chrono::system_clock;
-            auto &format = params.args.at("format").asString();
+            auto formatArg = params.args.find("format");
+            if (formatArg == params.args.end() || !formatArg->second.isString())
+                throw std::runtime_error("'format' argument to strftime_now() must be a string");
             time_t nowUnix = Clock::to_time_t(Clock::now());
             auto localDate = *std::localtime(&nowUnix);
             std::ostringstream ss;
-            ss << std::put_time(&localDate, format.c_str());
+            ss << std::put_time(&localDate, formatArg->second.asString().c_str());
             return ss.str();
         },
         /*argsInfo*/ { jinja2::ArgInfo("format", /*isMandatory*/ true) }
@@ -923,7 +928,7 @@ auto ChatLLM::promptInternal(
         if (auto limit = nCtx - 4; lastMessageLength > limit) {
             throw std::invalid_argument(
                 tr("Your message was too long and could not be processed (%1 > %2). "
-                   "Please try again with something shorter.").arg(lastMessageLength, limit).toUtf8().constData()
+                   "Please try again with something shorter.").arg(lastMessageLength).arg(limit).toUtf8().constData()
             );
         }
     }
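
The hunk above fixes a subtle Qt pitfall rather than a cosmetic one: QString::arg(int, int) is a single-substitution overload whose second parameter is a field width, not a value for %2, so the original code padded %1 with spaces and left "%2" in the message verbatim. A standalone illustration (the numbers are made up):

// argdemo.cpp: why .arg(a, b) is not the same as .arg(a).arg(b)
#include <QDebug>
#include <QString>

int main()
{
    const int lastMessageLength = 4100, limit = 2044;

    // Buggy: arg(int a, int fieldWidth = 0, ...) treats `limit` as a padding
    // width, so "4100" is padded to 2044 characters and "%2" survives.
    qDebug().noquote() << QString("%1 > %2").arg(lastMessageLength, limit);

    // Fixed: chained single-argument calls fill both placeholders.
    qDebug().noquote() << QString("%1 > %2").arg(lastMessageLength).arg(limit);
    // -> "4100 > 2044"
}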

gpt4all-chat/src/modellist.cpp: 37 additions & 5 deletions

@@ -47,8 +47,33 @@ using namespace Qt::Literals::StringLiterals;
 
 #define MODELS_JSON_VERSION "3"
 
 static const QStringList FILENAME_BLACKLIST { u"gpt4all-nomic-embed-text-v1.rmodel"_s };
 
+static const QString RMODEL_CHAT_TEMPLATE = uR"(<chat>
+{%- set loop_messages = messages %}
+{%- for message in loop_messages %}
+    {%- if not message['role'] in ['user', 'assistant', 'system'] %}
+        {{- raise_exception('Unknown role: ' + message['role']) }}
+    {%- endif %}
+    {{- '<' + message['role'] + '>' }}
+    {%- if message['role'] == 'user' %}
+        {%- for source in message.sources %}
+            {%- if loop.first %}
+                {{- '### Context:\n' }}
+            {%- endif %}
+            {{- 'Collection: ' + source.collection + '\n' +
+                'Path: ' + source.path + '\n' +
+                'Excerpt: ' + source.text + '\n\n' }}
+        {%- endfor %}
+    {%- endif %}
+    {%- for attachment in message.prompt_attachments %}
+        {{- attachment.processed_content + '\n\n' }}
+    {%- endfor %}
+    {{- message.content }}
+    {{- '</' + message['role'] + '>' }}
+{%- endfor %}
+</chat>)"_s;
+
 QString ModelInfo::id() const
 {
     return m_id;
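
For reference, given a hypothetical message list containing one user message with a single localdocs source (collection "Work", path "/docs/notes.txt", excerpt "Q3 revenue grew 12%.") and the content "Summarize the notes.", the whitespace-trimming {%- and {{- tags mean the template above should render roughly as:

<chat><user>### Context:
Collection: Work
Path: /docs/notes.txt
Excerpt: Q3 revenue grew 12%.

Summarize the notes.</user>
</chat>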
@@ -1367,6 +1392,7 @@ void ModelList::processModelDirectory(const QString &path)
         // The description is hard-coded into "GPT4All.ini" due to performance issue.
         // If the description goes to be dynamic from its .rmodel file, it will get high I/O usage while using the ModelList.
         data.append({ DescriptionRole, description });
+        data.append({ ChatTemplateRole, RMODEL_CHAT_TEMPLATE });
     }
     updateData(id, data);
 }
@@ -1655,7 +1681,8 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save)
             { ModelList::ParametersRole, "?" },
             { ModelList::QuantRole, "NA" },
             { ModelList::TypeRole, "GPT" },
-            { ModelList::UrlRole, "https://api.openai.com/v1/chat/completions"},
+            { ModelList::UrlRole, "https://api.openai.com/v1/chat/completions" },
+            { ModelList::ChatTemplateRole, RMODEL_CHAT_TEMPLATE },
         };
         updateData(id, data);
     }
@@ -1683,7 +1710,8 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save)
             { ModelList::ParametersRole, "?" },
             { ModelList::QuantRole, "NA" },
             { ModelList::TypeRole, "GPT" },
-            { ModelList::UrlRole, "https://api.openai.com/v1/chat/completions"},
+            { ModelList::UrlRole, "https://api.openai.com/v1/chat/completions" },
+            { ModelList::ChatTemplateRole, RMODEL_CHAT_TEMPLATE },
         };
         updateData(id, data);
     }
@@ -1714,7 +1742,8 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save)
             { ModelList::ParametersRole, "?" },
             { ModelList::QuantRole, "NA" },
             { ModelList::TypeRole, "Mistral" },
-            { ModelList::UrlRole, "https://api.mistral.ai/v1/chat/completions"},
+            { ModelList::UrlRole, "https://api.mistral.ai/v1/chat/completions" },
+            { ModelList::ChatTemplateRole, RMODEL_CHAT_TEMPLATE },
         };
         updateData(id, data);
     }
@@ -1739,7 +1768,8 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save)
             { ModelList::ParametersRole, "?" },
             { ModelList::QuantRole, "NA" },
             { ModelList::TypeRole, "Mistral" },
-            { ModelList::UrlRole, "https://api.mistral.ai/v1/chat/completions"},
+            { ModelList::UrlRole, "https://api.mistral.ai/v1/chat/completions" },
+            { ModelList::ChatTemplateRole, RMODEL_CHAT_TEMPLATE },
         };
         updateData(id, data);
     }
Expand All @@ -1765,7 +1795,8 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save)
{ ModelList::ParametersRole, "?" },
{ ModelList::QuantRole, "NA" },
{ ModelList::TypeRole, "Mistral" },
{ ModelList::UrlRole, "https://api.mistral.ai/v1/chat/completions"},
{ ModelList::UrlRole, "https://api.mistral.ai/v1/chat/completions" },
{ ModelList::ChatTemplateRole, RMODEL_CHAT_TEMPLATE },
};
updateData(id, data);
}
@@ -1794,6 +1825,7 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save)
             { ModelList::ParametersRole, "?" },
             { ModelList::QuantRole, "NA" },
             { ModelList::TypeRole, "NA" },
+            { ModelList::ChatTemplateRole, RMODEL_CHAT_TEMPLATE },
         };
         updateData(id, data);
     }