
Commit

Fix for issue nomic-ai#2080 where the GUI appears to hang when a chat with a large model is deleted. There is no reason to save the context for a chat that is being deleted.

Signed-off-by: Adam Treat <[email protected]>
manyoso committed Mar 6, 2024
1 parent 4471768 commit 17dee02
Showing 5 changed files with 14 additions and 1 deletion.
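Taken together, the patch threads one flag through three layers: ChatListModel::removeChat() marks the Chat, Chat::markForDeletion() forwards the mark to its ChatLLM, and ChatLLM::unloadModel() consults the flag before saving. Below is a minimal, self-contained sketch of that flow; MiniChat and MiniChatLLM are illustrative stand-ins, not the real Qt classes:

    #include <atomic>
    #include <iostream>

    // Stand-in for ChatLLM: in gpt4all the real object lives on a worker
    // thread, which is why the flag is a std::atomic<bool>.
    struct MiniChatLLM {
        std::atomic<bool> m_markedForDeletion{false};

        void setMarkedForDeletion(bool b) { m_markedForDeletion = b; }

        void saveState() {
            // In the real code this persists the chat's model context, which
            // can take long enough on a large model to look like a GUI hang.
            std::cout << "saving context...\n";
        }

        void unloadModel() {
            // Mirrors the patched ChatLLM::unloadModel(): skip the save when
            // the chat is about to be deleted anyway.
            if (!m_markedForDeletion)
                saveState();
        }
    };

    // Stand-in for Chat, forwarding the mark as Chat::markForDeletion() does.
    struct MiniChat {
        MiniChatLLM m_llmodel;
        void markForDeletion() { m_llmodel.setMarkedForDeletion(true); }
        void unloadModel() { m_llmodel.unloadModel(); }
    };

    int main() {
        MiniChat chat;
        chat.markForDeletion(); // ChatListModel::removeChat() now does this first
        chat.unloadModel();     // saveState() is skipped; deletion stays responsive
    }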
5 changes: 5 additions & 0 deletions gpt4all-chat/chat.cpp
@@ -286,6 +286,11 @@ void Chat::unloadAndDeleteLater()
     unloadModel();
 }
 
+void Chat::markForDeletion()
+{
+    m_llmodel->setMarkedForDeletion(true);
+}
+
 void Chat::unloadModel()
 {
     stopGenerating();
1 change: 1 addition & 0 deletions gpt4all-chat/chat.h
@@ -84,6 +84,7 @@ class Chat : public QObject
     Q_INVOKABLE void forceReloadModel();
     Q_INVOKABLE void trySwitchContextOfLoadedModel();
     void unloadAndDeleteLater();
+    void markForDeletion();
 
     qint64 creationDate() const { return m_creationDate; }
     bool serialize(QDataStream &stream, int version) const;
2 changes: 2 additions & 0 deletions gpt4all-chat/chatlistmodel.h
@@ -143,6 +143,8 @@ class ChatListModel : public QAbstractListModel
             m_newChat = nullptr;
         }
 
+        chat->markForDeletion();
+
         const int index = m_chats.indexOf(chat);
         if (m_chats.count() < 3 /*m_serverChat included*/) {
             addChat();
5 changes: 4 additions & 1 deletion gpt4all-chat/chatllm.cpp
@@ -64,6 +64,7 @@ ChatLLM::ChatLLM(Chat *parent, bool isServer)
     , m_isRecalc(false)
     , m_shouldBeLoaded(false)
     , m_forceUnloadModel(false)
+    , m_markedForDeletion(false)
     , m_shouldTrySwitchContext(false)
     , m_stopGenerating(false)
     , m_timer(nullptr)
@@ -690,7 +691,9 @@ void ChatLLM::unloadModel()
     else
         emit modelLoadingPercentageChanged(std::numeric_limits<float>::min()); // small non-zero positive value
 
-    saveState();
+    if (!m_markedForDeletion)
+        saveState();
+
 #if defined(DEBUG_MODEL_LOADING)
     qDebug() << "unloadModel" << m_llmThread.objectName() << m_llModelInfo.model;
 #endif
2 changes: 2 additions & 0 deletions gpt4all-chat/chatllm.h
@@ -84,6 +84,7 @@ class ChatLLM : public QObject
     void setShouldBeLoaded(bool b);
     void setShouldTrySwitchContext(bool b);
     void setForceUnloadModel(bool b) { m_forceUnloadModel = b; }
+    void setMarkedForDeletion(bool b) { m_markedForDeletion = b; }
 
     QString response() const;
 
@@ -177,6 +178,7 @@ public Q_SLOTS:
     std::atomic<bool> m_shouldTrySwitchContext;
     std::atomic<bool> m_isRecalc;
     std::atomic<bool> m_forceUnloadModel;
+    std::atomic<bool> m_markedForDeletion;
     bool m_isServer;
     bool m_forceMetal;
     bool m_reloadingToChangeVariant;
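The new member mirrors its neighbors (m_forceUnloadModel and friends) in being a std::atomic<bool>, presumably because the setter is invoked from the GUI thread while unloadModel() runs on the ChatLLM worker thread, so the write must be visible across threads without a data race. A toy illustration of that pattern (not gpt4all code):

    #include <atomic>
    #include <iostream>
    #include <thread>

    int main() {
        std::atomic<bool> markedForDeletion{false};

        // Worker thread: observes the flag before doing the expensive work,
        // loosely mirroring how ChatLLM checks it inside unloadModel().
        std::thread worker([&] {
            while (!markedForDeletion.load())
                std::this_thread::yield();
            std::cout << "marked for deletion; skipping expensive save\n";
        });

        markedForDeletion.store(true); // "GUI thread" flags the chat for deletion
        worker.join();
    }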
