SpeakGPT now supports custom fine-tuned models
AndraxDev committed Apr 7, 2023
1 parent 274995c commit 6b54230
Showing 13 changed files with 235 additions and 1,799 deletions.
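The change hinges on recognizing OpenAI fine-tuned model ids, which under the legacy fine-tuning API contain a ":ft-" segment. As a hypothetical illustration of the id format and the substring check this commit adds (organization name and timestamp are made up):

    // A custom fine-tuned model id (hypothetical values).
    val model = "ada:ft-acme-corp-2023-04-07-10-15-00"
    val isFineTuned = model.contains(":ft-") // true — such models are routed to the completions endpoint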
18 changes: 7 additions & 11 deletions .idea/sonarlint/issuestore/index.pb


18 changes: 7 additions & 11 deletions .idea/sonarlint/securityhotspotstore/index.pb


3 changes: 2 additions & 1 deletion README.md
@@ -35,6 +35,7 @@ SpeakGPT is an advanced and highly intuitive voice assistant that utilizes the p
- [x] Pay as you go system
- [x] Long trial period (up to 4 months*)
- [x] Tips for newbies
- [x] Custom fine-tuned models are supported

> \* Note
>
@@ -54,14 +55,14 @@ SpeakGPT is an advanced and highly intuitive voice assistant that utilizes the p
- [x] Adaptive design
- [x] DALL-e integration
- [x] A lot of different models
- [x] Add support for custom fine-tuned models

### ❌ Planned to add (Share your ideas in Issues)

- [ ] Device routines (like set alarm or open app)
- [ ] Sync chat history
- [ ] Browsing model so SpeakGPT can provide information from the Internet
- [ ] New experimental model LLaMA 7B
- [ ] Add support for custom fine-tuned models
- [ ] Add a model exchange portal, similar to the prompts store

### You are welcome to:
4 changes: 2 additions & 2 deletions app/build.gradle
@@ -14,8 +14,8 @@ android {
applicationId "org.teslasoft.assistant"
minSdk 28
targetSdk 33
versionCode 206
versionName "2.6"
versionCode 207
versionName "2.7"
externalNativeBuild {
cmake {
cppFlags ''
Binary file modified app/release/app-release.aab
@@ -54,6 +54,14 @@ class Preferences private constructor(private var preferences: SharedPreferences
putString("model", model)
}

fun getMaxTokens() : Int {
return getString("max_tokens", "1500").toInt()
}

fun setMaxTokens(tokens: Int) {
putString("max_tokens", tokens.toString())
}

fun getResolution() : String {
return getString("resolution", "512x512")
}
@@ -70,6 +78,14 @@ class Preferences private constructor(private var preferences: SharedPreferences
putBoolean("silence_mode", mode)
}

fun getEndSeparator() : String {
return getString("end", "")
}

fun setEndSeparator(separator: String) {
putString("end", separator)
}

fun getPrompt() : String {
return getString("prompt", "")
}
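For context, here is a minimal usage sketch of the two new preference accessors, assuming the Preferences.getPreferences(context) accessor used elsewhere in this commit; the token limit and separator values below are only examples, and the Preferences import comes from the app's own package.

    import android.content.Context

    fun configureFineTunedModel(context: Context) {
        val prefs = Preferences.getPreferences(context)

        // Cap completion length and set the end separator the fine-tuned model was trained with.
        prefs.setMaxTokens(1024)
        prefs.setEndSeparator("\n\n###\n\n")

        val maxTokens = prefs.getMaxTokens()        // defaults to 1500 when unset
        val endSeparator = prefs.getEndSeparator()  // defaults to "" when unset
    }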
48 changes: 29 additions & 19 deletions app/src/main/java/org/teslasoft/assistant/ui/ChatActivity.kt
@@ -99,6 +99,7 @@ class ChatActivity : FragmentActivity() {
private var ai: OpenAI? = null
private var key: String? = null
private var model = ""
private var endSeparator = ""

// Init DALL-e
private var resolution = "512x152"
@@ -131,11 +132,11 @@ class ChatActivity : FragmentActivity() {
if (matches != null && matches.size > 0) {
val recognizedText = matches[0]

putMessage(recognizedText, false)
putMessage(recognizedText + endSeparator, false)

chatMessages.add(ChatMessage(
role = ChatRole.User,
content = recognizedText
content = recognizedText + endSeparator
))

saveSettings()
Expand All @@ -145,7 +146,7 @@ class ChatActivity : FragmentActivity() {
progress?.visibility = View.VISIBLE

CoroutineScope(Dispatchers.Main).launch {
generateResponse(recognizedText, true)
generateResponse(recognizedText + endSeparator, true)
}
}
}
@@ -231,6 +232,8 @@ class ChatActivity : FragmentActivity() {
private fun initSettings() {
key = Preferences.getPreferences(this).getApiKey(this)

endSeparator = Preferences.getPreferences(this).getEndSeparator()

loadResolution()

if (key == null) {
@@ -472,7 +475,7 @@ class ChatActivity : FragmentActivity() {

keyboardMode = false

putMessage(message, false)
putMessage(message + endSeparator, false)
saveSettings()

btnMicro?.isEnabled = false
@@ -503,11 +506,11 @@ class ChatActivity : FragmentActivity() {
} else {
chatMessages.add(ChatMessage(
role = ChatRole.User,
content = message
content = message + endSeparator
))

CoroutineScope(Dispatchers.Main).launch {
generateResponse(message, false)
generateResponse(message + endSeparator, false)
}
}
}
@@ -548,11 +551,9 @@ class ChatActivity : FragmentActivity() {
var response = ""

try {
if (model.contains("davinci") || model.contains("curie") || model.contains("babbage") || model.contains("ada")) {
if (model.contains("davinci") || model.contains("curie") || model.contains("babbage") || model.contains("ada") || model.contains(":ft-")) {

val tokens = if (model.contains("text-davinci") || model.contains("code-davinci")) {
2048
} else 1500
val tokens = Preferences.getPreferences(this).getMaxTokens()

val completionRequest = CompletionRequest(
model = ModelId(model),
@@ -573,9 +574,12 @@ class ChatActivity : FragmentActivity() {
}
}
} else {
val tokens = Preferences.getPreferences(this).getMaxTokens()

val chatCompletionRequest = ChatCompletionRequest(
model = ModelId(model),
messages = chatMessages
messages = chatMessages,
maxTokens = tokens
)

val completions: Flow<ChatCompletionChunk> = ai!!.chatCompletions(chatCompletionRequest)
@@ -605,20 +609,24 @@ class ChatActivity : FragmentActivity() {
tts!!.speak(response, TextToSpeech.QUEUE_FLUSH, null,"")
}
} catch (e: Exception) {
if (e.stackTraceToString().contains("does not exist")) {
response += "Looks like this model (${model}) is not available to you right now. It can be because of high demand or this model is currently in limited beta."
response += if (e.stackTraceToString().contains("does not exist")) {
"Looks like this model (${model}) is not available to you right now. It can be because of high demand or this model is currently in limited beta."
} else if (e.stackTraceToString().contains("Connect timeout has expired") || e.stackTraceToString().contains("SocketTimeoutException")) {
response += "Could not connect to OpenAI servers. It may happen when your Internet speed is slow or too many users are using this model at the same time. Try to switch to another model."
"Could not connect to OpenAI servers. It may happen when your Internet speed is slow or too many users are using this model at the same time. Try to switch to another model."
} else if (e.stackTraceToString().contains("This model's maximum")) {
response += "Too many tokens. It is an internal error, please report it. Also try to truncate your input. Sometimes it may help."
"Too many tokens. It is an internal error, please report it. Also try to truncate your input. Sometimes it may help."
} else if (e.stackTraceToString().contains("No address associated with hostname")) {
response += "You are currently offline. Please check your connection and try again."
"You are currently offline. Please check your connection and try again."
} else if (e.stackTraceToString().contains("Incorrect API key")) {
response += "Your API key is incorrect. Change it in Settings > Change OpenAI key. If you think this is an error please check if your API key has not been rotated. If you accidentally published your key it might be automatically revoked."
"Your API key is incorrect. Change it in Settings > Change OpenAI key. If you think this is an error please check if your API key has not been rotated. If you accidentally published your key it might be automatically revoked."
} else if (e.stackTraceToString().contains("you must provide a model")) {
"No valid model is set in settings. Please change the model and try again."
} else if (e.stackTraceToString().contains("Software caused connection abort")) {
response += "\n\n[error] An error occurred while generating response. It may be due to a weak connection or high demand. Try to switch to another model or try again later."
"\n\n[error] An error occurred while generating response. It may be due to a weak connection or high demand. Try to switch to another model or try again later."
} else if (e.stackTraceToString().contains("You exceeded your current quota")) {
"You exceeded your current quota. If you had free trial usage please add payment info. Also please check your usage limits. You can change your limits in Account settings."
} else {
response += e.stackTraceToString()
e.stackTraceToString()
}

messages[messages.size - 1]["message"] = "${response}\n"
@@ -662,6 +670,8 @@ class ChatActivity : FragmentActivity() {
putMessage("Your API key is incorrect. Change it in Settings > Change OpenAI key. If you think this is an error please check if your API key has not been rotated. If you accidentally published your key it might be automatically revoked.", true);
} else if (e.stackTraceToString().contains("Software caused connection abort")) {
putMessage("An error occurred while generating response. It may be due to a weak connection or high demand. Try again later.", true);
} else if (e.stackTraceToString().contains("You exceeded your current quota")) {
putMessage("You exceeded your current quota. If you had free trial usage please add payment info. Also please check your usage limits. You can change your limits in Account settings.", true)
} else {
putMessage(e.stackTraceToString(), true)
}
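Condensed from the ChatActivity changes above, this is a sketch of the request routing the commit settles on: model ids containing "davinci", "curie", "babbage", "ada", or ":ft-" (custom fine-tuned models) go through the completions endpoint, everything else through chat completions, and both now use the user-configured token limit. The client types are from the com.aallam.openai library already used in the diff; the function itself and its parameters are illustrative, not the actual method.

    import com.aallam.openai.api.chat.ChatCompletionRequest
    import com.aallam.openai.api.chat.ChatMessage
    import com.aallam.openai.api.completion.CompletionRequest
    import com.aallam.openai.api.model.ModelId
    import com.aallam.openai.client.OpenAI
    import kotlinx.coroutines.flow.collect

    suspend fun generate(ai: OpenAI, model: String, prompt: String, history: List<ChatMessage>, maxTokens: Int): String {
        // Base models and fine-tuned models (":ft-") use the completions endpoint.
        val usesCompletionsApi = listOf("davinci", "curie", "babbage", "ada", ":ft-").any { model.contains(it) }
        val response = StringBuilder()

        if (usesCompletionsApi) {
            // Raw prompt, with the end separator already appended by the caller.
            val request = CompletionRequest(model = ModelId(model), prompt = prompt, maxTokens = maxTokens, echo = false)
            ai.completions(request).collect { chunk -> response.append(chunk.choices.firstOrNull()?.text ?: "") }
        } else {
            val request = ChatCompletionRequest(model = ModelId(model), messages = history, maxTokens = maxTokens)
            ai.chatCompletions(request).collect { chunk -> response.append(chunk.choices.firstOrNull()?.delta?.content ?: "") }
        }

        return response.toString()
    }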
13 changes: 12 additions & 1 deletion app/src/main/java/org/teslasoft/assistant/ui/SettingsActivity.kt
@@ -62,11 +62,22 @@ class SettingsActivity : FragmentActivity() {
private var activationPrompt : String = ""

private var modelChangedListener: ModelDialogFragment.StateChangesListener = object : ModelDialogFragment.StateChangesListener {
override fun onSelected(name: String) {
override fun onSelected(name: String, maxTokens: String, endSeparator: String) {
model = name
preferences?.setModel(name)
preferences?.setMaxTokens(maxTokens.toInt())
preferences?.setEndSeparator(endSeparator)
modelDesc?.text = model
}

override fun onFormError(name: String, maxTokens: String, endSeparator: String) {
if (name == "") Toast.makeText(this@SettingsActivity, "Error, no model name is provided", Toast.LENGTH_SHORT).show()
else if (name.contains("gpt-4")) Toast.makeText(this@SettingsActivity, "Error, GPT-4 supports a maximum of 8192 tokens", Toast.LENGTH_SHORT).show()
else Toast.makeText(this@SettingsActivity, "Error, more than 2048 tokens is not supported", Toast.LENGTH_SHORT).show()
val modelDialogFragment: ModelDialogFragment = ModelDialogFragment.newInstance(model)
modelDialogFragment.setStateChangedListener(this)
modelDialogFragment.show(supportFragmentManager.beginTransaction(), "ModelDialog")
}
}

private var promptChangedListener: ActivationPromptDialog.StateChangesListener = object : ActivationPromptDialog.StateChangesListener {
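The ModelDialogFragment side of this contract is not included in the excerpt above; judging from the listener implementation, its callback interface presumably now has the following shape (a sketch, not the actual declaration):

    interface StateChangesListener {
        // Called when the form validates: the chosen model, its token limit, and the end separator.
        fun onSelected(name: String, maxTokens: String, endSeparator: String)

        // Called when validation fails (empty model name or a token limit above the model's maximum).
        fun onFormError(name: String, maxTokens: String, endSeparator: String)
    }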