Skip to content

Commit

Permalink
working on window.ai integration
Browse files Browse the repository at this point in the history
  • Loading branch information
zoan37 committed Jun 6, 2023
1 parent 34bdfb3 commit cc84f67
Show file tree
Hide file tree
Showing 8 changed files with 1,592 additions and 1,456 deletions.
2,816 changes: 1,362 additions & 1,454 deletions poetry.lock

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@ postgrest-py = "^0.10.6"
aiosqlite = "^0.19.0"
hyperdb-python = "^0.1.3"
quart = "^0.18.4"
websocket-client = "^1.5.2"

[tool.black]
line-length = 88
Expand Down
1 change: 1 addition & 0 deletions src/utils/model_name.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,3 +6,4 @@ class ChatModelName(Enum):
GPT4 = "gpt-4"
CLAUDE = "claude-v1"
CLAUDE_INSTANT = "claude-instant-v1"
WINDOW = "window"
3 changes: 3 additions & 0 deletions src/utils/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
from langchain.chat_models.base import BaseChatModel
from langchain.llms import OpenAI
from langchain.schema import BaseMessage
from utils.windowai_model import ChatWindowAI

from .cache import chat_json_cache, json_cache
from .model_name import ChatModelName
Expand All @@ -26,6 +27,8 @@ def get_chat_model(name: ChatModelName, **kwargs) -> BaseChatModel:
return ChatOpenAI(model_name=name.value, **kwargs)
elif name == ChatModelName.CLAUDE:
return ChatAnthropic(model=name.value, **kwargs)
elif name == ChatModelName.WINDOW:
return ChatWindowAI(model_name=name.value, **kwargs)
else:
raise ValueError(f"Invalid model name: {name}")

Expand Down
4 changes: 2 additions & 2 deletions src/utils/parameters.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,11 +21,11 @@
ANNOUNCER_DISCORD_TOKEN = os.getenv("ANNOUNCER_DISCORD_TOKEN")

DEFAULT_SMART_MODEL = (
ChatModelName.TURBO if "--turbo" in sys.argv else ChatModelName.CLAUDE if "--claude" in sys.argv else ChatModelName.GPT4
ChatModelName.TURBO if "--turbo" in sys.argv else ChatModelName.CLAUDE if "--claude" in sys.argv else ChatModelName.WINDOW if "--window" in sys.argv else ChatModelName.GPT4
)

DEFAULT_FAST_MODEL = (
ChatModelName.CLAUDE_INSTANT if "--claude" in sys.argv else ChatModelName.TURBO
ChatModelName.CLAUDE_INSTANT if "--claude" in sys.argv else ChatModelName.WINDOW if "--window" in sys.argv else ChatModelName.TURBO
)


Expand Down
143 changes: 143 additions & 0 deletions src/utils/windowai_model.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,143 @@
from typing import Any, Dict, List, Mapping, Optional, Sequence
import uuid
import time
import langchain
from langchain.chat_models.base import BaseChatModel, SimpleChatModel
from langchain.schema import BaseMessage
from langchain.schema import (
AIMessage,
BaseMessage,
ChatGeneration,
ChatResult,
HumanMessage,
LLMResult,
PromptValue,
)
import asyncio
import websocket
import json


class WindowAIRouter:
    """Process-wide singleton that queues window.ai requests and stores
    their responses, keyed by request ID.

    State lives on the class (shared via the single instance), so every
    module that calls ``WindowAIRouter()`` sees the same queues.
    """

    _instance = None

    # Pending requests waiting to be picked up by the browser-side client.
    window_requests = []
    # Completed responses, keyed by request_id.
    window_responses = {}

    def __new__(cls):
        # Classic lazy singleton: create the one instance on first use.
        # (Removed a leftover debug print that fired on first construction.)
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def add_window_request(self, request):
        """Queue a request dict (expected to carry a unique ``request_id``)."""
        self.window_requests.append(request)

    def add_window_response(self, request_id, response):
        """Record the response for a previously queued request."""
        self.window_responses[request_id] = response

    def get_window_requests(self):
        """Return the pending-request list (a live reference, not a copy)."""
        return self.window_requests

    def get_window_response(self, request_id):
        """Return the stored response for ``request_id``, or None if absent."""
        # Single dict lookup instead of `in` check + subscript.
        return self.window_responses.get(request_id)

    def delete_window_request(self, request):
        """Remove a request previously added via ``add_window_request``.

        Raises ValueError if the request is not in the queue.
        """
        self.window_requests.remove(request)

    def delete_window_response(self, request_id):
        """Discard the stored response for ``request_id``.

        Raises KeyError if no response was recorded for this ID.
        """
        del self.window_responses[request_id]

# Module-level convenience handle; WindowAIRouter.__new__ guarantees every
# instantiation elsewhere returns this same singleton object.
window_router = WindowAIRouter()


class ChatWindowAI(BaseChatModel):
    """Chat model that proxies generation to the window.ai browser
    extension through a local websocket relay (the /windowmodel endpoint
    served by this project's web server).
    """

    model_name: str = "window"
    """Model name to use."""
    temperature: float = 0.7
    """What sampling temperature to use."""

    # TODO: use temperature in window.ai API call

    @property
    def _llm_type(self) -> str:
        """Return type of chat model."""
        return "window-chat"

    def _generate(
        self, messages: List[BaseMessage], stop: Optional[List[str]] = None
    ) -> ChatResult:
        """Wrap the raw string reply from ``_call`` in a langchain ChatResult."""
        output_str = self._call(messages, stop=stop)
        message = AIMessage(content=output_str)
        generation = ChatGeneration(message=message)
        return ChatResult(generations=[generation])

    async def _agenerate(
        self, messages: List[BaseMessage], stop: Optional[List[str]] = None
    ) -> ChatResult:
        # NOTE(review): this blocks the event loop on the websocket
        # round-trip; consider loop.run_in_executor if it becomes slow.
        return self._generate(messages, stop=stop)

    def _call(
        self, messages: List[BaseMessage], stop: Optional[List[str]] = None
    ) -> str:
        """Send the conversation over the relay websocket and return the
        text the browser-side window.ai client produced.

        Propagates websocket-client exceptions if the relay at
        ws://127.0.0.1:5000/windowmodel is unreachable.
        """
        request_id = str(uuid.uuid4())

        # JSON-serializable payload: BaseMessage objects are reduced to
        # plain dicts so the JavaScript client can parse them.
        request = {
            "request_id": request_id,
            "messages": [
                {"type": message.type, "content": message.content}
                for message in messages
            ],
        }

        ws = websocket.WebSocket()
        try:
            ws.connect("ws://127.0.0.1:5000/windowmodel")
            # Bug fix: send JSON, not str(request) — the Python repr of the
            # dict (with BaseMessage objects inside) is unparseable remotely.
            ws.send(json.dumps(request))
            response = ws.recv()
        finally:
            # Close the socket even if connect/send/recv raises.
            ws.close()

        # Bug fix: return the relay's reply instead of the hard-coded
        # "Response!" placeholder that silently discarded the received
        # message.
        return response
56 changes: 56 additions & 0 deletions src/web/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,8 +9,12 @@
from src.utils.database.base import Tables
from src.utils.database.client import get_database

from src.utils.windowai_model import WindowAIRouter

load_dotenv()

window_router = WindowAIRouter()


def get_server():
app = Quart(__name__)
Expand Down Expand Up @@ -89,4 +93,56 @@ async def world_websocket():
{"agents": sorted_agents, "name": worlds[0]["name"]}
)

    @app.websocket("/window")
    async def window_websocket():
        """Websocket endpoint for the browser page (logs.html).

        Placeholder: currently just echoes client data back every 250 ms.
        """
        while True:
            await asyncio.sleep(0.25)

            data = await websocket.receive()

            # client should ping server periodically to see if new requests to process

            print("window_websocket data", data)

            await websocket.send(data)

@app.websocket("/windowmodel")
async def window_model_websocket():
while True:
await asyncio.sleep(0.25)

data = await websocket.receive()

print("window_websocket data", data)

await websocket.send(data)

"""
requests = window_router.get_window_requests()
print("window_websocket requests", requests)
# loop through requests
for request in requests:
request_id = request["request_id"]
messages = request["messages"]
# get response
response = window_router.get_window_response(request_id)
if not response:
print("window_websocket request", request)
# send request
await websocket.send_json(request)
response = await websocket.receive_json()
print("window_websocket response", response)
window_router.add_window_response(response)
"""



return app
24 changes: 24 additions & 0 deletions src/web/templates/logs.html
Original file line number Diff line number Diff line change
Expand Up @@ -77,6 +77,30 @@
};
}, []);

// Connect to the backend /window relay socket so model requests can be
// handed to the window.ai browser extension.
React.useEffect(() => {
const socket = new WebSocket('ws://' + window.location.host + '/window');

socket.onmessage = (e) => {
console.log('window message');
console.log(e);

// window.ai is injected by the browser extension; without it this page
// cannot service model requests coming from the backend.
if (!window.ai) {
alert('window.ai not found. Please install at https://windowai.io/');
return;
}



const data = JSON.parse(e.data);

// call window.ai extension
};

// Cleanup: close the socket when the component unmounts.
return () => {
socket.close();
};
}, []);



return (
Expand Down

0 comments on commit cc84f67

Please sign in to comment.